diff --git a/ffmpeg/CMakeLists.txt b/ffmpeg/CMakeLists.txt index d7a4fc6..8682f76 100644 --- a/ffmpeg/CMakeLists.txt +++ b/ffmpeg/CMakeLists.txt @@ -8,6 +8,8 @@ set(PROJECT_SOURCES event/seekevent.hpp event/trackevent.hpp event/valueevent.hpp + filter/filter.cc + filter/filter.hpp filter/filtercontext.cc filter/filtercontext.hpp filter/filtergraph.cc diff --git a/ffmpeg/audiofifo.hpp b/ffmpeg/audiofifo.hpp index 7192404..72f2bc1 100644 --- a/ffmpeg/audiofifo.hpp +++ b/ffmpeg/audiofifo.hpp @@ -12,12 +12,12 @@ class AudioFifo : public QObject explicit AudioFifo(CodecContext *ctx, QObject *parent = nullptr); ~AudioFifo() override; - bool realloc(int nb_samples); + auto realloc(int nb_samples) -> bool; - bool write(void **data, int nb_samples); - bool read(void **data, int nb_samples); + auto write(void **data, int nb_samples) -> bool; + auto read(void **data, int nb_samples) -> bool; - int size() const; + auto size() const -> int; private: class AudioFifoPrivtate; diff --git a/ffmpeg/audioframeconverter.cpp b/ffmpeg/audioframeconverter.cpp index f87c136..a914955 100644 --- a/ffmpeg/audioframeconverter.cpp +++ b/ffmpeg/audioframeconverter.cpp @@ -121,19 +121,23 @@ AudioFrameConverter::AudioFrameConverter(CodecContext *codecCtx, { d_ptr->format = format; d_ptr->avSampleFormat = getAVSampleFormat(d_ptr->format.sampleFormat()); - auto channelLayout = getChannelLayout(d_ptr->format.channelConfig()); auto *avCodecCtx = codecCtx->avCodecCtx(); - d_ptr->swrContext = swr_alloc_set_opts(d_ptr->swrContext, - channelLayout, - d_ptr->avSampleFormat, - d_ptr->format.sampleRate(), - avCodecCtx->ch_layout.u.mask, - avCodecCtx->sample_fmt, - avCodecCtx->sample_rate, - 0, - nullptr); - - int ret = swr_init(d_ptr->swrContext); + AVChannelLayout channelLayout = {AV_CHANNEL_ORDER_UNSPEC}; + av_channel_layout_default(&channelLayout, d_ptr->format.channelCount()); + // av_channel_layout_from_mask(&channelLayout, getChannelLayout(d_ptr->format.channelConfig())); + auto ret = 
swr_alloc_set_opts2(&d_ptr->swrContext, + &channelLayout, + d_ptr->avSampleFormat, + d_ptr->format.sampleRate(), + &avCodecCtx->ch_layout, + avCodecCtx->sample_fmt, + avCodecCtx->sample_rate, + 0, + nullptr); + if (ret != 0) { + SET_ERROR_CODE(ret); + } + ret = swr_init(d_ptr->swrContext); if (ret < 0) { SET_ERROR_CODE(ret); } @@ -186,8 +190,8 @@ auto getAudioFormatFromCodecCtx(CodecContext *codecCtx, int &sampleSize) -> QAud autioFormat.setChannelCount(ctx->ch_layout.nb_channels); //autioFormat.setByteOrder(QAudioFormat::LittleEndian); - if (ctx->channel_layout <= 0) { - ctx->channel_layout = getChannaLayoutFromChannalCount(ctx->ch_layout.nb_channels); + if (ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) { + av_channel_layout_default(&ctx->ch_layout, ctx->ch_layout.nb_channels); } auto channelConfig = getChannelConfig(ctx->ch_layout.u.mask); if (channelConfig == QAudioFormat::ChannelConfigUnknown) { @@ -221,8 +225,8 @@ auto getAudioFormatFromCodecCtx(CodecContext *codecCtx, int &sampleSize) -> QAud sampleSize = 8 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16); } - qInfo() << "Current Audio parameters:" << ctx->sample_rate << ctx->channels - << ctx->channel_layout << ctx->sample_fmt; + qInfo() << "Current Audio parameters:" << ctx->sample_rate << ctx->ch_layout.nb_channels + << ctx->ch_layout.u.mask << ctx->sample_fmt; qInfo() << autioFormat << autioFormat.channelConfig(); return autioFormat; diff --git a/ffmpeg/avcontextinfo.h b/ffmpeg/avcontextinfo.h index 262d313..73eae72 100644 --- a/ffmpeg/avcontextinfo.h +++ b/ffmpeg/avcontextinfo.h @@ -67,6 +67,8 @@ class FFMPEG_EXPORT AVContextInfo : public QObject QScopedPointer d_ptr; }; +using AVContextInfoPtr = QSharedPointer; + } // namespace Ffmpeg #endif // AVCONTEXTINFO_H diff --git a/ffmpeg/filter/filter.cc b/ffmpeg/filter/filter.cc new file mode 100644 index 0000000..0a62f3f --- /dev/null +++ b/ffmpeg/filter/filter.cc @@ -0,0 +1,143 @@ +#include "filter.hpp" +#include "filtercontext.hpp" +#include 
"filtergraph.hpp" +#include "filterinout.hpp" + +#include +#include +#include + +#include + +extern "C" { +#include +#include +} + +namespace Ffmpeg { + +class Filter::FilterPrivate +{ +public: + explicit FilterPrivate(Filter *q) + : q_ptr(q) + { + filterGraph = new FilterGraph(q_ptr); + } + + void initVideoFilter(Frame *frame) + { + auto *avCodecCtx = decContextInfo->codecCtx()->avCodecCtx(); + buffersrcCtx = new FilterContext("buffer", q_ptr); + buffersinkCtx = new FilterContext("buffersink", q_ptr); + auto timeBase = decContextInfo->timebase(); + auto args + = QString::asprintf("video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", + avCodecCtx->width, + avCodecCtx->height, + frame->avFrame()->format, //dec_ctx->pix_fmt, + timeBase.num, + timeBase.den, + avCodecCtx->sample_aspect_ratio.num, + avCodecCtx->sample_aspect_ratio.den); + qDebug() << "Video filter in args:" << args; + + create(args); + } + + void initAudioFilter() + { + auto *avCodecCtx = decContextInfo->codecCtx()->avCodecCtx(); + buffersrcCtx = new FilterContext("abuffer", q_ptr); + buffersinkCtx = new FilterContext("abuffersink", q_ptr); + if (avCodecCtx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) { + av_channel_layout_default(&avCodecCtx->ch_layout, avCodecCtx->ch_layout.nb_channels); + } + char buf[64]; + av_channel_layout_describe(&avCodecCtx->ch_layout, buf, sizeof(buf)); + auto args = QString::asprintf( + "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=%s" PRIx64, + 1, + avCodecCtx->sample_rate, + avCodecCtx->sample_rate, + av_get_sample_fmt_name(avCodecCtx->sample_fmt), + buf); + qDebug() << "Audio filter in args:" << args; + + create(args); + } + + void create(const QString &args) + { + buffersrcCtx->create("in", args, filterGraph); + buffersinkCtx->create("out", "", filterGraph); + } + + void config(const QString &filterSpec) + { + QScopedPointer fliterOutPtr(new FilterInOut); + QScopedPointer fliterInPtr(new FilterInOut); + auto *outputs = 
fliterOutPtr->avFilterInOut(); + auto *inputs = fliterInPtr->avFilterInOut(); + /* Endpoints for the filter graph. */ + outputs->name = av_strdup("in"); + outputs->filter_ctx = buffersrcCtx->avFilterContext(); + outputs->pad_idx = 0; + outputs->next = nullptr; + + inputs->name = av_strdup("out"); + inputs->filter_ctx = buffersinkCtx->avFilterContext(); + inputs->pad_idx = 0; + inputs->next = nullptr; + + filterGraph->parse(filterSpec, fliterInPtr.data(), fliterOutPtr.data()); + filterGraph->config(); + + // if (!(enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) { + // buffersink_ctx->buffersink_setFrameSize(enc_ctx->frame_size); + // } + } + + Filter *q_ptr; + + AVContextInfo *decContextInfo; + FilterContext *buffersrcCtx; + FilterContext *buffersinkCtx; + FilterGraph *filterGraph; +}; + +Filter::Filter(AVContextInfo *decContextInfo, QObject *parent) + : QObject{parent} + , d_ptr(new FilterPrivate(this)) +{ + d_ptr->decContextInfo = decContextInfo; +} + +Filter::~Filter() = default; + +auto Filter::init(Frame *frame) -> bool +{ + switch (d_ptr->decContextInfo->mediaType()) { + case AVMEDIA_TYPE_AUDIO: d_ptr->initAudioFilter(); break; + case AVMEDIA_TYPE_VIDEO: d_ptr->initVideoFilter(frame); break; + default: return false; + } + + return true; +} + +auto Filter::filterFrame(Frame *frame) -> QVector +{ + QVector framepPtrs{}; + if (!d_ptr->buffersrcCtx->buffersrcAddFrameFlags(frame)) { + return framepPtrs; + } + std::unique_ptr framePtr(new Frame); + for (; d_ptr->buffersinkCtx->buffersinkGetFrame(framePtr.get()); framePtr.reset(new Frame)) { + framePtr->setPictType(AV_PICTURE_TYPE_NONE); + framepPtrs.emplace_back(framePtr.release()); + } + return framepPtrs; +} + +} // namespace Ffmpeg diff --git a/ffmpeg/filter/filter.hpp b/ffmpeg/filter/filter.hpp new file mode 100644 index 0000000..530277e --- /dev/null +++ b/ffmpeg/filter/filter.hpp @@ -0,0 +1,29 @@ +#ifndef FILTER_HPP +#define FILTER_HPP + +#include "frame.hpp" +#include + +namespace Ffmpeg { + +class AVContextInfo;
+class Frame; +class Filter : public QObject +{ + Q_OBJECT +public: + explicit Filter(AVContextInfo *decContextInfo, QObject *parent = nullptr); + ~Filter() override; + + auto init(Frame *frame) -> bool; + + auto filterFrame(Frame *frame) -> QVector; + +private: + class FilterPrivate; + QScopedPointer d_ptr; +}; + +} // namespace Ffmpeg + +#endif // FILTER_HPP diff --git a/ffmpeg/filter/filter.pri b/ffmpeg/filter/filter.pri index cdff9df..b358936 100644 --- a/ffmpeg/filter/filter.pri +++ b/ffmpeg/filter/filter.pri @@ -1,9 +1,11 @@ HEADERS += \ + $$PWD/filter.hpp \ $$PWD/filtercontext.hpp \ $$PWD/filtergraph.hpp \ $$PWD/filterinout.hpp SOURCES += \ + $$PWD/filter.cc \ $$PWD/filtercontext.cc \ $$PWD/filtergraph.cc \ $$PWD/filterinout.cc diff --git a/ffmpeg/filter/filtercontext.cc b/ffmpeg/filter/filtercontext.cc index dc8df52..48b1b29 100644 --- a/ffmpeg/filter/filtercontext.cc +++ b/ffmpeg/filter/filtercontext.cc @@ -17,7 +17,7 @@ namespace Ffmpeg { class FilterContext::FilterContextPrivate { public: - FilterContextPrivate(FilterContext *q) + explicit FilterContextPrivate(FilterContext *q) : q_ptr(q) {} @@ -40,14 +40,15 @@ FilterContext::FilterContext(const QString &name, QObject *parent) d_ptr->createFilter(name); } -FilterContext::~FilterContext() {} +FilterContext::~FilterContext() = default; -bool FilterContext::isValid() +auto FilterContext::isValid() -> bool { return nullptr != d_ptr->filter; } -bool FilterContext::create(const QString &name, const QString &args, FilterGraph *filterGraph) +auto FilterContext::create(const QString &name, const QString &args, FilterGraph *filterGraph) + -> bool { auto ret = avfilter_graph_create_filter(&d_ptr->filterContext, d_ptr->filter, @@ -58,7 +59,7 @@ bool FilterContext::create(const QString &name, const QString &args, FilterGraph ERROR_RETURN(ret) } -bool FilterContext::buffersrc_addFrameFlags(Frame *frame) +auto FilterContext::buffersrcAddFrameFlags(Frame *frame) -> bool { auto ret = 
av_buffersrc_add_frame_flags(d_ptr->filterContext, frame->avFrame(), @@ -66,7 +67,7 @@ bool FilterContext::buffersrc_addFrameFlags(Frame *frame) ERROR_RETURN(ret) } -bool FilterContext::buffersink_getFrame(Frame *frame) +auto FilterContext::buffersinkGetFrame(Frame *frame) -> bool { auto ret = av_buffersink_get_frame(d_ptr->filterContext, frame->avFrame()); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { @@ -75,12 +76,12 @@ bool FilterContext::buffersink_getFrame(Frame *frame) ERROR_RETURN(ret) } -void FilterContext::buffersink_setFrameSize(int size) +void FilterContext::buffersinkSetFrameSize(int size) { av_buffersink_set_frame_size(d_ptr->filterContext, size); } -AVFilterContext *FilterContext::avFilterContext() +auto FilterContext::avFilterContext() -> AVFilterContext * { return d_ptr->filterContext; } diff --git a/ffmpeg/filter/filtercontext.hpp b/ffmpeg/filter/filtercontext.hpp index c0517e6..423ac55 100644 --- a/ffmpeg/filter/filtercontext.hpp +++ b/ffmpeg/filter/filtercontext.hpp @@ -16,17 +16,17 @@ class FilterContext : public QObject // Video: {buffer, buffersink} // Audio: {abuffer, abuffersink} explicit FilterContext(const QString &name, QObject *parent = nullptr); - ~FilterContext(); + ~FilterContext() override; - bool isValid(); + auto isValid() -> bool; - bool create(const QString &name, const QString &args, FilterGraph *filterGraph); + auto create(const QString &name, const QString &args, FilterGraph *filterGraph) -> bool; - bool buffersrc_addFrameFlags(Frame *frame); - bool buffersink_getFrame(Frame *frame); - void buffersink_setFrameSize(int size); + auto buffersrcAddFrameFlags(Frame *frame) -> bool; + auto buffersinkGetFrame(Frame *frame) -> bool; + void buffersinkSetFrameSize(int size); - AVFilterContext *avFilterContext(); + auto avFilterContext() -> AVFilterContext *; private: class FilterContextPrivate; diff --git a/ffmpeg/filter/filtergraph.cc b/ffmpeg/filter/filtergraph.cc index e702fdc..1e7d637 100644 --- a/ffmpeg/filter/filtergraph.cc 
+++ b/ffmpeg/filter/filtergraph.cc @@ -12,7 +12,7 @@ namespace Ffmpeg { class FilterGraph::FilterGraphPrivate { public: - FilterGraphPrivate(FilterGraph *q) + explicit FilterGraphPrivate(FilterGraph *q) : q_ptr(q) { filterGraph = avfilter_graph_alloc(); @@ -35,12 +35,12 @@ FilterGraph::FilterGraph(QObject *parent) , d_ptr(new FilterGraphPrivate(this)) {} -FilterGraph::~FilterGraph() {} +FilterGraph::~FilterGraph() = default; -bool FilterGraph::parse(const QString &filters, FilterInOut *in, FilterInOut *out) +auto FilterGraph::parse(const QString &filters, FilterInOut *in, FilterInOut *out) -> bool { - auto inputs = in->avFilterInOut(); - auto outputs = out->avFilterInOut(); + auto *inputs = in->avFilterInOut(); + auto *outputs = out->avFilterInOut(); auto ret = avfilter_graph_parse_ptr(d_ptr->filterGraph, filters.toLocal8Bit().constData(), &inputs, @@ -51,13 +51,13 @@ bool FilterGraph::parse(const QString &filters, FilterInOut *in, FilterInOut *ou ERROR_RETURN(ret) } -bool FilterGraph::config() +auto FilterGraph::config() -> bool { auto ret = avfilter_graph_config(d_ptr->filterGraph, nullptr); ERROR_RETURN(ret) } -AVFilterGraph *FilterGraph::avFilterGraph() +auto FilterGraph::avFilterGraph() -> AVFilterGraph * { return d_ptr->filterGraph; } diff --git a/ffmpeg/filter/filtergraph.hpp b/ffmpeg/filter/filtergraph.hpp index 7969366..0dc6c1b 100644 --- a/ffmpeg/filter/filtergraph.hpp +++ b/ffmpeg/filter/filtergraph.hpp @@ -12,13 +12,13 @@ class FilterGraph : public QObject { public: explicit FilterGraph(QObject *parent = nullptr); - ~FilterGraph(); + ~FilterGraph() override; - bool parse(const QString &filters, FilterInOut *in, FilterInOut *out); + auto parse(const QString &filters, FilterInOut *in, FilterInOut *out) -> bool; - bool config(); + auto config() -> bool; - AVFilterGraph *avFilterGraph(); + auto avFilterGraph() -> AVFilterGraph *; private: class FilterGraphPrivate; diff --git a/ffmpeg/filter/filterinout.cc b/ffmpeg/filter/filterinout.cc index 
362226f..c41217b 100644 --- a/ffmpeg/filter/filterinout.cc +++ b/ffmpeg/filter/filterinout.cc @@ -9,7 +9,7 @@ namespace Ffmpeg { class FilterInOut::FilterInOutPrivate { public: - FilterInOutPrivate(QObject *parent) + explicit FilterInOutPrivate(QObject *parent) : owner(parent) { inOut = avfilter_inout_alloc(); @@ -26,9 +26,9 @@ FilterInOut::FilterInOut(QObject *parent) , d_ptr(new FilterInOutPrivate(this)) {} -FilterInOut::~FilterInOut() {} +FilterInOut::~FilterInOut() = default; -AVFilterInOut *FilterInOut::avFilterInOut() +auto FilterInOut::avFilterInOut() -> AVFilterInOut * { return d_ptr->inOut; } diff --git a/ffmpeg/filter/filterinout.hpp b/ffmpeg/filter/filterinout.hpp index 23dc7b7..b106140 100644 --- a/ffmpeg/filter/filterinout.hpp +++ b/ffmpeg/filter/filterinout.hpp @@ -12,9 +12,9 @@ class FilterInOut : public QObject Q_OBJECT public: explicit FilterInOut(QObject *parent = nullptr); - ~FilterInOut(); + ~FilterInOut() override; - AVFilterInOut *avFilterInOut(); + auto avFilterInOut() -> AVFilterInOut *; void setAVFilterInOut(AVFilterInOut *avFilterInOut); private: diff --git a/ffmpeg/gpu/hardwaredecode.hpp b/ffmpeg/gpu/hardwaredecode.hpp index 9d36219..799b0da 100644 --- a/ffmpeg/gpu/hardwaredecode.hpp +++ b/ffmpeg/gpu/hardwaredecode.hpp @@ -13,7 +13,7 @@ class HardWareDecode : public QObject { public: explicit HardWareDecode(QObject *parent = nullptr); - ~HardWareDecode(); + ~HardWareDecode() override; auto initPixelFormat(const AVCodec *decoder) -> bool; auto initHardWareDevice(CodecContext *codecContext) -> bool; diff --git a/ffmpeg/gpu/hardwareencode.hpp b/ffmpeg/gpu/hardwareencode.hpp index 4778d57..38137e9 100644 --- a/ffmpeg/gpu/hardwareencode.hpp +++ b/ffmpeg/gpu/hardwareencode.hpp @@ -17,7 +17,7 @@ class HardWareEncode : public QObject { public: explicit HardWareEncode(QObject *parent = nullptr); - ~HardWareEncode(); + ~HardWareEncode() override; auto initEncoder(const AVCodec *encoder) -> bool; auto initHardWareDevice(CodecContext *codecContext) 
-> bool; diff --git a/ffmpeg/transcode.cc b/ffmpeg/transcode.cc index 8c61a93..ea324f5 100644 --- a/ffmpeg/transcode.cc +++ b/ffmpeg/transcode.cc @@ -36,13 +36,13 @@ struct TranscodeContext int64_t audioPts = 0; }; -bool init_filter(TranscodeContext *transcodeContext, const char *filter_spec, Frame *frame) +auto init_filter(TranscodeContext *transcodeContext, const char *filter_spec, Frame *frame) -> bool { QSharedPointer buffersrc_ctx; QSharedPointer buffersink_ctx; QSharedPointer filter_graph(new FilterGraph); - auto dec_ctx = transcodeContext->decContextInfoPtr->codecCtx()->avCodecCtx(); - auto enc_ctx = transcodeContext->encContextInfoPtr->codecCtx()->avCodecCtx(); + auto *dec_ctx = transcodeContext->decContextInfoPtr->codecCtx()->avCodecCtx(); + auto *enc_ctx = transcodeContext->encContextInfoPtr->codecCtx()->avCodecCtx(); switch (transcodeContext->decContextInfoPtr->mediaType()) { case AVMEDIA_TYPE_VIDEO: { buffersrc_ctx.reset(new FilterContext("buffer")); @@ -63,7 +63,7 @@ bool init_filter(TranscodeContext *transcodeContext, const char *filter_spec, Fr auto pix_fmt = transcodeContext->encContextInfoPtr->pixfmt(); av_opt_set_bin(buffersink_ctx->avFilterContext(), "pix_fmts", - (uint8_t *) &pix_fmt, + reinterpret_cast(&pix_fmt), sizeof(pix_fmt), AV_OPT_SEARCH_CHILDREN); // av_opt_set_bin(buffersink_ctx->avFilterContext(), @@ -75,7 +75,7 @@ bool init_filter(TranscodeContext *transcodeContext, const char *filter_spec, Fr case AVMEDIA_TYPE_AUDIO: { buffersrc_ctx.reset(new FilterContext("abuffer")); buffersink_ctx.reset(new FilterContext("abuffersink")); - if (!dec_ctx->channel_layout) { + if (dec_ctx->channel_layout == 0U) { dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels); } auto args = QString::asprintf( @@ -90,17 +90,17 @@ bool init_filter(TranscodeContext *transcodeContext, const char *filter_spec, Fr buffersink_ctx->create("out", "", filter_graph.data()); av_opt_set_bin(buffersink_ctx->avFilterContext(), "sample_rates", - (uint8_t 
*) &enc_ctx->sample_rate, + reinterpret_cast(&enc_ctx->sample_rate), sizeof(enc_ctx->sample_rate), AV_OPT_SEARCH_CHILDREN); av_opt_set_bin(buffersink_ctx->avFilterContext(), "sample_fmts", - (uint8_t *) &enc_ctx->sample_fmt, + reinterpret_cast(&enc_ctx->sample_fmt), sizeof(enc_ctx->sample_fmt), AV_OPT_SEARCH_CHILDREN); av_opt_set_bin(buffersink_ctx->avFilterContext(), "channel_layouts", - (uint8_t *) &enc_ctx->channel_layout, + reinterpret_cast(&enc_ctx->channel_layout), sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN); } break; @@ -109,8 +109,8 @@ bool init_filter(TranscodeContext *transcodeContext, const char *filter_spec, Fr QScopedPointer fliterOut(new FilterInOut); QScopedPointer fliterIn(new FilterInOut); - auto outputs = fliterOut->avFilterInOut(); - auto inputs = fliterIn->avFilterInOut(); + auto *outputs = fliterOut->avFilterInOut(); + auto *inputs = fliterIn->avFilterInOut(); /* Endpoints for the filter graph. */ outputs->name = av_strdup("in"); outputs->filter_ctx = buffersrc_ctx->avFilterContext(); @@ -139,7 +139,7 @@ bool init_filter(TranscodeContext *transcodeContext, const char *filter_spec, Fr class Transcode::TranscodePrivate { public: - TranscodePrivate(QObject *parent) + explicit TranscodePrivate(QObject *parent) : owner(parent) , inFormatContext(new FormatContext(owner)) , outFormatContext(new FormatContext(owner)) @@ -148,7 +148,7 @@ class Transcode::TranscodePrivate ~TranscodePrivate() { reset(); } - bool openInputFile() + auto openInputFile() -> bool { Q_ASSERT(!inFilePath.isEmpty()); auto ret = inFormatContext->openFilePath(inFilePath); @@ -158,11 +158,11 @@ class Transcode::TranscodePrivate inFormatContext->findStream(); auto stream_num = inFormatContext->streams(); for (int i = 0; i < stream_num; i++) { - auto transContext = new TranscodeContext; + auto *transContext = new TranscodeContext; transcodeContexts.append(transContext); - auto stream = inFormatContext->stream(i); + auto *stream = inFormatContext->stream(i); auto 
codec_type = stream->codecpar->codec_type; - if (stream->disposition & AV_DISPOSITION_ATTACHED_PIC) { + if ((stream->disposition & AV_DISPOSITION_ATTACHED_PIC) != 0) { continue; } QSharedPointer contextInfoPtr; @@ -185,7 +185,7 @@ class Transcode::TranscodePrivate return true; } - bool openOutputFile() + auto openOutputFile() -> bool { Q_ASSERT(!outFilepath.isEmpty()); auto ret = outFormatContext->openFilePath(outFilepath, FormatContext::WriteOnly); @@ -195,9 +195,9 @@ class Transcode::TranscodePrivate outFormatContext->copyChapterFrom(inFormatContext); auto stream_num = inFormatContext->streams(); for (int i = 0; i < stream_num; i++) { - auto inStream = inFormatContext->stream(i); - auto stream = outFormatContext->createStream(); - if (!stream) { + auto *inStream = inFormatContext->stream(i); + auto *stream = outFormatContext->createStream(); + if (stream == nullptr) { return false; } av_dict_copy(&stream->metadata, inStream->metadata, 0); @@ -206,13 +206,13 @@ class Transcode::TranscodePrivate stream->sample_aspect_ratio = inStream->sample_aspect_ratio; stream->avg_frame_rate = inStream->avg_frame_rate; stream->event_flags = inStream->event_flags; - auto transContext = transcodeContexts.at(i); + auto *transContext = transcodeContexts.at(i); auto decContextInfo = transContext->decContextInfoPtr; - if (inStream->disposition & AV_DISPOSITION_ATTACHED_PIC) { + if ((inStream->disposition & AV_DISPOSITION_ATTACHED_PIC) != 0) { auto ret = avcodec_parameters_copy(stream->codecpar, inStream->codecpar); if (ret < 0) { qErrnoWarning("Copying parameters for stream #%u failed", i); - return ret; + return false; } stream->time_base = inStream->time_base; stream->codecpar->width = inStream->codecpar->width > 0 ? inStream->codecpar->width @@ -233,8 +233,8 @@ class Transcode::TranscodePrivate ?
audioEncoderName : videoEncoderName); //contextInfoPtr->initEncoder(decContextInfo->codecCtx()->avCodecCtx()->codec_id); - auto codecCtx = contextInfoPtr->codecCtx(); - auto avCodecCtx = codecCtx->avCodecCtx(); + auto *codecCtx = contextInfoPtr->codecCtx(); + auto *avCodecCtx = codecCtx->avCodecCtx(); decContextInfo->codecCtx()->copyToCodecParameters(codecCtx); // ffmpeg example transcoding.c ? framerate, sample_rate codecCtx->avCodecCtx()->time_base = decContextInfo->timebase(); @@ -248,7 +248,8 @@ class Transcode::TranscodePrivate codecCtx->setMaxBitrate(maxBitrate); codecCtx->setProfile(profile); } - if (outFormatContext->avFormatContext()->oformat->flags & AVFMT_GLOBALHEADER) { + if ((outFormatContext->avFormatContext()->oformat->flags & AVFMT_GLOBALHEADER) + != 0) { avCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; } contextInfoPtr->openCodec(AVContextInfo::GpuEncode); @@ -256,7 +257,7 @@ class Transcode::TranscodePrivate contextInfoPtr->codecCtx()->avCodecCtx()); if (ret < 0) { SET_ERROR_CODE(ret); - return ret; + return false; } stream->time_base = decContextInfo->timebase(); transContext->encContextInfoPtr = contextInfoPtr; @@ -268,7 +269,7 @@ class Transcode::TranscodePrivate auto ret = avcodec_parameters_copy(stream->codecpar, inStream->codecpar); if (ret < 0) { SET_ERROR_CODE(ret); - return ret; + return false; } stream->time_base = inStream->time_base; } break; @@ -281,7 +282,7 @@ class Transcode::TranscodePrivate void initFilters(int stream_index, Frame *frame) { - auto transcodeCtx = transcodeContexts.at(stream_index); + auto *transcodeCtx = transcodeContexts.at(stream_index); if (transcodeCtx->decContextInfoPtr.isNull()) { return; } @@ -320,7 +321,7 @@ class Transcode::TranscodePrivate { auto stream_num = inFormatContext->streams(); for (int i = 0; i < stream_num; i++) { - auto transCtx = transcodeContexts.at(i); + auto *transCtx = transcodeContexts.at(i); if (transCtx->decContextInfoPtr.isNull()) { continue; } @@ -349,14 +350,14 @@ class
Transcode::TranscodePrivate reset(); } - bool filterEncodeWriteframe(Frame *frame, uint stream_index) + auto filterEncodeWriteframe(Frame *frame, uint stream_index) -> bool { - auto transcodeCtx = transcodeContexts.at(stream_index); - if (!transcodeCtx->buffersrcCtxPtr->buffersrc_addFrameFlags(frame)) { + auto *transcodeCtx = transcodeContexts.at(stream_index); + if (!transcodeCtx->buffersrcCtxPtr->buffersrcAddFrameFlags(frame)) { return false; } QSharedPointer framePtr(new Frame); - while (transcodeCtx->buffersinkCtxPtr->buffersink_getFrame(framePtr.data())) { + while (transcodeCtx->buffersinkCtxPtr->buffersinkGetFrame(framePtr.data())) { framePtr->setPictType(AV_PICTURE_TYPE_NONE); if (transcodeCtx->audioFifoPtr.isNull()) { encodeWriteFrame(stream_index, 0, framePtr); @@ -382,9 +383,9 @@ class Transcode::TranscodePrivate } } - bool addSamplesToFifo(Frame *frame, uint stream_index) + auto addSamplesToFifo(Frame *frame, uint stream_index) -> bool { - auto transcodeCtx = transcodeContexts.at(stream_index); + auto *transcodeCtx = transcodeContexts.at(stream_index); auto audioFifoPtr = transcodeCtx->audioFifoPtr; if (audioFifoPtr.isNull()) { return false; @@ -397,23 +398,24 @@ class Transcode::TranscodePrivate if (!audioFifoPtr->realloc(audioFifoPtr->size() + frame->avFrame()->nb_samples)) { return false; } - return audioFifoPtr->write((void **) frame->avFrame()->data, frame->avFrame()->nb_samples); + return audioFifoPtr->write(reinterpret_cast(frame->avFrame()->data), + frame->avFrame()->nb_samples); } - QSharedPointer takeSamplesFromFifo(uint stream_index, bool finished = false) + auto takeSamplesFromFifo(uint stream_index, bool finished = false) -> QSharedPointer { - auto transcodeCtx = transcodeContexts.at(stream_index); + auto *transcodeCtx = transcodeContexts.at(stream_index); auto audioFifoPtr = transcodeCtx->audioFifoPtr; if (audioFifoPtr.isNull()) { return nullptr; } - auto enc_ctx = transcodeCtx->encContextInfoPtr->codecCtx()->avCodecCtx(); + auto *enc_ctx = 
transcodeCtx->encContextInfoPtr->codecCtx()->avCodecCtx(); if (audioFifoPtr->size() < enc_ctx->frame_size && !finished) { return nullptr; } const int frame_size = FFMIN(audioFifoPtr->size(), enc_ctx->frame_size); QSharedPointer framePtr(new Frame); - auto frame = framePtr->avFrame(); + auto *frame = framePtr->avFrame(); frame->nb_samples = frame_size; frame->channel_layout = enc_ctx->channel_layout; frame->format = enc_ctx->sample_fmt; @@ -421,7 +423,7 @@ class Transcode::TranscodePrivate if (!framePtr->getBuffer()) { return nullptr; } - if (!audioFifoPtr->read((void **) framePtr->avFrame()->data, frame_size)) { + if (!audioFifoPtr->read(reinterpret_cast(framePtr->avFrame()->data), frame_size)) { return nullptr; } // fix me? @@ -433,11 +435,11 @@ class Transcode::TranscodePrivate return framePtr; } - bool encodeWriteFrame(uint stream_index, int flush, QSharedPointer framePtr) + auto encodeWriteFrame(uint stream_index, int flush, QSharedPointer framePtr) -> bool { - auto transcodeCtx = transcodeContexts.at(stream_index); + auto *transcodeCtx = transcodeContexts.at(stream_index); std::vector packetPtrs{}; - if (flush) { + if (flush != 0) { QSharedPointer frame_tmp_ptr(new Frame); frame_tmp_ptr->destroyFrame(); packetPtrs = transcodeCtx->encContextInfoPtr->encodeFrame(frame_tmp_ptr); @@ -453,17 +455,17 @@ class Transcode::TranscodePrivate return true; } - bool flushEncoder(uint stream_index) + auto flushEncoder(uint stream_index) -> bool { - auto codecCtx + auto *codecCtx = transcodeContexts.at(stream_index)->encContextInfoPtr->codecCtx()->avCodecCtx(); - if (!(codecCtx->codec->capabilities & AV_CODEC_CAP_DELAY)) { + if ((codecCtx->codec->capabilities & AV_CODEC_CAP_DELAY) == 0) { return true; } return encodeWriteFrame(stream_index, 1, nullptr); } - bool setInMediaIndex(AVContextInfo *contextInfo, int index) + auto setInMediaIndex(AVContextInfo *contextInfo, int index) -> bool { contextInfo->setIndex(index); contextInfo->setStream(inFormatContext->stream(index)); @@ 
-590,7 +592,7 @@ void Transcode::setSubtitleFilename(const QString &filename) d_ptr->subtitleFilename.replace('/', "\\\\"); auto index = d_ptr->subtitleFilename.indexOf(":\\"); if (index > 0) { - d_ptr->subtitleFilename.insert(index, char('\\')); + d_ptr->subtitleFilename.insert(index, ('\\')); } } @@ -620,12 +622,12 @@ void Transcode::setPreset(const QString &preset) d_ptr->preset = preset; } -QString Transcode::preset() const +auto Transcode::preset() const -> QString { return d_ptr->preset; } -QStringList Transcode::presets() const +auto Transcode::presets() const -> QStringList { return d_ptr->presets; } @@ -636,12 +638,12 @@ void Transcode::setTune(const QString &tune) d_ptr->tune = tune; } -QString Transcode::tune() const +auto Transcode::tune() const -> QString { return d_ptr->tune; } -QStringList Transcode::tunes() const +auto Transcode::tunes() const -> QStringList { return d_ptr->tunes; } @@ -652,12 +654,12 @@ void Transcode::setProfile(const QString &profile) d_ptr->profile = profile; } -QString Transcode::profile() const +auto Transcode::profile() const -> QString { return d_ptr->profile; } -QStringList Transcode::profiles() const +auto Transcode::profiles() const -> QStringList { return d_ptr->profiles; } @@ -679,7 +681,7 @@ void Transcode::stopTranscode() d_ptr->reset(); } -float Transcode::fps() +auto Transcode::fps() -> float { return d_ptr->fpsPtr->getFps(); } @@ -718,7 +720,7 @@ void Transcode::loop() break; } auto stream_index = packetPtr->streamIndex(); - auto transcodeCtx = d_ptr->transcodeContexts.at(stream_index); + auto *transcodeCtx = d_ptr->transcodeContexts.at(stream_index); auto encContextInfoPtr = transcodeCtx->encContextInfoPtr; if (encContextInfoPtr.isNull()) { packetPtr->rescaleTs(d_ptr->inFormatContext->stream(stream_index)->time_base, diff --git a/ffmpeg/videorender/openglshader.cc b/ffmpeg/videorender/openglshader.cc index 0bc17d0..5331170 100644 --- a/ffmpeg/videorender/openglshader.cc +++ b/ffmpeg/videorender/openglshader.cc @@ 
-86,7 +86,7 @@ auto OpenglShader::generate(Frame *frame, // Tone map if (type == Tonemap::AUTO && ShaderUtils::trcIsHdr(avFrame->color_trc)) { - type = Tonemap::ACES_APPROX; + type = Tonemap::FILMIC; } Tonemap::toneMap(header, frag, type); diff --git a/ffmpeg/videorender/shader/tonemap.frag b/ffmpeg/videorender/shader/tonemap.frag index 29b561d..79bcf54 100644 --- a/ffmpeg/videorender/shader/tonemap.frag +++ b/ffmpeg/videorender/shader/tonemap.frag @@ -1,33 +1,17 @@ -float luminance(vec3 color) +vec3 clip(vec3 color) { - return dot(color, vec3(0.2126f, 0.7152f, 0.0722f)); + return clamp(color, vec3(0.0), vec3(1.0)); } -float lerp(float a, float b, float t) +vec3 linear(vec3 color) { - return a * (1.0f - t) + b * t; + return color; } -vec3 lerp(vec3 a, vec3 b, vec3 t) +vec3 gamma(vec3 color) { - return vec3(lerp(a.x, b.x, t.x), lerp(a.y, b.y, t.y), lerp(a.z, b.z, t.z)); -} - -vec3 mul(const mat3 m, const vec3 v) -{ - vec3 result; - result.x = dot(m[0], v); - result.y = dot(m[1], v); - result.z = dot(m[2], v); - return result; -} - -vec3 rtt_and_odt_fit(vec3 v) -{ - vec3 a = v * (v + vec3(0.0245786)) - vec3(0.000090537); - vec3 b = v * (vec3(0.983729) * v + vec3(0.4329510)) + vec3(0.238081); - return a / b; + return pow(color, vec3(2.2)); } vec3 reinhard(vec3 color) @@ -35,22 +19,6 @@ vec3 reinhard(vec3 color) return color / (color + vec3(1.0)); } -vec3 reinhard_jodie(vec3 v) -{ - float l = luminance(v); - vec3 tv = v / (1.0f + v); - return lerp(v / (1.0f + l), tv, tv); -} - -vec3 const_luminance_reinhard(vec3 c) -{ - vec3 lv = vec3(0.2126f, 0.7152f, 0.0722f); - vec3 nv = lv / (1.0f - lv); - c /= 1.0f + dot(c, vec3(lv)); - vec3 nc = vec3(max(c.x - 1.0f, 0.0f), max(c.y - 1.0f, 0.0f), max(c.z - 1.0f, 0.0f)) * nv; - return c + vec3(nc.y + nc.z, nc.x + nc.z, nc.x + nc.y); -} - vec3 hable(vec3 color) { float A = 0.15; @@ -62,35 +30,17 @@ vec3 hable(vec3 color) return ((color * (A * color + C * B) + D * E) / (color * (A * color + B) + D * F)) - E / F; } -vec3 aces(vec3 
color) -{ - color = color * (color + 0.0245786) / (color * (0.983729 * color + 0.4329510) + 0.238081); - return pow(color, vec3(1.0 / 2.2)); -} - -vec3 aces_fitted(vec3 v) +vec3 mobius(vec3 color) { - const mat3 aces_input_matrix = mat3(vec3(0.59719, 0.35458, 0.04823), - vec3(0.07600, 0.90834, 0.01566), - vec3(0.02840, 0.13383, 0.83777)); - - const mat3 aces_output_matrix = mat3(vec3(1.60475, -0.53108, -0.07367), - vec3(-0.10208, 1.10813, -0.00605), - vec3(-0.00327, -0.07276, 1.07602)); - v = mul(aces_input_matrix, v); - v = rtt_and_odt_fit(v); - return mul(aces_output_matrix, v); + color = max(vec3(0.0), color - vec3(0.004)); + color = (color * (6.2 * color + 0.5)) / (color * (6.2 * color + 1.7) + 0.06); + return pow(color, vec3(2.2)); } -vec3 aces_approx(vec3 v) +vec3 aces(vec3 color) { - v *= 0.6; - float a = 2.51; - float b = 0.03; - float c = 2.43; - float d = 0.59; - float e = 0.14; - return clamp((v * (a * v + b)) / (v * (c * v + d) + e), 0.0, 1.0); + color = color * (color + 0.0245786) / (color * (0.983729 * color + 0.4329510) + 0.238081); + return pow(color, vec3(1.0 / 2.2)); } vec3 filmic(vec3 color) @@ -99,13 +49,3 @@ vec3 filmic(vec3 color) color = (color * (6.2 * color + 0.5)) / (color * (6.2 * color + 1.7) + 0.06); return pow(color, vec3(2.2)); } - -vec3 uncharted2_filmic(vec3 v) -{ - float exposure_bias = 2.0f; - vec3 curr = hable(v * exposure_bias); - - vec3 W = vec3(11.2f); - vec3 white_scale = vec3(1.0f) / hable(W); - return curr * white_scale; -} diff --git a/ffmpeg/videorender/tonemap.cc b/ffmpeg/videorender/tonemap.cc index 3847e06..540d502 100644 --- a/ffmpeg/videorender/tonemap.cc +++ b/ffmpeg/videorender/tonemap.cc @@ -19,17 +19,14 @@ void Tonemap::toneMap(QByteArray &header, QByteArray &frag, Type type) { frag.append("\n// pass tone map\n"); switch (type) { + case CLIP: frag.append(GLSL(color.rgb = clip(color.rgb);\n)); break; + case LINEAR: frag.append(GLSL(color.rgb = linear(color.rgb);\n)); break; + case GAMMA: frag.append(GLSL(color.rgb 
= gamma(color.rgb);\n)); break; case REINHARD: frag.append(GLSL(color.rgb = reinhard(color.rgb);\n)); break; - case REINHARD_JODIE: frag.append(GLSL(color.rgb = reinhard_jodie(color.rgb);\n)); break; - case CONST_LUMINANCE_REINHARD: - frag.append(GLSL(color.rgb = const_luminance_reinhard(color.rgb);\n)); - break; case HABLE: frag.append(GLSL(color.rgb = hable(color.rgb);\n)); break; + case MOBIUS: frag.append(GLSL(color.rgb = mobius(color.rgb);\n)); break; case ACES: frag.append(GLSL(color.rgb = aces(color.rgb);\n)); break; - case ACES_FITTED: frag.append(GLSL(color.rgb = aces_fitted(color.rgb);\n)); break; - case ACES_APPROX: frag.append(GLSL(color.rgb = aces_approx(color.rgb);\n)); break; case FILMIC: frag.append(GLSL(color.rgb = filmic(color.rgb);\n)); break; - case UNCHARTED2_FILMIC: frag.append(GLSL(color.rgb = uncharted2_filmic(color.rgb);\n)); break; default: return; } header.append(Utils::readAllFile(":/shader/tonemap.frag")); diff --git a/ffmpeg/videorender/tonemap.hpp b/ffmpeg/videorender/tonemap.hpp index 76c5e1a..104bcb3 100644 --- a/ffmpeg/videorender/tonemap.hpp +++ b/ffmpeg/videorender/tonemap.hpp @@ -11,19 +11,7 @@ class FFMPEG_EXPORT Tonemap : public QObject { Q_OBJECT public: - enum Type { - NONE = 0, - AUTO, - REINHARD, - REINHARD_JODIE, - CONST_LUMINANCE_REINHARD, - HABLE, - ACES, - ACES_FITTED, - ACES_APPROX, - FILMIC, - UNCHARTED2_FILMIC - }; + enum Type { NONE = 0, AUTO, CLIP, LINEAR, GAMMA, REINHARD, HABLE, MOBIUS, ACES, FILMIC }; Q_ENUM(Type); using QObject::QObject;