mirror of https://github.com/procxx/kepka.git

Move some common code to lib_ffmpeg.

parent 10772f4ac5
commit 56e137b20f
@@ -0,0 +1,341 @@
/*
This file is part of Telegram Desktop,
the official desktop application for the Telegram messaging service.

For license and copyright information please follow this link:
https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#include "ffmpeg/ffmpeg_utility.h"

#include "base/algorithm.h"
#include "logs.h"

#include <QImage>

extern "C" {
#include <libavutil/opt.h>
} // extern "C"

namespace FFmpeg {
namespace {

constexpr auto kAlignImageBy = 16;
constexpr auto kImageFormat = QImage::Format_ARGB32_Premultiplied;
constexpr auto kMaxScaleByAspectRatio = 16;
constexpr auto kAvioBlockSize = 4096;
constexpr auto kTimeUnknown = std::numeric_limits<crl::time>::min();
constexpr auto kDurationMax = crl::time(std::numeric_limits<int>::max());

void AlignedImageBufferCleanupHandler(void* data) {
	const auto buffer = static_cast<uchar*>(data);
	delete[] buffer;
}

[[nodiscard]] bool IsValidAspectRatio(AVRational aspect) {
	return (aspect.num > 0)
		&& (aspect.den > 0)
		&& (aspect.num <= aspect.den * kMaxScaleByAspectRatio)
		&& (aspect.den <= aspect.num * kMaxScaleByAspectRatio);
}

[[nodiscard]] bool IsAlignedImage(const QImage &image) {
	return !(reinterpret_cast<uintptr_t>(image.bits()) % kAlignImageBy)
		&& !(image.bytesPerLine() % kAlignImageBy);
}

} // namespace

IOPointer MakeIOPointer(
		void *opaque,
		int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
		int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
		int64_t(*seek)(void *opaque, int64_t offset, int whence)) {
	auto buffer = reinterpret_cast<uchar*>(av_malloc(kAvioBlockSize));
	if (!buffer) {
		LogError(qstr("av_malloc"));
		return {};
	}
	auto result = IOPointer(avio_alloc_context(
		buffer,
		kAvioBlockSize,
		write ? 1 : 0,
		opaque,
		read,
		write,
		seek));
	if (!result) {
		av_freep(&buffer);
		LogError(qstr("avio_alloc_context"));
		return {};
	}
	return result;
}

void IODeleter::operator()(AVIOContext *value) {
	if (value) {
		av_freep(&value->buffer);
		avio_context_free(&value);
	}
}

FormatPointer MakeFormatPointer(
		void *opaque,
		int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
		int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
		int64_t(*seek)(void *opaque, int64_t offset, int whence)) {
	auto io = MakeIOPointer(opaque, read, write, seek);
	if (!io) {
		return {};
	}
	auto result = avformat_alloc_context();
	if (!result) {
		LogError(qstr("avformat_alloc_context"));
		return {};
	}
	result->pb = io.get();

	auto options = (AVDictionary*)nullptr;
	const auto guard = gsl::finally([&] { av_dict_free(&options); });
	av_dict_set(&options, "usetoc", "1", 0);
	const auto error = AvErrorWrap(avformat_open_input(
		&result,
		nullptr,
		nullptr,
		&options));
	if (error) {
		// avformat_open_input freed 'result' in case an error happened.
		LogError(qstr("avformat_open_input"), error);
		return {};
	}
	result->flags |= AVFMT_FLAG_FAST_SEEK;

	// Now FormatPointer will own and free the IO context.
	io.release();
	return FormatPointer(result);
}
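
// Usage sketch (not part of this commit): MakeFormatPointer() lets a caller
// feed FFmpeg from any byte source through the three callbacks. The 'Source'
// struct and helpers below are hypothetical, shown only to illustrate the
// expected contract:
//
//   struct Source {
//       const uint8_t *data = nullptr;
//       int64_t size = 0;
//       int64_t offset = 0;
//   };
//
//   int SourceRead(void *opaque, uint8_t *buffer, int bufferSize) {
//       const auto source = static_cast<Source*>(opaque);
//       const auto left = source->size - source->offset;
//       if (left <= 0) {
//           return AVERROR_EOF;
//       }
//       const auto read = std::min(int64_t(bufferSize), left);
//       std::memcpy(buffer, source->data + source->offset, read);
//       source->offset += read;
//       return int(read);
//   }
//
//   int64_t SourceSeek(void *opaque, int64_t offset, int whence) {
//       const auto source = static_cast<Source*>(opaque);
//       switch (whence) {
//       case SEEK_SET: source->offset = offset; break;
//       case SEEK_CUR: source->offset += offset; break;
//       case SEEK_END: source->offset = source->size + offset; break;
//       case AVSEEK_SIZE: return source->size; // FFmpeg asks for total size.
//       }
//       return source->offset;
//   }
//
//   auto format = FFmpeg::MakeFormatPointer(
//       &source, &SourceRead, nullptr, &SourceSeek);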

void FormatDeleter::operator()(AVFormatContext *value) {
	if (value) {
		const auto deleter = IOPointer(value->pb);
		avformat_close_input(&value);
	}
}

CodecPointer MakeCodecPointer(not_null<AVStream*> stream) {
	auto error = AvErrorWrap();

	auto result = CodecPointer(avcodec_alloc_context3(nullptr));
	const auto context = result.get();
	if (!context) {
		LogError(qstr("avcodec_alloc_context3"));
		return {};
	}
	error = avcodec_parameters_to_context(context, stream->codecpar);
	if (error) {
		LogError(qstr("avcodec_parameters_to_context"), error);
		return {};
	}
	av_codec_set_pkt_timebase(context, stream->time_base);
	av_opt_set_int(context, "refcounted_frames", 1, 0);

	const auto codec = avcodec_find_decoder(context->codec_id);
	if (!codec) {
		LogError(qstr("avcodec_find_decoder"), context->codec_id);
		return {};
	} else if ((error = avcodec_open2(context, codec, nullptr))) {
		LogError(qstr("avcodec_open2"), error);
		return {};
	}
	return result;
}
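
// Usage sketch (not part of this commit): a typical decode loop built on the
// helpers above; 'format', 'readPacket' and 'handleFrame' are hypothetical:
//
//   const auto codec = MakeCodecPointer(format->streams[index]);
//   const auto frame = MakeFramePointer();
//   auto packet = Packet();
//   while (readPacket(packet)) { // hypothetical packet source
//       avcodec_send_packet(codec.get(), &packet.fields());
//       while (avcodec_receive_frame(codec.get(), frame.get()) == 0) {
//           handleFrame(frame.get()); // hypothetical consumer
//       }
//   }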

void CodecDeleter::operator()(AVCodecContext *value) {
	if (value) {
		avcodec_free_context(&value);
	}
}

FramePointer MakeFramePointer() {
	return FramePointer(av_frame_alloc());
}

bool FrameHasData(AVFrame *frame) {
	return (frame && frame->data[0] != nullptr);
}

void ClearFrameMemory(AVFrame *frame) {
	if (FrameHasData(frame)) {
		av_frame_unref(frame);
	}
}

void FrameDeleter::operator()(AVFrame *value) {
	av_frame_free(&value);
}

SwscalePointer MakeSwscalePointer(
		not_null<AVFrame*> frame,
		QSize resize,
		SwscalePointer *existing) {
	// We have to use custom caching for SwsContext, because
	// sws_getCachedContext checks passed flags with existing context flags,
	// and re-creates context if they're different, but in the process of
	// context creation the passed flags are modified before being written
	// to the resulting context, so the caching doesn't work.
	if (existing && (*existing) != nullptr) {
		const auto &deleter = existing->get_deleter();
		if (deleter.resize == resize
			&& deleter.frameSize == QSize(frame->width, frame->height)
			&& deleter.frameFormat == frame->format) {
			return std::move(*existing);
		}
	}
	if (frame->format <= AV_PIX_FMT_NONE || frame->format >= AV_PIX_FMT_NB) {
		LogError(qstr("frame->format"));
		return SwscalePointer();
	}

	const auto result = sws_getCachedContext(
		existing ? existing->release() : nullptr,
		frame->width,
		frame->height,
		AVPixelFormat(frame->format),
		resize.width(),
		resize.height(),
		AV_PIX_FMT_BGRA,
		0,
		nullptr,
		nullptr,
		nullptr);
	if (!result) {
		LogError(qstr("sws_getCachedContext"));
	}
	return SwscalePointer(
		result,
		{ resize, QSize{ frame->width, frame->height }, frame->format });
}
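
// Usage sketch (not part of this commit): callers keep the previous
// SwscalePointer and pass it back in, so the context is reused while the
// frame geometry and format stay the same, and recreated otherwise;
// 'dstData' and 'dstLinesize' below are hypothetical output buffers:
//
//   SwscalePointer scale; // kept across frames
//   scale = MakeSwscalePointer(frame, resize, &scale);
//   if (scale) {
//       sws_scale(
//           scale.get(),
//           frame->data,
//           frame->linesize,
//           0,
//           frame->height,
//           dstData,
//           dstLinesize);
//   }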
|
||||
|
||||
void SwscaleDeleter::operator()(SwsContext *value) {
|
||||
if (value) {
|
||||
sws_freeContext(value);
|
||||
}
|
||||
}
|
||||
|
||||
void LogError(QLatin1String method) {
|
||||
LOG(("Streaming Error: Error in %1.").arg(method));
|
||||
}
|
||||
|
||||
void LogError(QLatin1String method, AvErrorWrap error) {
|
||||
LOG(("Streaming Error: Error in %1 (code: %2, text: %3)."
|
||||
).arg(method
|
||||
).arg(error.code()
|
||||
).arg(error.text()));
|
||||
}
|
||||
|
||||
crl::time PtsToTime(int64_t pts, AVRational timeBase) {
|
||||
return (pts == AV_NOPTS_VALUE || !timeBase.den)
|
||||
? kTimeUnknown
|
||||
: ((pts * 1000LL * timeBase.num) / timeBase.den);
|
||||
}
|
||||
|
||||
crl::time PtsToTimeCeil(int64_t pts, AVRational timeBase) {
|
||||
return (pts == AV_NOPTS_VALUE || !timeBase.den)
|
||||
? kTimeUnknown
|
||||
: ((pts * 1000LL * timeBase.num + timeBase.den - 1) / timeBase.den);
|
||||
}
|
||||
|
||||
int64_t TimeToPts(crl::time time, AVRational timeBase) {
|
||||
return (time == kTimeUnknown || !timeBase.num)
|
||||
? AV_NOPTS_VALUE
|
||||
: (time * timeBase.den) / (1000LL * timeBase.num);
|
||||
}
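
// Sanity check of the conversions above (hypothetical values): with the
// common MPEG time base {1, 90000}, PtsToTime(180000, {1, 90000}) gives
// (180000 * 1000 * 1) / 90000 = 2000 ms, and TimeToPts(2000, {1, 90000})
// maps back to (2000 * 90000) / (1000 * 1) = 180000. PtsToTimeCeil only
// differs when the division truncates: PtsToTimeCeil(1, {1, 90000}) is
// (1 * 1000 + 89999) / 90000 = 1 ms, while PtsToTime(1, {1, 90000}) is 0 ms.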

crl::time PacketPosition(const Packet &packet, AVRational timeBase) {
	const auto &native = packet.fields();
	return PtsToTime(
		(native.pts == AV_NOPTS_VALUE) ? native.dts : native.pts,
		timeBase);
}

crl::time PacketDuration(const Packet &packet, AVRational timeBase) {
	return PtsToTime(packet.fields().duration, timeBase);
}

int DurationByPacket(const Packet &packet, AVRational timeBase) {
	const auto position = PacketPosition(packet, timeBase);
	const auto duration = std::max(
		PacketDuration(packet, timeBase),
		crl::time(1));
	const auto bad = [](crl::time time) {
		return (time < 0) || (time > kDurationMax);
	};
	if (bad(position) || bad(duration) || bad(position + duration + 1)) {
		LOG(("Streaming Error: Wrong duration by packet: %1 + %2"
		).arg(position
		).arg(duration));
		return -1;
	}
	return int(position + duration + 1);
}
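
// Example (hypothetical values): a packet positioned at 5000 ms lasting
// 20 ms yields 5000 + 20 + 1 = 5021, a duration estimate just past the end
// of the packet; packets with out-of-range values produce -1 instead.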

int ReadRotationFromMetadata(not_null<AVStream*> stream) {
	const auto tag = av_dict_get(stream->metadata, "rotate", nullptr, 0);
	if (tag && *tag->value) {
		const auto string = QString::fromUtf8(tag->value);
		auto ok = false;
		const auto degrees = string.toInt(&ok);
		if (ok && (degrees == 90 || degrees == 180 || degrees == 270)) {
			return degrees;
		}
	}
	return 0;
}

AVRational ValidateAspectRatio(AVRational aspect) {
	return IsValidAspectRatio(aspect) ? aspect : kNormalAspect;
}

QSize CorrectByAspect(QSize size, AVRational aspect) {
	Expects(IsValidAspectRatio(aspect));

	return QSize(size.width() * aspect.num / aspect.den, size.height());
}

bool RotationSwapWidthHeight(int rotation) {
	return (rotation == 90 || rotation == 270);
}

bool GoodStorageForFrame(const QImage &storage, QSize size) {
	return !storage.isNull()
		&& (storage.format() == kImageFormat)
		&& (storage.size() == size)
		&& storage.isDetached()
		&& IsAlignedImage(storage);
}

// Create a QImage of desired size where all the data is properly aligned.
QImage CreateFrameStorage(QSize size) {
	const auto width = size.width();
	const auto height = size.height();
	const auto widthAlign = kAlignImageBy / kPixelBytesSize;
	const auto neededWidth = width + ((width % widthAlign)
		? (widthAlign - (width % widthAlign))
		: 0);
	const auto perLine = neededWidth * kPixelBytesSize;
	const auto buffer = new uchar[perLine * height + kAlignImageBy];
	const auto cleanupData = static_cast<void *>(buffer);
	const auto address = reinterpret_cast<uintptr_t>(buffer);
	const auto alignedBuffer = buffer + ((address % kAlignImageBy)
		? (kAlignImageBy - (address % kAlignImageBy))
		: 0);
	return QImage(
		alignedBuffer,
		width,
		height,
		perLine,
		kImageFormat,
		AlignedImageBufferCleanupHandler,
		cleanupData);
}
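
// Worked example for the alignment math above (hypothetical size): for
// size = 102x50 with kPixelBytesSize = 4 and kAlignImageBy = 16,
// widthAlign = 4, so neededWidth = 104 and perLine = 416 bytes. 416 is a
// multiple of 16, so once the base pointer is rounded up to a 16-byte
// boundary every scan line stays 16-byte aligned, which is exactly what
// IsAlignedImage() checks.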

} // namespace FFmpeg
@@ -0,0 +1,185 @@
/*
This file is part of Telegram Desktop,
the official desktop application for the Telegram messaging service.

For license and copyright information please follow this link:
https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#pragma once

#include "base/bytes.h"

#include <crl/crl_time.h>

#include <QSize>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
} // extern "C"

class QImage;

namespace FFmpeg {

inline constexpr auto kPixelBytesSize = 4;

constexpr auto kUniversalTimeBase = AVRational{ 1, AV_TIME_BASE };
constexpr auto kNormalAspect = AVRational{ 1, 1 };

class AvErrorWrap {
public:
	AvErrorWrap(int code = 0) : _code(code) {
	}

	[[nodiscard]] bool failed() const {
		return (_code < 0);
	}
	[[nodiscard]] explicit operator bool() const {
		return failed();
	}

	[[nodiscard]] int code() const {
		return _code;
	}

	[[nodiscard]] QString text() const {
		char string[AV_ERROR_MAX_STRING_SIZE] = { 0 };
		return QString::fromUtf8(av_make_error_string(
			string,
			sizeof(string),
			_code));
	}

private:
	int _code = 0;

};

class Packet {
public:
	Packet() {
		setEmpty();
	}
	Packet(const AVPacket &data) {
		bytes::copy(_data, bytes::object_as_span(&data));
	}
	Packet(Packet &&other) {
		bytes::copy(_data, other._data);
		if (!other.empty()) {
			other.release();
		}
	}
	Packet &operator=(Packet &&other) {
		if (this != &other) {
			av_packet_unref(&fields());
			bytes::copy(_data, other._data);
			if (!other.empty()) {
				other.release();
			}
		}
		return *this;
	}
	~Packet() {
		av_packet_unref(&fields());
	}

	[[nodiscard]] AVPacket &fields() {
		return *reinterpret_cast<AVPacket*>(_data);
	}
	[[nodiscard]] const AVPacket &fields() const {
		return *reinterpret_cast<const AVPacket*>(_data);
	}

	[[nodiscard]] bool empty() const {
		return !fields().data;
	}
	void release() {
		setEmpty();
	}

private:
	void setEmpty() {
		auto &native = fields();
		av_init_packet(&native);
		native.data = nullptr;
		native.size = 0;
	}

	alignas(alignof(AVPacket)) bytes::type _data[sizeof(AVPacket)];

};
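
// Usage sketch (not part of this commit): Packet stores the AVPacket inline
// and unrefs it in the destructor, so packets can move through queues with
// no manual av_packet_unref() calls; 'format' and 'queue' are hypothetical:
//
//   auto packet = Packet();
//   if (av_read_frame(format, &packet.fields()) >= 0) {
//       queue.push_back(std::move(packet)); // ownership moves with it
//   }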

struct IODeleter {
	void operator()(AVIOContext *value);
};
using IOPointer = std::unique_ptr<AVIOContext, IODeleter>;
[[nodiscard]] IOPointer MakeIOPointer(
	void *opaque,
	int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
	int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
	int64_t(*seek)(void *opaque, int64_t offset, int whence));

struct FormatDeleter {
	void operator()(AVFormatContext *value);
};
using FormatPointer = std::unique_ptr<AVFormatContext, FormatDeleter>;
[[nodiscard]] FormatPointer MakeFormatPointer(
	void *opaque,
	int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
	int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
	int64_t(*seek)(void *opaque, int64_t offset, int whence));

struct CodecDeleter {
	void operator()(AVCodecContext *value);
};
using CodecPointer = std::unique_ptr<AVCodecContext, CodecDeleter>;
[[nodiscard]] CodecPointer MakeCodecPointer(not_null<AVStream*> stream);

struct FrameDeleter {
	void operator()(AVFrame *value);
};
using FramePointer = std::unique_ptr<AVFrame, FrameDeleter>;
[[nodiscard]] FramePointer MakeFramePointer();
[[nodiscard]] bool FrameHasData(AVFrame *frame);
void ClearFrameMemory(AVFrame *frame);

struct SwscaleDeleter {
	QSize resize;
	QSize frameSize;
	int frameFormat = int(AV_PIX_FMT_NONE);

	void operator()(SwsContext *value);
};
using SwscalePointer = std::unique_ptr<SwsContext, SwscaleDeleter>;
[[nodiscard]] SwscalePointer MakeSwscalePointer(
	not_null<AVFrame*> frame,
	QSize resize,
	SwscalePointer *existing = nullptr);

void LogError(QLatin1String method);
void LogError(QLatin1String method, FFmpeg::AvErrorWrap error);

[[nodiscard]] crl::time PtsToTime(int64_t pts, AVRational timeBase);
// Used for full duration conversion.
[[nodiscard]] crl::time PtsToTimeCeil(int64_t pts, AVRational timeBase);
[[nodiscard]] int64_t TimeToPts(crl::time time, AVRational timeBase);
[[nodiscard]] crl::time PacketPosition(
	const FFmpeg::Packet &packet,
	AVRational timeBase);
[[nodiscard]] crl::time PacketDuration(
	const FFmpeg::Packet &packet,
	AVRational timeBase);
[[nodiscard]] int DurationByPacket(
	const FFmpeg::Packet &packet,
	AVRational timeBase);
[[nodiscard]] int ReadRotationFromMetadata(not_null<AVStream*> stream);
[[nodiscard]] AVRational ValidateAspectRatio(AVRational aspect);
[[nodiscard]] bool RotationSwapWidthHeight(int rotation);
[[nodiscard]] QSize CorrectByAspect(QSize size, AVRational aspect);

[[nodiscard]] bool GoodStorageForFrame(const QImage &storage, QSize size);
[[nodiscard]] QImage CreateFrameStorage(QSize size);

} // namespace FFmpeg
@@ -8,6 +8,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "lottie/lottie_cache.h"
 
 #include "lottie/lottie_frame_renderer.h"
+#include "ffmpeg/ffmpeg_utility.h"
 #include "base/bytes.h"
 
 #include <QDataStream>
@@ -30,6 +31,9 @@ bool UncompressToRaw(AlignedStorage &to, bytes::const_span from) {
 }
 
 void Decode(QImage &to, const AlignedStorage &from, const QSize &fromSize) {
+	if (!FFmpeg::GoodStorageForFrame(to, fromSize)) {
+		to = FFmpeg::CreateFrameStorage(fromSize);
+	}
 	auto fromBytes = static_cast<const char*>(from.aligned());
 	auto toBytes = to.bits();
 	const auto fromPerLine = from.bytesPerLine();
@@ -191,7 +191,7 @@ AbstractAudioFFMpegLoader::AbstractAudioFFMpegLoader(
 	const QByteArray &data,
 	bytes::vector &&buffer)
 : AbstractFFMpegLoader(file, data, std::move(buffer))
-, _frame(Streaming::MakeFramePointer()) {
+, _frame(FFmpeg::MakeFramePointer()) {
 }
 
 bool AbstractAudioFFMpegLoader::initUsingContext(
@@ -264,7 +264,7 @@ bool AbstractAudioFFMpegLoader::initUsingContext(
 }
 
 auto AbstractAudioFFMpegLoader::replaceFrameAndRead(
-	Streaming::FramePointer frame,
+	FFmpeg::FramePointer frame,
 	QByteArray &result,
 	int64 &samplesAdded)
 -> ReadResult {
@@ -105,7 +105,7 @@ protected:
 	// Streaming player provides the first frame to the ChildFFMpegLoader
 	// so we replace our allocated frame with the one provided.
 	ReadResult replaceFrameAndRead(
-		Streaming::FramePointer frame,
+		FFmpeg::FramePointer frame,
 		QByteArray &result,
 		int64 &samplesAdded);
 
@@ -126,7 +126,7 @@ private:
 		uint8_t **data,
 		int count) const;
 
-	Streaming::FramePointer _frame;
+	FFmpeg::FramePointer _frame;
 	int _outputFormat = AL_FORMAT_STEREO16;
 	int _outputChannels = 2;
 	int _outputSampleSize = 2 * sizeof(uint16);
@@ -37,7 +37,7 @@ public:
 	virtual ReadResult readMore(
 		QByteArray &samples,
 		int64 &samplesCount) = 0;
-	virtual void enqueuePackets(std::deque<Streaming::Packet> &&packets) {
+	virtual void enqueuePackets(std::deque<FFmpeg::Packet> &&packets) {
 		Unexpected("enqueuePackets() call on not ChildFFMpegLoader.");
 	}
 	virtual void setForceToBuffer(bool force) {
@@ -47,7 +47,7 @@ private:
 	QMutex _fromExternalMutex;
 	base::flat_map<
 		AudioMsgId,
-		std::deque<Streaming::Packet>> _fromExternalQueues;
+		std::deque<FFmpeg::Packet>> _fromExternalQueues;
 	base::flat_set<AudioMsgId> _fromExternalForceToBuffer;
 	SingleQueuedInvokation _fromExternalNotify;
 
@@ -109,7 +109,7 @@ AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(
 }
 
 void ChildFFMpegLoader::enqueuePackets(
-		std::deque<Streaming::Packet> &&packets) {
+		std::deque<FFmpeg::Packet> &&packets) {
 	if (_queue.empty()) {
 		_queue = std::move(packets);
 	} else {
@@ -13,8 +13,8 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 namespace Media {
 
 struct ExternalSoundData {
-	Streaming::CodecPointer codec;
-	Streaming::FramePointer frame;
+	FFmpeg::CodecPointer codec;
+	FFmpeg::FramePointer frame;
 	int32 frequency = Media::Player::kDefaultFrequency;
 	int64 length = 0;
 	float64 speed = 1.; // 0.5 <= speed <= 2.
@@ -22,7 +22,7 @@ struct ExternalSoundData {
 
 struct ExternalSoundPart {
 	AudioMsgId audio;
-	Streaming::Packet packet;
+	FFmpeg::Packet packet;
 };
 
 class ChildFFMpegLoader : public AbstractAudioFFMpegLoader {
@@ -36,7 +36,7 @@ public:
 	}
 
 	ReadResult readMore(QByteArray &result, int64 &samplesAdded) override;
-	void enqueuePackets(std::deque<Streaming::Packet> &&packets) override;
+	void enqueuePackets(std::deque<FFmpeg::Packet> &&packets) override;
 	void setForceToBuffer(bool force) override;
 	bool forceToBuffer() const override;
 
@@ -55,7 +55,7 @@ private:
 		int64 &samplesAdded);
 
 	std::unique_ptr<ExternalSoundData> _parentData;
-	std::deque<Streaming::Packet> _queue;
+	std::deque<FFmpeg::Packet> _queue;
 	bool _forceToBuffer = false;
 	bool _eofReached = false;
 
@@ -49,7 +49,7 @@ FFMpegReaderImplementation::FFMpegReaderImplementation(
 	QByteArray *data,
 	const AudioMsgId &audio)
 : ReaderImplementation(location, data)
-, _frame(Streaming::MakeFramePointer())
+, _frame(FFmpeg::MakeFramePointer())
 , _audioMsgId(audio) {
 }
 
@@ -119,7 +119,7 @@ ReaderImplementation::ReadResult FFMpegReaderImplementation::readNextFrame() {
 	] {
 		native->size = size;
 		native->data = data;
-		packet = Streaming::Packet();
+		packet = FFmpeg::Packet();
 	});
 
 	res = avcodec_send_packet(_codecContext, native);
@@ -275,7 +275,7 @@ bool FFMpegReaderImplementation::renderFrame(QImage &to, bool &hasAlpha, const Q
 		}
 	}
 
-	Streaming::ClearFrameMemory(_frame.get());
+	FFmpeg::ClearFrameMemory(_frame.get());
 
 	return true;
 }
@@ -385,7 +385,7 @@ bool FFMpegReaderImplementation::start(Mode mode, crl::time &positionMs) {
 		_audioStreamId = -1;
 	} else {
 		soundData = std::make_unique<ExternalSoundData>();
-		soundData->codec = Streaming::CodecPointer(audioContext);
+		soundData->codec = FFmpeg::CodecPointer(audioContext);
 		soundData->frequency = _fmtContext->streams[_audioStreamId]->codecpar->sample_rate;
 		if (_fmtContext->streams[_audioStreamId]->duration == AV_NOPTS_VALUE) {
 			soundData->length = (_fmtContext->duration * soundData->frequency) / AV_TIME_BASE;
@@ -405,7 +405,7 @@ bool FFMpegReaderImplementation::start(Mode mode, crl::time &positionMs) {
 		}
 	}
 
-	Streaming::Packet packet;
+	FFmpeg::Packet packet;
 	auto readResult = readPacket(packet);
 	if (readResult == PacketResult::Ok && positionMs > 0) {
 		positionMs = countPacketMs(packet);
@@ -436,7 +436,7 @@ bool FFMpegReaderImplementation::inspectAt(crl::time &positionMs) {
 
 	_packetQueue.clear();
 
-	Streaming::Packet packet;
+	FFmpeg::Packet packet;
 	auto readResult = readPacket(packet);
 	if (readResult == PacketResult::Ok && positionMs > 0) {
 		positionMs = countPacketMs(packet);
@@ -481,7 +481,7 @@ FFMpegReaderImplementation::~FFMpegReaderImplementation() {
 	if (_fmtContext) avformat_free_context(_fmtContext);
 }
 
-FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket(Streaming::Packet &packet) {
+FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket(FFmpeg::Packet &packet) {
 	int res = 0;
 	if ((res = av_read_frame(_fmtContext, &packet.fields())) < 0) {
 		if (res == AVERROR_EOF) {
@@ -489,7 +489,7 @@ FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket(
 			// queue terminating packet to audio player
 			Player::mixer()->feedFromExternal({
 				_audioMsgId,
-				Streaming::Packet()
+				FFmpeg::Packet()
 			});
 		}
 		return PacketResult::EndOfFile;
@@ -501,7 +501,7 @@ FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket(
 	return PacketResult::Ok;
 }
 
-void FFMpegReaderImplementation::processPacket(Streaming::Packet &&packet) {
+void FFMpegReaderImplementation::processPacket(FFmpeg::Packet &&packet) {
 	const auto &native = packet.fields();
 	auto videoPacket = (native.stream_index == _streamId);
 	auto audioPacket = (_audioStreamId >= 0 && native.stream_index == _audioStreamId);
@@ -523,7 +523,7 @@ void FFMpegReaderImplementation::processPacket(Streaming::Packet &&packet) {
 }
 
 crl::time FFMpegReaderImplementation::countPacketMs(
-		const Streaming::Packet &packet) const {
+		const FFmpeg::Packet &packet) const {
 	const auto &native = packet.fields();
 	int64 packetPts = (native.pts == AV_NOPTS_VALUE) ? native.dts : native.pts;
 	crl::time packetMs = (packetPts * 1000LL * _fmtContext->streams[native.stream_index]->time_base.num) / _fmtContext->streams[native.stream_index]->time_base.den;
@@ -531,7 +531,7 @@ crl::time FFMpegReaderImplementation::countPacketMs(
 }
 
 FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readAndProcessPacket() {
-	Streaming::Packet packet;
+	FFmpeg::Packet packet;
 	auto result = readPacket(packet);
 	if (result == PacketResult::Ok) {
 		processPacket(std::move(packet));
@@ -55,9 +55,9 @@ private:
 		EndOfFile,
 		Error,
 	};
-	PacketResult readPacket(Streaming::Packet &packet);
-	void processPacket(Streaming::Packet &&packet);
-	crl::time countPacketMs(const Streaming::Packet &packet) const;
+	PacketResult readPacket(FFmpeg::Packet &packet);
+	void processPacket(FFmpeg::Packet &&packet);
+	crl::time countPacketMs(const FFmpeg::Packet &packet) const;
 	PacketResult readAndProcessPacket();
 
 	enum class Rotation {
@@ -83,7 +83,7 @@ private:
 	AVFormatContext *_fmtContext = nullptr;
 	AVCodecContext *_codecContext = nullptr;
 	int _streamId = 0;
-	Streaming::FramePointer _frame;
+	FFmpeg::FramePointer _frame;
 	bool _opened = false;
 	bool _hadFrame = false;
 	bool _frameRead = false;
@@ -95,7 +95,7 @@ private:
 	crl::time _lastReadVideoMs = 0;
 	crl::time _lastReadAudioMs = 0;
 
-	std::deque<Streaming::Packet> _packetQueue;
+	std::deque<FFmpeg::Packet> _packetQueue;
 
 	int _width = 0;
 	int _height = 0;
@@ -47,7 +47,7 @@ crl::time AudioTrack::streamDuration() const {
 	return _stream.duration;
 }
 
-void AudioTrack::process(Packet &&packet) {
+void AudioTrack::process(FFmpeg::Packet &&packet) {
 	if (packet.empty()) {
 		_readTillEnd = true;
 	}
@@ -68,7 +68,7 @@ bool AudioTrack::initialized() const {
 	return !_ready;
 }
 
-bool AudioTrack::tryReadFirstFrame(Packet &&packet) {
+bool AudioTrack::tryReadFirstFrame(FFmpeg::Packet &&packet) {
 	if (ProcessPacket(_stream, std::move(packet)).failed()) {
 		return false;
 	}
@@ -97,13 +97,13 @@ bool AudioTrack::tryReadFirstFrame(Packet &&packet) {
 			// Try skipping frames until one is after the requested position.
 			std::swap(_initialSkippingFrame, _stream.frame);
 			if (!_stream.frame) {
-				_stream.frame = MakeFramePointer();
+				_stream.frame = FFmpeg::MakeFramePointer();
 			}
 		}
 	}
 }
 
 bool AudioTrack::processFirstFrame() {
-	if (!FrameHasData(_stream.frame.get())) {
+	if (!FFmpeg::FrameHasData(_stream.frame.get())) {
 		return false;
 	}
 	mixerInit();
@@ -148,7 +148,7 @@ void AudioTrack::callReady() {
 	base::take(_ready)({ VideoInformation(), data });
 }
 
-void AudioTrack::mixerEnqueue(Packet &&packet) {
+void AudioTrack::mixerEnqueue(FFmpeg::Packet &&packet) {
 	Media::Player::mixer()->feedFromExternal({
 		_audioId,
 		std::move(packet)
@@ -43,7 +43,7 @@ public:
 	[[nodiscard]] crl::time streamDuration() const;
 
 	// Called from the same unspecified thread.
-	void process(Packet &&packet);
+	void process(FFmpeg::Packet &&packet);
 	void waitForData();
 
 	// Called from the main thread.
@@ -52,11 +52,11 @@ public:
 private:
 	// Called from the same unspecified thread.
 	[[nodiscard]] bool initialized() const;
-	[[nodiscard]] bool tryReadFirstFrame(Packet &&packet);
+	[[nodiscard]] bool tryReadFirstFrame(FFmpeg::Packet &&packet);
 	[[nodiscard]] bool fillStateFromFrame();
 	[[nodiscard]] bool processFirstFrame();
 	void mixerInit();
-	void mixerEnqueue(Packet &&packet);
+	void mixerEnqueue(FFmpeg::Packet &&packet);
 	void mixerForceToBuffer();
 	void callReady();
 
@@ -83,7 +83,7 @@ private:
 	rpl::variable<crl::time> _playPosition;
 
 	// For initial frame skipping for an exact seek.
-	FramePointer _initialSkippingFrame;
+	FFmpeg::FramePointer _initialSkippingFrame;
 
 };
 
@@ -9,9 +9,9 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 
 namespace Media {
 
-constexpr auto kTimeUnknown = std::numeric_limits<crl::time>::min();
-constexpr auto kDurationMax = crl::time(std::numeric_limits<int>::max());
-constexpr auto kDurationUnavailable = std::numeric_limits<crl::time>::max();
+inline constexpr auto kTimeUnknown = std::numeric_limits<crl::time>::min();
+inline constexpr auto kDurationMax = crl::time(std::numeric_limits<int>::max());
+inline constexpr auto kDurationUnavailable = std::numeric_limits<crl::time>::max();
 
 namespace Audio {
 bool SupportsSpeedControl();
@@ -9,6 +9,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 
 #include "media/streaming/media_streaming_loader.h"
 #include "media/streaming/media_streaming_file_delegate.h"
+#include "ffmpeg/ffmpeg_utility.h"
 
 namespace Media {
 namespace Streaming {
@@ -85,26 +86,30 @@ int64_t File::Context::seek(int64_t offset, int whence) {
 
 void File::Context::logError(QLatin1String method) {
 	if (!unroll()) {
-		LogError(method);
+		FFmpeg::LogError(method);
 	}
 }
 
-void File::Context::logError(QLatin1String method, AvErrorWrap error) {
+void File::Context::logError(
+		QLatin1String method,
+		FFmpeg::AvErrorWrap error) {
 	if (!unroll()) {
-		LogError(method, error);
+		FFmpeg::LogError(method, error);
 	}
 }
 
 void File::Context::logFatal(QLatin1String method) {
 	if (!unroll()) {
-		LogError(method);
+		FFmpeg::LogError(method);
 		fail(_format ? Error::InvalidData : Error::OpenFailed);
 	}
 }
 
-void File::Context::logFatal(QLatin1String method, AvErrorWrap error) {
+void File::Context::logFatal(
+		QLatin1String method,
+		FFmpeg::AvErrorWrap error) {
 	if (!unroll()) {
-		LogError(method, error);
+		FFmpeg::LogError(method, error);
 		fail(_format ? Error::InvalidData : Error::OpenFailed);
 	}
 }
 
@@ -126,8 +131,8 @@ Stream File::Context::initStream(
 
 	const auto info = format->streams[index];
 	if (type == AVMEDIA_TYPE_VIDEO) {
-		result.rotation = ReadRotationFromMetadata(info);
-		result.aspect = ValidateAspectRatio(info->sample_aspect_ratio);
+		result.rotation = FFmpeg::ReadRotationFromMetadata(info);
+		result.aspect = FFmpeg::ValidateAspectRatio(info->sample_aspect_ratio);
 	} else if (type == AVMEDIA_TYPE_AUDIO) {
 		result.frequency = info->codecpar->sample_rate;
 		if (!result.frequency) {
@@ -135,20 +140,20 @@ Stream File::Context::initStream(
 		}
 	}
 
-	result.codec = MakeCodecPointer(info);
+	result.codec = FFmpeg::MakeCodecPointer(info);
 	if (!result.codec) {
 		return result;
 	}
 
-	result.frame = MakeFramePointer();
+	result.frame = FFmpeg::MakeFramePointer();
 	if (!result.frame) {
 		result.codec = nullptr;
 		return result;
 	}
 	result.timeBase = info->time_base;
 	result.duration = (info->duration != AV_NOPTS_VALUE)
-		? PtsToTime(info->duration, result.timeBase)
-		: PtsToTime(format->duration, kUniversalTimeBase);
+		? FFmpeg::PtsToTime(info->duration, result.timeBase)
+		: FFmpeg::PtsToTime(format->duration, FFmpeg::kUniversalTimeBase);
 	if (result.duration <= 0) {
 		result.codec = nullptr;
 	} else if (result.duration == kTimeUnknown) {
@@ -167,7 +172,7 @@ void File::Context::seekToPosition(
 		not_null<AVFormatContext*> format,
 		const Stream &stream,
 		crl::time position) {
-	auto error = AvErrorWrap();
+	auto error = FFmpeg::AvErrorWrap();
 
 	if (!position) {
 		return;
@@ -192,7 +197,7 @@ void File::Context::seekToPosition(
 	error = av_seek_frame(
 		format,
 		stream.index,
-		TimeToPts(
+		FFmpeg::TimeToPts(
 			std::clamp(position, crl::time(0), stream.duration - 1),
 			stream.timeBase),
 		AVSEEK_FLAG_BACKWARD);
@@ -202,13 +207,13 @@ void File::Context::seekToPosition(
 		return logFatal(qstr("av_seek_frame"), error);
 }
 
-base::variant<Packet, AvErrorWrap> File::Context::readPacket() {
-	auto error = AvErrorWrap();
+base::variant<FFmpeg::Packet, FFmpeg::AvErrorWrap> File::Context::readPacket() {
+	auto error = FFmpeg::AvErrorWrap();
 
-	auto result = Packet();
+	auto result = FFmpeg::Packet();
 	error = av_read_frame(_format.get(), &result.fields());
 	if (unroll()) {
-		return AvErrorWrap();
+		return FFmpeg::AvErrorWrap();
 	} else if (!error) {
 		return std::move(result);
 	} else if (error.code() != AVERROR_EOF) {
@@ -218,12 +223,12 @@ base::variant<Packet, AvErrorWrap> File::Context::readPacket() {
 }
 
 void File::Context::start(crl::time position) {
-	auto error = AvErrorWrap();
+	auto error = FFmpeg::AvErrorWrap();
 
 	if (unroll()) {
 		return;
 	}
-	auto format = MakeFormatPointer(
+	auto format = FFmpeg::MakeFormatPointer(
 		static_cast<void *>(this),
 		&Context::Read,
 		nullptr,
@@ -279,7 +284,7 @@ void File::Context::readNextPacket() {
 	auto result = readPacket();
 	if (unroll()) {
 		return;
-	} else if (const auto packet = base::get_if<Packet>(&result)) {
+	} else if (const auto packet = base::get_if<FFmpeg::Packet>(&result)) {
 		const auto more = _delegate->fileProcessPacket(std::move(*packet));
 		if (!more) {
 			do {
@@ -290,17 +295,17 @@ void File::Context::readNextPacket() {
 		}
 	} else {
 		// Still trying to read by drain.
-		Assert(result.is<AvErrorWrap>());
-		Assert(result.get<AvErrorWrap>().code() == AVERROR_EOF);
+		Assert(result.is<FFmpeg::AvErrorWrap>());
+		Assert(result.get<FFmpeg::AvErrorWrap>().code() == AVERROR_EOF);
 		handleEndOfFile();
 	}
 }
 
 void File::Context::handleEndOfFile() {
-	const auto more = _delegate->fileProcessPacket(Packet());
+	const auto more = _delegate->fileProcessPacket(FFmpeg::Packet());
 	if (_delegate->fileReadMore()) {
 		_readTillEnd = false;
-		auto error = AvErrorWrap(av_seek_frame(
+		auto error = FFmpeg::AvErrorWrap(av_seek_frame(
 			_format.get(),
 			-1, // stream_index
 			0, // timestamp
@@ -10,6 +10,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "media/streaming/media_streaming_common.h"
 #include "media/streaming/media_streaming_utility.h"
 #include "media/streaming/media_streaming_reader.h"
+#include "ffmpeg/ffmpeg_utility.h"
 #include "base/bytes.h"
 #include "base/weak_ptr.h"
 
@@ -64,9 +65,9 @@ private:
 
 	[[nodiscard]] bool unroll() const;
 	void logError(QLatin1String method);
-	void logError(QLatin1String method, AvErrorWrap error);
+	void logError(QLatin1String method, FFmpeg::AvErrorWrap error);
 	void logFatal(QLatin1String method);
-	void logFatal(QLatin1String method, AvErrorWrap error);
+	void logFatal(QLatin1String method, FFmpeg::AvErrorWrap error);
 	void fail(Error error);
 
 	Stream initStream(
@@ -78,7 +79,8 @@ private:
 		crl::time position);
 
 	// TODO base::expected.
-	[[nodiscard]] base::variant<Packet, AvErrorWrap> readPacket();
+	[[nodiscard]] auto readPacket()
+	-> base::variant<FFmpeg::Packet, FFmpeg::AvErrorWrap>;
 
 	void handleEndOfFile();
 	void sendFullInCache(bool force = false);
@@ -94,7 +96,7 @@ private:
 	crl::semaphore _semaphore;
 	std::atomic<bool> _interrupted = false;
 
-	FormatPointer _format;
+	FFmpeg::FormatPointer _format;
 
 };
 
@@ -7,11 +7,14 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 */
 #pragma once
 
+namespace FFmpeg {
+class Packet;
+} // namespace FFmpeg
+
 namespace Media {
 namespace Streaming {
 
 struct Stream;
-class Packet;
 enum class Error;
 
 class FileDelegate {
@@ -27,7 +30,8 @@ public:
 	// Return true if reading and processing more packets is desired.
 	// Return false if sleeping until 'wake()' is called is desired.
 	// Return true after the EOF packet if looping is desired.
-	[[nodiscard]] virtual bool fileProcessPacket(Packet &&packet) = 0;
+	[[nodiscard]] virtual bool fileProcessPacket(
+		FFmpeg::Packet &&packet) = 0;
 	[[nodiscard]] virtual bool fileReadMore() = 0;
 };
 
@@ -324,7 +324,7 @@ void Player::fileWaitingForData() {
 	}
 }
 
-bool Player::fileProcessPacket(Packet &&packet) {
+bool Player::fileProcessPacket(FFmpeg::Packet &&packet) {
 	_waitingForData = false;
 
 	const auto &native = packet.fields();
@@ -337,14 +337,14 @@ bool Player::fileProcessPacket(Packet &&packet) {
 			crl::on_main(&_sessionGuard, [=] {
 				audioReceivedTill(till);
 			});
-			_audio->process(Packet());
+			_audio->process(FFmpeg::Packet());
 		}
 		if (_video) {
 			const auto till = _loopingShift + computeVideoDuration();
 			crl::on_main(&_sessionGuard, [=] {
 				videoReceivedTill(till);
 			});
-			_video->process(Packet());
+			_video->process(FFmpeg::Packet());
 		}
 	} else if (_audio && _audio->streamIndex() == native.stream_index) {
 		accumulate_max(
@@ -352,7 +352,7 @@ bool Player::fileProcessPacket(Packet &&packet) {
 			durationByPacket(*_audio, packet));
 
 		const auto till = _loopingShift + std::clamp(
-			PacketPosition(packet, _audio->streamTimeBase()),
+			FFmpeg::PacketPosition(packet, _audio->streamTimeBase()),
 			crl::time(0),
 			computeAudioDuration() - 1);
 		crl::on_main(&_sessionGuard, [=] {
@@ -365,7 +365,7 @@ bool Player::fileProcessPacket(Packet &&packet) {
 			durationByPacket(*_video, packet));
 
 		const auto till = _loopingShift + std::clamp(
-			PacketPosition(packet, _video->streamTimeBase()),
+			FFmpeg::PacketPosition(packet, _video->streamTimeBase()),
 			crl::time(0),
 			computeVideoDuration() - 1);
 		crl::on_main(&_sessionGuard, [=] {
@@ -404,7 +404,7 @@ void Player::streamFailed(Error error) {
 template <typename Track>
 int Player::durationByPacket(
 	const Track &track,
-	const Packet &packet) {
+	const FFmpeg::Packet &packet) {
 	// We've set this value on the first cycle.
 	if (_loopingShift || _totalDuration != kDurationUnavailable) {
 		return 0;
@@ -84,7 +84,7 @@ private:
 	void fileError(Error error) override;
 	void fileWaitingForData() override;
 	void fileFullInCache(bool fullInCache) override;
-	bool fileProcessPacket(Packet &&packet) override;
+	bool fileProcessPacket(FFmpeg::Packet &&packet) override;
 	bool fileReadMore() override;
 
 	// Called from the main thread.
@@ -117,7 +117,7 @@ private:
 	[[nodiscard]] crl::time loadInAdvanceFor() const;
 
 	template <typename Track>
-	int durationByPacket(const Track &track, const Packet &packet);
+	int durationByPacket(const Track &track, const FFmpeg::Packet &packet);
 
 	// Valid after fileReady call ends. Thread-safe.
 	[[nodiscard]] crl::time computeAudioDuration() const;
@@ -9,307 +9,16 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 
 #include "media/streaming/media_streaming_common.h"
 #include "ui/image/image_prepare.h"
-
-extern "C" {
-#include <libavutil/opt.h>
-} // extern "C"
+#include "ffmpeg/ffmpeg_utility.h"
 
 namespace Media {
 namespace Streaming {
 namespace {
 
 constexpr auto kSkipInvalidDataPackets = 10;
-constexpr auto kAlignImageBy = 16;
-constexpr auto kPixelBytesSize = 4;
-constexpr auto kImageFormat = QImage::Format_ARGB32_Premultiplied;
-constexpr auto kAvioBlockSize = 4096;
-constexpr auto kMaxScaleByAspectRatio = 16;
-
-void AlignedImageBufferCleanupHandler(void* data) {
-	const auto buffer = static_cast<uchar*>(data);
-	delete[] buffer;
-}
-
-[[nodiscard]] bool IsAlignedImage(const QImage &image) {
-	return !(reinterpret_cast<uintptr_t>(image.bits()) % kAlignImageBy)
-		&& !(image.bytesPerLine() % kAlignImageBy);
-}
-
-[[nodiscard]] bool IsValidAspectRatio(AVRational aspect) {
-	return (aspect.num > 0)
-		&& (aspect.den > 0)
-		&& (aspect.num <= aspect.den * kMaxScaleByAspectRatio)
-		&& (aspect.den <= aspect.num * kMaxScaleByAspectRatio);
-}
 
 } // namespace
 
-bool GoodStorageForFrame(const QImage &storage, QSize size) {
-	return !storage.isNull()
-		&& (storage.format() == kImageFormat)
-		&& (storage.size() == size)
-		&& storage.isDetached()
-		&& IsAlignedImage(storage);
-}
-
-// Create a QImage of desired size where all the data is properly aligned.
-QImage CreateFrameStorage(QSize size) {
-	const auto width = size.width();
-	const auto height = size.height();
-	const auto widthAlign = kAlignImageBy / kPixelBytesSize;
-	const auto neededWidth = width + ((width % widthAlign)
-		? (widthAlign - (width % widthAlign))
-		: 0);
-	const auto perLine = neededWidth * kPixelBytesSize;
-	const auto buffer = new uchar[perLine * height + kAlignImageBy];
-	const auto cleanupData = static_cast<void *>(buffer);
-	const auto address = reinterpret_cast<uintptr_t>(buffer);
-	const auto alignedBuffer = buffer + ((address % kAlignImageBy)
-		? (kAlignImageBy - (address % kAlignImageBy))
-		: 0);
-	return QImage(
-		alignedBuffer,
-		width,
-		height,
-		perLine,
-		kImageFormat,
-		AlignedImageBufferCleanupHandler,
-		cleanupData);
-}
-
-IOPointer MakeIOPointer(
-	void *opaque,
-	int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
-	int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
-	int64_t(*seek)(void *opaque, int64_t offset, int whence)) {
-	auto buffer = reinterpret_cast<uchar*>(av_malloc(kAvioBlockSize));
-	if (!buffer) {
-		LogError(qstr("av_malloc"));
-		return {};
-	}
-	auto result = IOPointer(avio_alloc_context(
-		buffer,
-		kAvioBlockSize,
-		write ? 1 : 0,
-		opaque,
-		read,
-		write,
-		seek));
-	if (!result) {
-		av_freep(&buffer);
-		LogError(qstr("avio_alloc_context"));
-		return {};
-	}
-	return result;
-}
-
-void IODeleter::operator()(AVIOContext *value) {
-	if (value) {
-		av_freep(&value->buffer);
-		avio_context_free(&value);
-	}
-}
-
-FormatPointer MakeFormatPointer(
-	void *opaque,
-	int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
-	int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
-	int64_t(*seek)(void *opaque, int64_t offset, int whence)) {
-	auto io = MakeIOPointer(opaque, read, write, seek);
-	if (!io) {
-		return {};
-	}
-	auto result = avformat_alloc_context();
-	if (!result) {
-		LogError(qstr("avformat_alloc_context"));
-		return {};
-	}
-	result->pb = io.get();
-
-	auto options = (AVDictionary*)nullptr;
-	const auto guard = gsl::finally([&] { av_dict_free(&options); });
-	av_dict_set(&options, "usetoc", "1", 0);
-	const auto error = AvErrorWrap(avformat_open_input(
-		&result,
-		nullptr,
-		nullptr,
-		&options));
-	if (error) {
-		// avformat_open_input freed 'result' in case an error happened.
-		LogError(qstr("avformat_open_input"), error);
-		return {};
-	}
-	result->flags |= AVFMT_FLAG_FAST_SEEK;
-
-	// Now FormatPointer will own and free the IO context.
-	io.release();
-	return FormatPointer(result);
-}
-
-void FormatDeleter::operator()(AVFormatContext *value) {
-	if (value) {
-		const auto deleter = IOPointer(value->pb);
-		avformat_close_input(&value);
-	}
-}
-
-CodecPointer MakeCodecPointer(not_null<AVStream*> stream) {
-	auto error = AvErrorWrap();
-
-	auto result = CodecPointer(avcodec_alloc_context3(nullptr));
-	const auto context = result.get();
-	if (!context) {
-		LogError(qstr("avcodec_alloc_context3"));
-		return {};
-	}
-	error = avcodec_parameters_to_context(context, stream->codecpar);
-	if (error) {
-		LogError(qstr("avcodec_parameters_to_context"), error);
-		return {};
-	}
-	av_codec_set_pkt_timebase(context, stream->time_base);
-	av_opt_set_int(context, "refcounted_frames", 1, 0);
-
-	const auto codec = avcodec_find_decoder(context->codec_id);
-	if (!codec) {
-		LogError(qstr("avcodec_find_decoder"), context->codec_id);
-		return {};
-	} else if ((error = avcodec_open2(context, codec, nullptr))) {
-		LogError(qstr("avcodec_open2"), error);
-		return {};
-	}
-	return result;
-}
-
-void CodecDeleter::operator()(AVCodecContext *value) {
-	if (value) {
-		avcodec_free_context(&value);
-	}
-}
-
-FramePointer MakeFramePointer() {
-	return FramePointer(av_frame_alloc());
-}
-
-bool FrameHasData(AVFrame *frame) {
-	return (frame && frame->data[0] != nullptr);
-}
-
-void ClearFrameMemory(AVFrame *frame) {
-	if (FrameHasData(frame)) {
-		av_frame_unref(frame);
-	}
-}
-
-void FrameDeleter::operator()(AVFrame *value) {
-	av_frame_free(&value);
-}
-
-SwscalePointer MakeSwscalePointer(
-	not_null<AVFrame*> frame,
-	QSize resize,
-	SwscalePointer *existing) {
-	// We have to use custom caching for SwsContext, because
-	// sws_getCachedContext checks passed flags with existing context flags,
-	// and re-creates context if they're different, but in the process of
-	// context creation the passed flags are modified before being written
-	// to the resulting context, so the caching doesn't work.
-	if (existing && (*existing) != nullptr) {
-		const auto &deleter = existing->get_deleter();
-		if (deleter.resize == resize
-			&& deleter.frameSize == QSize(frame->width, frame->height)
-			&& deleter.frameFormat == frame->format) {
-			return std::move(*existing);
-		}
-	}
-	if (frame->format <= AV_PIX_FMT_NONE || frame->format >= AV_PIX_FMT_NB) {
-		LogError(qstr("frame->format"));
-		return SwscalePointer();
-	}
-
-	const auto result = sws_getCachedContext(
-		existing ? existing->release() : nullptr,
-		frame->width,
-		frame->height,
-		AVPixelFormat(frame->format),
-		resize.width(),
-		resize.height(),
-		AV_PIX_FMT_BGRA,
-		0,
-		nullptr,
-		nullptr,
-		nullptr);
-	if (!result) {
-		LogError(qstr("sws_getCachedContext"));
-	}
-	return SwscalePointer(
-		result,
-		{ resize, QSize{ frame->width, frame->height }, frame->format });
-}
-
-void SwscaleDeleter::operator()(SwsContext *value) {
-	if (value) {
-		sws_freeContext(value);
-	}
-}
-
-void LogError(QLatin1String method) {
-	LOG(("Streaming Error: Error in %1.").arg(method));
-}
-
-void LogError(QLatin1String method, AvErrorWrap error) {
-	LOG(("Streaming Error: Error in %1 (code: %2, text: %3)."
-	).arg(method
-	).arg(error.code()
-	).arg(error.text()));
-}
-
-crl::time PtsToTime(int64_t pts, AVRational timeBase) {
-	return (pts == AV_NOPTS_VALUE || !timeBase.den)
-		? kTimeUnknown
-		: ((pts * 1000LL * timeBase.num) / timeBase.den);
-}
-
-crl::time PtsToTimeCeil(int64_t pts, AVRational timeBase) {
-	return (pts == AV_NOPTS_VALUE || !timeBase.den)
-		? kTimeUnknown
-		: ((pts * 1000LL * timeBase.num + timeBase.den - 1) / timeBase.den);
-}
-
-int64_t TimeToPts(crl::time time, AVRational timeBase) {
-	return (time == kTimeUnknown || !timeBase.num)
-		? AV_NOPTS_VALUE
-		: (time * timeBase.den) / (1000LL * timeBase.num);
-}
-
-crl::time PacketPosition(const Packet &packet, AVRational timeBase) {
-	const auto &native = packet.fields();
-	return PtsToTime(
-		(native.pts == AV_NOPTS_VALUE) ? native.dts : native.pts,
-		timeBase);
-}
-
-crl::time PacketDuration(const Packet &packet, AVRational timeBase) {
-	return PtsToTime(packet.fields().duration, timeBase);
-}
-
-int DurationByPacket(const Packet &packet, AVRational timeBase) {
-	const auto position = PacketPosition(packet, timeBase);
-	const auto duration = std::max(
-		PacketDuration(packet, timeBase),
-		crl::time(1));
-	const auto bad = [](crl::time time) {
-		return (time < 0) || (time > kDurationMax);
-	};
-	if (bad(position) || bad(duration) || bad(position + duration + 1)) {
-		LOG(("Streaming Error: Wrong duration by packet: %1 + %2"
-		).arg(position
-		).arg(duration));
-		return -1;
-	}
-	return int(position + duration + 1);
-}
-
 crl::time FramePosition(const Stream &stream) {
 	const auto pts = !stream.frame
 		? AV_NOPTS_VALUE
@@ -318,40 +27,13 @@ crl::time FramePosition(const Stream &stream) {
 		: (stream.frame->pts != AV_NOPTS_VALUE)
 		? stream.frame->pts
 		: stream.frame->pkt_dts;
-	return PtsToTime(pts, stream.timeBase);
+	return FFmpeg::PtsToTime(pts, stream.timeBase);
 }
 
-int ReadRotationFromMetadata(not_null<AVStream*> stream) {
-	const auto tag = av_dict_get(stream->metadata, "rotate", nullptr, 0);
-	if (tag && *tag->value) {
-		const auto string = QString::fromUtf8(tag->value);
-		auto ok = false;
-		const auto degrees = string.toInt(&ok);
-		if (ok && (degrees == 90 || degrees == 180 || degrees == 270)) {
-			return degrees;
-		}
-	}
-	return 0;
-}
-
-AVRational ValidateAspectRatio(AVRational aspect) {
-	return IsValidAspectRatio(aspect) ? aspect : kNormalAspect;
-}
-
-QSize CorrectByAspect(QSize size, AVRational aspect) {
-	Expects(IsValidAspectRatio(aspect));
-
-	return QSize(size.width() * aspect.num / aspect.den, size.height());
-}
-
-bool RotationSwapWidthHeight(int rotation) {
-	return (rotation == 90 || rotation == 270);
-}
-
-AvErrorWrap ProcessPacket(Stream &stream, Packet &&packet) {
+FFmpeg::AvErrorWrap ProcessPacket(Stream &stream, FFmpeg::Packet &&packet) {
 	Expects(stream.codec != nullptr);
 
-	auto error = AvErrorWrap();
+	auto error = FFmpeg::AvErrorWrap();
 
 	const auto native = &packet.fields();
 	const auto guard = gsl::finally([
@@ -361,7 +43,7 @@ AvErrorWrap ProcessPacket(Stream &stream, Packet &&packet) {
 	] {
 		native->size = size;
 		native->data = data;
-		packet = Packet();
+		packet = FFmpeg::Packet();
 	});
 
 	error = avcodec_send_packet(
@@ -374,17 +56,17 @@ AvErrorWrap ProcessPacket(Stream &stream, Packet &&packet) {
 			// results in a crash (read_access to nullptr) in swr_convert().
 			&& stream.codec->codec_id != AV_CODEC_ID_OPUS) {
 			if (++stream.invalidDataPackets < kSkipInvalidDataPackets) {
-				return AvErrorWrap(); // Try to skip a bad packet.
+				return FFmpeg::AvErrorWrap(); // Try to skip a bad packet.
 			}
 		}
 	}
 	return error;
 }
 
-AvErrorWrap ReadNextFrame(Stream &stream) {
+FFmpeg::AvErrorWrap ReadNextFrame(Stream &stream) {
 	Expects(stream.frame != nullptr);
 
-	auto error = AvErrorWrap();
+	auto error = FFmpeg::AvErrorWrap();
 
 	do {
 		error = avcodec_receive_frame(
@@ -427,28 +109,28 @@ QImage ConvertFrame(
 		).arg(frameSize.width()
 		).arg(frameSize.height()));
 		return QImage();
-	} else if (!FrameHasData(frame)) {
+	} else if (!FFmpeg::FrameHasData(frame)) {
 		LOG(("Streaming Error: Bad frame data."));
 		return QImage();
 	}
 	if (resize.isEmpty()) {
 		resize = frameSize;
-	} else if (RotationSwapWidthHeight(stream.rotation)) {
+	} else if (FFmpeg::RotationSwapWidthHeight(stream.rotation)) {
 		resize.transpose();
 	}
 
-	if (!GoodStorageForFrame(storage, resize)) {
-		storage = CreateFrameStorage(resize);
+	if (!FFmpeg::GoodStorageForFrame(storage, resize)) {
+		storage = FFmpeg::CreateFrameStorage(resize);
 	}
 	const auto format = AV_PIX_FMT_BGRA;
 	const auto hasDesiredFormat = (frame->format == format);
 	if (frameSize == storage.size() && hasDesiredFormat) {
-		static_assert(sizeof(uint32) == kPixelBytesSize);
+		static_assert(sizeof(uint32) == FFmpeg::kPixelBytesSize);
 		auto to = reinterpret_cast<uint32*>(storage.bits());
 		auto from = reinterpret_cast<const uint32*>(frame->data[0]);
-		const auto deltaTo = (storage.bytesPerLine() / kPixelBytesSize)
+		const auto deltaTo = (storage.bytesPerLine() / sizeof(uint32))
 			- storage.width();
-		const auto deltaFrom = (frame->linesize[0] / kPixelBytesSize)
+		const auto deltaFrom = (frame->linesize[0] / sizeof(uint32))
 			- frame->width;
 		for (const auto y : ranges::view::ints(0, frame->height)) {
 			for (const auto x : ranges::view::ints(0, frame->width)) {
@@ -488,7 +170,7 @@ QImage ConvertFrame(
 		}
 	}
 
-	ClearFrameMemory(frame);
+	FFmpeg::ClearFrameMemory(frame);
 	return storage;
 }
 
@@ -498,8 +180,8 @@ QImage PrepareByRequest(
 		QImage storage) {
 	Expects(!request.outer.isEmpty());
 
-	if (!GoodStorageForFrame(storage, request.outer)) {
-		storage = CreateFrameStorage(request.outer);
+	if (!FFmpeg::GoodStorageForFrame(storage, request.outer)) {
+		storage = FFmpeg::CreateFrameStorage(request.outer);
 	}
 	{
 		Painter p(&storage);
@@ -8,19 +8,11 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#pragma once

#include "media/streaming/media_streaming_common.h"

-extern "C" {
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libswscale/swscale.h>
-} // extern "C"
+#include "ffmpeg/ffmpeg_utility.h"

namespace Media {
namespace Streaming {

-constexpr auto kUniversalTimeBase = AVRational{ 1, AV_TIME_BASE };
-constexpr auto kNormalAspect = AVRational{ 1, 1 };
-
struct TimePoint {
	crl::time trackTime = kTimeUnknown;
	crl::time worldTime = kTimeUnknown;

@@ -33,143 +25,13 @@ struct TimePoint {
	}
};

-class AvErrorWrap {
-public:
-	AvErrorWrap(int code = 0) : _code(code) {
-	}
-
-	[[nodiscard]] bool failed() const {
-		return (_code < 0);
-	}
-	[[nodiscard]] explicit operator bool() const {
-		return failed();
-	}
-
-	[[nodiscard]] int code() const {
-		return _code;
-	}
-
-	[[nodiscard]] QString text() const {
-		char string[AV_ERROR_MAX_STRING_SIZE] = { 0 };
-		return QString::fromUtf8(av_make_error_string(
-			string,
-			sizeof(string),
-			_code));
-	}
-
-private:
-	int _code = 0;
-
-};
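The AvErrorWrap class is removed here because it now lives in the FFmpeg namespace of the new lib_ffmpeg. A hedged sketch of how it reads at a call site after the move, assuming an already-opened AVFormatContext *format (CheckStreams is a hypothetical name; the LogError overload taking an AvErrorWrap is among the declarations moved further below):

void CheckStreams(AVFormatContext *format) {
	const auto error = FFmpeg::AvErrorWrap(
		avformat_find_stream_info(format, nullptr));
	if (error) { // operator bool() is true exactly when code() < 0.
		FFmpeg::LogError(qstr("avformat_find_stream_info"), error);
	}
}
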
-class Packet {
-public:
-	Packet() {
-		setEmpty();
-	}
-	Packet(const AVPacket &data) {
-		bytes::copy(_data, bytes::object_as_span(&data));
-	}
-	Packet(Packet &&other) {
-		bytes::copy(_data, other._data);
-		if (!other.empty()) {
-			other.release();
-		}
-	}
-	Packet &operator=(Packet &&other) {
-		if (this != &other) {
-			av_packet_unref(&fields());
-			bytes::copy(_data, other._data);
-			if (!other.empty()) {
-				other.release();
-			}
-		}
-		return *this;
-	}
-	~Packet() {
-		av_packet_unref(&fields());
-	}
-
-	[[nodiscard]] AVPacket &fields() {
-		return *reinterpret_cast<AVPacket*>(_data);
-	}
-	[[nodiscard]] const AVPacket &fields() const {
-		return *reinterpret_cast<const AVPacket*>(_data);
-	}
-
-	[[nodiscard]] bool empty() const {
-		return !fields().data;
-	}
-	void release() {
-		setEmpty();
-	}
-
-private:
-	void setEmpty() {
-		auto &native = fields();
-		av_init_packet(&native);
-		native.data = nullptr;
-		native.size = 0;
-	}
-
-	alignas(alignof(AVPacket)) bytes::type _data[sizeof(AVPacket)];
-
-};
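The Packet wrapper (also moved into the FFmpeg namespace) stores the AVPacket struct by value in an aligned byte buffer and transfers ownership by copying those bytes: after a move only the destination remains responsible for av_packet_unref(), because the source is release()d back to the empty state. A short sketch of those semantics, where 'native' is an assumed, already-filled AVPacket:

auto first = FFmpeg::Packet(native); // Copies the AVPacket fields by value.
auto second = std::move(first);      // bytes::copy plus first.release(),
                                     // so only 'second' unrefs the buffers.
// first.empty() is now true; its destructor has nothing to free.
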
-struct IODeleter {
-	void operator()(AVIOContext *value);
-};
-using IOPointer = std::unique_ptr<AVIOContext, IODeleter>;
-[[nodiscard]] IOPointer MakeIOPointer(
-	void *opaque,
-	int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
-	int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
-	int64_t(*seek)(void *opaque, int64_t offset, int whence));
-
-struct FormatDeleter {
-	void operator()(AVFormatContext *value);
-};
-using FormatPointer = std::unique_ptr<AVFormatContext, FormatDeleter>;
-[[nodiscard]] FormatPointer MakeFormatPointer(
-	void *opaque,
-	int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
-	int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
-	int64_t(*seek)(void *opaque, int64_t offset, int whence));
-
-struct CodecDeleter {
-	void operator()(AVCodecContext *value);
-};
-using CodecPointer = std::unique_ptr<AVCodecContext, CodecDeleter>;
-[[nodiscard]] CodecPointer MakeCodecPointer(not_null<AVStream*> stream);
-
-struct FrameDeleter {
-	void operator()(AVFrame *value);
-};
-using FramePointer = std::unique_ptr<AVFrame, FrameDeleter>;
-[[nodiscard]] FramePointer MakeFramePointer();
-[[nodiscard]] bool FrameHasData(AVFrame *frame);
-void ClearFrameMemory(AVFrame *frame);
-
-struct SwscaleDeleter {
-	QSize resize;
-	QSize frameSize;
-	int frameFormat = int(AV_PIX_FMT_NONE);
-
-	void operator()(SwsContext *value);
-};
-using SwscalePointer = std::unique_ptr<SwsContext, SwscaleDeleter>;
-[[nodiscard]] SwscalePointer MakeSwscalePointer(
-	not_null<AVFrame*> frame,
-	QSize resize,
-	SwscalePointer *existing = nullptr);
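Each handle type above pairs std::unique_ptr with a deleter that calls the matching libav* free function, so every FFmpeg object is owned explicitly and freed exception-safely. A minimal self-contained sketch of the pattern for one handle (real libavutil calls; the moved header keeps the same shape for the IO, format, codec and swscale handles):

extern "C" {
#include <libavutil/frame.h>
} // extern "C"

#include <memory>

struct FrameDeleter {
	void operator()(AVFrame *value) {
		av_frame_free(&value); // Takes AVFrame** and nulls the pointer.
	}
};
using FramePointer = std::unique_ptr<AVFrame, FrameDeleter>;

FramePointer MakeFramePointer() {
	return FramePointer(av_frame_alloc());
}

SwscaleDeleter additionally remembers the parameters its context was created with, presumably so that MakeSwscalePointer() can reuse an existing context through its *existing argument when nothing changed.
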
struct Stream {
	int index = -1;
	crl::time duration = kTimeUnknown;
-	AVRational timeBase = kUniversalTimeBase;
-	CodecPointer codec;
-	FramePointer frame;
-	std::deque<Packet> queue;
+	AVRational timeBase = FFmpeg::kUniversalTimeBase;
+	FFmpeg::CodecPointer codec;
+	FFmpeg::FramePointer frame;
+	std::deque<FFmpeg::Packet> queue;
	int invalidDataPackets = 0;

	// Audio only.

@@ -177,39 +39,19 @@ struct Stream {

	// Video only.
	int rotation = 0;
-	AVRational aspect = kNormalAspect;
-	SwscalePointer swscale;
+	AVRational aspect = FFmpeg::kNormalAspect;
+	FFmpeg::SwscalePointer swscale;
};

-void LogError(QLatin1String method);
-void LogError(QLatin1String method, AvErrorWrap error);
-
-[[nodiscard]] crl::time PtsToTime(int64_t pts, AVRational timeBase);
-// Used for full duration conversion.
-[[nodiscard]] crl::time PtsToTimeCeil(int64_t pts, AVRational timeBase);
-[[nodiscard]] int64_t TimeToPts(crl::time time, AVRational timeBase);
-[[nodiscard]] crl::time PacketPosition(
-	const Packet &packet,
-	AVRational timeBase);
-[[nodiscard]] crl::time PacketDuration(
-	const Packet &packet,
-	AVRational timeBase);
-[[nodiscard]] int DurationByPacket(
-	const Packet &packet,
-	AVRational timeBase);
[[nodiscard]] crl::time FramePosition(const Stream &stream);
-[[nodiscard]] int ReadRotationFromMetadata(not_null<AVStream*> stream);
-[[nodiscard]] AVRational ValidateAspectRatio(AVRational aspect);
-[[nodiscard]] bool RotationSwapWidthHeight(int rotation);
-[[nodiscard]] QSize CorrectByAspect(QSize size, AVRational aspect);
-[[nodiscard]] AvErrorWrap ProcessPacket(Stream &stream, Packet &&packet);
-[[nodiscard]] AvErrorWrap ReadNextFrame(Stream &stream);
+[[nodiscard]] FFmpeg::AvErrorWrap ProcessPacket(
+	Stream &stream,
+	FFmpeg::Packet &&packet);
+[[nodiscard]] FFmpeg::AvErrorWrap ReadNextFrame(Stream &stream);
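The timestamp helpers deleted above scale a stream pts by its AVRational time base into crl::time milliseconds. As a worked example with assumed values: for timeBase = {1, 90000}, the common 90 kHz MPEG clock, a pts of 180000 covers 180000 * 1 / 90000 = 2 seconds, so PtsToTime() would yield 2000; TimeToPts() performs the inverse scaling, and PtsToTimeCeil() rounds upward so that full track durations are not truncated.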

[[nodiscard]] bool GoodForRequest(
	const QImage &image,
	const FrameRequest &request);
-[[nodiscard]] bool GoodStorageForFrame(const QImage &storage, QSize size);
-[[nodiscard]] QImage CreateFrameStorage(QSize size);
[[nodiscard]] QImage ConvertFrame(
	Stream &stream,
	AVFrame *frame,

@@ -34,7 +34,7 @@
		FnMut<void(const Information &)> ready,
		Fn<void(Error)> error);

-	void process(Packet &&packet);
+	void process(FFmpeg::Packet &&packet);

	[[nodiscard]] rpl::producer<> checkNextFrame() const;
	[[nodiscard]] rpl::producer<> waitingForData() const;

@@ -60,7 +60,7 @@

	void fail(Error error);
	[[nodiscard]] bool interrupted() const;
-	[[nodiscard]] bool tryReadFirstFrame(Packet &&packet);
+	[[nodiscard]] bool tryReadFirstFrame(FFmpeg::Packet &&packet);
	[[nodiscard]] bool fillStateFromFrame();
	[[nodiscard]] bool processFirstFrame();
	void queueReadFrames(crl::time delay = 0);

@@ -71,7 +71,7 @@
	void callReady();
	[[nodiscard]] bool loopAround();
	[[nodiscard]] crl::time computeDuration() const;
-	[[nodiscard]] int durationByPacket(const Packet &packet);
+	[[nodiscard]] int durationByPacket(const FFmpeg::Packet &packet);

	// Force frame position to be clamped to [0, duration] and monotonic.
	[[nodiscard]] crl::time currentFramePosition() const;

@@ -103,7 +103,7 @@
	base::ConcurrentTimer _readFramesTimer;

	// For initial frame skipping for an exact seek.
-	FramePointer _initialSkippingFrame;
+	FFmpeg::FramePointer _initialSkippingFrame;

};

@@ -142,7 +142,7 @@ rpl::producer<> VideoTrackObject::waitingForData() const {
		: _waitingForData.events();
}

-void VideoTrackObject::process(Packet &&packet) {
+void VideoTrackObject::process(FFmpeg::Packet &&packet) {
	if (interrupted()) {
		return;
	}

@@ -164,12 +164,12 @@ void VideoTrackObject::process(Packet &&packet) {
	}
}

-int VideoTrackObject::durationByPacket(const Packet &packet) {
+int VideoTrackObject::durationByPacket(const FFmpeg::Packet &packet) {
	// We've set this value on the first cycle.
	if (_loopingShift || _stream.duration != kDurationUnavailable) {
		return 0;
	}
-	const auto result = DurationByPacket(packet, _stream.timeBase);
+	const auto result = FFmpeg::DurationByPacket(packet, _stream.timeBase);
	if (result < 0) {
		fail(Error::InvalidData);
		return 0;

@@ -395,7 +395,7 @@ void VideoTrackObject::updateFrameRequest(const FrameRequest &request) {
	_request = request;
}

-bool VideoTrackObject::tryReadFirstFrame(Packet &&packet) {
+bool VideoTrackObject::tryReadFirstFrame(FFmpeg::Packet &&packet) {
	if (ProcessPacket(_stream, std::move(packet)).failed()) {
		return false;
	}

@@ -424,7 +424,7 @@ bool VideoTrackObject::tryReadFirstFrame(Packet &&packet) {
		// Try skipping frames until one is after the requested position.
		std::swap(_initialSkippingFrame, _stream.frame);
		if (!_stream.frame) {
-			_stream.frame = MakeFramePointer();
+			_stream.frame = FFmpeg::MakeFramePointer();
		}
	}
}

@@ -470,8 +470,10 @@ void VideoTrackObject::callReady() {
	const auto frame = _shared->frameForPaint();

	auto data = VideoInformation();
-	data.size = CorrectByAspect(frame->original.size(), _stream.aspect);
-	if (RotationSwapWidthHeight(_stream.rotation)) {
+	data.size = FFmpeg::CorrectByAspect(
+		frame->original.size(),
+		_stream.aspect);
+	if (FFmpeg::RotationSwapWidthHeight(_stream.rotation)) {
		data.size.transpose();
	}
	data.cover = frame->original;

@@ -756,7 +758,7 @@ crl::time VideoTrack::streamDuration() const {
	return _streamDuration;
}

-void VideoTrack::process(Packet &&packet) {
+void VideoTrack::process(FFmpeg::Packet &&packet) {
	_wrapped.with([
		packet = std::move(packet)
	](Implementation &unwrapped) mutable {

@@ -33,7 +33,7 @@
	[[nodiscard]] crl::time streamDuration() const;

	// Called from the same unspecified thread.
-	void process(Packet &&packet);
+	void process(FFmpeg::Packet &&packet);
	void waitForData();

	// Called from the main thread.

@@ -59,7 +59,7 @@
	friend class VideoTrackObject;

	struct Frame {
-		FramePointer decoded = MakeFramePointer();
+		FFmpeg::FramePointer decoded = FFmpeg::MakeFramePointer();
		QImage original;
		crl::time position = kTimeUnknown;
		crl::time displayed = kTimeUnknown;

@@ -82,6 +82,7 @@
      'lib_export.gyp:lib_export',
      'lib_storage.gyp:lib_storage',
      'lib_lottie.gyp:lib_lottie',
+      'lib_ffmpeg.gyp:lib_ffmpeg',
    ],

    'defines': [

@@ -0,0 +1,58 @@
# This file is part of Telegram Desktop,
# the official desktop application for the Telegram messaging service.
#
# For license and copyright information please follow this link:
# https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL

{
  'includes': [
    'common.gypi',
  ],
  'targets': [{
    'target_name': 'lib_ffmpeg',
    'type': 'static_library',
    'includes': [
      'common.gypi',
      'qt.gypi',
      'telegram_linux.gypi',
    ],
    'variables': {
      'src_loc': '../SourceFiles',
      'res_loc': '../Resources',
      'libs_loc': '../../../Libraries',
      'official_build_target%': '',
      'submodules_loc': '../ThirdParty',
    },
    'dependencies': [
      'crl.gyp:crl',
      'lib_base.gyp:lib_base',
    ],
    'export_dependent_settings': [
      'crl.gyp:crl',
      'lib_base.gyp:lib_base',
    ],
    'defines': [
    ],
    'include_dirs': [
      '<(src_loc)',
      '<(SHARED_INTERMEDIATE_DIR)',
      '<(libs_loc)/ffmpeg',
      '<(libs_loc)/range-v3/include',
      '<(submodules_loc)/GSL/include',
      '<(submodules_loc)/variant/include',
      '<(submodules_loc)/crl/src',
    ],
    'sources': [
      '<(src_loc)/ffmpeg/ffmpeg_utility.cpp',
      '<(src_loc)/ffmpeg/ffmpeg_utility.h',
    ],
    'conditions': [[ 'build_macold', {
      'xcode_settings': {
        'OTHER_CPLUSPLUSFLAGS': [ '-nostdinc++' ],
      },
      'include_dirs': [
        '/usr/local/macold/include/c++/v1',
      ],
    }]],
  }],
}
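With the static library defined above and wired into the dependent targets below, a consumer only needs the single include to reach the shared helpers. A small hypothetical sketch, using only functions declared by the moved header:

#include "ffmpeg/ffmpeg_utility.h"

// Allocate a frame through the shared helpers so that freeing is handled
// by the FrameDeleter, then check it the way the streaming code does.
auto frame = FFmpeg::MakeFramePointer();
if (frame && !FFmpeg::FrameHasData(frame.get())) {
	// A freshly allocated frame holds no data until a decoder fills it.
}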

@@ -30,12 +30,14 @@
      'lib_base.gyp:lib_base',
      'lib_rlottie.gyp:lib_rlottie',
      'lib_storage.gyp:lib_storage',
+      'lib_ffmpeg.gyp:lib_ffmpeg',
    ],
    'export_dependent_settings': [
      'crl.gyp:crl',
      'lib_base.gyp:lib_base',
      'lib_rlottie.gyp:lib_rlottie',
      'lib_storage.gyp:lib_storage',
+      'lib_ffmpeg.gyp:lib_ffmpeg',
    ],
    'defines': [
      'LOT_BUILD',

@@ -45,6 +47,7 @@
      '<(SHARED_INTERMEDIATE_DIR)',
      '<(libs_loc)/range-v3/include',
      '<(libs_loc)/zlib',
+      '<(libs_loc)/ffmpeg',
      '<(rlottie_loc)',
      '<(submodules_loc)/GSL/include',
      '<(submodules_loc)/variant/include',