Move some common code to lib_ffmpeg.

This commit is contained in:
John Preston 2019-06-26 17:04:38 +02:00
parent 10772f4ac5
commit 56e137b20f
26 changed files with 730 additions and 601 deletions

View File

@@ -0,0 +1,341 @@
/*
This file is part of Telegram Desktop,
the official desktop application for the Telegram messaging service.
For license and copyright information please follow this link:
https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#include "ffmpeg/ffmpeg_utility.h"
#include "base/algorithm.h"
#include "logs.h"
#include <QImage>
extern "C" {
#include <libavutil/opt.h>
} // extern "C"
namespace FFmpeg {
namespace {
constexpr auto kAlignImageBy = 16;
constexpr auto kImageFormat = QImage::Format_ARGB32_Premultiplied;
constexpr auto kMaxScaleByAspectRatio = 16;
constexpr auto kAvioBlockSize = 4096;
constexpr auto kTimeUnknown = std::numeric_limits<crl::time>::min();
constexpr auto kDurationMax = crl::time(std::numeric_limits<int>::max());
void AlignedImageBufferCleanupHandler(void* data) {
const auto buffer = static_cast<uchar*>(data);
delete[] buffer;
}
[[nodiscard]] bool IsValidAspectRatio(AVRational aspect) {
return (aspect.num > 0)
&& (aspect.den > 0)
&& (aspect.num <= aspect.den * kMaxScaleByAspectRatio)
&& (aspect.den <= aspect.num * kMaxScaleByAspectRatio);
}
[[nodiscard]] bool IsAlignedImage(const QImage &image) {
return !(reinterpret_cast<uintptr_t>(image.bits()) % kAlignImageBy)
&& !(image.bytesPerLine() % kAlignImageBy);
}
} // namespace
IOPointer MakeIOPointer(
void *opaque,
int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
int64_t(*seek)(void *opaque, int64_t offset, int whence)) {
auto buffer = reinterpret_cast<uchar*>(av_malloc(kAvioBlockSize));
if (!buffer) {
LogError(qstr("av_malloc"));
return {};
}
auto result = IOPointer(avio_alloc_context(
buffer,
kAvioBlockSize,
write ? 1 : 0,
opaque,
read,
write,
seek));
if (!result) {
av_freep(&buffer);
LogError(qstr("avio_alloc_context"));
return {};
}
return result;
}
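// Illustrative sketch, not part of the original file: a matching pair of
// read/seek callbacks over an in-memory buffer, of the shape expected by
// MakeIOPointer. The MemoryIO type and both function names are
// hypothetical, introduced here only as an example.
struct MemoryIO {
	const uchar *data = nullptr;
	int64_t size = 0;
	int64_t offset = 0;
};

int ReadMemory(void *opaque, uint8_t *buffer, int bufferSize) {
	const auto io = static_cast<MemoryIO*>(opaque);
	const auto left = io->size - io->offset;
	if (left <= 0) {
		return AVERROR_EOF;
	}
	const auto read = std::min(int64_t(bufferSize), left);
	std::copy(io->data + io->offset, io->data + io->offset + read, buffer);
	io->offset += read;
	return int(read);
}

int64_t SeekMemory(void *opaque, int64_t offset, int whence) {
	const auto io = static_cast<MemoryIO*>(opaque);
	if (whence == AVSEEK_SIZE) {
		return io->size; // avio queries the total size this way.
	}
	const auto base = (whence == SEEK_CUR)
		? io->offset
		: (whence == SEEK_END)
		? io->size
		: int64_t(0);
	const auto position = base + offset;
	if (position < 0 || position > io->size) {
		return -1;
	}
	return (io->offset = position);
}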
void IODeleter::operator()(AVIOContext *value) {
if (value) {
av_freep(&value->buffer);
avio_context_free(&value);
}
}
FormatPointer MakeFormatPointer(
void *opaque,
int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
int64_t(*seek)(void *opaque, int64_t offset, int whence)) {
auto io = MakeIOPointer(opaque, read, write, seek);
if (!io) {
return {};
}
auto result = avformat_alloc_context();
if (!result) {
LogError(qstr("avformat_alloc_context"));
return {};
}
result->pb = io.get();
auto options = (AVDictionary*)nullptr;
const auto guard = gsl::finally([&] { av_dict_free(&options); });
av_dict_set(&options, "usetoc", "1", 0);
const auto error = AvErrorWrap(avformat_open_input(
&result,
nullptr,
nullptr,
&options));
if (error) {
// avformat_open_input frees 'result' in case an error happens.
LogError(qstr("avformat_open_input"), error);
return {};
}
result->flags |= AVFMT_FLAG_FAST_SEEK;
// Now FormatPointer will own and free the IO context.
io.release();
return FormatPointer(result);
}
void FormatDeleter::operator()(AVFormatContext *value) {
if (value) {
const auto deleter = IOPointer(value->pb);
avformat_close_input(&value);
}
}
CodecPointer MakeCodecPointer(not_null<AVStream*> stream) {
auto error = AvErrorWrap();
auto result = CodecPointer(avcodec_alloc_context3(nullptr));
const auto context = result.get();
if (!context) {
LogError(qstr("avcodec_alloc_context3"));
return {};
}
error = avcodec_parameters_to_context(context, stream->codecpar);
if (error) {
LogError(qstr("avcodec_parameters_to_context"), error);
return {};
}
av_codec_set_pkt_timebase(context, stream->time_base);
av_opt_set_int(context, "refcounted_frames", 1, 0);
const auto codec = avcodec_find_decoder(context->codec_id);
if (!codec) {
LogError(qstr("avcodec_find_decoder"), context->codec_id);
return {};
} else if ((error = avcodec_open2(context, codec, nullptr))) {
LogError(qstr("avcodec_open2"), error);
return {};
}
return result;
}
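// Illustrative sketch, not part of the original file: the typical way the
// helpers above chain together, from a format context to an opened decoder
// for its best video stream. The function name is hypothetical.
CodecPointer OpenBestVideoDecoder(not_null<AVFormatContext*> format) {
	const auto index = av_find_best_stream(
		format,
		AVMEDIA_TYPE_VIDEO,
		-1, // wanted_stream_nb: any
		-1, // related_stream: none
		nullptr, // decoder_ret
		0); // flags
	if (index < 0) {
		LogError(qstr("av_find_best_stream"), AvErrorWrap(index));
		return {};
	}
	return MakeCodecPointer(format->streams[index]);
}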
void CodecDeleter::operator()(AVCodecContext *value) {
if (value) {
avcodec_free_context(&value);
}
}
FramePointer MakeFramePointer() {
return FramePointer(av_frame_alloc());
}
bool FrameHasData(AVFrame *frame) {
return (frame && frame->data[0] != nullptr);
}
void ClearFrameMemory(AVFrame *frame) {
if (FrameHasData(frame)) {
av_frame_unref(frame);
}
}
void FrameDeleter::operator()(AVFrame *value) {
av_frame_free(&value);
}
SwscalePointer MakeSwscalePointer(
not_null<AVFrame*> frame,
QSize resize,
SwscalePointer *existing) {
// We have to use custom caching for SwsContext, because
// sws_getCachedContext checks passed flags with existing context flags,
// and re-creates context if they're different, but in the process of
// context creation the passed flags are modified before being written
// to the resulting context, so the caching doesn't work.
if (existing && (*existing) != nullptr) {
const auto &deleter = existing->get_deleter();
if (deleter.resize == resize
&& deleter.frameSize == QSize(frame->width, frame->height)
&& deleter.frameFormat == frame->format) {
return std::move(*existing);
}
}
if (frame->format <= AV_PIX_FMT_NONE || frame->format >= AV_PIX_FMT_NB) {
LogError(qstr("frame->format"));
return SwscalePointer();
}
const auto result = sws_getCachedContext(
existing ? existing->release() : nullptr,
frame->width,
frame->height,
AVPixelFormat(frame->format),
resize.width(),
resize.height(),
AV_PIX_FMT_BGRA,
0,
nullptr,
nullptr,
nullptr);
if (!result) {
LogError(qstr("sws_getCachedContext"));
}
return SwscalePointer(
result,
{ resize, QSize{ frame->width, frame->height }, frame->format });
}
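// Illustrative sketch, not part of the original file: applying a context
// from MakeSwscalePointer to convert one frame into BGRA storage. The
// function name is hypothetical; 'storage' is assumed to come from
// CreateFrameStorage below.
void ScaleToStorage(
		not_null<AVFrame*> frame,
		QImage &storage,
		SwscalePointer &scale) {
	scale = MakeSwscalePointer(frame, storage.size(), &scale);
	if (!scale) {
		return;
	}
	// sws_scale() expects arrays of plane pointers / line sizes.
	uint8_t *dstData[AV_NUM_DATA_POINTERS] = { storage.bits() };
	int dstLinesize[AV_NUM_DATA_POINTERS] = { int(storage.bytesPerLine()) };
	sws_scale(
		scale.get(),
		frame->data,
		frame->linesize,
		0, // srcSliceY
		frame->height,
		dstData,
		dstLinesize);
}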
void SwscaleDeleter::operator()(SwsContext *value) {
if (value) {
sws_freeContext(value);
}
}
void LogError(QLatin1String method) {
LOG(("Streaming Error: Error in %1.").arg(method));
}
void LogError(QLatin1String method, AvErrorWrap error) {
LOG(("Streaming Error: Error in %1 (code: %2, text: %3)."
).arg(method
).arg(error.code()
).arg(error.text()));
}
crl::time PtsToTime(int64_t pts, AVRational timeBase) {
return (pts == AV_NOPTS_VALUE || !timeBase.den)
? kTimeUnknown
: ((pts * 1000LL * timeBase.num) / timeBase.den);
}
crl::time PtsToTimeCeil(int64_t pts, AVRational timeBase) {
return (pts == AV_NOPTS_VALUE || !timeBase.den)
? kTimeUnknown
: ((pts * 1000LL * timeBase.num + timeBase.den - 1) / timeBase.den);
}
int64_t TimeToPts(crl::time time, AVRational timeBase) {
return (time == kTimeUnknown || !timeBase.num)
? AV_NOPTS_VALUE
: (time * timeBase.den) / (1000LL * timeBase.num);
}
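// Compile-time sanity checks of the conversions above (added here only as
// an illustration), assuming the common 90 kHz time base {1, 90000}:
// 180000 ticks are exactly 2000 ms, and the ceil variant rounds a single
// extra tick up to the next millisecond.
static_assert((180000LL * 1000LL * 1) / 90000 == 2000);
static_assert((180001LL * 1000LL * 1 + 90000 - 1) / 90000 == 2001);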
crl::time PacketPosition(const Packet &packet, AVRational timeBase) {
const auto &native = packet.fields();
return PtsToTime(
(native.pts == AV_NOPTS_VALUE) ? native.dts : native.pts,
timeBase);
}
crl::time PacketDuration(const Packet &packet, AVRational timeBase) {
return PtsToTime(packet.fields().duration, timeBase);
}
int DurationByPacket(const Packet &packet, AVRational timeBase) {
const auto position = PacketPosition(packet, timeBase);
const auto duration = std::max(
PacketDuration(packet, timeBase),
crl::time(1));
const auto bad = [](crl::time time) {
return (time < 0) || (time > kDurationMax);
};
if (bad(position) || bad(duration) || bad(position + duration + 1)) {
LOG(("Streaming Error: Wrong duration by packet: %1 + %2"
).arg(position
).arg(duration));
return -1;
}
return int(position + duration + 1);
}
int ReadRotationFromMetadata(not_null<AVStream*> stream) {
const auto tag = av_dict_get(stream->metadata, "rotate", nullptr, 0);
if (tag && *tag->value) {
const auto string = QString::fromUtf8(tag->value);
auto ok = false;
const auto degrees = string.toInt(&ok);
if (ok && (degrees == 90 || degrees == 180 || degrees == 270)) {
return degrees;
}
}
return 0;
}
AVRational ValidateAspectRatio(AVRational aspect) {
return IsValidAspectRatio(aspect) ? aspect : kNormalAspect;
}
QSize CorrectByAspect(QSize size, AVRational aspect) {
Expects(IsValidAspectRatio(aspect));
return QSize(size.width() * aspect.num / aspect.den, size.height());
}
bool RotationSwapWidthHeight(int rotation) {
return (rotation == 90 || rotation == 270);
}
bool GoodStorageForFrame(const QImage &storage, QSize size) {
return !storage.isNull()
&& (storage.format() == kImageFormat)
&& (storage.size() == size)
&& storage.isDetached()
&& IsAlignedImage(storage);
}
// Create a QImage of desired size where all the data is properly aligned.
QImage CreateFrameStorage(QSize size) {
const auto width = size.width();
const auto height = size.height();
const auto widthAlign = kAlignImageBy / kPixelBytesSize;
const auto neededWidth = width + ((width % widthAlign)
? (widthAlign - (width % widthAlign))
: 0);
const auto perLine = neededWidth * kPixelBytesSize;
const auto buffer = new uchar[perLine * height + kAlignImageBy];
const auto cleanupData = static_cast<void *>(buffer);
const auto address = reinterpret_cast<uintptr_t>(buffer);
const auto alignedBuffer = buffer + ((address % kAlignImageBy)
? (kAlignImageBy - (address % kAlignImageBy))
: 0);
return QImage(
alignedBuffer,
width,
height,
perLine,
kImageFormat,
AlignedImageBufferCleanupHandler,
cleanupData);
}
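// Compile-time checks of the padding math above (added here only as an
// illustration): with 4 bytes per pixel and 16-byte alignment, widths are
// padded to a multiple of 4 pixels, so a width of 90 pixels takes
// 92 * 4 = 368 bytes per line.
static_assert(kAlignImageBy / kPixelBytesSize == 4);
static_assert((90 + ((16 / 4) - (90 % (16 / 4)))) * 4 == 368);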
} // namespace FFmpeg

View File

@@ -0,0 +1,185 @@
/*
This file is part of Telegram Desktop,
the official desktop application for the Telegram messaging service.
For license and copyright information please follow this link:
https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#pragma once
#include "base/bytes.h"
#include <crl/crl_time.h>
#include <QSize>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
} // extern "C"
class QImage;
namespace FFmpeg {
inline constexpr auto kPixelBytesSize = 4;
constexpr auto kUniversalTimeBase = AVRational{ 1, AV_TIME_BASE };
constexpr auto kNormalAspect = AVRational{ 1, 1 };
class AvErrorWrap {
public:
AvErrorWrap(int code = 0) : _code(code) {
}
[[nodiscard]] bool failed() const {
return (_code < 0);
}
[[nodiscard]] explicit operator bool() const {
return failed();
}
[[nodiscard]] int code() const {
return _code;
}
[[nodiscard]] QString text() const {
char string[AV_ERROR_MAX_STRING_SIZE] = { 0 };
return QString::fromUtf8(av_make_error_string(
string,
sizeof(string),
_code));
}
private:
int _code = 0;
};
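// Example (illustration only, not from the original header): AvErrorWrap
// converts to true exactly when the wrapped code is negative, enabling the
// 'if (error) { LogError(...); }' pattern used throughout this commit:
//
//	const auto error = AvErrorWrap(
//		avformat_find_stream_info(format, nullptr));
//	if (error) {
//		LogError(qstr("avformat_find_stream_info"), error);
//	}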
class Packet {
public:
Packet() {
setEmpty();
}
Packet(const AVPacket &data) {
bytes::copy(_data, bytes::object_as_span(&data));
}
Packet(Packet &&other) {
bytes::copy(_data, other._data);
if (!other.empty()) {
other.release();
}
}
Packet &operator=(Packet &&other) {
if (this != &other) {
av_packet_unref(&fields());
bytes::copy(_data, other._data);
if (!other.empty()) {
other.release();
}
}
return *this;
}
~Packet() {
av_packet_unref(&fields());
}
[[nodiscard]] AVPacket &fields() {
return *reinterpret_cast<AVPacket*>(_data);
}
[[nodiscard]] const AVPacket &fields() const {
return *reinterpret_cast<const AVPacket*>(_data);
}
[[nodiscard]] bool empty() const {
return !fields().data;
}
void release() {
setEmpty();
}
private:
void setEmpty() {
auto &native = fields();
av_init_packet(&native);
native.data = nullptr;
native.size = 0;
}
alignas(alignof(AVPacket)) bytes::type _data[sizeof(AVPacket)];
};
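// Example (illustration only, not from the original header): Packet moves
// transfer ownership of the wrapped AVPacket, so the moved-from value
// becomes empty() and the same data is never unreffed twice:
//
//	auto first = Packet(native); // copies the AVPacket fields
//	auto second = std::move(first); // first.empty() is now true
//	// ~Packet() of 'second' calls av_packet_unref() exactly once.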
struct IODeleter {
void operator()(AVIOContext *value);
};
using IOPointer = std::unique_ptr<AVIOContext, IODeleter>;
[[nodiscard]] IOPointer MakeIOPointer(
void *opaque,
int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
int64_t(*seek)(void *opaque, int64_t offset, int whence));
struct FormatDeleter {
void operator()(AVFormatContext *value);
};
using FormatPointer = std::unique_ptr<AVFormatContext, FormatDeleter>;
[[nodiscard]] FormatPointer MakeFormatPointer(
void *opaque,
int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
int64_t(*seek)(void *opaque, int64_t offset, int whence));
struct CodecDeleter {
void operator()(AVCodecContext *value);
};
using CodecPointer = std::unique_ptr<AVCodecContext, CodecDeleter>;
[[nodiscard]] CodecPointer MakeCodecPointer(not_null<AVStream*> stream);
struct FrameDeleter {
void operator()(AVFrame *value);
};
using FramePointer = std::unique_ptr<AVFrame, FrameDeleter>;
[[nodiscard]] FramePointer MakeFramePointer();
[[nodiscard]] bool FrameHasData(AVFrame *frame);
void ClearFrameMemory(AVFrame *frame);
struct SwscaleDeleter {
QSize resize;
QSize frameSize;
int frameFormat = int(AV_PIX_FMT_NONE);
void operator()(SwsContext *value);
};
using SwscalePointer = std::unique_ptr<SwsContext, SwscaleDeleter>;
[[nodiscard]] SwscalePointer MakeSwscalePointer(
not_null<AVFrame*> frame,
QSize resize,
SwscalePointer *existing = nullptr);
void LogError(QLatin1String method);
void LogError(QLatin1String method, FFmpeg::AvErrorWrap error);
[[nodiscard]] crl::time PtsToTime(int64_t pts, AVRational timeBase);
// Used for full duration conversion.
[[nodiscard]] crl::time PtsToTimeCeil(int64_t pts, AVRational timeBase);
[[nodiscard]] int64_t TimeToPts(crl::time time, AVRational timeBase);
[[nodiscard]] crl::time PacketPosition(
const FFmpeg::Packet &packet,
AVRational timeBase);
[[nodiscard]] crl::time PacketDuration(
const FFmpeg::Packet &packet,
AVRational timeBase);
[[nodiscard]] int DurationByPacket(
const FFmpeg::Packet &packet,
AVRational timeBase);
[[nodiscard]] int ReadRotationFromMetadata(not_null<AVStream*> stream);
[[nodiscard]] AVRational ValidateAspectRatio(AVRational aspect);
[[nodiscard]] bool RotationSwapWidthHeight(int rotation);
[[nodiscard]] QSize CorrectByAspect(QSize size, AVRational aspect);
[[nodiscard]] bool GoodStorageForFrame(const QImage &storage, QSize size);
[[nodiscard]] QImage CreateFrameStorage(QSize size);
} // namespace FFmpeg
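
Taken together, the extracted helpers cover the whole demux-and-decode path. A minimal consumer-side sketch (illustrative only: the Read/Seek callbacks, the single-stream assumption and all error handling are simplified):

	auto format = FFmpeg::MakeFormatPointer(opaque, Read, nullptr, Seek);
	const auto stream = format->streams[0]; // assume one decodable stream
	auto codec = FFmpeg::MakeCodecPointer(stream);
	auto frame = FFmpeg::MakeFramePointer();
	auto packet = FFmpeg::Packet();
	while (av_read_frame(format.get(), &packet.fields()) >= 0) {
		if (packet.fields().stream_index == stream->index) {
			avcodec_send_packet(codec.get(), &packet.fields());
			while (!avcodec_receive_frame(codec.get(), frame.get())) {
				// ...use frame->data, e.g. via MakeSwscalePointer...
			}
		}
		packet = FFmpeg::Packet(); // unrefs and resets for the next read
	}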

View File

@@ -8,6 +8,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "lottie/lottie_cache.h"
#include "lottie/lottie_frame_renderer.h"
+#include "ffmpeg/ffmpeg_utility.h"
#include "base/bytes.h"
#include <QDataStream>
@@ -30,6 +31,9 @@ bool UncompressToRaw(AlignedStorage &to, bytes::const_span from) {
}
void Decode(QImage &to, const AlignedStorage &from, const QSize &fromSize) {
+if (!FFmpeg::GoodStorageForFrame(to, fromSize)) {
+to = FFmpeg::CreateFrameStorage(fromSize);
+}
auto fromBytes = static_cast<const char*>(from.aligned());
auto toBytes = to.bits();
const auto fromPerLine = from.bytesPerLine();

View File

@@ -191,13 +191,13 @@ AbstractAudioFFMpegLoader::AbstractAudioFFMpegLoader(
const QByteArray &data,
bytes::vector &&buffer)
: AbstractFFMpegLoader(file, data, std::move(buffer))
-, _frame(Streaming::MakeFramePointer()) {
+, _frame(FFmpeg::MakeFramePointer()) {
}
bool AbstractAudioFFMpegLoader::initUsingContext(
-not_null<AVCodecContext *> context,
+not_null<AVCodecContext*> context,
int64 initialCount,
int initialFrequency) {
const auto layout = ComputeChannelLayout(
context->channel_layout,
context->channels);
@@ -264,7 +264,7 @@ bool AbstractAudioFFMpegLoader::initUsingContext(
}
auto AbstractAudioFFMpegLoader::replaceFrameAndRead(
-Streaming::FramePointer frame,
+FFmpeg::FramePointer frame,
QByteArray &result,
int64 &samplesAdded)
-> ReadResult {

View File

@@ -105,7 +105,7 @@ protected:
// Streaming player provides the first frame to the ChildFFMpegLoader
// so we replace our allocated frame with the one provided.
ReadResult replaceFrameAndRead(
-Streaming::FramePointer frame,
+FFmpeg::FramePointer frame,
QByteArray &result,
int64 &samplesAdded);
@@ -126,7 +126,7 @@ private:
uint8_t **data,
int count) const;
-Streaming::FramePointer _frame;
+FFmpeg::FramePointer _frame;
int _outputFormat = AL_FORMAT_STEREO16;
int _outputChannels = 2;
int _outputSampleSize = 2 * sizeof(uint16);

View File

@@ -37,7 +37,7 @@ public:
virtual ReadResult readMore(
QByteArray &samples,
int64 &samplesCount) = 0;
-virtual void enqueuePackets(std::deque<Streaming::Packet> &&packets) {
+virtual void enqueuePackets(std::deque<FFmpeg::Packet> &&packets) {
Unexpected("enqueuePackets() call on not ChildFFMpegLoader.");
}
virtual void setForceToBuffer(bool force) {

View File

@@ -47,7 +47,7 @@ private:
QMutex _fromExternalMutex;
base::flat_map<
AudioMsgId,
-std::deque<Streaming::Packet>> _fromExternalQueues;
+std::deque<FFmpeg::Packet>> _fromExternalQueues;
base::flat_set<AudioMsgId> _fromExternalForceToBuffer;
SingleQueuedInvokation _fromExternalNotify;

View File

@@ -109,7 +109,7 @@ AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(
}
void ChildFFMpegLoader::enqueuePackets(
-std::deque<Streaming::Packet> &&packets) {
+std::deque<FFmpeg::Packet> &&packets) {
if (_queue.empty()) {
_queue = std::move(packets);
} else {

View File

@@ -13,8 +13,8 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
namespace Media {
struct ExternalSoundData {
-Streaming::CodecPointer codec;
-Streaming::FramePointer frame;
+FFmpeg::CodecPointer codec;
+FFmpeg::FramePointer frame;
int32 frequency = Media::Player::kDefaultFrequency;
int64 length = 0;
float64 speed = 1.; // 0.5 <= speed <= 2.
@@ -22,7 +22,7 @@ struct ExternalSoundData {
struct ExternalSoundPart {
AudioMsgId audio;
-Streaming::Packet packet;
+FFmpeg::Packet packet;
};
class ChildFFMpegLoader : public AbstractAudioFFMpegLoader {
@@ -36,7 +36,7 @@ public:
}
ReadResult readMore(QByteArray &result, int64 &samplesAdded) override;
-void enqueuePackets(std::deque<Streaming::Packet> &&packets) override;
+void enqueuePackets(std::deque<FFmpeg::Packet> &&packets) override;
void setForceToBuffer(bool force) override;
bool forceToBuffer() const override;
@@ -55,7 +55,7 @@ private:
int64 &samplesAdded);
std::unique_ptr<ExternalSoundData> _parentData;
-std::deque<Streaming::Packet> _queue;
+std::deque<FFmpeg::Packet> _queue;
bool _forceToBuffer = false;
bool _eofReached = false;

View File

@@ -49,7 +49,7 @@ FFMpegReaderImplementation::FFMpegReaderImplementation(
QByteArray *data,
const AudioMsgId &audio)
: ReaderImplementation(location, data)
-, _frame(Streaming::MakeFramePointer())
+, _frame(FFmpeg::MakeFramePointer())
, _audioMsgId(audio) {
}
@@ -119,7 +119,7 @@ ReaderImplementation::ReadResult FFMpegReaderImplementation::readNextFrame() {
] {
native->size = size;
native->data = data;
-packet = Streaming::Packet();
+packet = FFmpeg::Packet();
});
res = avcodec_send_packet(_codecContext, native);
@@ -275,7 +275,7 @@ bool FFMpegReaderImplementation::renderFrame(QImage &to, bool &hasAlpha, const Q
}
}
-Streaming::ClearFrameMemory(_frame.get());
+FFmpeg::ClearFrameMemory(_frame.get());
return true;
}
@@ -385,7 +385,7 @@ bool FFMpegReaderImplementation::start(Mode mode, crl::time &positionMs) {
_audioStreamId = -1;
} else {
soundData = std::make_unique<ExternalSoundData>();
-soundData->codec = Streaming::CodecPointer(audioContext);
+soundData->codec = FFmpeg::CodecPointer(audioContext);
soundData->frequency = _fmtContext->streams[_audioStreamId]->codecpar->sample_rate;
if (_fmtContext->streams[_audioStreamId]->duration == AV_NOPTS_VALUE) {
soundData->length = (_fmtContext->duration * soundData->frequency) / AV_TIME_BASE;
@@ -405,7 +405,7 @@ bool FFMpegReaderImplementation::start(Mode mode, crl::time &positionMs) {
}
}
-Streaming::Packet packet;
+FFmpeg::Packet packet;
auto readResult = readPacket(packet);
if (readResult == PacketResult::Ok && positionMs > 0) {
positionMs = countPacketMs(packet);
@@ -436,7 +436,7 @@ bool FFMpegReaderImplementation::inspectAt(crl::time &positionMs) {
_packetQueue.clear();
-Streaming::Packet packet;
+FFmpeg::Packet packet;
auto readResult = readPacket(packet);
if (readResult == PacketResult::Ok && positionMs > 0) {
positionMs = countPacketMs(packet);
@@ -481,7 +481,7 @@ FFMpegReaderImplementation::~FFMpegReaderImplementation() {
if (_fmtContext) avformat_free_context(_fmtContext);
}
-FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket(Streaming::Packet &packet) {
+FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket(FFmpeg::Packet &packet) {
int res = 0;
if ((res = av_read_frame(_fmtContext, &packet.fields())) < 0) {
if (res == AVERROR_EOF) {
@@ -489,7 +489,7 @@ FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket(
// queue terminating packet to audio player
Player::mixer()->feedFromExternal({
_audioMsgId,
-Streaming::Packet()
+FFmpeg::Packet()
});
}
return PacketResult::EndOfFile;
@@ -501,7 +501,7 @@ FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket(
return PacketResult::Ok;
}
-void FFMpegReaderImplementation::processPacket(Streaming::Packet &&packet) {
+void FFMpegReaderImplementation::processPacket(FFmpeg::Packet &&packet) {
const auto &native = packet.fields();
auto videoPacket = (native.stream_index == _streamId);
auto audioPacket = (_audioStreamId >= 0 && native.stream_index == _audioStreamId);
@@ -523,7 +523,7 @@ void FFMpegReaderImplementation::processPacket(Streaming::Packet &&packet) {
}
crl::time FFMpegReaderImplementation::countPacketMs(
-const Streaming::Packet &packet) const {
+const FFmpeg::Packet &packet) const {
const auto &native = packet.fields();
int64 packetPts = (native.pts == AV_NOPTS_VALUE) ? native.dts : native.pts;
crl::time packetMs = (packetPts * 1000LL * _fmtContext->streams[native.stream_index]->time_base.num) / _fmtContext->streams[native.stream_index]->time_base.den;
@@ -531,7 +531,7 @@ crl::time FFMpegReaderImplementation::countPacketMs(
}
FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readAndProcessPacket() {
-Streaming::Packet packet;
+FFmpeg::Packet packet;
auto result = readPacket(packet);
if (result == PacketResult::Ok) {
processPacket(std::move(packet));

View File

@@ -55,9 +55,9 @@ private:
EndOfFile,
Error,
};
-PacketResult readPacket(Streaming::Packet &packet);
-void processPacket(Streaming::Packet &&packet);
-crl::time countPacketMs(const Streaming::Packet &packet) const;
+PacketResult readPacket(FFmpeg::Packet &packet);
+void processPacket(FFmpeg::Packet &&packet);
+crl::time countPacketMs(const FFmpeg::Packet &packet) const;
PacketResult readAndProcessPacket();
enum class Rotation {
@@ -83,7 +83,7 @@ private:
AVFormatContext *_fmtContext = nullptr;
AVCodecContext *_codecContext = nullptr;
int _streamId = 0;
-Streaming::FramePointer _frame;
+FFmpeg::FramePointer _frame;
bool _opened = false;
bool _hadFrame = false;
bool _frameRead = false;
@@ -95,7 +95,7 @@ private:
crl::time _lastReadVideoMs = 0;
crl::time _lastReadAudioMs = 0;
-std::deque<Streaming::Packet> _packetQueue;
+std::deque<FFmpeg::Packet> _packetQueue;
int _width = 0;
int _height = 0;

View File

@@ -47,7 +47,7 @@ crl::time AudioTrack::streamDuration() const {
return _stream.duration;
}
-void AudioTrack::process(Packet &&packet) {
+void AudioTrack::process(FFmpeg::Packet &&packet) {
if (packet.empty()) {
_readTillEnd = true;
}
@@ -68,7 +68,7 @@ bool AudioTrack::initialized() const {
return !_ready;
}
-bool AudioTrack::tryReadFirstFrame(Packet &&packet) {
+bool AudioTrack::tryReadFirstFrame(FFmpeg::Packet &&packet) {
if (ProcessPacket(_stream, std::move(packet)).failed()) {
return false;
}
@@ -97,13 +97,13 @@ bool AudioTrack::tryReadFirstFrame(Packet &&packet) {
// Try skipping frames until one is after the requested position.
std::swap(_initialSkippingFrame, _stream.frame);
if (!_stream.frame) {
-_stream.frame = MakeFramePointer();
+_stream.frame = FFmpeg::MakeFramePointer();
}
}
}
bool AudioTrack::processFirstFrame() {
-if (!FrameHasData(_stream.frame.get())) {
+if (!FFmpeg::FrameHasData(_stream.frame.get())) {
return false;
}
mixerInit();
@@ -148,7 +148,7 @@ void AudioTrack::callReady() {
base::take(_ready)({ VideoInformation(), data });
}
-void AudioTrack::mixerEnqueue(Packet &&packet) {
+void AudioTrack::mixerEnqueue(FFmpeg::Packet &&packet) {
Media::Player::mixer()->feedFromExternal({
_audioId,
std::move(packet)

View File

@@ -43,7 +43,7 @@ public:
[[nodiscard]] crl::time streamDuration() const;
// Called from the same unspecified thread.
-void process(Packet &&packet);
+void process(FFmpeg::Packet &&packet);
void waitForData();
// Called from the main thread.
@@ -52,11 +52,11 @@ public:
private:
// Called from the same unspecified thread.
[[nodiscard]] bool initialized() const;
-[[nodiscard]] bool tryReadFirstFrame(Packet &&packet);
+[[nodiscard]] bool tryReadFirstFrame(FFmpeg::Packet &&packet);
[[nodiscard]] bool fillStateFromFrame();
[[nodiscard]] bool processFirstFrame();
void mixerInit();
-void mixerEnqueue(Packet &&packet);
+void mixerEnqueue(FFmpeg::Packet &&packet);
void mixerForceToBuffer();
void callReady();
@@ -83,7 +83,7 @@ private:
rpl::variable<crl::time> _playPosition;
// For initial frame skipping for an exact seek.
-FramePointer _initialSkippingFrame;
+FFmpeg::FramePointer _initialSkippingFrame;
};

View File

@@ -9,9 +9,9 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
namespace Media {
-constexpr auto kTimeUnknown = std::numeric_limits<crl::time>::min();
-constexpr auto kDurationMax = crl::time(std::numeric_limits<int>::max());
-constexpr auto kDurationUnavailable = std::numeric_limits<crl::time>::max();
+inline constexpr auto kTimeUnknown = std::numeric_limits<crl::time>::min();
+inline constexpr auto kDurationMax = crl::time(std::numeric_limits<int>::max());
+inline constexpr auto kDurationUnavailable = std::numeric_limits<crl::time>::max();
namespace Audio {
bool SupportsSpeedControl();
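
Making these constants inline (C++17) is more than a spelling change: an inline variable has one definition shared by every translation unit that includes the header, so all users refer to the same object instead of a per-file copy. A minimal illustration (hypothetical header):

	// header included from a.cpp and b.cpp:
	inline constexpr auto kTimeUnknown = std::numeric_limits<crl::time>::min();
	// &kTimeUnknown now compares equal across a.cpp and b.cpp; with plain
	// constexpr each translation unit would get its own internal-linkage copy.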

View File

@@ -9,6 +9,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "media/streaming/media_streaming_loader.h"
#include "media/streaming/media_streaming_file_delegate.h"
+#include "ffmpeg/ffmpeg_utility.h"
namespace Media {
namespace Streaming {
@@ -85,26 +86,30 @@ int64_t File::Context::seek(int64_t offset, int whence) {
void File::Context::logError(QLatin1String method) {
if (!unroll()) {
-LogError(method);
+FFmpeg::LogError(method);
}
}
-void File::Context::logError(QLatin1String method, AvErrorWrap error) {
+void File::Context::logError(
+QLatin1String method,
+FFmpeg::AvErrorWrap error) {
if (!unroll()) {
-LogError(method, error);
+FFmpeg::LogError(method, error);
}
}
void File::Context::logFatal(QLatin1String method) {
if (!unroll()) {
-LogError(method);
+FFmpeg::LogError(method);
fail(_format ? Error::InvalidData : Error::OpenFailed);
}
}
-void File::Context::logFatal(QLatin1String method, AvErrorWrap error) {
+void File::Context::logFatal(
+QLatin1String method,
+FFmpeg::AvErrorWrap error) {
if (!unroll()) {
-LogError(method, error);
+FFmpeg::LogError(method, error);
fail(_format ? Error::InvalidData : Error::OpenFailed);
}
}
@@ -126,8 +131,8 @@ Stream File::Context::initStream(
const auto info = format->streams[index];
if (type == AVMEDIA_TYPE_VIDEO) {
-result.rotation = ReadRotationFromMetadata(info);
-result.aspect = ValidateAspectRatio(info->sample_aspect_ratio);
+result.rotation = FFmpeg::ReadRotationFromMetadata(info);
+result.aspect = FFmpeg::ValidateAspectRatio(info->sample_aspect_ratio);
} else if (type == AVMEDIA_TYPE_AUDIO) {
result.frequency = info->codecpar->sample_rate;
if (!result.frequency) {
@@ -135,20 +140,20 @@ Stream File::Context::initStream(
}
}
-result.codec = MakeCodecPointer(info);
+result.codec = FFmpeg::MakeCodecPointer(info);
if (!result.codec) {
return result;
}
-result.frame = MakeFramePointer();
+result.frame = FFmpeg::MakeFramePointer();
if (!result.frame) {
result.codec = nullptr;
return result;
}
result.timeBase = info->time_base;
result.duration = (info->duration != AV_NOPTS_VALUE)
-? PtsToTime(info->duration, result.timeBase)
-: PtsToTime(format->duration, kUniversalTimeBase);
+? FFmpeg::PtsToTime(info->duration, result.timeBase)
+: FFmpeg::PtsToTime(format->duration, FFmpeg::kUniversalTimeBase);
if (result.duration <= 0) {
result.codec = nullptr;
} else if (result.duration == kTimeUnknown) {
@@ -167,7 +172,7 @@ void File::Context::seekToPosition(
not_null<AVFormatContext*> format,
const Stream &stream,
crl::time position) {
-auto error = AvErrorWrap();
+auto error = FFmpeg::AvErrorWrap();
if (!position) {
return;
@@ -192,7 +197,7 @@ void File::Context::seekToPosition(
error = av_seek_frame(
format,
stream.index,
-TimeToPts(
+FFmpeg::TimeToPts(
std::clamp(position, crl::time(0), stream.duration - 1),
stream.timeBase),
AVSEEK_FLAG_BACKWARD);
@@ -202,13 +207,13 @@
return logFatal(qstr("av_seek_frame"), error);
}
-base::variant<Packet, AvErrorWrap> File::Context::readPacket() {
-auto error = AvErrorWrap();
-auto result = Packet();
+base::variant<FFmpeg::Packet, FFmpeg::AvErrorWrap> File::Context::readPacket() {
+auto error = FFmpeg::AvErrorWrap();
+auto result = FFmpeg::Packet();
error = av_read_frame(_format.get(), &result.fields());
if (unroll()) {
-return AvErrorWrap();
+return FFmpeg::AvErrorWrap();
} else if (!error) {
return std::move(result);
} else if (error.code() != AVERROR_EOF) {
@@ -218,12 +223,12 @@ base::variant<Packet, AvErrorWrap> File::Context::readPacket() {
}
void File::Context::start(crl::time position) {
-auto error = AvErrorWrap();
+auto error = FFmpeg::AvErrorWrap();
if (unroll()) {
return;
}
-auto format = MakeFormatPointer(
+auto format = FFmpeg::MakeFormatPointer(
static_cast<void *>(this),
&Context::Read,
nullptr,
@@ -279,7 +284,7 @@ void File::Context::readNextPacket() {
auto result = readPacket();
if (unroll()) {
return;
-} else if (const auto packet = base::get_if<Packet>(&result)) {
+} else if (const auto packet = base::get_if<FFmpeg::Packet>(&result)) {
const auto more = _delegate->fileProcessPacket(std::move(*packet));
if (!more) {
do {
@@ -290,17 +295,17 @@ void File::Context::readNextPacket() {
}
} else {
// Still trying to read by drain.
-Assert(result.is<AvErrorWrap>());
-Assert(result.get<AvErrorWrap>().code() == AVERROR_EOF);
+Assert(result.is<FFmpeg::AvErrorWrap>());
+Assert(result.get<FFmpeg::AvErrorWrap>().code() == AVERROR_EOF);
handleEndOfFile();
}
}
void File::Context::handleEndOfFile() {
-const auto more = _delegate->fileProcessPacket(Packet());
+const auto more = _delegate->fileProcessPacket(FFmpeg::Packet());
if (_delegate->fileReadMore()) {
_readTillEnd = false;
-auto error = AvErrorWrap(av_seek_frame(
+auto error = FFmpeg::AvErrorWrap(av_seek_frame(
_format.get(),
-1, // stream_index
0, // timestamp

View File

@@ -10,6 +10,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "media/streaming/media_streaming_common.h"
#include "media/streaming/media_streaming_utility.h"
#include "media/streaming/media_streaming_reader.h"
+#include "ffmpeg/ffmpeg_utility.h"
#include "base/bytes.h"
#include "base/weak_ptr.h"
@@ -64,9 +65,9 @@ private:
[[nodiscard]] bool unroll() const;
void logError(QLatin1String method);
-void logError(QLatin1String method, AvErrorWrap error);
+void logError(QLatin1String method, FFmpeg::AvErrorWrap error);
void logFatal(QLatin1String method);
-void logFatal(QLatin1String method, AvErrorWrap error);
+void logFatal(QLatin1String method, FFmpeg::AvErrorWrap error);
void fail(Error error);
Stream initStream(
@@ -78,7 +79,8 @@ private:
crl::time position);
// TODO base::expected.
-[[nodiscard]] base::variant<Packet, AvErrorWrap> readPacket();
+[[nodiscard]] auto readPacket()
+-> base::variant<FFmpeg::Packet, FFmpeg::AvErrorWrap>;
void handleEndOfFile();
void sendFullInCache(bool force = false);
@@ -94,7 +96,7 @@ private:
crl::semaphore _semaphore;
std::atomic<bool> _interrupted = false;
-FormatPointer _format;
+FFmpeg::FormatPointer _format;
};

View File

@@ -7,11 +7,14 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#pragma once
+namespace FFmpeg {
+class Packet;
+} // namespace FFmpeg
namespace Media {
namespace Streaming {
struct Stream;
-class Packet;
enum class Error;
class FileDelegate {
@@ -27,7 +30,8 @@ public:
// Return true if reading and processing more packets is desired.
// Return false if sleeping until 'wake()' is called is desired.
// Return true after the EOF packet if looping is desired.
-[[nodiscard]] virtual bool fileProcessPacket(Packet &&packet) = 0;
+[[nodiscard]] virtual bool fileProcessPacket(
+FFmpeg::Packet &&packet) = 0;
[[nodiscard]] virtual bool fileReadMore() = 0;
};

View File

@@ -324,7 +324,7 @@ void Player::fileWaitingForData() {
}
}
-bool Player::fileProcessPacket(Packet &&packet) {
+bool Player::fileProcessPacket(FFmpeg::Packet &&packet) {
_waitingForData = false;
const auto &native = packet.fields();
@@ -337,14 +337,14 @@ bool Player::fileProcessPacket(Packet &&packet) {
crl::on_main(&_sessionGuard, [=] {
audioReceivedTill(till);
});
-_audio->process(Packet());
+_audio->process(FFmpeg::Packet());
}
if (_video) {
const auto till = _loopingShift + computeVideoDuration();
crl::on_main(&_sessionGuard, [=] {
videoReceivedTill(till);
});
-_video->process(Packet());
+_video->process(FFmpeg::Packet());
}
} else if (_audio && _audio->streamIndex() == native.stream_index) {
accumulate_max(
@@ -352,7 +352,7 @@ bool Player::fileProcessPacket(Packet &&packet) {
durationByPacket(*_audio, packet));
const auto till = _loopingShift + std::clamp(
-PacketPosition(packet, _audio->streamTimeBase()),
+FFmpeg::PacketPosition(packet, _audio->streamTimeBase()),
crl::time(0),
computeAudioDuration() - 1);
crl::on_main(&_sessionGuard, [=] {
@@ -365,7 +365,7 @@ bool Player::fileProcessPacket(Packet &&packet) {
durationByPacket(*_video, packet));
const auto till = _loopingShift + std::clamp(
-PacketPosition(packet, _video->streamTimeBase()),
+FFmpeg::PacketPosition(packet, _video->streamTimeBase()),
crl::time(0),
computeVideoDuration() - 1);
crl::on_main(&_sessionGuard, [=] {
@@ -404,7 +404,7 @@ void Player::streamFailed(Error error) {
template <typename Track>
int Player::durationByPacket(
const Track &track,
-const Packet &packet) {
+const FFmpeg::Packet &packet) {
// We've set this value on the first cycle.
if (_loopingShift || _totalDuration != kDurationUnavailable) {
return 0;

View File

@@ -84,7 +84,7 @@ private:
void fileError(Error error) override;
void fileWaitingForData() override;
void fileFullInCache(bool fullInCache) override;
-bool fileProcessPacket(Packet &&packet) override;
+bool fileProcessPacket(FFmpeg::Packet &&packet) override;
bool fileReadMore() override;
// Called from the main thread.
@@ -117,7 +117,7 @@ private:
[[nodiscard]] crl::time loadInAdvanceFor() const;
template <typename Track>
-int durationByPacket(const Track &track, const Packet &packet);
+int durationByPacket(const Track &track, const FFmpeg::Packet &packet);
// Valid after fileReady call ends. Thread-safe.
[[nodiscard]] crl::time computeAudioDuration() const;

View File

@@ -9,307 +9,16 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "media/streaming/media_streaming_common.h"
#include "ui/image/image_prepare.h"
+#include "ffmpeg/ffmpeg_utility.h"
-extern "C" {
-#include <libavutil/opt.h>
-} // extern "C"
namespace Media {
namespace Streaming {
namespace {
constexpr auto kSkipInvalidDataPackets = 10;
-constexpr auto kAlignImageBy = 16;
-constexpr auto kPixelBytesSize = 4;
-constexpr auto kImageFormat = QImage::Format_ARGB32_Premultiplied;
-constexpr auto kAvioBlockSize = 4096;
-constexpr auto kMaxScaleByAspectRatio = 16;
-void AlignedImageBufferCleanupHandler(void* data) {
-const auto buffer = static_cast<uchar*>(data);
-delete[] buffer;
-}
-[[nodiscard]] bool IsAlignedImage(const QImage &image) {
-return !(reinterpret_cast<uintptr_t>(image.bits()) % kAlignImageBy)
-&& !(image.bytesPerLine() % kAlignImageBy);
-}
-[[nodiscard]] bool IsValidAspectRatio(AVRational aspect) {
-return (aspect.num > 0)
-&& (aspect.den > 0)
-&& (aspect.num <= aspect.den * kMaxScaleByAspectRatio)
-&& (aspect.den <= aspect.num * kMaxScaleByAspectRatio);
-}
} // namespace
-bool GoodStorageForFrame(const QImage &storage, QSize size) {
-return !storage.isNull()
-&& (storage.format() == kImageFormat)
-&& (storage.size() == size)
-&& storage.isDetached()
-&& IsAlignedImage(storage);
-}
-// Create a QImage of desired size where all the data is properly aligned.
-QImage CreateFrameStorage(QSize size) {
-const auto width = size.width();
-const auto height = size.height();
-const auto widthAlign = kAlignImageBy / kPixelBytesSize;
-const auto neededWidth = width + ((width % widthAlign)
-? (widthAlign - (width % widthAlign))
-: 0);
-const auto perLine = neededWidth * kPixelBytesSize;
-const auto buffer = new uchar[perLine * height + kAlignImageBy];
-const auto cleanupData = static_cast<void *>(buffer);
-const auto address = reinterpret_cast<uintptr_t>(buffer);
-const auto alignedBuffer = buffer + ((address % kAlignImageBy)
-? (kAlignImageBy - (address % kAlignImageBy))
-: 0);
-return QImage(
-alignedBuffer,
-width,
-height,
-perLine,
-kImageFormat,
-AlignedImageBufferCleanupHandler,
-cleanupData);
-}
-IOPointer MakeIOPointer(
-void *opaque,
-int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
-int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
-int64_t(*seek)(void *opaque, int64_t offset, int whence)) {
-auto buffer = reinterpret_cast<uchar*>(av_malloc(kAvioBlockSize));
-if (!buffer) {
-LogError(qstr("av_malloc"));
-return {};
-}
-auto result = IOPointer(avio_alloc_context(
-buffer,
-kAvioBlockSize,
-write ? 1 : 0,
-opaque,
-read,
-write,
-seek));
-if (!result) {
-av_freep(&buffer);
-LogError(qstr("avio_alloc_context"));
-return {};
-}
-return result;
-}
-void IODeleter::operator()(AVIOContext *value) {
-if (value) {
-av_freep(&value->buffer);
-avio_context_free(&value);
-}
-}
-FormatPointer MakeFormatPointer(
-void *opaque,
-int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
-int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
-int64_t(*seek)(void *opaque, int64_t offset, int whence)) {
-auto io = MakeIOPointer(opaque, read, write, seek);
-if (!io) {
-return {};
-}
-auto result = avformat_alloc_context();
-if (!result) {
-LogError(qstr("avformat_alloc_context"));
-return {};
-}
-result->pb = io.get();
-auto options = (AVDictionary*)nullptr;
-const auto guard = gsl::finally([&] { av_dict_free(&options); });
-av_dict_set(&options, "usetoc", "1", 0);
-const auto error = AvErrorWrap(avformat_open_input(
-&result,
-nullptr,
-nullptr,
-&options));
-if (error) {
-// avformat_open_input freed 'result' in case an error happened.
-LogError(qstr("avformat_open_input"), error);
-return {};
-}
-result->flags |= AVFMT_FLAG_FAST_SEEK;
-// Now FormatPointer will own and free the IO context.
-io.release();
-return FormatPointer(result);
-}
-void FormatDeleter::operator()(AVFormatContext *value) {
-if (value) {
-const auto deleter = IOPointer(value->pb);
-avformat_close_input(&value);
-}
-}
-CodecPointer MakeCodecPointer(not_null<AVStream*> stream) {
-auto error = AvErrorWrap();
-auto result = CodecPointer(avcodec_alloc_context3(nullptr));
-const auto context = result.get();
-if (!context) {
-LogError(qstr("avcodec_alloc_context3"));
-return {};
-}
-error = avcodec_parameters_to_context(context, stream->codecpar);
-if (error) {
-LogError(qstr("avcodec_parameters_to_context"), error);
-return {};
-}
-av_codec_set_pkt_timebase(context, stream->time_base);
-av_opt_set_int(context, "refcounted_frames", 1, 0);
-const auto codec = avcodec_find_decoder(context->codec_id);
-if (!codec) {
-LogError(qstr("avcodec_find_decoder"), context->codec_id);
-return {};
-} else if ((error = avcodec_open2(context, codec, nullptr))) {
-LogError(qstr("avcodec_open2"), error);
-return {};
-}
-return result;
-}
-void CodecDeleter::operator()(AVCodecContext *value) {
-if (value) {
-avcodec_free_context(&value);
-}
-}
-FramePointer MakeFramePointer() {
-return FramePointer(av_frame_alloc());
-}
-bool FrameHasData(AVFrame *frame) {
-return (frame && frame->data[0] != nullptr);
-}
-void ClearFrameMemory(AVFrame *frame) {
-if (FrameHasData(frame)) {
-av_frame_unref(frame);
-}
-}
-void FrameDeleter::operator()(AVFrame *value) {
-av_frame_free(&value);
-}
-SwscalePointer MakeSwscalePointer(
-not_null<AVFrame*> frame,
-QSize resize,
-SwscalePointer *existing) {
-// We have to use custom caching for SwsContext, because
-// sws_getCachedContext checks passed flags with existing context flags,
-// and re-creates context if they're different, but in the process of
-// context creation the passed flags are modified before being written
-// to the resulting context, so the caching doesn't work.
-if (existing && (*existing) != nullptr) {
-const auto &deleter = existing->get_deleter();
-if (deleter.resize == resize
-&& deleter.frameSize == QSize(frame->width, frame->height)
-&& deleter.frameFormat == frame->format) {
-return std::move(*existing);
-}
-}
-if (frame->format <= AV_PIX_FMT_NONE || frame->format >= AV_PIX_FMT_NB) {
-LogError(qstr("frame->format"));
-return SwscalePointer();
-}
-const auto result = sws_getCachedContext(
-existing ? existing->release() : nullptr,
-frame->width,
-frame->height,
-AVPixelFormat(frame->format),
-resize.width(),
-resize.height(),
-AV_PIX_FMT_BGRA,
-0,
-nullptr,
-nullptr,
-nullptr);
-if (!result) {
-LogError(qstr("sws_getCachedContext"));
-}
-return SwscalePointer(
-result,
-{ resize, QSize{ frame->width, frame->height }, frame->format });
-}
-void SwscaleDeleter::operator()(SwsContext *value) {
-if (value) {
-sws_freeContext(value);
-}
-}
-void LogError(QLatin1String method) {
-LOG(("Streaming Error: Error in %1.").arg(method));
-}
-void LogError(QLatin1String method, AvErrorWrap error) {
-LOG(("Streaming Error: Error in %1 (code: %2, text: %3)."
-).arg(method
-).arg(error.code()
-).arg(error.text()));
-}
-crl::time PtsToTime(int64_t pts, AVRational timeBase) {
-return (pts == AV_NOPTS_VALUE || !timeBase.den)
-? kTimeUnknown
-: ((pts * 1000LL * timeBase.num) / timeBase.den);
-}
-crl::time PtsToTimeCeil(int64_t pts, AVRational timeBase) {
-return (pts == AV_NOPTS_VALUE || !timeBase.den)
-? kTimeUnknown
-: ((pts * 1000LL * timeBase.num + timeBase.den - 1) / timeBase.den);
-}
-int64_t TimeToPts(crl::time time, AVRational timeBase) {
-return (time == kTimeUnknown || !timeBase.num)
-? AV_NOPTS_VALUE
-: (time * timeBase.den) / (1000LL * timeBase.num);
-}
-crl::time PacketPosition(const Packet &packet, AVRational timeBase) {
-const auto &native = packet.fields();
-return PtsToTime(
-(native.pts == AV_NOPTS_VALUE) ? native.dts : native.pts,
-timeBase);
-}
-crl::time PacketDuration(const Packet &packet, AVRational timeBase) {
-return PtsToTime(packet.fields().duration, timeBase);
-}
-int DurationByPacket(const Packet &packet, AVRational timeBase) {
-const auto position = PacketPosition(packet, timeBase);
-const auto duration = std::max(
-PacketDuration(packet, timeBase),
-crl::time(1));
-const auto bad = [](crl::time time) {
-return (time < 0) || (time > kDurationMax);
-};
-if (bad(position) || bad(duration) || bad(position + duration + 1)) {
-LOG(("Streaming Error: Wrong duration by packet: %1 + %2"
-).arg(position
-).arg(duration));
-return -1;
-}
-return int(position + duration + 1);
-}
crl::time FramePosition(const Stream &stream) { crl::time FramePosition(const Stream &stream) {
const auto pts = !stream.frame const auto pts = !stream.frame
? AV_NOPTS_VALUE ? AV_NOPTS_VALUE
@ -318,40 +27,13 @@ crl::time FramePosition(const Stream &stream) {
: (stream.frame->pts != AV_NOPTS_VALUE) : (stream.frame->pts != AV_NOPTS_VALUE)
? stream.frame->pts ? stream.frame->pts
: stream.frame->pkt_dts; : stream.frame->pkt_dts;
return PtsToTime(pts, stream.timeBase); return FFmpeg::PtsToTime(pts, stream.timeBase);
} }
int ReadRotationFromMetadata(not_null<AVStream*> stream) { FFmpeg::AvErrorWrap ProcessPacket(Stream &stream, FFmpeg::Packet &&packet) {
const auto tag = av_dict_get(stream->metadata, "rotate", nullptr, 0);
if (tag && *tag->value) {
const auto string = QString::fromUtf8(tag->value);
auto ok = false;
const auto degrees = string.toInt(&ok);
if (ok && (degrees == 90 || degrees == 180 || degrees == 270)) {
return degrees;
}
}
return 0;
}
AVRational ValidateAspectRatio(AVRational aspect) {
return IsValidAspectRatio(aspect) ? aspect : kNormalAspect;
}
QSize CorrectByAspect(QSize size, AVRational aspect) {
Expects(IsValidAspectRatio(aspect));
return QSize(size.width() * aspect.num / aspect.den, size.height());
}
bool RotationSwapWidthHeight(int rotation) {
return (rotation == 90 || rotation == 270);
}
AvErrorWrap ProcessPacket(Stream &stream, Packet &&packet) {
Expects(stream.codec != nullptr); Expects(stream.codec != nullptr);
auto error = AvErrorWrap(); auto error = FFmpeg::AvErrorWrap();
const auto native = &packet.fields(); const auto native = &packet.fields();
const auto guard = gsl::finally([ const auto guard = gsl::finally([
@ -361,7 +43,7 @@ AvErrorWrap ProcessPacket(Stream &stream, Packet &&packet) {
] { ] {
native->size = size; native->size = size;
native->data = data; native->data = data;
packet = Packet(); packet = FFmpeg::Packet();
}); });
error = avcodec_send_packet( error = avcodec_send_packet(
@ -374,17 +56,17 @@ AvErrorWrap ProcessPacket(Stream &stream, Packet &&packet) {
// results in a crash (read_access to nullptr) in swr_convert(). // results in a crash (read_access to nullptr) in swr_convert().
&& stream.codec->codec_id != AV_CODEC_ID_OPUS) { && stream.codec->codec_id != AV_CODEC_ID_OPUS) {
if (++stream.invalidDataPackets < kSkipInvalidDataPackets) { if (++stream.invalidDataPackets < kSkipInvalidDataPackets) {
return AvErrorWrap(); // Try to skip a bad packet. return FFmpeg::AvErrorWrap(); // Try to skip a bad packet.
} }
} }
} }
return error; return error;
} }
AvErrorWrap ReadNextFrame(Stream &stream) { FFmpeg::AvErrorWrap ReadNextFrame(Stream &stream) {
Expects(stream.frame != nullptr); Expects(stream.frame != nullptr);
auto error = AvErrorWrap(); auto error = FFmpeg::AvErrorWrap();
do { do {
error = avcodec_receive_frame( error = avcodec_receive_frame(
@ -427,28 +109,28 @@ QImage ConvertFrame(
).arg(frameSize.width() ).arg(frameSize.width()
).arg(frameSize.height())); ).arg(frameSize.height()));
return QImage(); return QImage();
} else if (!FrameHasData(frame)) { } else if (!FFmpeg::FrameHasData(frame)) {
LOG(("Streaming Error: Bad frame data.")); LOG(("Streaming Error: Bad frame data."));
return QImage(); return QImage();
} }
if (resize.isEmpty()) { if (resize.isEmpty()) {
resize = frameSize; resize = frameSize;
} else if (RotationSwapWidthHeight(stream.rotation)) { } else if (FFmpeg::RotationSwapWidthHeight(stream.rotation)) {
resize.transpose(); resize.transpose();
} }
if (!GoodStorageForFrame(storage, resize)) { if (!FFmpeg::GoodStorageForFrame(storage, resize)) {
storage = CreateFrameStorage(resize); storage = FFmpeg::CreateFrameStorage(resize);
} }
const auto format = AV_PIX_FMT_BGRA; const auto format = AV_PIX_FMT_BGRA;
const auto hasDesiredFormat = (frame->format == format); const auto hasDesiredFormat = (frame->format == format);
if (frameSize == storage.size() && hasDesiredFormat) { if (frameSize == storage.size() && hasDesiredFormat) {
static_assert(sizeof(uint32) == kPixelBytesSize); static_assert(sizeof(uint32) == FFmpeg::kPixelBytesSize);
auto to = reinterpret_cast<uint32*>(storage.bits()); auto to = reinterpret_cast<uint32*>(storage.bits());
auto from = reinterpret_cast<const uint32*>(frame->data[0]); auto from = reinterpret_cast<const uint32*>(frame->data[0]);
const auto deltaTo = (storage.bytesPerLine() / kPixelBytesSize) const auto deltaTo = (storage.bytesPerLine() / sizeof(uint32))
- storage.width(); - storage.width();
const auto deltaFrom = (frame->linesize[0] / kPixelBytesSize) const auto deltaFrom = (frame->linesize[0] / sizeof(uint32))
- frame->width; - frame->width;
for (const auto y : ranges::view::ints(0, frame->height)) { for (const auto y : ranges::view::ints(0, frame->height)) {
for (const auto x : ranges::view::ints(0, frame->width)) { for (const auto x : ranges::view::ints(0, frame->width)) {
@@ -488,7 +170,7 @@ QImage ConvertFrame(
 		}
 	}
-	ClearFrameMemory(frame);
+	FFmpeg::ClearFrameMemory(frame);
 	return storage;
 }

@@ -498,8 +180,8 @@ QImage PrepareByRequest(
 		QImage storage) {
 	Expects(!request.outer.isEmpty());

-	if (!GoodStorageForFrame(storage, request.outer)) {
-		storage = CreateFrameStorage(request.outer);
+	if (!FFmpeg::GoodStorageForFrame(storage, request.outer)) {
+		storage = FFmpeg::CreateFrameStorage(request.outer);
 	}
 	{
 		Painter p(&storage);
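The fast path in ConvertFrame above walks the image line by line because both QImage and AVFrame may pad each scan line (bytesPerLine / linesize can exceed width * 4). A stand-alone sketch of that stride-aware copy; the helper name, buffers and plain int loops are illustrative, not code from this commit:

#include <array>
#include <cstdint>
#include <cstdio>

// Copy a width x height block of 4-byte BGRA pixels between two buffers
// whose lines may carry trailing padding.
void CopyArgbWithStrides(
		uint32_t *to,
		int toBytesPerLine,
		const uint32_t *from,
		int fromBytesPerLine,
		int width,
		int height) {
	const int deltaTo = toBytesPerLine / int(sizeof(uint32_t)) - width;
	const int deltaFrom = fromBytesPerLine / int(sizeof(uint32_t)) - width;
	for (int y = 0; y != height; ++y) {
		for (int x = 0; x != width; ++x) {
			*to++ = *from++; // one 4-byte BGRA pixel
		}
		to += deltaTo; // skip destination line padding
		from += deltaFrom; // skip source line padding
	}
}

int main() {
	// 2x2 image stored with one padding pixel per line (stride = 12 bytes).
	std::array<uint32_t, 6> from = { 1, 2, 0, 3, 4, 0 };
	std::array<uint32_t, 6> to = {};
	CopyArgbWithStrides(to.data(), 12, from.data(), 12, 2, 2);
	std::printf("%u %u %u %u\n", to[0], to[1], to[3], to[4]); // 1 2 3 4
}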


@@ -8,19 +8,11 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #pragma once

 #include "media/streaming/media_streaming_common.h"
+#include "ffmpeg/ffmpeg_utility.h"

-extern "C" {
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libswscale/swscale.h>
-} // extern "C"
-
 namespace Media {
 namespace Streaming {

-constexpr auto kUniversalTimeBase = AVRational{ 1, AV_TIME_BASE };
-constexpr auto kNormalAspect = AVRational{ 1, 1 };
-
 struct TimePoint {
 	crl::time trackTime = kTimeUnknown;
 	crl::time worldTime = kTimeUnknown;
@@ -33,143 +25,13 @@ struct TimePoint {
 	}
 };
-class AvErrorWrap {
-public:
-	AvErrorWrap(int code = 0) : _code(code) {
-	}
-
-	[[nodiscard]] bool failed() const {
-		return (_code < 0);
-	}
-	[[nodiscard]] explicit operator bool() const {
-		return failed();
-	}
-
-	[[nodiscard]] int code() const {
-		return _code;
-	}
-
-	[[nodiscard]] QString text() const {
-		char string[AV_ERROR_MAX_STRING_SIZE] = { 0 };
-		return QString::fromUtf8(av_make_error_string(
-			string,
-			sizeof(string),
-			_code));
-	}
-
-private:
-	int _code = 0;
-
-};
-
-class Packet {
-public:
-	Packet() {
-		setEmpty();
-	}
-	Packet(const AVPacket &data) {
-		bytes::copy(_data, bytes::object_as_span(&data));
-	}
-	Packet(Packet &&other) {
-		bytes::copy(_data, other._data);
-		if (!other.empty()) {
-			other.release();
-		}
-	}
-	Packet &operator=(Packet &&other) {
-		if (this != &other) {
-			av_packet_unref(&fields());
-			bytes::copy(_data, other._data);
-			if (!other.empty()) {
-				other.release();
-			}
-		}
-		return *this;
-	}
-	~Packet() {
-		av_packet_unref(&fields());
-	}
-
-	[[nodiscard]] AVPacket &fields() {
-		return *reinterpret_cast<AVPacket*>(_data);
-	}
-	[[nodiscard]] const AVPacket &fields() const {
-		return *reinterpret_cast<const AVPacket*>(_data);
-	}
-
-	[[nodiscard]] bool empty() const {
-		return !fields().data;
-	}
-	void release() {
-		setEmpty();
-	}
-
-private:
-	void setEmpty() {
-		auto &native = fields();
-		av_init_packet(&native);
-		native.data = nullptr;
-		native.size = 0;
-	}
-
-	alignas(alignof(AVPacket)) bytes::type _data[sizeof(AVPacket)];
-
-};
-
-struct IODeleter {
-	void operator()(AVIOContext *value);
-};
-using IOPointer = std::unique_ptr<AVIOContext, IODeleter>;
-[[nodiscard]] IOPointer MakeIOPointer(
-	void *opaque,
-	int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
-	int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
-	int64_t(*seek)(void *opaque, int64_t offset, int whence));
-
-struct FormatDeleter {
-	void operator()(AVFormatContext *value);
-};
-using FormatPointer = std::unique_ptr<AVFormatContext, FormatDeleter>;
-[[nodiscard]] FormatPointer MakeFormatPointer(
-	void *opaque,
-	int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
-	int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
-	int64_t(*seek)(void *opaque, int64_t offset, int whence));
-
-struct CodecDeleter {
-	void operator()(AVCodecContext *value);
-};
-using CodecPointer = std::unique_ptr<AVCodecContext, CodecDeleter>;
-[[nodiscard]] CodecPointer MakeCodecPointer(not_null<AVStream*> stream);
-
-struct FrameDeleter {
-	void operator()(AVFrame *value);
-};
-using FramePointer = std::unique_ptr<AVFrame, FrameDeleter>;
-[[nodiscard]] FramePointer MakeFramePointer();
-[[nodiscard]] bool FrameHasData(AVFrame *frame);
-void ClearFrameMemory(AVFrame *frame);
-
-struct SwscaleDeleter {
-	QSize resize;
-	QSize frameSize;
-	int frameFormat = int(AV_PIX_FMT_NONE);
-
-	void operator()(SwsContext *value);
-};
-using SwscalePointer = std::unique_ptr<SwsContext, SwscaleDeleter>;
-[[nodiscard]] SwscalePointer MakeSwscalePointer(
-	not_null<AVFrame*> frame,
-	QSize resize,
-	SwscalePointer *existing = nullptr);
-
 struct Stream {
 	int index = -1;
 	crl::time duration = kTimeUnknown;
-	AVRational timeBase = kUniversalTimeBase;
-	CodecPointer codec;
-	FramePointer frame;
-	std::deque<Packet> queue;
+	AVRational timeBase = FFmpeg::kUniversalTimeBase;
+	FFmpeg::CodecPointer codec;
+	FFmpeg::FramePointer frame;
+	std::deque<FFmpeg::Packet> queue;
 	int invalidDataPackets = 0;

 	// Audio only.
@@ -177,39 +39,19 @@ struct Stream {
 	// Video only.
 	int rotation = 0;
-	AVRational aspect = kNormalAspect;
-	SwscalePointer swscale;
+	AVRational aspect = FFmpeg::kNormalAspect;
+	FFmpeg::SwscalePointer swscale;
 };
-void LogError(QLatin1String method);
-void LogError(QLatin1String method, AvErrorWrap error);
-
-[[nodiscard]] crl::time PtsToTime(int64_t pts, AVRational timeBase);
-// Used for full duration conversion.
-[[nodiscard]] crl::time PtsToTimeCeil(int64_t pts, AVRational timeBase);
-[[nodiscard]] int64_t TimeToPts(crl::time time, AVRational timeBase);
-[[nodiscard]] crl::time PacketPosition(
-	const Packet &packet,
-	AVRational timeBase);
-[[nodiscard]] crl::time PacketDuration(
-	const Packet &packet,
-	AVRational timeBase);
-[[nodiscard]] int DurationByPacket(
-	const Packet &packet,
-	AVRational timeBase);
 [[nodiscard]] crl::time FramePosition(const Stream &stream);
-[[nodiscard]] int ReadRotationFromMetadata(not_null<AVStream*> stream);
-[[nodiscard]] AVRational ValidateAspectRatio(AVRational aspect);
-[[nodiscard]] bool RotationSwapWidthHeight(int rotation);
-[[nodiscard]] QSize CorrectByAspect(QSize size, AVRational aspect);
-[[nodiscard]] AvErrorWrap ProcessPacket(Stream &stream, Packet &&packet);
-[[nodiscard]] AvErrorWrap ReadNextFrame(Stream &stream);
+[[nodiscard]] FFmpeg::AvErrorWrap ProcessPacket(
+	Stream &stream,
+	FFmpeg::Packet &&packet);
+[[nodiscard]] FFmpeg::AvErrorWrap ReadNextFrame(Stream &stream);
 [[nodiscard]] bool GoodForRequest(
 	const QImage &image,
 	const FrameRequest &request);
-[[nodiscard]] bool GoodStorageForFrame(const QImage &storage, QSize size);
-[[nodiscard]] QImage CreateFrameStorage(QSize size);
 [[nodiscard]] QImage ConvertFrame(
 	Stream &stream,
 	AVFrame *frame,
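The Packet class removed above now lives in ffmpeg_utility.h as FFmpeg::Packet: it stores the AVPacket bytes in place and calls av_packet_unref() in the destructor. A hedged usage sketch, assuming the moved class keeps the removed interface (fields(), empty(), move transfers ownership); ReadOnePacket is a hypothetical helper:

#include "ffmpeg/ffmpeg_utility.h"

extern "C" {
#include <libavformat/avformat.h>
} // extern "C"

// Read the next packet from an opened format context into the RAII wrapper.
FFmpeg::Packet ReadOnePacket(AVFormatContext *format) {
	auto packet = FFmpeg::Packet();
	if (av_read_frame(format, &packet.fields()) < 0) {
		return FFmpeg::Packet(); // empty() is true: end of file or error.
	}
	return packet; // moved out; av_packet_unref() runs in the destructor.
}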


@@ -34,7 +34,7 @@ public:
 		FnMut<void(const Information &)> ready,
 		Fn<void(Error)> error);

-	void process(Packet &&packet);
+	void process(FFmpeg::Packet &&packet);

 	[[nodiscard]] rpl::producer<> checkNextFrame() const;
 	[[nodiscard]] rpl::producer<> waitingForData() const;
@@ -60,7 +60,7 @@ private:
 	void fail(Error error);
 	[[nodiscard]] bool interrupted() const;
-	[[nodiscard]] bool tryReadFirstFrame(Packet &&packet);
+	[[nodiscard]] bool tryReadFirstFrame(FFmpeg::Packet &&packet);
 	[[nodiscard]] bool fillStateFromFrame();
 	[[nodiscard]] bool processFirstFrame();
 	void queueReadFrames(crl::time delay = 0);
@@ -71,7 +71,7 @@ private:
 	void callReady();
 	[[nodiscard]] bool loopAround();
 	[[nodiscard]] crl::time computeDuration() const;
-	[[nodiscard]] int durationByPacket(const Packet &packet);
+	[[nodiscard]] int durationByPacket(const FFmpeg::Packet &packet);

 	// Force frame position to be clamped to [0, duration] and monotonic.
 	[[nodiscard]] crl::time currentFramePosition() const;
@@ -103,7 +103,7 @@ private:
 	base::ConcurrentTimer _readFramesTimer;

 	// For initial frame skipping for an exact seek.
-	FramePointer _initialSkippingFrame;
+	FFmpeg::FramePointer _initialSkippingFrame;

 };
@@ -142,7 +142,7 @@ rpl::producer<> VideoTrackObject::waitingForData() const {
 		: _waitingForData.events();
 }

-void VideoTrackObject::process(Packet &&packet) {
+void VideoTrackObject::process(FFmpeg::Packet &&packet) {
 	if (interrupted()) {
 		return;
 	}
@@ -164,12 +164,12 @@ void VideoTrackObject::process(Packet &&packet) {
 	}
 }

-int VideoTrackObject::durationByPacket(const Packet &packet) {
+int VideoTrackObject::durationByPacket(const FFmpeg::Packet &packet) {
 	// We've set this value on the first cycle.
 	if (_loopingShift || _stream.duration != kDurationUnavailable) {
 		return 0;
 	}
-	const auto result = DurationByPacket(packet, _stream.timeBase);
+	const auto result = FFmpeg::DurationByPacket(packet, _stream.timeBase);
 	if (result < 0) {
 		fail(Error::InvalidData);
 		return 0;
@@ -395,7 +395,7 @@ void VideoTrackObject::updateFrameRequest(const FrameRequest &request) {
 	_request = request;
 }

-bool VideoTrackObject::tryReadFirstFrame(Packet &&packet) {
+bool VideoTrackObject::tryReadFirstFrame(FFmpeg::Packet &&packet) {
 	if (ProcessPacket(_stream, std::move(packet)).failed()) {
 		return false;
 	}
@@ -424,7 +424,7 @@ bool VideoTrackObject::tryReadFirstFrame(Packet &&packet) {
 			// Try skipping frames until one is after the requested position.
 			std::swap(_initialSkippingFrame, _stream.frame);
 			if (!_stream.frame) {
-				_stream.frame = MakeFramePointer();
+				_stream.frame = FFmpeg::MakeFramePointer();
 			}
 		}
 	}
@@ -470,8 +470,10 @@ void VideoTrackObject::callReady() {
 	const auto frame = _shared->frameForPaint();

 	auto data = VideoInformation();
-	data.size = CorrectByAspect(frame->original.size(), _stream.aspect);
-	if (RotationSwapWidthHeight(_stream.rotation)) {
+	data.size = FFmpeg::CorrectByAspect(
+		frame->original.size(),
+		_stream.aspect);
+	if (FFmpeg::RotationSwapWidthHeight(_stream.rotation)) {
 		data.size.transpose();
 	}
 	data.cover = frame->original;
@@ -756,7 +758,7 @@ crl::time VideoTrack::streamDuration() const {
 	return _streamDuration;
 }

-void VideoTrack::process(Packet &&packet) {
+void VideoTrack::process(FFmpeg::Packet &&packet) {
 	_wrapped.with([
 		packet = std::move(packet)
 	](Implementation &unwrapped) mutable {
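VideoTrack::process hands the packet to the wrapped object by move-capturing it into the lambda (C++14 init capture), so the AVPacket buffer crosses the thread boundary without a copy. A reduced, runnable sketch of that pattern; Payload is a hypothetical move-only type and std::thread stands in for _wrapped.with():

#include <cstdio>
#include <memory>
#include <thread>
#include <utility>

struct Payload {
	std::unique_ptr<int> data; // move-only, like FFmpeg::Packet
};

void Process(Payload &&payload) {
	// The payload is moved into the closure once; the worker thread owns it.
	auto worker = std::thread([payload = std::move(payload)]() mutable {
		std::printf("consumed %d\n", *payload.data);
	});
	worker.join();
}

int main() {
	Process(Payload{ std::make_unique<int>(42) });
}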


@@ -33,7 +33,7 @@ public:
 	[[nodiscard]] crl::time streamDuration() const;

 	// Called from the same unspecified thread.
-	void process(Packet &&packet);
+	void process(FFmpeg::Packet &&packet);
 	void waitForData();

 	// Called from the main thread.
@@ -59,7 +59,7 @@ private:
 	friend class VideoTrackObject;

 	struct Frame {
-		FramePointer decoded = MakeFramePointer();
+		FFmpeg::FramePointer decoded = FFmpeg::MakeFramePointer();
 		QImage original;
 		crl::time position = kTimeUnknown;
 		crl::time displayed = kTimeUnknown;
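Frame::decoded relies on a unique_ptr with a custom deleter, so every decoded AVFrame is released automatically when the Frame goes away. A minimal stand-alone sketch of that pattern against the raw FFmpeg API, mirroring the FrameDeleter/FramePointer pair that this commit moves into lib_ffmpeg:

#include <memory>

extern "C" {
#include <libavutil/frame.h>
} // extern "C"

// Deleter that frees the frame through the FFmpeg API.
struct FrameDeleter {
	void operator()(AVFrame *value) {
		av_frame_free(&value);
	}
};
using FramePointer = std::unique_ptr<AVFrame, FrameDeleter>;

FramePointer MakeFramePointer() {
	return FramePointer(av_frame_alloc());
}

int main() {
	auto frame = MakeFramePointer(); // av_frame_free() runs on scope exit
	return frame ? 0 : 1;
}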


@@ -82,6 +82,7 @@
       'lib_export.gyp:lib_export',
       'lib_storage.gyp:lib_storage',
       'lib_lottie.gyp:lib_lottie',
+      'lib_ffmpeg.gyp:lib_ffmpeg',
     ],
     'defines': [


@@ -0,0 +1,58 @@
# This file is part of Telegram Desktop,
# the official desktop application for the Telegram messaging service.
#
# For license and copyright information please follow this link:
# https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL

{
  'includes': [
    'common.gypi',
  ],
  'targets': [{
    'target_name': 'lib_ffmpeg',
    'type': 'static_library',
    'includes': [
      'common.gypi',
      'qt.gypi',
      'telegram_linux.gypi',
    ],
    'variables': {
      'src_loc': '../SourceFiles',
      'res_loc': '../Resources',
      'libs_loc': '../../../Libraries',
      'official_build_target%': '',
      'submodules_loc': '../ThirdParty',
    },
    'dependencies': [
      'crl.gyp:crl',
      'lib_base.gyp:lib_base',
    ],
    'export_dependent_settings': [
      'crl.gyp:crl',
      'lib_base.gyp:lib_base',
    ],
    'defines': [
    ],
    'include_dirs': [
      '<(src_loc)',
      '<(SHARED_INTERMEDIATE_DIR)',
      '<(libs_loc)/ffmpeg',
      '<(libs_loc)/range-v3/include',
      '<(submodules_loc)/GSL/include',
      '<(submodules_loc)/variant/include',
      '<(submodules_loc)/crl/src',
    ],
    'sources': [
      '<(src_loc)/ffmpeg/ffmpeg_utility.cpp',
      '<(src_loc)/ffmpeg/ffmpeg_utility.h',
    ],
    'conditions': [[ 'build_macold', {
      'xcode_settings': {
        'OTHER_CPLUSPLUSFLAGS': [ '-nostdinc++' ],
      },
      'include_dirs': [
        '/usr/local/macold/include/c++/v1',
      ],
    }]],
  }],
}
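A consumer linking against the new lib_ffmpeg target only needs the single ffmpeg/ffmpeg_utility.h include. A hedged sketch, assuming FFmpeg::AvErrorWrap keeps the interface of the class removed from media_streaming_utility.h above (non-explicit int constructor, failed(), text()); DescribeEndOfFile is a hypothetical function:

#include <QString>

#include "ffmpeg/ffmpeg_utility.h"

extern "C" {
#include <libavutil/error.h>
} // extern "C"

// Translate a library error code into a readable message.
QString DescribeEndOfFile() {
	const auto error = FFmpeg::AvErrorWrap(AVERROR_EOF);
	return error.failed() ? error.text() : QString();
}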


@@ -30,12 +30,14 @@
       'lib_base.gyp:lib_base',
       'lib_rlottie.gyp:lib_rlottie',
       'lib_storage.gyp:lib_storage',
+      'lib_ffmpeg.gyp:lib_ffmpeg',
     ],
     'export_dependent_settings': [
       'crl.gyp:crl',
       'lib_base.gyp:lib_base',
       'lib_rlottie.gyp:lib_rlottie',
       'lib_storage.gyp:lib_storage',
+      'lib_ffmpeg.gyp:lib_ffmpeg',
     ],
     'defines': [
       'LOT_BUILD',
@@ -45,6 +47,7 @@
       '<(SHARED_INTERMEDIATE_DIR)',
       '<(libs_loc)/range-v3/include',
       '<(libs_loc)/zlib',
+      '<(libs_loc)/ffmpeg',
       '<(rlottie_loc)',
       '<(submodules_loc)/GSL/include',
       '<(submodules_loc)/variant/include',