mirror of https://github.com/procxx/kepka.git
divided reading frames and preparing them by request
parent 6cf2987536
commit e840d2bb17
@@ -61,7 +61,7 @@ In Terminal go to **/home/user/TBuild/Libraries** and run
 sudo apt-get -y --force-yes install autoconf automake build-essential libass-dev libfreetype6-dev libgpac-dev libsdl1.2-dev libtheora-dev libtool libva-dev libvdpau-dev libvorbis-dev libxcb1-dev libxcb-shm0-dev libxcb-xfixes0-dev pkg-config texi2html zlib1g-dev
 sudo apt-get install yasm

-./configure --prefix=/usr/local --disable-programs --disable-doc --disable-everything --enable-libopus --enable-decoder=aac --enable-decoder=aac_latm --enable-decoder=aasc --enable-decoder=flac --enable-decoder=gif --enable-decoder=h264 --enable-decoder=h264_vdpau --enable-decoder=mp1 --enable-decoder=mp1float --enable-decoder=mp2 --enable-decoder=mp2float --enable-decoder=mp3 --enable-decoder=mp3adu --enable-decoder=mp3adufloat --enable-decoder=mp3float --enable-decoder=mp3on4 --enable-decoder=mp3on4float --enable-decoder=mpeg4 --enable-decoder=mpeg4_vdpau --enable-decoder=msmpeg4v2 --enable-decoder=msmpeg4v3 --enable-decoder=opus --enable-decoder=vorbis --enable-decoder=wavpack --enable-decoder=wmalossless --enable-decoder=wmapro --enable-decoder=wmav1 --enable-decoder=wmav2 --enable-decoder=wmavoice --enable-encoder=libopus --enable-hwaccel=h264_vaapi --enable-hwaccel=h264_vdpau --enable-hwaccel=mpeg4_vaapi --enable-hwaccel=mpeg4_vdpau --enable-parser=aac --enable-parser=aac_latm --enable-parser=flac --enable-parser=h264 --enable-parser=mpeg4video --enable-parser=mpegaudio --enable-parser=opus --enable-parser=vorbis --enable-demuxer=aac --enable-demuxer=flac --enable-demuxer=gif --enable-demuxer=h264 --enable-demuxer=mov --enable-demuxer=mp3 --enable-demuxer=ogg --enable-demuxer=wav --enable-muxer=ogg --enable-muxer=opus
+./configure --prefix=/usr/local --disable-programs --disable-doc --disable-pthreads --disable-mmx --disable-everything --enable-libopus --enable-decoder=aac --enable-decoder=aac_latm --enable-decoder=aasc --enable-decoder=flac --enable-decoder=gif --enable-decoder=h264 --enable-decoder=h264_vdpau --enable-decoder=mp1 --enable-decoder=mp1float --enable-decoder=mp2 --enable-decoder=mp2float --enable-decoder=mp3 --enable-decoder=mp3adu --enable-decoder=mp3adufloat --enable-decoder=mp3float --enable-decoder=mp3on4 --enable-decoder=mp3on4float --enable-decoder=mpeg4 --enable-decoder=mpeg4_vdpau --enable-decoder=msmpeg4v2 --enable-decoder=msmpeg4v3 --enable-decoder=opus --enable-decoder=vorbis --enable-decoder=wavpack --enable-decoder=wmalossless --enable-decoder=wmapro --enable-decoder=wmav1 --enable-decoder=wmav2 --enable-decoder=wmavoice --enable-encoder=libopus --enable-hwaccel=h264_vaapi --enable-hwaccel=h264_vdpau --enable-hwaccel=mpeg4_vaapi --enable-hwaccel=mpeg4_vdpau --enable-parser=aac --enable-parser=aac_latm --enable-parser=flac --enable-parser=h264 --enable-parser=mpeg4video --enable-parser=mpegaudio --enable-parser=opus --enable-parser=vorbis --enable-demuxer=aac --enable-demuxer=flac --enable-demuxer=gif --enable-demuxer=h264 --enable-demuxer=mov --enable-demuxer=mp3 --enable-demuxer=ogg --enable-demuxer=wav --enable-muxer=ogg --enable-muxer=opus

 make
 sudo make install
@@ -435,7 +435,8 @@ public:
 		, _device(0)
 		, _dataSize(0) {
 	}
-	virtual bool readNextFrame(QImage &to, bool &hasAlpha, const QSize &size) = 0;
+	virtual bool readNextFrame() = 0;
+	virtual bool renderFrame(QImage &to, bool &hasAlpha, const QSize &size) = 0;
 	virtual int32 nextFrameDelay() = 0;
 	virtual bool start(bool onlyGifv) = 0;
 	virtual ~ClipReaderImplementation() {
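The hunk above splits the old single-step readNextFrame(QImage &, bool &, const QSize &) into two virtuals: readNextFrame() only advances the decoder, and renderFrame() prepares pixels when a frame is actually requested. A minimal illustrative sketch of a caller (not code from this commit; decodeAndRenderOne, impl, width and height are placeholder names, and the surrounding file's Qt includes are assumed):

	// Illustrative sketch only, not part of this commit.
	bool decodeAndRenderOne(ClipReaderImplementation *impl, QImage &target, int width, int height) {
		bool hasAlpha = false;
		if (!impl->readNextFrame()) { // advance the decoder; no pixels are prepared yet
			return false;
		}
		// prepare the frame by request; an empty QSize keeps the original size
		return impl->renderFrame(target, hasAlpha, QSize(width, height));
	}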
@@ -482,30 +483,35 @@ public:
 			return false;
 		}

-		QImage frame; // QGifHandler always reads first to internal QImage and returns it
-		if (!_reader->read(&frame)) {
+		_frame = QImage(); // QGifHandler always reads first to internal QImage and returns it
+		if (!_reader->read(&_frame) || _frame.isNull()) {
 			return false;
 		}
 		--_framesLeft;
+		return true;
+	}

-		if (size.isEmpty() || size == frame.size()) {
-			int32 w = frame.width(), h = frame.height();
-			if (to.width() == w && to.height() == h && to.format() == frame.format()) {
-				if (to.byteCount() != frame.byteCount()) {
-					int bpl = qMin(to.bytesPerLine(), frame.bytesPerLine());
+	bool renderFrame(QImage &to, bool &hasAlpha, const QSize &size) {
+		t_assert(!_frame.isNull());
+		if (size.isEmpty() || size == _frame.size()) {
+			int32 w = _frame.width(), h = _frame.height();
+			if (to.width() == w && to.height() == h && to.format() == _frame.format()) {
+				if (to.byteCount() != _frame.byteCount()) {
+					int bpl = qMin(to.bytesPerLine(), _frame.bytesPerLine());
 					for (int i = 0; i < h; ++i) {
-						memcpy(to.scanLine(i), frame.constScanLine(i), bpl);
+						memcpy(to.scanLine(i), _frame.constScanLine(i), bpl);
 					}
 				} else {
-					memcpy(to.bits(), frame.constBits(), frame.byteCount());
+					memcpy(to.bits(), _frame.constBits(), _frame.byteCount());
 				}
 			} else {
-				to = frame.copy();
+				to = _frame.copy();
 			}
 		} else {
-			to = frame.scaled(size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
+			to = _frame.scaled(size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
 		}
-		hasAlpha = frame.hasAlphaChannel();
+		hasAlpha = _frame.hasAlphaChannel();
+		_frame = QImage();
 		return true;
 	}

@@ -525,6 +531,7 @@ public:
 private:
 	QImageReader *_reader;
 	int32 _framesLeft, _frameDelay;
+	QImage _frame;

 	bool jumpToStart() {
 		if (_reader && _reader->jumpToImage(0)) {
@@ -563,6 +570,7 @@ public:
 	, _frame(0)
 	, _opened(false)
 	, _hadFrame(false)
+	, _frameRead(false)
 	, _packetSize(0)
 	, _packetData(0)
 	, _packetWas(false)
@@ -578,7 +586,7 @@ public:
 		_avpkt.size = 0;
 	}

-	bool readNextFrame(QImage &to, bool &hasAlpha, const QSize &size) {
+	bool readNextFrame() {
 		int res;
 		while (true) {
 			if (_avpkt.size > 0) { // previous packet not finished
@@ -630,7 +638,34 @@ public:
 			}

 			if (got_frame) {
-				_hadFrame = true;
+				_hadFrame = _frameRead = true;
+				return true;
+			}
+
+			if (finished) {
+				if ((res = avformat_seek_file(_fmtContext, _streamId, std::numeric_limits<int64_t>::min(), 0, std::numeric_limits<int64_t>::max(), 0)) < 0) {
+					if ((res = av_seek_frame(_fmtContext, _streamId, 0, AVSEEK_FLAG_BYTE)) < 0) {
+						if ((res = av_seek_frame(_fmtContext, _streamId, 0, AVSEEK_FLAG_FRAME)) < 0) {
+							if ((res = av_seek_frame(_fmtContext, _streamId, 0, 0)) < 0) {
+								char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
+								LOG(("Gif Error: Unable to av_seek_frame() to the start %1, error %2, %3").arg(logData()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
+								return false;
+							}
+						}
+					}
+				}
+				avcodec_flush_buffers(_codecContext);
+				_hadFrame = false;
+				_frameMs = 0;
+			}
+		}
+
+		return false;
+	}
+
+	bool renderFrame(QImage &to, bool &hasAlpha, const QSize &size) {
+		t_assert(_frameRead);
+		_frameRead = false;

 		if (!_width || !_height) {
 			_width = _frame->width;
@@ -658,7 +693,7 @@ public:
 			_swsContext = sws_getCachedContext(_swsContext, _frame->width, _frame->height, AVPixelFormat(_frame->format), toSize.width(), toSize.height(), AV_PIX_FMT_BGRA, 0, 0, 0, 0);
 		}
 		uint8_t * toData[1] = { to.bits() };
-		int toLinesize[1] = { to.bytesPerLine() };
+		int toLinesize[1] = { to.bytesPerLine() }, res;
 		if ((res = sws_scale(_swsContext, _frame->data, _frame->linesize, 0, _frame->height, toData, toLinesize)) != _swsSize.height()) {
 			LOG(("Gif Error: Unable to sws_scale to good size %1, height %2, should be %3").arg(logData()).arg(res).arg(_swsSize.height()));
 			return false;
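For orientation only (not part of the diff): the sws_scale call in the context above writes BGRA pixels straight into the QImage's buffer, and the changed line threads the returned row count through res so a short scale can be detected. A self-contained sketch of that conversion, with a placeholder name (convertFrameToImage), under the assumption of a preallocated 32-bit destination image:

	// Illustrative sketch only, not part of this commit.
	extern "C" {
	#include <libswscale/swscale.h>
	#include <libavutil/frame.h>
	}
	#include <QImage>

	// Convert a decoded AVFrame into a preallocated 32-bit QImage via libswscale.
	bool convertFrameToImage(AVFrame *frame, QImage &to) {
		SwsContext *context = sws_getCachedContext(nullptr,
			frame->width, frame->height, AVPixelFormat(frame->format),
			to.width(), to.height(), AV_PIX_FMT_BGRA,
			0, nullptr, nullptr, nullptr);
		if (!context) return false;
		uint8_t *toData[1] = { to.bits() };
		int toLinesize[1] = { int(to.bytesPerLine()) };
		int rows = sws_scale(context, frame->data, frame->linesize, 0, frame->height, toData, toLinesize);
		sws_freeContext(context);
		return rows == to.height(); // a mismatch means the scale failed, which the LOG above treats as an error
	}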
@@ -683,27 +718,6 @@ public:
 		return true;
 	}

-			if (finished) {
-				if ((res = avformat_seek_file(_fmtContext, _streamId, std::numeric_limits<int64_t>::min(), 0, std::numeric_limits<int64_t>::max(), 0)) < 0) {
-					if ((res = av_seek_frame(_fmtContext, _streamId, 0, AVSEEK_FLAG_BYTE)) < 0) {
-						if ((res = av_seek_frame(_fmtContext, _streamId, 0, AVSEEK_FLAG_FRAME)) < 0) {
-							if ((res = av_seek_frame(_fmtContext, _streamId, 0, 0)) < 0) {
-								char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
-								LOG(("Gif Error: Unable to av_seek_frame() to the start %1, error %2, %3").arg(logData()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-								return false;
-							}
-						}
-					}
-				}
-				avcodec_flush_buffers(_codecContext);
-				_hadFrame = false;
-				_frameMs = 0;
-			}
-		}
-
-		return false;
-	}
-
 	int32 nextFrameDelay() {
 		return _currentFrameDelay;
 	}
@@ -799,7 +813,7 @@ private:
 	AVCodecContext *_codecContext;
 	int32 _streamId;
 	AVFrame *_frame;
-	bool _opened, _hadFrame;
+	bool _opened, _hadFrame, _frameRead;

 	AVPacket _avpkt;
 	int _packetSize;
@@ -872,7 +886,10 @@ public:
 			return error();
 		}
 		if (frame() && frame()->original.isNull()) {
-			if (!_implementation->readNextFrame(frame()->original, frame()->alpha, QSize())) {
+			if (!_implementation->readNextFrame()) {
 				return error();
 			}
+			if (!_implementation->renderFrame(frame()->original, frame()->alpha, QSize())) {
+				return error();
+			}
 			_width = frame()->original.width();
@@ -896,10 +913,13 @@ public:
 	}

 	ClipProcessResult finishProcess(uint64 ms) {
-		if (!prepareNextFrame()) {
+		if (!readNextFrame()) {
 			return error();
 		}
-		if (ms >= _nextFrameWhen && !prepareNextFrame(true)) {
+		if (ms >= _nextFrameWhen && !readNextFrame(true)) {
 			return error();
 		}
+		if (!renderFrame()) {
+			return error();
+		}
 		return ClipProcessCopyFrame;
@@ -910,13 +930,22 @@ public:
 		return qMax(delay, 5);
 	}

-	bool prepareNextFrame(bool keepup = false) {
-		t_assert(frame() != 0 && _request.valid());
-		if (!_implementation->readNextFrame(frame()->original, frame()->alpha, QSize(_request.framew, _request.frameh))) {
+	bool readNextFrame(bool keepup = false) {
+		if (!_implementation->readNextFrame()) {
 			return false;
 		}
 		_nextFrameWhen += nextFrameDelay();
-		if (keepup) _nextFrameWhen = qMax(_nextFrameWhen, getms());
+		if (keepup) {
+			_nextFrameWhen = qMax(_nextFrameWhen, getms());
+		}
+		return true;
+	}
+
+	bool renderFrame() {
+		t_assert(frame() != 0 && _request.valid());
+		if (!_implementation->renderFrame(frame()->original, frame()->alpha, QSize(_request.framew, _request.frameh))) {
+			return false;
+		}
 		frame()->original.setDevicePixelRatio(_request.factor);
 		frame()->pix = QPixmap();
 		frame()->pix = _prepareFrame(_request, frame()->original, frame()->alpha, frame()->cache);
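Not part of the diff: the timing rule in the new readNextFrame(keepup) above amounts to a small helper. A sketch under the assumption that times are milliseconds; scheduleNextFrame and nowMs are placeholder names:

	// Illustrative sketch only, not part of this commit.
	// Each frame is due nextFrameDelay() ms after the previous one; with keepup the
	// due time is clamped to "now" so playback catches up instead of drifting behind.
	#include <algorithm>
	#include <cstdint>

	uint64_t scheduleNextFrame(uint64_t nextFrameWhen, int frameDelayMs, uint64_t nowMs, bool keepup) {
		nextFrameWhen += uint64_t(frameDelayMs);
		if (keepup) {
			nextFrameWhen = std::max(nextFrameWhen, nowMs);
		}
		return nextFrameWhen;
	}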
@@ -1250,7 +1279,7 @@ MTPDocumentAttribute clipReadAnimatedAttributes(const QString &fname, const QByt
 	FFMpegReaderImplementation *reader = new FFMpegReaderImplementation(&localloc, &localdata);
 	if (reader->start(true)) {
 		bool hasAlpha = false;
-		if (reader->readNextFrame(cover, hasAlpha, QSize())) {
+		if (reader->readNextFrame() && reader->renderFrame(cover, hasAlpha, QSize())) {
 			if (cover.width() > 0 && cover.height() > 0 && cover.width() < cover.height() * 10 && cover.height() < cover.width() * 10) {
 				if (hasAlpha) {
 					QImage cacheForResize;