From 44c562d8ba03d15fdafefc037b51f08421b93816 Mon Sep 17 00:00:00 2001
From: John Preston
Date: Fri, 22 Feb 2019 15:58:26 +0400
Subject: [PATCH] Fix streaming seek, display progress.

---
 Telegram/SourceFiles/data/data_document.cpp       | 156 ++++++++++++------
 .../SourceFiles/media/audio/media_audio.cpp       |  10 +-
 .../media/audio/media_audio_loaders.cpp           |   8 +
 .../media/streaming/media_streaming_common.h      |   6 +-
 .../media/streaming/media_streaming_file.cpp      |  53 ++++--
 .../media/streaming/media_streaming_file.h        |   2 +-
 .../media_streaming_loader_mtproto.cpp            |   2 +-
 .../streaming/media_streaming_player.cpp          |  30 +++-
 .../media/streaming/media_streaming_player.h      |   2 +
 .../streaming/media_streaming_utility.cpp         |  32 ++--
 .../media/streaming/media_streaming_utility.h     |   3 +
 .../streaming/media_streaming_video_track.cpp     |  52 +++++-
 Telegram/SourceFiles/rpl/event_stream.h           |   4 +-
 13 files changed, 248 insertions(+), 112 deletions(-)

diff --git a/Telegram/SourceFiles/data/data_document.cpp b/Telegram/SourceFiles/data/data_document.cpp
index 99fba8d8e..fd8c97f4b 100644
--- a/Telegram/SourceFiles/data/data_document.cpp
+++ b/Telegram/SourceFiles/data/data_document.cpp
@@ -296,9 +296,10 @@ void StartStreaming(
 	if (auto loader = document->createStreamingLoader(origin)) {
 		static auto player = std::unique_ptr();
 		static auto pauseOnSeek = false;
+		static auto position = crl::time(0);
+		static auto preloaded = crl::time(0);
 		static auto duration = crl::time(0);
 		static auto options = Media::Streaming::PlaybackOptions();
-		static auto subscribe = Fn();
 		static auto speed = 1.;
 		static auto step = pow(2., 1. / 12);
@@ -338,67 +339,16 @@ void StartStreaming(
 				player->pause();
 			}
 			void mouseReleaseEvent(QMouseEvent *e) override {
-				options.position = std::clamp(
+				preloaded = position = options.position = std::clamp(
 					(duration * e->pos().x()) / width(),
 					crl::time(0),
 					crl::time(duration));
 				player->play(options);
-				subscribe();
 			}
 		};
 		static auto video = base::unique_qptr();
-		subscribe = [] {
-			player->updates(
-			) | rpl::start_with_next_error_done([=](Update &&update) {
-				update.data.match([&](Information &update) {
-					duration = update.video.state.duration;
-					if (!video && !update.video.cover.isNull()) {
-						video = base::make_unique_q();
-						video->setAttribute(Qt::WA_OpaquePaintEvent);
-						video->paintRequest(
-						) | rpl::start_with_next([=](QRect rect) {
-							if (player->ready()) {
-								Painter(video.get()).drawImage(
-									video->rect(),
-									player->frame(FrameRequest()));
-							} else {
-								Painter(video.get()).fillRect(
-									rect,
-									Qt::black);
-							}
-						}, video->lifetime());
-						const auto size = QSize(
-							ConvertScale(update.video.size.width()),
-							ConvertScale(update.video.size.height()));
-						const auto center = App::wnd()->geometry().center();
-						video->setGeometry(QRect(
-							center - QPoint(size.width(), size.height()) / 2,
-							size));
-						video->show();
-						video->shownValue(
-						) | rpl::start_with_next([=](bool shown) {
-							if (!shown) {
-								base::take(player) = nullptr;
-							}
-						}, video->lifetime());
-					}
-				}, [&](PreloadedVideo &update) {
-				}, [&](UpdateVideo &update) {
-					Expects(video != nullptr);
-
-					video->update();
-				}, [&](PreloadedAudio &update) {
-				}, [&](UpdateAudio &update) {
-				}, [&](WaitingForData &update) {
-				}, [&](MutedByOther &update) {
-				});
-			}, [=](const Error &error) {
-				base::take(video) = nullptr;
-			}, [=] {
-			}, player->lifetime());
-		};
 		player = std::make_unique(
 			&document->owner(),
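Note: the hunk above turns a mouse release on the preview widget into a seek - the click's x coordinate is mapped to a timestamp, both progress markers are reset, and play() is called with the new options.position. A minimal standalone sketch of that mapping (not part of the patch; Time stands in for crl::time, the helper name is illustrative):

    #include <algorithm>
    #include <cstdint>

    using Time = std::int64_t; // milliseconds, a stand-in for crl::time

    // Same arithmetic as mouseReleaseEvent() above: scale the click
    // offset by the duration and clamp the result into [0, duration].
    Time ClickToPosition(int x, int width, Time duration) {
        if (width <= 0 || duration <= 0) {
            return 0;
        }
        return std::clamp<Time>((duration * x) / width, 0, duration);
    }

    // Example: a click at x = 150 on a 600 px wide widget showing a
    // 90'000 ms clip seeks to 22'500 ms.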
@@ -413,9 +363,105 @@ void StartStreaming(
 		options.speed = speed;
 		//options.syncVideoByAudio = false;
-		options.position = 0;
+		preloaded = position = options.position = 0;
 		player->play(options);
-		subscribe();
+		player->updates(
+		) | rpl::start_with_next_error_done([=](Update &&update) {
+			update.data.match([&](Information &update) {
+				duration = std::max(
+					update.video.state.duration,
+					update.audio.state.duration);
+				if (video) {
+					if (update.video.cover.isNull()) {
+						base::take(video) = nullptr;
+					} else {
+						video->update();
+					}
+				} else if (!update.video.cover.isNull()) {
+					video = base::make_unique_q();
+					video->setAttribute(Qt::WA_OpaquePaintEvent);
+					video->paintRequest(
+					) | rpl::start_with_next([=](QRect rect) {
+						const auto till1 = duration
+							? (position * video->width() / duration)
+							: 0;
+						const auto till2 = duration
+							? (preloaded * video->width() / duration)
+							: 0;
+						if (player->ready()) {
+							Painter(video.get()).drawImage(
+								video->rect(),
+								player->frame(FrameRequest()));
+						} else {
+							Painter(video.get()).fillRect(
+								rect,
+								Qt::black);
+						}
+						Painter(video.get()).fillRect(
+							0,
+							0,
+							till1,
+							video->height(),
+							QColor(255, 255, 255, 64));
+						if (till2 > till1) {
+							Painter(video.get()).fillRect(
+								till1,
+								0,
+								till2 - till1,
+								video->height(),
+								QColor(255, 255, 255, 32));
+						}
+					}, video->lifetime());
+					const auto size = QSize(
+						ConvertScale(update.video.size.width()),
+						ConvertScale(update.video.size.height()));
+					const auto center = App::wnd()->geometry().center();
+					video->setGeometry(QRect(
+						center - QPoint(size.width(), size.height()) / 2,
+						size));
+					video->show();
+					video->shownValue(
+					) | rpl::start_with_next([=](bool shown) {
+						if (!shown) {
+							base::take(player) = nullptr;
+						}
+					}, video->lifetime());
+				}
+			}, [&](PreloadedVideo &update) {
+				if (preloaded < update.till) {
+					preloaded = update.till;
+					video->update();
+				}
+			}, [&](UpdateVideo &update) {
+				Expects(video != nullptr);
+
+				if (position < update.position) {
+					position = update.position;
+				}
+				video->update();
+			}, [&](PreloadedAudio &update) {
+				if (preloaded < update.till) {
+					preloaded = update.till;
+					if (video) {
+						video->update();
+					}
+				}
+			}, [&](UpdateAudio &update) {
+				if (position < update.position) {
+					position = update.position;
+					if (video) {
+						video->update();
+					}
+				}
+			}, [&](WaitingForData) {
+			}, [&](MutedByOther) {
+			}, [&](Finished) {
+				base::take(player) = nullptr;
+			});
+		}, [=](const Error &error) {
+			base::take(video) = nullptr;
+		}, [=] {
+		}, player->lifetime());
 	}
 }
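Note: the new updates handler above keeps `position` and `preloaded` growing monotonically and repaints the preview on every change; the paint handler then draws two translucent strips over the frame, one up to the played position and a dimmer one up to the preloaded point. A minimal sketch of that painting step (not part of the patch; plain Qt, illustrative names):

    #include <QPainter>
    #include <QColor>

    // Draws the same overlay as the paintRequest() handler above: a
    // brighter strip up to the played position and a dimmer strip up to
    // the preloaded position. All times share one unit (milliseconds).
    void PaintProgress(
            QPainter &p,
            int width,
            int height,
            qint64 position,
            qint64 preloaded,
            qint64 duration) {
        if (duration <= 0) {
            return;
        }
        const auto till1 = int(position * width / duration);
        const auto till2 = int(preloaded * width / duration);
        p.fillRect(0, 0, till1, height, QColor(255, 255, 255, 64));
        if (till2 > till1) {
            p.fillRect(
                till1,
                0,
                till2 - till1,
                height,
                QColor(255, 255, 255, 32));
        }
    }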
diff --git a/Telegram/SourceFiles/media/audio/media_audio.cpp b/Telegram/SourceFiles/media/audio/media_audio.cpp
index 71631b216..add31c10f 100644
--- a/Telegram/SourceFiles/media/audio/media_audio.cpp
+++ b/Telegram/SourceFiles/media/audio/media_audio.cpp
@@ -1011,12 +1011,6 @@ void Mixer::resume(const AudioMsgId &audio, bool fast) {
 			resetFadeStartPosition(type);
 		} else {
 			Audio::AttachToDevice();
-			if (track->state.state == State::PausedAtEnd) {
-				if (track->isStreamCreated()) {
-					alSourcei(track->stream.source, AL_SAMPLE_OFFSET, qMax(track->state.position - track->bufferedPosition, 0LL));
-					if (!checkCurrentALError(type)) return;
-				}
-			}
 		}
 		track->state.state = fast ? State::Playing : State::Resuming;
@@ -1035,6 +1029,10 @@ void Mixer::resume(const AudioMsgId &audio, bool fast) {
 			alSourcef(track->stream.source, AL_GAIN, ComputeVolume(type));
 			if (!checkCurrentALError(type)) return;
+			if (state == AL_STOPPED) {
+				alSourcei(track->stream.source, AL_SAMPLE_OFFSET, qMax(track->state.position - track->bufferedPosition, 0LL));
+				if (!checkCurrentALError(type)) return;
+			}
 			alSourcePlay(track->stream.source);
 			if (!checkCurrentALError(type)) return;
 		}
diff --git a/Telegram/SourceFiles/media/audio/media_audio_loaders.cpp b/Telegram/SourceFiles/media/audio/media_audio_loaders.cpp
index 15964a2fc..95aeb7f1a 100644
--- a/Telegram/SourceFiles/media/audio/media_audio_loaders.cpp
+++ b/Telegram/SourceFiles/media/audio/media_audio_loaders.cpp
@@ -311,6 +311,14 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
 			return;
 		}
+		if (state == AL_STOPPED) {
+			alSourcei(track->stream.source, AL_SAMPLE_OFFSET, qMax(track->state.position - track->bufferedPosition, 0LL));
+			if (!internal::audioCheckError()) {
+				setStoppedState(track, State::StoppedAtError);
+				emitError(type);
+				return;
+			}
+		}
 		alSourcePlay(track->stream.source);
 		if (!internal::audioCheckError()) {
 			setStoppedState(track, State::StoppedAtError);
diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_common.h b/Telegram/SourceFiles/media/streaming/media_streaming_common.h
index 5a9f1dbaf..8e4040b65 100644
--- a/Telegram/SourceFiles/media/streaming/media_streaming_common.h
+++ b/Telegram/SourceFiles/media/streaming/media_streaming_common.h
@@ -81,6 +81,9 @@ struct WaitingForData {
 struct MutedByOther {
 };
 
+struct Finished {
+};
+
 struct Update {
 	base::variant<
 		Information,
@@ -89,7 +92,8 @@ struct Update {
 		PreloadedAudio,
 		UpdateAudio,
 		WaitingForData,
-		MutedByOther> data;
+		MutedByOther,
+		Finished> data;
 };
 
 struct Error {
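Note: media_streaming_common.h above adds a Finished alternative to the Update variant, which is what lets the test player in data_document.cpp release itself when playback ends. A sketch of how a consumer can react to it - local stand-in types and std::variant are used here only to keep the example self-contained, while the real code stores the alternatives in base::variant and dispatches with match():

    #include <variant>
    #include <cstdio>

    // Local stand-ins for the update types declared in
    // media_streaming_common.h; only the shape matters here.
    struct WaitingForData {};
    struct MutedByOther {};
    struct Finished {};

    using Update = std::variant<WaitingForData, MutedByOther, Finished>;

    // React to the new Finished alternative, e.g. by tearing down the
    // player object that produced the updates.
    void HandleUpdate(const Update &update) {
        if (std::holds_alternative<Finished>(update)) {
            std::puts("playback finished - release the player");
        }
    }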
diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_file.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_file.cpp
index bc71b873a..53d662a9a 100644
--- a/Telegram/SourceFiles/media/streaming/media_streaming_file.cpp
+++ b/Telegram/SourceFiles/media/streaming/media_streaming_file.cpp
@@ -110,8 +110,15 @@ Stream File::Context::initStream(AVMediaType type) {
 	}
 
 	const auto info = _formatContext->streams[index];
+	result.codec = MakeCodecPointer(info);
+	if (!result.codec) {
+		return {};
+	}
+
 	if (type == AVMEDIA_TYPE_VIDEO) {
+		const auto codec = result.codec.get();
 		result.rotation = ReadRotationFromMetadata(info);
+		result.dimensions = QSize(codec->width, codec->height);
 	} else if (type == AVMEDIA_TYPE_AUDIO) {
 		result.frequency = info->codecpar->sample_rate;
 		if (!result.frequency) {
@@ -119,10 +126,6 @@ Stream File::Context::initStream(AVMediaType type) {
 		}
 	}
 
-	result.codec = MakeCodecPointer(info);
-	if (!result.codec) {
-		return {};
-	}
 	result.frame = MakeFramePointer();
 	if (!result.frame) {
 		return {};
 	}
@@ -137,19 +140,35 @@ Stream File::Context::initStream(AVMediaType type) {
 	return result;
 }
 
-void File::Context::seekToPosition(crl::time position) {
+void File::Context::seekToPosition(
+		const Stream &stream,
+		crl::time position) {
 	auto error = AvErrorWrap();
 
 	if (!position) {
 		return;
 	}
-	const auto streamIndex = -1;
-	const auto seekFlags = 0;
+	//
+	// Non backward search reads the whole file if the position is after
+	// the last keyframe inside the index. So we search only backward.
+	//
+	//const auto seekFlags = 0;
+	//error = av_seek_frame(
+	//	_formatContext,
+	//	streamIndex,
+	//	TimeToPts(position, kUniversalTimeBase),
+	//	seekFlags);
+	//if (!error) {
+	//	return;
+	//}
+	//
 	error = av_seek_frame(
 		_formatContext,
-		streamIndex,
-		TimeToPts(position, kUniversalTimeBase),
-		seekFlags);
+		stream.index,
+		TimeToPts(
+			std::clamp(position, crl::time(0), stream.duration - 1),
+			stream.timeBase),
+		AVSEEK_FLAG_BACKWARD);
 	if (!error) {
 		return;
 	}
@@ -192,12 +211,20 @@ void File::Context::start(crl::time position) {
 	}
 
 	_formatContext->pb = _ioContext;
-	error = avformat_open_input(&_formatContext, nullptr, nullptr, nullptr);
+	auto options = (AVDictionary*)nullptr;
+	const auto guard = gsl::finally([&] { av_dict_free(&options); });
+	av_dict_set(&options, "usetoc", "1", 0);
+	error = avformat_open_input(
+		&_formatContext,
+		nullptr,
+		nullptr,
+		&options);
 	if (error) {
 		_ioBuffer = nullptr;
 		return logFatal(qstr("avformat_open_input"), error);
 	}
 	_opened = true;
+	_formatContext->flags |= AVFMT_FLAG_FAST_SEEK;
 
 	if ((error = avformat_find_stream_info(_formatContext, nullptr))) {
 		return logFatal(qstr("avformat_find_stream_info"), error);
 	}
@@ -213,7 +240,9 @@ void File::Context::start(crl::time position) {
 		return;
 	}
 
-	seekToPosition(position);
+	if (video.codec || audio.codec) {
+		seekToPosition(video.codec ? video : audio, position);
+	}
 	if (unroll()) {
 		return;
 	}
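Note: the media_streaming_file.cpp changes above switch seeking to a single backward av_seek_frame() on a concrete stream, with the target clamped below the stream duration and expressed in that stream's time base; the demuxer is also opened with the "usetoc" option and AVFMT_FLAG_FAST_SEEK. A minimal sketch of the same seek call against the plain FFmpeg API (not part of the patch; helper name is illustrative, durationMs is assumed to be known and positive):

    extern "C" {
    #include <libavformat/avformat.h>
    #include <libavutil/mathematics.h>
    }
    #include <algorithm>
    #include <cstdint>

    // Seek the way the new File::Context::seekToPosition() does: clamp the
    // target below the stream duration, convert it from milliseconds into
    // the stream's own time base and search backward so the demuxer lands
    // on the preceding keyframe instead of scanning toward EOF.
    bool SeekBackward(
            AVFormatContext *format,
            const AVStream *stream,
            std::int64_t positionMs,
            std::int64_t durationMs) { // assumed > 0
        const auto clamped = std::clamp<std::int64_t>(
            positionMs,
            0,
            durationMs - 1);
        const auto ts = av_rescale_q(
            clamped,
            AVRational{ 1, 1000 },  // source unit: milliseconds
            stream->time_base);     // destination unit: stream ticks
        return av_seek_frame(
            format,
            stream->index,
            ts,
            AVSEEK_FLAG_BACKWARD) >= 0;
    }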
diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_file.h b/Telegram/SourceFiles/media/streaming/media_streaming_file.h
index 5a61a6318..ab590c004 100644
--- a/Telegram/SourceFiles/media/streaming/media_streaming_file.h
+++ b/Telegram/SourceFiles/media/streaming/media_streaming_file.h
@@ -69,7 +69,7 @@ private:
 	void fail();
 
 	Stream initStream(AVMediaType type);
-	void seekToPosition(crl::time position);
+	void seekToPosition(const Stream &stream, crl::time position);
 
 	// TODO base::expected.
 	[[nodiscard]] base::variant readPacket();
diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_loader_mtproto.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_loader_mtproto.cpp
index 8e3445b06..c5bacb8d0 100644
--- a/Telegram/SourceFiles/media/streaming/media_streaming_loader_mtproto.cpp
+++ b/Telegram/SourceFiles/media/streaming/media_streaming_loader_mtproto.cpp
@@ -13,7 +13,7 @@ namespace Media {
 namespace Streaming {
 namespace {
 
-constexpr auto kMaxConcurrentRequests = 2; // #TODO streaming
+constexpr auto kMaxConcurrentRequests = 2;
 
 } // namespace
 
diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_player.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_player.cpp
index bc5f3b121..801d8ef73 100644
--- a/Telegram/SourceFiles/media/streaming/media_streaming_player.cpp
+++ b/Telegram/SourceFiles/media/streaming/media_streaming_player.cpp
@@ -153,7 +153,7 @@ void Player::fileReady(Stream &&video, Stream &&audio) {
 	_waitingForData = false;
 
 	const auto weak = base::make_weak(&_sessionGuard);
-	const auto ready = [=](const Information & data) {
+	const auto ready = [=](const Information &data) {
 		crl::on_main(weak, [=, data = data]() mutable {
 			streamReady(std::move(data));
 		});
@@ -371,7 +371,12 @@ void Player::start() {
 		) | rpl::start_with_next_done([=](crl::time position) {
 			audioPlayedTill(position);
 		}, [=] {
-			// audio finished
+			if (_stage == Stage::Started) {
+				_audioFinished = true;
+				if (!_video || _videoFinished) {
+					_updates.fire({ Finished() });
+				}
+			}
 		}, _lifetime);
 	}
 	if (_video) {
@@ -380,22 +385,29 @@ void Player::start() {
 			_nextFrameTime = when;
 			checkNextFrame();
 		}, [=] {
-			// video finished
+			if (_stage == Stage::Started) {
+				_videoFinished = true;
+				if (!_audio || _audioFinished) {
+					_updates.fire({ Finished() });
+				}
+			}
 		}, _lifetime);
 	}
 }
 
 void Player::stop() {
 	_file->stop();
-	_audio = nullptr;
-	_video = nullptr;
-	_paused = false;
-	_information = Information();
-	invalidate_weak_ptrs(&_sessionGuard);
 	if (_stage != Stage::Failed) {
 		_stage = Stage::Uninitialized;
 	}
-	_updates = rpl::event_stream();
+	_audio = nullptr;
+	_video = nullptr;
+	invalidate_weak_ptrs(&_sessionGuard);
+	_paused = false;
+	_audioFinished = false;
+	_videoFinished = false;
+	_readTillEnd = false;
+	_information = Information();
 }
 
 bool Player::failed() const {
diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_player.h b/Telegram/SourceFiles/media/streaming/media_streaming_player.h
index 8834c9988..2c9900649 100644
--- a/Telegram/SourceFiles/media/streaming/media_streaming_player.h
+++ b/Telegram/SourceFiles/media/streaming/media_streaming_player.h
@@ -122,6 +122,8 @@ private:
 	Information _information;
 	Stage _stage = Stage::Uninitialized;
 	bool _paused = false;
+	bool _audioFinished = false;
+	bool _videoFinished = false;
 	crl::time _startedTime = kTimeUnknown;
 	crl::time _pausedTime = kTimeUnknown;
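Note: Player::start() above now records per-track completion and fires a single Finished update only once every track that exists has reported done; Player::stop() resets those flags together with the rest of the playback state. A small standalone sketch of that join logic (not part of the patch; illustrative names, not the real Player members):

    #include <functional>
    #include <utility>

    // Each track reports "done" independently; a single onFinished
    // callback fires once every existing track has finished.
    class FinishJoiner {
    public:
        FinishJoiner(bool hasAudio, bool hasVideo, std::function<void()> onFinished)
        : _hasAudio(hasAudio)
        , _hasVideo(hasVideo)
        , _onFinished(std::move(onFinished)) {
        }

        void audioDone() {
            _audioFinished = true;
            check();
        }
        void videoDone() {
            _videoFinished = true;
            check();
        }

    private:
        void check() {
            if ((!_hasAudio || _audioFinished)
                && (!_hasVideo || _videoFinished)
                && !_notified) {
                _notified = true;
                _onFinished();
            }
        }

        bool _hasAudio = false;
        bool _hasVideo = false;
        bool _audioFinished = false;
        bool _videoFinished = false;
        bool _notified = false;
        std::function<void()> _onFinished;
    };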
diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_utility.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_utility.cpp
index 56cc36c40..f75965bc9 100644
--- a/Telegram/SourceFiles/media/streaming/media_streaming_utility.cpp
+++ b/Telegram/SourceFiles/media/streaming/media_streaming_utility.cpp
@@ -26,8 +26,21 @@ void AlignedImageBufferCleanupHandler(void* data) {
 	delete[] buffer;
 }
 
+bool IsAlignedImage(const QImage &image) {
+	return !(reinterpret_cast(image.bits()) % kAlignImageBy)
+		&& !(image.bytesPerLine() % kAlignImageBy);
+}
+
+void ClearFrameMemory(AVFrame *frame) {
+	if (frame && frame->data[0]) {
+		av_frame_unref(frame);
+	}
+}
+
+} // namespace
+
 // Create a QImage of desired size where all the data is properly aligned.
-QImage CreateAlignedImage(QSize size) {
+QImage CreateImageForOriginalFrame(QSize size) {
 	const auto width = size.width();
 	const auto height = size.height();
 	const auto widthAlign = kAlignImageBy / kPixelBytesSize;
 	const auto neededWidth = width + ((width % widthAlign)
 		? (widthAlign - (width % widthAlign))
 		: 0);
 	const auto perLine = neededWidth * kPixelBytesSize;
 	const auto buffer = new uchar[perLine * height + kAlignImageBy];
-	const auto cleanupData = static_cast(buffer);
+	const auto cleanupData = static_cast(buffer);
 	const auto address = reinterpret_cast(buffer);
 	const auto alignedBuffer = buffer + ((address % kAlignImageBy)
 		? (kAlignImageBy - (address % kAlignImageBy))
 		: 0);
@@ -51,19 +64,6 @@
 		cleanupData);
 }
 
-bool IsAlignedImage(const QImage &image) {
-	return !(reinterpret_cast(image.bits()) % kAlignImageBy)
-		&& !(image.bytesPerLine() % kAlignImageBy);
-}
-
-void ClearFrameMemory(AVFrame *frame) {
-	if (frame && frame->data[0]) {
-		av_frame_unref(frame);
-	}
-}
-
-} // namespace
-
 CodecPointer MakeCodecPointer(not_null stream) {
 	auto error = AvErrorWrap();
@@ -293,7 +293,7 @@ QImage ConvertFrame(
 		|| storage.size() != resize
 		|| !storage.isDetached()
 		|| !IsAlignedImage(storage)) {
-		storage = CreateAlignedImage(resize);
+		storage = CreateImageForOriginalFrame(resize);
 	}
 	const auto format = AV_PIX_FMT_BGRA;
 	const auto hasDesiredFormat = (frame->format == format)
diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_utility.h b/Telegram/SourceFiles/media/streaming/media_streaming_utility.h
index 9fca94901..72b083230 100644
--- a/Telegram/SourceFiles/media/streaming/media_streaming_utility.h
+++ b/Telegram/SourceFiles/media/streaming/media_streaming_utility.h
@@ -154,6 +154,7 @@ struct Stream {
 
 	// Video only.
 	int rotation = 0;
+	QSize dimensions;
 	SwsContextPointer swsContext;
 };
 
@@ -172,6 +173,8 @@ void LogError(QLatin1String method, AvErrorWrap error);
 [[nodiscard]] bool RotationSwapWidthHeight(int rotation);
 [[nodiscard]] AvErrorWrap ProcessPacket(Stream &stream, Packet &&packet);
 [[nodiscard]] AvErrorWrap ReadNextFrame(Stream &stream);
+
+[[nodiscard]] QImage CreateImageForOriginalFrame(QSize size);
 [[nodiscard]] QImage ConvertFrame(
 	Stream& stream,
 	QSize resize,
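Note: CreateImageForOriginalFrame() above is the old CreateAlignedImage() promoted to a public helper (declared in media_streaming_utility.h) so the video track can build a black "fake last frame" when seeking to the very end; it over-allocates and aligns both the buffer start and the stride to kAlignImageBy. A sketch of that alignment arithmetic (not part of the patch; constants and names are illustrative):

    #include <cstdint>
    #include <cstddef>

    constexpr std::size_t kAlign = 16;        // kAlignImageBy in the patch
    constexpr std::size_t kBytesPerPixel = 4; // ARGB32

    // Round the bytes-per-line up to a multiple of kAlign, as the
    // aligned-image helper does for its stride.
    constexpr std::size_t AlignedBytesPerLine(std::size_t width) {
        const auto raw = width * kBytesPerPixel;
        return (raw + kAlign - 1) / kAlign * kAlign;
    }

    // Shift the buffer start so the first pixel is kAlign-byte aligned.
    inline std::uint8_t *AlignPointer(std::uint8_t *p) {
        const auto address = reinterpret_cast<std::uintptr_t>(p);
        const auto padding = (address % kAlign)
            ? (kAlign - (address % kAlign))
            : 0;
        return p + padding;
    }

    // Usage: allocate AlignedBytesPerLine(width) * height + kAlign bytes,
    // hand AlignPointer(buffer) plus the aligned stride to the QImage
    // constructor, and keep the raw pointer for the cleanup handler.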
diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_video_track.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_video_track.cpp
index 37a776f10..8e503b0cf 100644
--- a/Telegram/SourceFiles/media/streaming/media_streaming_video_track.cpp
+++ b/Telegram/SourceFiles/media/streaming/media_streaming_video_track.cpp
@@ -47,6 +47,10 @@ private:
 	[[nodiscard]] bool interrupted() const;
 	[[nodiscard]] bool tryReadFirstFrame(Packet &&packet);
 	[[nodiscard]] bool fillStateFromFrame();
+	[[nodiscard]] bool fillStateFromFakeLastFrame();
+	[[nodiscard]] bool fillStateFromFrameTime(crl::time frameTime);
+	[[nodiscard]] QImage createFakeLastFrame() const;
+	[[nodiscard]] bool processFirstFrame(QImage frame);
 	void queueReadFrames(crl::time delay = 0);
 	void readFrames();
 	[[nodiscard]] bool readFrame(not_null frame);
@@ -102,7 +106,9 @@ VideoTrackObject::VideoTrackObject(
 }
 
 rpl::producer VideoTrackObject::displayFrameAt() const {
-	return _nextFrameDisplayTime.value();
+	return interrupted()
+		? rpl::complete()
+		: _nextFrameDisplayTime.value();
 }
 
 void VideoTrackObject::process(Packet &&packet) {
@@ -151,7 +157,8 @@ void VideoTrackObject::readFrames() {
 bool VideoTrackObject::readFrame(not_null frame) {
 	if (const auto error = ReadNextFrame(_stream)) {
 		if (error.code() == AVERROR_EOF) {
-			// read till end
+			interrupt();
+			_nextFrameDisplayTime.reset(kTimeUnknown);
 		} else if (error.code() != AVERROR(EAGAIN) || _noMoreData) {
 			interrupt();
 			_error();
@@ -249,22 +256,42 @@ bool VideoTrackObject::tryReadFirstFrame(Packet &&packet) {
 	if (ProcessPacket(_stream, std::move(packet)).failed()) {
 		return false;
 	}
+	auto frame = QImage();
 	if (const auto error = ReadNextFrame(_stream)) {
 		if (error.code() == AVERROR_EOF) {
-			// #TODO streaming fix seek to the end.
-			return false;
+			if (!fillStateFromFakeLastFrame()) {
+				return false;
+			}
+			return processFirstFrame(createFakeLastFrame());
 		} else if (error.code() != AVERROR(EAGAIN) || _noMoreData) {
 			return false;
+		} else {
+			// Waiting for more packets.
+			return true;
 		}
-		return true;
 	} else if (!fillStateFromFrame()) {
 		return false;
 	}
-	auto frame = ConvertFrame(_stream, QSize(), QImage());
+	return processFirstFrame(ConvertFrame(_stream, QSize(), QImage()));
+}
+
+QImage VideoTrackObject::createFakeLastFrame() const {
+	if (_stream.dimensions.isEmpty()) {
+		LOG(("Streaming Error: Can't seek to the end of the video "
+			"in case the codec doesn't provide valid dimensions."));
+		return QImage();
+	}
+	auto result = CreateImageForOriginalFrame(_stream.dimensions);
+	result.fill(Qt::black);
+	return result;
+}
+
+bool VideoTrackObject::processFirstFrame(QImage frame) {
 	if (frame.isNull()) {
 		return false;
 	}
 	_shared->init(std::move(frame), _syncTimePoint.trackTime);
+	_nextFrameDisplayTime.reset(_syncTimePoint.trackTime);
 	callReady();
 	if (!_stream.queue.empty()) {
 		queueReadFrames();
@@ -285,13 +312,20 @@ crl::time VideoTrackObject::currentFramePosition() const {
 }
 
 bool VideoTrackObject::fillStateFromFrame() {
+	return fillStateFromFrameTime(currentFramePosition());
+}
+
+bool VideoTrackObject::fillStateFromFakeLastFrame() {
+	return fillStateFromFrameTime(_stream.duration);
+}
+
+bool VideoTrackObject::fillStateFromFrameTime(crl::time frameTime) {
 	Expects(_syncTimePoint.trackTime == kTimeUnknown);
 
-	const auto position = currentFramePosition();
-	if (position == kTimeUnknown) {
+	if (frameTime == kTimeUnknown) {
 		return false;
 	}
-	_nextFrameDisplayTime = _syncTimePoint.trackTime = position;
+	_syncTimePoint.trackTime = frameTime;
 	return true;
 }
diff --git a/Telegram/SourceFiles/rpl/event_stream.h b/Telegram/SourceFiles/rpl/event_stream.h
index 52186490f..f98be33d6 100644
--- a/Telegram/SourceFiles/rpl/event_stream.h
+++ b/Telegram/SourceFiles/rpl/event_stream.h
@@ -108,8 +108,8 @@ template
 inline event_stream &event_stream::operator=(
 		event_stream &&other) {
 	if (this != &other) {
-		fire_done();
-		_data = details::take(other._data);
+		std::swap(_data, other._data);
+		other.fire_done();
 	}
 	return *this;
 }
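Note: the event_stream move assignment above now swaps the data first and only then completes the subscribers that were attached to the assigned-to stream, by firing done through the moved-from object; handlers that run during that notification therefore already observe the stream in its new state. A minimal sketch of the same pattern (not part of the patch; Data stands in for the stream's shared subscriber state):

    #include <utility>

    // Stand-in for the shared subscriber state held by an event stream.
    struct Data {
        void notifyDone() noexcept {
            // deliver the "done" event to whoever is subscribed here
        }
    };

    class Stream {
    public:
        Stream &operator=(Stream &&other) noexcept {
            if (this != &other) {
                // Take the new state first, then complete the subscribers
                // that used to belong to *this (now held by `other`).
                std::swap(_data, other._data);
                other._data.notifyDone();
            }
            return *this;
        }

    private:
        Data _data;
    };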