From 4964b8b4880b08e861e8647345cfa0c5e3eba0ec Mon Sep 17 00:00:00 2001 From: John Preston Date: Wed, 25 Jan 2017 00:24:39 +0300 Subject: [PATCH] Beta 1000002001: Recreate audio device when no output or device changes. --- Telegram/Resources/winrc/Telegram.rc | 8 +- Telegram/Resources/winrc/Updater.rc | 8 +- Telegram/SourceFiles/config.h | 10 - Telegram/SourceFiles/core/version.h | 2 +- .../history/history_media_types.cpp | 33 +- Telegram/SourceFiles/historywidget.cpp | 6 +- .../inline_bot_layout_internal.cpp | 25 +- Telegram/SourceFiles/mainwidget.cpp | 17 +- Telegram/SourceFiles/media/media_audio.cpp | 1045 +++++++++++------ Telegram/SourceFiles/media/media_audio.h | 183 +-- .../SourceFiles/media/media_audio_capture.cpp | 35 +- .../media/media_audio_ffmpeg_loader.cpp | 2 +- .../media/media_audio_ffmpeg_loader.h | 7 +- .../SourceFiles/media/media_audio_loaders.cpp | 228 ++-- .../SourceFiles/media/media_audio_loaders.h | 4 +- .../media/media_child_ffmpeg_loader.cpp | 2 +- .../media/media_child_ffmpeg_loader.h | 7 +- .../SourceFiles/media/media_clip_ffmpeg.cpp | 6 +- .../media/player/media_player_cover.cpp | 66 +- .../media/player/media_player_cover.h | 7 +- .../media/player/media_player_instance.cpp | 55 +- .../media/player/media_player_instance.h | 12 +- .../media/player/media_player_widget.cpp | 64 +- .../media/player/media_player_widget.h | 7 +- .../media/view/media_clip_controller.cpp | 30 +- .../media/view/media_clip_controller.h | 12 +- .../media/view/media_clip_playback.cpp | 14 +- .../media/view/media_clip_playback.h | 8 +- Telegram/SourceFiles/mediaview.cpp | 16 +- Telegram/SourceFiles/mediaview.h | 7 +- .../SourceFiles/overview/overview_layout.cpp | 26 +- Telegram/SourceFiles/overviewwidget.cpp | 7 +- .../SourceFiles/platform/platform_audio.h | 37 + .../SourceFiles/platform/win/audio_win.cpp | 157 +++ Telegram/SourceFiles/platform/win/audio_win.h | 31 + Telegram/SourceFiles/pspecific_mac_p.mm | 16 +- Telegram/SourceFiles/structs.cpp | 28 +- Telegram/SourceFiles/window/window.style | 2 +- Telegram/build/version | 2 +- Telegram/gyp/Telegram.gyp | 9 +- 40 files changed, 1363 insertions(+), 878 deletions(-) create mode 100644 Telegram/SourceFiles/platform/platform_audio.h create mode 100644 Telegram/SourceFiles/platform/win/audio_win.cpp create mode 100644 Telegram/SourceFiles/platform/win/audio_win.h diff --git a/Telegram/Resources/winrc/Telegram.rc b/Telegram/Resources/winrc/Telegram.rc index 44142143c..b909adec5 100644 --- a/Telegram/Resources/winrc/Telegram.rc +++ b/Telegram/Resources/winrc/Telegram.rc @@ -34,8 +34,8 @@ IDI_ICON1 ICON "..\\art\\icon256.ico" // VS_VERSION_INFO VERSIONINFO - FILEVERSION 1,0,2,0 - PRODUCTVERSION 1,0,2,0 + FILEVERSION 1,0,2,1 + PRODUCTVERSION 1,0,2,1 FILEFLAGSMASK 0x3fL #ifdef _DEBUG FILEFLAGS 0x1L @@ -51,10 +51,10 @@ BEGIN BLOCK "040904b0" BEGIN VALUE "CompanyName", "Telegram Messenger LLP" - VALUE "FileVersion", "1.0.2.0" + VALUE "FileVersion", "1.0.2.1" VALUE "LegalCopyright", "Copyright (C) 2014-2017" VALUE "ProductName", "Telegram Desktop" - VALUE "ProductVersion", "1.0.2.0" + VALUE "ProductVersion", "1.0.2.1" END END BLOCK "VarFileInfo" diff --git a/Telegram/Resources/winrc/Updater.rc b/Telegram/Resources/winrc/Updater.rc index 2c321a820..e78702a93 100644 --- a/Telegram/Resources/winrc/Updater.rc +++ b/Telegram/Resources/winrc/Updater.rc @@ -25,8 +25,8 @@ LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US // VS_VERSION_INFO VERSIONINFO - FILEVERSION 1,0,2,0 - PRODUCTVERSION 1,0,2,0 + FILEVERSION 1,0,2,1 + PRODUCTVERSION 1,0,2,1 FILEFLAGSMASK 0x3fL #ifdef 
_DEBUG FILEFLAGS 0x1L @@ -43,10 +43,10 @@ BEGIN BEGIN VALUE "CompanyName", "Telegram Messenger LLP" VALUE "FileDescription", "Telegram Updater" - VALUE "FileVersion", "1.0.2.0" + VALUE "FileVersion", "1.0.2.1" VALUE "LegalCopyright", "Copyright (C) 2014-2017" VALUE "ProductName", "Telegram Desktop" - VALUE "ProductVersion", "1.0.2.0" + VALUE "ProductVersion", "1.0.2.1" END END BLOCK "VarFileInfo" diff --git a/Telegram/SourceFiles/config.h b/Telegram/SourceFiles/config.h index 95bdff9d0..2ec3e0307 100644 --- a/Telegram/SourceFiles/config.h +++ b/Telegram/SourceFiles/config.h @@ -97,21 +97,11 @@ enum { MediaOverviewStartPerPage = 5, MediaOverviewPreloadCount = 4, - AudioSimultaneousLimit = 4, - AudioCheckPositionTimeout = 100, // 100ms per check audio pos - AudioCheckPositionDelta = 2400, // update position called each 2400 samples - AudioFadeTimeout = 7, // 7ms - AudioFadeDuration = 500, - AudioVoiceMsgSkip = 400, // 200ms - AudioVoiceMsgFade = 300, // 300ms - AudioPreloadSamples = 2 * 48000, // preload next part if less than 5 seconds remains - AudioVoiceMsgFrequency = 48000, // 48 kHz AudioVoiceMsgMaxLength = 100 * 60, // 100 minutes AudioVoiceMsgUpdateView = 100, // 100ms AudioVoiceMsgChannels = 2, // stereo AudioVoiceMsgBufferSize = 256 * 1024, // 256 Kb buffers (1.3 - 3.0 secs) AudioVoiceMsgInMemory = 2 * 1024 * 1024, // 2 Mb audio is hold in memory and auto loaded - AudioPauseDeviceTimeout = 3000, // pause in 3 secs after playing is over WaveformSamplesCount = 100, diff --git a/Telegram/SourceFiles/core/version.h b/Telegram/SourceFiles/core/version.h index 1cf709c22..e625a8ff1 100644 --- a/Telegram/SourceFiles/core/version.h +++ b/Telegram/SourceFiles/core/version.h @@ -22,7 +22,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org #include "core/utils.h" -#define BETA_VERSION_MACRO (0ULL) +#define BETA_VERSION_MACRO (1000002001ULL) constexpr int AppVersion = 1000002; constexpr str_const AppVersionStr = "1.0.2"; diff --git a/Telegram/SourceFiles/history/history_media_types.cpp b/Telegram/SourceFiles/history/history_media_types.cpp index 75dc4c42a..0bf167b2c 100644 --- a/Telegram/SourceFiles/history/history_media_types.cpp +++ b/Telegram/SourceFiles/history/history_media_types.cpp @@ -1436,44 +1436,43 @@ bool HistoryDocument::updateStatusText() const { } else if (_data->loading()) { statusSize = _data->loadOffset(); } else if (_data->loaded()) { + using State = Media::Player::State; statusSize = FileStatusSizeLoaded; if (_data->voice()) { - AudioMsgId playing; - auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice); - if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { + auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Voice); + if (state.id == AudioMsgId(_data, _parent->fullId()) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) { if (auto voice = Get()) { bool was = voice->_playback; voice->ensurePlayback(this); - if (!was || playbackState.position != voice->_playback->_position) { - float64 prg = playbackState.duration ? snap(float64(playbackState.position) / playbackState.duration, 0., 1.) : 0.; - if (voice->_playback->_position < playbackState.position) { + if (!was || state.position != voice->_playback->_position) { + float64 prg = state.duration ? snap(float64(state.position) / state.duration, 0., 1.) 
: 0.; + if (voice->_playback->_position < state.position) { voice->_playback->a_progress.start(prg); } else { voice->_playback->a_progress = anim::value(0., prg); } - voice->_playback->_position = playbackState.position; + voice->_playback->_position = state.position; voice->_playback->_a_progress.start(); } } - statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + statusSize = -1 - (state.position / state.frequency); + realDuration = (state.duration / state.frequency); + showPause = (state.state == State::Playing || state.state == State::Resuming || state.state == State::Starting); } else { if (auto voice = Get()) { voice->checkPlaybackFinished(); } } } else if (_data->song()) { - AudioMsgId playing; - auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Song); + if (state.id == AudioMsgId(_data, _parent->fullId()) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) { + statusSize = -1 - (state.position / state.frequency); + realDuration = (state.duration / state.frequency); + showPause = (state.state == State::Playing || state.state == State::Resuming || state.state == State::Starting); } else { } - if (!showPause && (playing == AudioMsgId(_data, _parent->fullId()))) { + if (!showPause && (state.id == AudioMsgId(_data, _parent->fullId()))) { showPause = Media::Player::instance()->isSeeking(); } } diff --git a/Telegram/SourceFiles/historywidget.cpp b/Telegram/SourceFiles/historywidget.cpp index e88596faf..c28d64b79 100644 --- a/Telegram/SourceFiles/historywidget.cpp +++ b/Telegram/SourceFiles/historywidget.cpp @@ -3532,7 +3532,7 @@ void HistoryWidget::onRecordDone(QByteArray result, VoiceWaveform waveform, qint if (!canWriteMessage() || result.isEmpty()) return; App::wnd()->activateWindow(); - auto duration = samples / AudioVoiceMsgFrequency; + auto duration = samples / Media::Player::kDefaultFrequency; auto to = FileLoadTo(_peer->id, _silent->checked(), replyToId()); auto caption = QString(); _fileLoader.addTask(MakeShared(result, duration, waveform, to, caption)); @@ -3547,7 +3547,7 @@ void HistoryWidget::onRecordUpdate(quint16 level, qint32 samples) { a_recordingLevel.start(level); _a_recording.start(); _recordingSamples = samples; - if (samples < 0 || samples >= AudioVoiceMsgFrequency * AudioVoiceMsgMaxLength) { + if (samples < 0 || samples >= Media::Player::kDefaultFrequency * AudioVoiceMsgMaxLength) { stopRecording(_peer && samples > 0 && _inField); } updateField(); @@ -8736,7 +8736,7 @@ void HistoryWidget::drawRecording(Painter &p, float64 recordActive) { 
p.drawEllipse(_attachToggle->x() + (_attachEmoji->width() - d) / 2, _attachToggle->y() + (_attachToggle->height() - d) / 2, d, d); } - QString duration = formatDurationText(_recordingSamples / AudioVoiceMsgFrequency); + auto duration = formatDurationText(_recordingSamples / Media::Player::kDefaultFrequency); p.setFont(st::historyRecordFont); p.setPen(st::historyRecordDurationFg); diff --git a/Telegram/SourceFiles/inline_bots/inline_bot_layout_internal.cpp b/Telegram/SourceFiles/inline_bots/inline_bot_layout_internal.cpp index 5934bd616..08d8e63d5 100644 --- a/Telegram/SourceFiles/inline_bots/inline_bot_layout_internal.cpp +++ b/Telegram/SourceFiles/inline_bots/inline_bot_layout_internal.cpp @@ -830,25 +830,24 @@ bool File::updateStatusText() const { } else if (document->loading()) { statusSize = document->loadOffset(); } else if (document->loaded()) { + using State = Media::Player::State; if (document->voice()) { statusSize = FileStatusSizeLoaded; - AudioMsgId playing; - auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice); - if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Voice); + if (state.id == AudioMsgId(document, FullMsgId()) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) { + statusSize = -1 - (state.position / state.frequency); + realDuration = (state.duration / state.frequency); + showPause = (state.state == State::Playing || state.state == State::Resuming || state.state == State::Starting); } } else if (document->song()) { statusSize = FileStatusSizeLoaded; - AudioMsgId playing; - auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing == AudioMsgId(document, FullMsgId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency)); - realDuration = playbackState.duration / (playbackState.frequency ? 
playbackState.frequency : AudioVoiceMsgFrequency); - showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Song); + if (state.id == AudioMsgId(document, FullMsgId()) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) { + statusSize = -1 - (state.position / state.frequency); + realDuration = (state.duration / state.frequency); + showPause = (state.state == State::Playing || state.state == State::Resuming || state.state == State::Starting); } - if (!showPause && (playing == AudioMsgId(document, FullMsgId())) && Media::Player::instance()->isSeeking()) { + if (!showPause && (state.id == AudioMsgId(document, FullMsgId())) && Media::Player::instance()->isSeeking()) { showPause = true; } } else { diff --git a/Telegram/SourceFiles/mainwidget.cpp b/Telegram/SourceFiles/mainwidget.cpp index 743a732fb..c8ea787d7 100644 --- a/Telegram/SourceFiles/mainwidget.cpp +++ b/Telegram/SourceFiles/mainwidget.cpp @@ -1563,10 +1563,10 @@ void MainWidget::ui_autoplayMediaInlineAsync(qint32 channelId, qint32 msgId) { } void MainWidget::handleAudioUpdate(const AudioMsgId &audioId) { - AudioMsgId playing; - auto playbackState = Media::Player::mixer()->currentState(&playing, audioId.type()); - if (playing == audioId && playbackState.state == AudioPlayerStoppedAtStart) { - playbackState.state = AudioPlayerStopped; + using State = Media::Player::State; + auto state = Media::Player::mixer()->currentState(audioId.type()); + if (state.id == audioId && state.state == State::StoppedAtStart) { + state.state = State::Stopped; Media::Player::mixer()->clearStoppedAtStart(audioId); auto document = audioId.audio(); @@ -1578,8 +1578,8 @@ void MainWidget::handleAudioUpdate(const AudioMsgId &audioId) { } } - if (playing == audioId && audioId.type() == AudioMsgId::Type::Song) { - if (!(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { + if (state.id == audioId && audioId.type() == AudioMsgId::Type::Song) { + if (!Media::Player::IsStopped(state.state) && state.state != State::Finishing) { if (!_playerUsingPanel && !_player) { createPlayer(); } @@ -1675,9 +1675,8 @@ void MainWidget::playerHeightUpdated() { updateControlsGeometry(); } if (!_playerHeight && _player->isHidden()) { - AudioMsgId playing; - auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing && (playbackState.state & AudioPlayerStoppedMask)) { + auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Song); + if (state.id && Media::Player::IsStopped(state.state)) { _playerVolume.destroyDelayed(); _player.destroyDelayed(); } diff --git a/Telegram/SourceFiles/media/media_audio.cpp b/Telegram/SourceFiles/media/media_audio.cpp index 98862cba9..0bb5dea64 100644 --- a/Telegram/SourceFiles/media/media_audio.cpp +++ b/Telegram/SourceFiles/media/media_audio.cpp @@ -24,6 +24,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org #include "media/media_audio_ffmpeg_loader.h" #include "media/media_child_ffmpeg_loader.h" #include "media/media_audio_loaders.h" +#include "platform/platform_audio.h" #include #include @@ -63,7 +64,8 @@ QMutex AudioMutex; ALCdevice *AudioDevice = nullptr; ALCcontext *AudioContext = nullptr; -float64 suppressAllGain = 1., suppressSongGain = 1.; +auto suppressAllGain = 1.; +auto suppressSongGain = 1.; } // namespace @@ -71,6 +73,13 @@ namespace 
Media { namespace Player { namespace { +constexpr auto kPreloadSamples = 2LL * 48000; // preload next part if less than 2 seconds remains +constexpr auto kFadeDuration = TimeMs(500); +constexpr auto kCheckPlaybackPositionTimeout = TimeMs(100); // 100ms per check audio position +constexpr auto kCheckPlaybackPositionDelta = 2400LL; // update position called each 2400 samples +constexpr auto kCheckFadingTimeout = TimeMs(7); // 7ms +constexpr auto kDetachDeviceTimeout = TimeMs(500); // destroy the audio device after 500ms of silence + struct NotifySound { QByteArray data; TimeMs lengthMs = 0; @@ -252,18 +261,18 @@ void CreateDefaultNotify() { void CloseAudioPlaybackDevice() { if (!AudioDevice) return; - delete base::take(MixerInstance); - + LOG(("Audio Info: closing audio playback device")); if (alIsSource(DefaultNotify.source)) { alSourceStop(DefaultNotify.source); - } - if (alIsBuffer(DefaultNotify.buffer)) { + alSourcei(DefaultNotify.source, AL_BUFFER, AL_NONE); alDeleteBuffers(1, &DefaultNotify.buffer); - DefaultNotify.buffer = 0; - } - if (alIsSource(DefaultNotify.source)) { alDeleteSources(1, &DefaultNotify.source); - DefaultNotify.source = 0; + } + DefaultNotify.buffer = 0; + DefaultNotify.source = 0; + + if (mixer()) { + mixer()->detachTracks(); } if (AudioContext) { @@ -290,9 +299,16 @@ void InitAudio() { EnumeratePlaybackDevices(); EnumerateCaptureDevices(); + + MixerInstance = new Mixer(); + + Platform::Audio::Init(); } void DeInitAudio() { + Platform::Audio::DeInit(); + + delete base::take(MixerInstance); CloseAudioPlaybackDevice(); } @@ -325,14 +341,30 @@ bool CreateAudioPlaybackDevice() { alDistanceModel(AL_NONE); - MixerInstance = new Mixer(); - return true; } +void DetachFromDeviceByTimer() { + QMutexLocker lock(&AudioMutex); + if (mixer()) { + mixer()->detachFromDeviceByTimer(); + } +} + +void DetachFromDevice() { + QMutexLocker lock(&AudioMutex); + CloseAudioPlaybackDevice(); + if (mixer()) { + mixer()->reattachIfNeeded(); + } +} + void PlayNotify() { + QMutexLocker lock(&AudioMutex); if (!mixer()) return; - if (!CreateAudioPlaybackDevice()) return; + + mixer()->reattachTracks(); + if (!AudioDevice) return; CreateDefaultNotify(); alSourcePlay(DefaultNotify.source); @@ -356,39 +388,172 @@ bool NotifyIsPlaying() { return false; } +float64 ComputeVolume(AudioMsgId::Type type) { + switch (type) { + case AudioMsgId::Type::Voice: return suppressAllGain; + case AudioMsgId::Type::Song: return suppressSongGain * Global::SongVolume(); + case AudioMsgId::Type::Video: return suppressSongGain * Global::VideoVolume(); + } + return 1.; +} + Mixer *mixer() { return MixerInstance; } -void Mixer::AudioMsg::clear() { - audio = AudioMsgId(); +void Mixer::Track::createStream() { + alGenSources(1, &stream.source); + alSourcef(stream.source, AL_PITCH, 1.f); + alSource3f(stream.source, AL_POSITION, 0, 0, 0); + alSource3f(stream.source, AL_VELOCITY, 0, 0, 0); + alSourcei(stream.source, AL_LOOPING, 0); + alGenBuffers(3, stream.buffers); +} + +void Mixer::Track::destroyStream() { + if (isStreamCreated()) { + alDeleteBuffers(3, stream.buffers); + alDeleteSources(1, &stream.source); + } + stream.source = 0; + for (auto i = 0; i != 3; ++i) { + stream.buffers[i] = 0; + } +} + +void Mixer::Track::reattach(AudioMsgId::Type type) { + if (isStreamCreated() || !samplesCount[0]) { + return; + } + + createStream(); + for (auto i = 0; i != kBuffersCount; ++i) { + if (!samplesCount[i]) { + break; + } + alBufferData(stream.buffers[i], format, bufferSamples[i].constData(), bufferSamples[i].size(), frequency); + 
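// Re-upload the decoded samples that were kept across detach() and queue them on the recreated source; the AL_SAMPLE_OFFSET call below then restores the playback position inside this re-queued data.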
alSourceQueueBuffers(stream.source, 1, stream.buffers + i); + } + + alSourcei(stream.source, AL_SAMPLE_OFFSET, qMax(state.position - bufferedPosition, 0LL)); + if (IsActive(state.state)) { + alSourcef(stream.source, AL_GAIN, ComputeVolume(type)); + alSourcePlay(stream.source); + } +} + +void Mixer::Track::detach() { + resetStream(); + destroyStream(); +} + +void Mixer::Track::clear() { + detach(); + + state = TrackState(); file = FileLocation(); data = QByteArray(); - playbackState = defaultState(); - skipStart = skipEnd = 0; + bufferedPosition = 0; + bufferedLength = 0; loading = false; - started = 0; - if (alIsSource(source)) { - alSourceStop(source); + loaded = false; + fadeStartPosition = 0; + + format = 0; + frequency = kDefaultFrequency; + for (int i = 0; i != kBuffersCount; ++i) { + samplesCount[i] = 0; + bufferSamples[i] = QByteArray(); } - for (int i = 0; i < 3; ++i) { - if (samplesCount[i]) { - ALuint buffer = 0; - // This cleans some random queued buffer, not exactly the buffers[i]. - alSourceUnqueueBuffers(source, 1, &buffer); - samplesCount[i] = 0; - } - } - nextBuffer = 0; videoData = nullptr; videoPlayId = 0; } +void Mixer::Track::started() { + resetStream(); + + bufferedPosition = 0; + bufferedLength = 0; + loaded = false; + fadeStartPosition = 0; + + format = 0; + frequency = kDefaultFrequency; + for (auto i = 0; i != kBuffersCount; ++i) { + samplesCount[i] = 0; + bufferSamples[i] = QByteArray(); + } +} + +bool Mixer::Track::isStreamCreated() const { + return alIsSource(stream.source); +} + +void Mixer::Track::ensureStreamCreated() { + if (!isStreamCreated()) { + createStream(); + } +} + +int Mixer::Track::getNotQueuedBufferIndex() { + // See if there are no free buffers right now. + while (samplesCount[kBuffersCount - 1] != 0) { + // Try to unqueue some buffer. + ALint processed = 0; + alGetSourcei(stream.source, AL_BUFFERS_PROCESSED, &processed); + if (processed < 1) { // No processed buffers, wait. + return -1; + } + + // Unqueue some processed buffer. + ALuint buffer = 0; + alSourceUnqueueBuffers(stream.source, 1, &buffer); + + // Find it in the list and clear it. + bool found = false; + for (auto i = 0; i != kBuffersCount; ++i) { + if (stream.buffers[i] == buffer) { + auto samplesInBuffer = samplesCount[i]; + bufferedPosition += samplesInBuffer; + bufferedLength -= samplesInBuffer; + for (auto j = i + 1; j != kBuffersCount; ++j) { + samplesCount[j - 1] = samplesCount[j]; + stream.buffers[j - 1] = stream.buffers[j]; + bufferSamples[j - 1] = bufferSamples[j]; + } + samplesCount[kBuffersCount - 1] = 0; + stream.buffers[kBuffersCount - 1] = buffer; + bufferSamples[kBuffersCount - 1] = QByteArray(); + found = true; + break; + } + } + if (!found) { + LOG(("Audio Error: Could not find the unqueued buffer! 
Buffer %1 in source %2 with processed count %3").arg(buffer).arg(stream.source).arg(processed)); + return -1; + } + } + + for (auto i = 0; i != kBuffersCount; ++i) { + if (!samplesCount[i]) { + return i; + } + } + return -1; +} + +void Mixer::Track::resetStream() { + if (isStreamCreated()) { + alSourceStop(stream.source); + alSourcei(stream.source, AL_BUFFER, AL_NONE); + } +} + Mixer::Mixer() : _fader(new Fader(&_faderThread)) , _loader(new Loaders(&_loaderThread)) { - connect(this, SIGNAL(faderOnTimer()), _fader, SLOT(onTimer())); + connect(this, SIGNAL(faderOnTimer()), _fader, SLOT(onTimer()), Qt::QueuedConnection); connect(this, SIGNAL(suppressSong()), _fader, SLOT(onSuppressSong())); connect(this, SIGNAL(unsuppressSong()), _fader, SLOT(onUnsuppressSong())); connect(this, SIGNAL(suppressAll()), _fader, SLOT(onSuppressAll())); @@ -416,29 +581,17 @@ Mixer::Mixer() Mixer::~Mixer() { { QMutexLocker lock(&AudioMutex); + + for (auto i = 0; i != kTogetherLimit; ++i) { + trackForType(AudioMsgId::Type::Voice, i)->clear(); + trackForType(AudioMsgId::Type::Song, i)->clear(); + } + _videoTrack.clear(); + + CloseAudioPlaybackDevice(); MixerInstance = nullptr; } - auto clearAudioMsg = [](AudioMsg *msg) { - alSourceStop(msg->source); - if (alIsBuffer(msg->buffers[0])) { - alDeleteBuffers(3, msg->buffers); - for (int j = 0; j < 3; ++j) { - msg->buffers[j] = msg->samplesCount[j] = 0; - } - } - if (alIsSource(msg->source)) { - alDeleteSources(1, &msg->source); - msg->source = 0; - } - }; - - for (int i = 0; i < AudioSimultaneousLimit; ++i) { - clearAudioMsg(dataForType(AudioMsgId::Type::Voice, i)); - clearAudioMsg(dataForType(AudioMsgId::Type::Song, i)); - } - clearAudioMsg(&_videoData); - _faderThread.quit(); _loaderThread.quit(); _faderThread.wait(); @@ -466,7 +619,7 @@ void Mixer::onStopped(const AudioMsgId &audio) { } } -Mixer::AudioMsg *Mixer::dataForType(AudioMsgId::Type type, int index) { +Mixer::Track *Mixer::trackForType(AudioMsgId::Type type, int index) { if (index < 0) { if (auto indexPtr = currentIndex(type)) { index = *indexPtr; @@ -475,15 +628,15 @@ Mixer::AudioMsg *Mixer::dataForType(AudioMsgId::Type type, int index) { } } switch (type) { - case AudioMsgId::Type::Voice: return &_audioData[index]; - case AudioMsgId::Type::Song: return &_songData[index]; - case AudioMsgId::Type::Video: return &_videoData; + case AudioMsgId::Type::Voice: return &_audioTracks[index]; + case AudioMsgId::Type::Song: return &_songTracks[index]; + case AudioMsgId::Type::Video: return &_videoTrack; } return nullptr; } -const Mixer::AudioMsg *Mixer::dataForType(AudioMsgId::Type type, int index) const { - return const_cast(this)->dataForType(type, index); +const Mixer::Track *Mixer::trackForType(AudioMsgId::Type type, int index) const { + return const_cast(this)->trackForType(type, index); } int *Mixer::currentIndex(AudioMsgId::Type type) { @@ -499,86 +652,97 @@ const int *Mixer::currentIndex(AudioMsgId::Type type) const { return const_cast(this)->currentIndex(type); } -bool Mixer::updateCurrentStarted(AudioMsgId::Type type, int32 pos) { - auto data = dataForType(type); - if (!data) return false; +void Mixer::resetFadeStartPosition(AudioMsgId::Type type, int positionInBuffered) { + auto track = trackForType(type); + if (!track) return; - if (pos < 0) { - if (alIsSource(data->source)) { - alGetSourcei(data->source, AL_SAMPLE_OFFSET, &pos); + if (positionInBuffered < 0) { + reattachTracks(); + if (track->isStreamCreated()) { + ALint currentPosition = 0; + alGetSourcei(track->stream.source, AL_SAMPLE_OFFSET, 
&currentPosition); + + if (Media::Player::PlaybackErrorHappened()) { + setStoppedState(track, State::StoppedAtError); + onError(track->state.id); + return; + } + + if (currentPosition == 0 && !internal::CheckAudioDeviceConnected()) { + track->fadeStartPosition = track->state.position; + return; + } + + positionInBuffered = currentPosition; } else { - pos = 0; - } - if (Media::Player::PlaybackErrorHappened()) { - setStoppedState(data, AudioPlayerStoppedAtError); - onError(data->audio); - return false; + positionInBuffered = 0; } } - data->started = data->playbackState.position = pos + data->skipStart; - return true; + auto fullPosition = track->bufferedPosition + positionInBuffered; + track->state.position = fullPosition; + track->fadeStartPosition = fullPosition; } bool Mixer::fadedStop(AudioMsgId::Type type, bool *fadedStart) { - auto current = dataForType(type); + auto current = trackForType(type); if (!current) return false; - switch (current->playbackState.state) { - case AudioPlayerStarting: - case AudioPlayerResuming: - case AudioPlayerPlaying: - current->playbackState.state = AudioPlayerFinishing; - updateCurrentStarted(type); - if (fadedStart) *fadedStart = true; - break; - case AudioPlayerPausing: - current->playbackState.state = AudioPlayerFinishing; - if (fadedStart) *fadedStart = true; - break; - case AudioPlayerPaused: - case AudioPlayerPausedAtEnd: - setStoppedState(current); - return true; + switch (current->state.state) { + case State::Starting: + case State::Resuming: + case State::Playing: { + current->state.state = State::Finishing; + resetFadeStartPosition(type); + if (fadedStart) *fadedStart = true; + } break; + case State::Pausing: { + current->state.state = State::Finishing; + if (fadedStart) *fadedStart = true; + } break; + case State::Paused: + case State::PausedAtEnd: { + setStoppedState(current); + } return true; } return false; } void Mixer::play(const AudioMsgId &audio, int64 position) { - if (!Media::Player::CreateAudioPlaybackDevice()) return; - auto type = audio.type(); AudioMsgId stopped; auto notLoadedYet = false; { QMutexLocker lock(&AudioMutex); + reattachTracks(); + if (!AudioDevice) return; bool fadedStart = false; - auto current = dataForType(type); + auto current = trackForType(type); if (!current) return; - if (current->audio != audio) { + if (current->state.id != audio) { if (fadedStop(type, &fadedStart)) { - stopped = current->audio; + stopped = current->state.id; } - if (current->audio) { - emit loaderOnCancel(current->audio); + if (current->state.id) { + emit loaderOnCancel(current->state.id); emit faderOnTimer(); } auto foundCurrent = currentIndex(type); - int index = 0; - for (; index < AudioSimultaneousLimit; ++index) { - if (dataForType(type, index)->audio == audio) { + auto index = 0; + for (; index != kTogetherLimit; ++index) { + if (trackForType(type, index)->state.id == audio) { *foundCurrent = index; break; } } - if (index == AudioSimultaneousLimit && ++*foundCurrent >= AudioSimultaneousLimit) { - *foundCurrent -= AudioSimultaneousLimit; + if (index == kTogetherLimit && ++*foundCurrent >= kTogetherLimit) { + *foundCurrent -= kTogetherLimit; } - current = dataForType(type); + current = trackForType(type); } - current->audio = audio; + current->state.id = audio; current->file = audio.audio()->location(true); current->data = audio.audio()->data(); if (current->file.isEmpty() && current->data.isEmpty()) { @@ -586,11 +750,11 @@ void Mixer::play(const AudioMsgId &audio, int64 position) { if (audio.type() == AudioMsgId::Type::Song) { 
setStoppedState(current); } else { - setStoppedState(current, AudioPlayerStoppedAtError); + setStoppedState(current, State::StoppedAtError); } } else { - current->playbackState.position = position; - current->playbackState.state = fadedStart ? AudioPlayerStarting : AudioPlayerPlaying; + current->state.position = position; + current->state.state = fadedStart ? State::Starting : State::Playing; current->loading = true; emit loaderOnStart(audio, position); if (type == AudioMsgId::Type::Voice) { @@ -616,31 +780,33 @@ void Mixer::initFromVideo(uint64 videoPlayId, std_::unique_ptr & QMutexLocker lock(&AudioMutex); // Pause current song. - auto currentSong = dataForType(AudioMsgId::Type::Song); - float64 suppressGain = suppressSongGain * Global::SongVolume(); + auto songType = AudioMsgId::Type::Song; + auto currentSong = trackForType(songType); - switch (currentSong->playbackState.state) { - case AudioPlayerStarting: - case AudioPlayerResuming: - case AudioPlayerPlaying: - currentSong->playbackState.state = AudioPlayerPausing; - updateCurrentStarted(AudioMsgId::Type::Song); - break; - case AudioPlayerFinishing: currentSong->playbackState.state = AudioPlayerPausing; break; + switch (currentSong->state.state) { + case State::Starting: + case State::Resuming: + case State::Playing: { + currentSong->state.state = State::Pausing; + resetFadeStartPosition(songType); + } break; + case State::Finishing: { + currentSong->state.state = State::Pausing; + } break; } auto type = AudioMsgId::Type::Video; - auto current = dataForType(type); + auto current = trackForType(type); t_assert(current != nullptr); - if (current->audio) { + if (current->state.id) { fadedStop(type); - stopped = current->audio; - emit loaderOnCancel(current->audio); + stopped = current->state.id; + emit loaderOnCancel(current->state.id); } emit faderOnTimer(); current->clear(); - current->audio = AudioMsgId(AudioMsgId::Type::Video); + current->state.id = AudioMsgId(AudioMsgId::Type::Video); current->videoPlayId = videoPlayId; current->videoData = std_::move(data); { @@ -651,9 +817,9 @@ void Mixer::initFromVideo(uint64 videoPlayId, std_::unique_ptr & } _loader->startFromVideo(current->videoPlayId); - current->playbackState.state = AudioPlayerPaused; + current->state.state = State::Paused; current->loading = true; - emit loaderOnStart(current->audio, position); + emit loaderOnStart(current->state.id, position); } if (stopped) emit updated(stopped); } @@ -662,16 +828,16 @@ void Mixer::stopFromVideo(uint64 videoPlayId) { AudioMsgId current; { QMutexLocker lock(&AudioMutex); - auto data = dataForType(AudioMsgId::Type::Video); - t_assert(data != nullptr); + auto track = trackForType(AudioMsgId::Type::Video); + t_assert(track != nullptr); - if (data->videoPlayId != videoPlayId) { + if (track->videoPlayId != videoPlayId) { return; } - current = data->audio; + current = track->state.id; fadedStop(AudioMsgId::Type::Video); - data->clear(); + track->clear(); } if (current) emit updated(current); } @@ -681,28 +847,30 @@ void Mixer::pauseFromVideo(uint64 videoPlayId) { { QMutexLocker lock(&AudioMutex); auto type = AudioMsgId::Type::Video; - auto data = dataForType(type); - t_assert(data != nullptr); + auto track = trackForType(type); + t_assert(track != nullptr); - if (data->videoPlayId != videoPlayId) { + if (track->videoPlayId != videoPlayId) { return; } - current = data->audio; - switch (data->playbackState.state) { - case AudioPlayerStarting: - case AudioPlayerResuming: - case AudioPlayerPlaying: { - data->playbackState.state = AudioPlayerPaused; 
- updateCurrentStarted(type); + current = track->state.id; + switch (track->state.state) { + case State::Starting: + case State::Resuming: + case State::Playing: { + track->state.state = State::Paused; + resetFadeStartPosition(type); - ALint state = AL_INITIAL; - alGetSourcei(data->source, AL_SOURCE_STATE, &state); - if (!checkCurrentALError(type)) return; - - if (state == AL_PLAYING) { - alSourcePause(data->source); + if (track->isStreamCreated()) { + ALint state = AL_INITIAL; + alGetSourcei(track->stream.source, AL_SOURCE_STATE, &state); if (!checkCurrentALError(type)) return; + + if (state == AL_PLAYING) { + alSourcePause(track->stream.source); + if (!checkCurrentALError(type)) return; + } } } break; } @@ -722,40 +890,51 @@ void Mixer::resumeFromVideo(uint64 videoPlayId) { { QMutexLocker lock(&AudioMutex); auto type = AudioMsgId::Type::Video; - auto data = dataForType(type); - t_assert(data != nullptr); + auto track = trackForType(type); + t_assert(track != nullptr); - if (data->videoPlayId != videoPlayId) { + if (track->videoPlayId != videoPlayId) { return; } - float64 suppressGain = suppressSongGain * Global::VideoVolume(); - - current = data->audio; - switch (data->playbackState.state) { - case AudioPlayerPausing: - case AudioPlayerPaused: - case AudioPlayerPausedAtEnd: { - if (data->playbackState.state == AudioPlayerPaused) { - updateCurrentStarted(type); - } else if (data->playbackState.state == AudioPlayerPausedAtEnd) { - if (alIsSource(data->source)) { - alSourcei(data->source, AL_SAMPLE_OFFSET, qMax(data->playbackState.position - data->skipStart, 0LL)); - if (!checkCurrentALError(type)) return; + current = track->state.id; + switch (track->state.state) { + case State::Pausing: + case State::Paused: + case State::PausedAtEnd: { + reattachTracks(); + if (track->state.state == State::Paused) { + // This calls reattachTracks(). + resetFadeStartPosition(type); + } else { + reattachTracks(); + if (track->state.state == State::PausedAtEnd) { + if (track->isStreamCreated()) { + alSourcei(track->stream.source, AL_SAMPLE_OFFSET, qMax(track->state.position - track->bufferedPosition, 0LL)); + if (!checkCurrentALError(type)) return; + } } } - data->playbackState.state = AudioPlayerPlaying; + track->state.state = State::Playing; - ALint state = AL_INITIAL; - alGetSourcei(data->source, AL_SOURCE_STATE, &state); - if (!checkCurrentALError(type)) return; - - if (state != AL_PLAYING) { - alSourcef(data->source, AL_GAIN, suppressGain); + if (track->isStreamCreated()) { + // When starting the video audio is in paused state and + // gets resumed before the stream is created with any data. 
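// If the state query below reports AL_STOPPED while the output device is no longer connected, CheckAudioDeviceConnected() schedules a deferred detach and we return without calling alSourcePlay(); the track stays logically Playing, so Track::reattach() resumes it once reattachTracks() recreates the device.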
+ ALint state = AL_INITIAL; + alGetSourcei(track->stream.source, AL_SOURCE_STATE, &state); if (!checkCurrentALError(type)) return; - alSourcePlay(data->source); - if (!checkCurrentALError(type)) return; + if (state != AL_PLAYING) { + if (state == AL_STOPPED && !internal::CheckAudioDeviceConnected()) { + return; + } + + alSourcef(track->stream.source, AL_GAIN, ComputeVolume(type)); + if (!checkCurrentALError(type)) return; + + alSourcePlay(track->stream.source); + if (!checkCurrentALError(type)) return; + } } } break; } @@ -789,13 +968,13 @@ void Mixer::videoSoundProgress(const AudioMsgId &audio) { QMutexLocker lock(&AudioMutex); QMutexLocker videoLock(&_lastVideoMutex); - auto current = dataForType(type); + auto current = trackForType(type); t_assert(current != nullptr); - if (current->videoPlayId == _lastVideoPlayId && current->playbackState.duration && current->playbackState.frequency) { - if (current->playbackState.state == AudioPlayerPlaying) { + if (current->videoPlayId == _lastVideoPlayId && current->state.duration && current->state.frequency) { + if (current->state.state == State::Playing) { _lastVideoPlaybackWhen = getms(); - _lastVideoPlaybackCorrectedMs = (current->playbackState.position * 1000ULL) / current->playbackState.frequency; + _lastVideoPlaybackCorrectedMs = (current->state.position * 1000ULL) / current->state.frequency; } } } @@ -803,10 +982,10 @@ void Mixer::videoSoundProgress(const AudioMsgId &audio) { bool Mixer::checkCurrentALError(AudioMsgId::Type type) { if (!Media::Player::PlaybackErrorHappened()) return true; - auto data = dataForType(type); + auto data = trackForType(type); if (!data) { - setStoppedState(data, AudioPlayerStoppedAtError); - onError(data->audio); + setStoppedState(data, State::StoppedAtError); + onError(data->state.id); } return false; } @@ -814,101 +993,106 @@ bool Mixer::checkCurrentALError(AudioMsgId::Type type) { void Mixer::pauseresume(AudioMsgId::Type type, bool fast) { QMutexLocker lock(&AudioMutex); - auto current = dataForType(type); - float64 suppressGain = 1.; - switch (type) { - case AudioMsgId::Type::Voice: suppressGain = suppressAllGain; break; - case AudioMsgId::Type::Song: suppressGain = suppressSongGain * Global::SongVolume(); break; - case AudioMsgId::Type::Video: suppressGain = suppressSongGain * Global::VideoVolume(); break; - } + auto current = trackForType(type); - switch (current->playbackState.state) { - case AudioPlayerPausing: - case AudioPlayerPaused: - case AudioPlayerPausedAtEnd: { - if (current->playbackState.state == AudioPlayerPaused) { - updateCurrentStarted(type); - } else if (current->playbackState.state == AudioPlayerPausedAtEnd) { - if (alIsSource(current->source)) { - alSourcei(current->source, AL_SAMPLE_OFFSET, qMax(current->playbackState.position - current->skipStart, 0LL)); + switch (current->state.state) { + case State::Pausing: + case State::Paused: + case State::PausedAtEnd: { + reattachTracks(); + if (current->state.state == State::Paused) { + resetFadeStartPosition(type); + } else if (current->state.state == State::PausedAtEnd) { + if (current->isStreamCreated()) { + alSourcei(current->stream.source, AL_SAMPLE_OFFSET, qMax(current->state.position - current->bufferedPosition, 0LL)); if (!checkCurrentALError(type)) return; } } - current->playbackState.state = fast ? AudioPlayerPlaying : AudioPlayerResuming; + current->state.state = fast ? 
State::Playing : State::Resuming; ALint state = AL_INITIAL; - alGetSourcei(current->source, AL_SOURCE_STATE, &state); + alGetSourcei(current->stream.source, AL_SOURCE_STATE, &state); if (!checkCurrentALError(type)) return; if (state != AL_PLAYING) { - alSourcef(current->source, AL_GAIN, suppressGain); + if (state == AL_STOPPED && !internal::CheckAudioDeviceConnected()) { + return; + } + + alSourcef(current->stream.source, AL_GAIN, ComputeVolume(type)); if (!checkCurrentALError(type)) return; - alSourcePlay(current->source); + alSourcePlay(current->stream.source); if (!checkCurrentALError(type)) return; } if (type == AudioMsgId::Type::Voice) emit suppressSong(); } break; - case AudioPlayerStarting: - case AudioPlayerResuming: - case AudioPlayerPlaying: - current->playbackState.state = AudioPlayerPausing; - updateCurrentStarted(type); - if (type == AudioMsgId::Type::Voice) emit unsuppressSong(); - break; - case AudioPlayerFinishing: current->playbackState.state = AudioPlayerPausing; break; + case State::Starting: + case State::Resuming: + case State::Playing: { + current->state.state = State::Pausing; + resetFadeStartPosition(type); + if (type == AudioMsgId::Type::Voice) emit unsuppressSong(); + } break; + case State::Finishing: { + current->state.state = State::Pausing; + } break; } emit faderOnTimer(); } -void Mixer::seek(int64 position) { +void Mixer::seek(AudioMsgId::Type type, int64 position) { QMutexLocker lock(&AudioMutex); - auto type = AudioMsgId::Type::Song; - auto current = dataForType(type); - float64 suppressGain = 1.; - switch (type) { - case AudioMsgId::Type::Voice: suppressGain = suppressAllGain; break; - case AudioMsgId::Type::Song: suppressGain = suppressSongGain * Global::SongVolume(); break; - } - auto audio = current->audio; + auto current = trackForType(type); + auto audio = current->state.id; - bool isSource = alIsSource(current->source); - bool fastSeek = (position >= current->skipStart && position < current->playbackState.duration - current->skipEnd - (current->skipEnd ? AudioVoiceMsgFrequency : 0)); - if (fastSeek && isSource) { - alSourcei(current->source, AL_SAMPLE_OFFSET, position - current->skipStart); + reattachTracks(); + auto streamCreated = current->isStreamCreated(); + auto fastSeek = (position >= current->bufferedPosition && position < current->bufferedPosition + current->bufferedLength - (current->loaded ? 0 : kDefaultFrequency)); + if (!streamCreated) { + fastSeek = false; + } else if (IsStopped(current->state.state) || (current->state.state == State::Finishing)) { + fastSeek = false; + } + if (fastSeek) { + alSourcei(current->stream.source, AL_SAMPLE_OFFSET, position - current->bufferedPosition); if (!checkCurrentALError(type)) return; - alSourcef(current->source, AL_GAIN, 1. 
* suppressGain); + + alSourcef(current->stream.source, AL_GAIN, ComputeVolume(type)); if (!checkCurrentALError(type)) return; - updateCurrentStarted(type, position - current->skipStart); + + resetFadeStartPosition(type, position - current->bufferedPosition); } else { setStoppedState(current); - if (isSource) alSourceStop(current->source); + if (streamCreated) alSourceStop(current->stream.source); } - switch (current->playbackState.state) { - case AudioPlayerPausing: - case AudioPlayerPaused: - case AudioPlayerPausedAtEnd: { - if (current->playbackState.state == AudioPlayerPausedAtEnd) { - current->playbackState.state = AudioPlayerPaused; + switch (current->state.state) { + case State::Pausing: + case State::Paused: + case State::PausedAtEnd: { + if (current->state.state == State::PausedAtEnd) { + current->state.state = State::Paused; } lock.unlock(); return pauseresume(type, true); } break; - case AudioPlayerStarting: - case AudioPlayerResuming: - case AudioPlayerPlaying: - current->playbackState.state = AudioPlayerPausing; - updateCurrentStarted(type); - if (type == AudioMsgId::Type::Voice) emit unsuppressSong(); - break; - case AudioPlayerFinishing: - case AudioPlayerStopped: - case AudioPlayerStoppedAtEnd: - case AudioPlayerStoppedAtError: - case AudioPlayerStoppedAtStart: - lock.unlock(); - return play(audio, position); + case State::Starting: + case State::Resuming: + case State::Playing: { + current->state.state = State::Pausing; + resetFadeStartPosition(type); + if (type == AudioMsgId::Type::Voice) { + emit unsuppressSong(); + } + } break; + case State::Finishing: + case State::Stopped: + case State::StoppedAtEnd: + case State::StoppedAtError: + case State::StoppedAtStart: { + lock.unlock(); + } return play(audio, position); } emit faderOnTimer(); } @@ -917,80 +1101,134 @@ void Mixer::stop(AudioMsgId::Type type) { AudioMsgId current; { QMutexLocker lock(&AudioMutex); - auto data = dataForType(type); - t_assert(data != nullptr); + auto track = trackForType(type); + t_assert(track != nullptr); - current = data->audio; + current = track->state.id; fadedStop(type); if (type == AudioMsgId::Type::Video) { - data->clear(); + track->clear(); } } if (current) emit updated(current); } void Mixer::stopAndClear() { - AudioMsg *current_audio = nullptr, *current_song = nullptr; + Track *current_audio = nullptr, *current_song = nullptr; { QMutexLocker lock(&AudioMutex); - if ((current_audio = dataForType(AudioMsgId::Type::Voice))) { + if ((current_audio = trackForType(AudioMsgId::Type::Voice))) { setStoppedState(current_audio); } - if ((current_song = dataForType(AudioMsgId::Type::Song))) { + if ((current_song = trackForType(AudioMsgId::Type::Song))) { setStoppedState(current_song); } } if (current_song) { - emit updated(current_song->audio); + emit updated(current_song->state.id); } if (current_audio) { - emit updated(current_audio->audio); + emit updated(current_audio->state.id); } { QMutexLocker lock(&AudioMutex); auto clearAndCancel = [this](AudioMsgId::Type type, int index) { - auto data = dataForType(type, index); - if (data->audio) { - emit loaderOnCancel(data->audio); + auto track = trackForType(type, index); + if (track->state.id) { + emit loaderOnCancel(track->state.id); } - data->clear(); + track->clear(); }; - for (int index = 0; index < AudioSimultaneousLimit; ++index) { + for (auto index = 0; index != kTogetherLimit; ++index) { clearAndCancel(AudioMsgId::Type::Voice, index); clearAndCancel(AudioMsgId::Type::Song, index); } - _videoData.clear(); + _videoTrack.clear(); 
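// Clearing the video track above is paired with stopFromVideo() below, which tells the loader thread to stop feeding video sound data.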
_loader->stopFromVideo(); } } -AudioPlaybackState Mixer::currentVideoState(uint64 videoPlayId) { +TrackState Mixer::currentVideoState(uint64 videoPlayId) { QMutexLocker lock(&AudioMutex); - auto current = dataForType(AudioMsgId::Type::Video); - if (!current || current->videoPlayId != videoPlayId) return AudioPlaybackState(); - - return current->playbackState; + auto current = trackForType(AudioMsgId::Type::Video); + if (!current || current->videoPlayId != videoPlayId) { + return TrackState(); + } + return current->state; } -AudioPlaybackState Mixer::currentState(AudioMsgId *audio, AudioMsgId::Type type) { +TrackState Mixer::currentState(AudioMsgId::Type type) { QMutexLocker lock(&AudioMutex); - auto current = dataForType(type); - if (!current) return AudioPlaybackState(); - - if (audio) *audio = current->audio; - return current->playbackState; + auto current = trackForType(type); + if (!current) { + return TrackState(); + } + return current->state; } -void Mixer::setStoppedState(AudioMsg *current, AudioPlayerState state) { - current->playbackState.state = state; - current->playbackState.position = 0; +void Mixer::setStoppedState(Track *current, State state) { + current->state.state = state; + current->state.position = 0; } void Mixer::clearStoppedAtStart(const AudioMsgId &audio) { QMutexLocker lock(&AudioMutex); - auto data = dataForType(audio.type()); - if (data && data->audio == audio && data->playbackState.state == AudioPlayerStoppedAtStart) { - setStoppedState(data); + auto track = trackForType(audio.type()); + if (track && track->state.id == audio && track->state.state == State::StoppedAtStart) { + setStoppedState(track); + } +} + +void Mixer::detachFromDeviceByTimer() { + QMetaObject::invokeMethod(_fader, "onDetachFromDeviceByTimer", Qt::QueuedConnection, Q_ARG(bool, true)); +} + +void Mixer::detachTracks() { + for (auto i = 0; i != kTogetherLimit; ++i) { + trackForType(AudioMsgId::Type::Voice, i)->detach(); + trackForType(AudioMsgId::Type::Song, i)->detach(); + } + _videoTrack.detach(); +} + +void Mixer::reattachIfNeeded() { + _fader->keepAttachedToDevice(); + + auto reattachNeeded = [this] { + auto isPlayingState = [](const Track &track) { + auto state = track.state.state; + return (state == State::Starting) + || (state == State::Playing) + || (state == State::Finishing) + || (state == State::Pausing) + || (state == State::Resuming); + }; + for (auto i = 0; i != kTogetherLimit; ++i) { + if (isPlayingState(*trackForType(AudioMsgId::Type::Voice, i)) + || isPlayingState(*trackForType(AudioMsgId::Type::Song, i))) { + return true; + } + } + return isPlayingState(_videoTrack); + }; + + if (reattachNeeded()) { + reattachTracks(); + } +} + +void Mixer::reattachTracks() { + if (!AudioDevice) { + LOG(("Audio Info: recreating audio device and reattaching the tracks")); + + CreateAudioPlaybackDevice(); + for (auto i = 0; i != kTogetherLimit; ++i) { + trackForType(AudioMsgId::Type::Voice, i)->reattach(AudioMsgId::Type::Voice); + trackForType(AudioMsgId::Type::Song, i)->reattach(AudioMsgId::Type::Song); + } + _videoTrack.reattach(AudioMsgId::Type::Video); + + emit faderOnTimer(); } } @@ -1000,16 +1238,15 @@ Fader::Fader(QThread *thread) : QObject() , _suppressSongGain(1., 1.) 
{ moveToThread(thread); _timer.moveToThread(thread); - _pauseTimer.moveToThread(thread); + _detachFromDeviceTimer.moveToThread(thread); connect(thread, SIGNAL(started()), this, SLOT(onInit())); connect(thread, SIGNAL(finished()), this, SLOT(deleteLater())); _timer.setSingleShot(true); connect(&_timer, SIGNAL(timeout()), this, SLOT(onTimer())); - _pauseTimer.setSingleShot(true); - connect(&_pauseTimer, SIGNAL(timeout()), this, SLOT(onPauseTimer())); - connect(this, SIGNAL(stopPauseDevice()), this, SLOT(onPauseTimerStop()), Qt::QueuedConnection); + _detachFromDeviceTimer.setSingleShot(true); + connect(&_detachFromDeviceTimer, SIGNAL(timeout()), this, SLOT(onDetachFromDeviceTimer())); } void Fader::onInit() { @@ -1017,8 +1254,7 @@ void Fader::onInit() { void Fader::onTimer() { QMutexLocker lock(&AudioMutex); - auto player = mixer(); - if (!player) return; + if (!mixer()) return; bool suppressAudioChanged = false, suppressSongChanged = false; if (_suppressAll || _suppressSongAnim) { @@ -1030,26 +1266,26 @@ void Fader::onTimer() { if (ms >= _suppressAllStart + notifyLengthMs || ms < _suppressAllStart) { _suppressAll = _suppressAllAnim = false; _suppressAllGain = anim::value(1., 1.); - } else if (ms > _suppressAllStart + notifyLengthMs - AudioFadeDuration) { + } else if (ms > _suppressAllStart + notifyLengthMs - kFadeDuration) { if (_suppressAllGain.to() != 1.) _suppressAllGain.start(1.); - _suppressAllGain.update(1. - ((_suppressAllStart + notifyLengthMs - ms) / float64(AudioFadeDuration)), anim::linear); + _suppressAllGain.update(1. - ((_suppressAllStart + notifyLengthMs - ms) / float64(kFadeDuration)), anim::linear); } else if (ms >= _suppressAllStart + st::mediaPlayerSuppressDuration) { if (_suppressAllAnim) { _suppressAllGain.finish(); _suppressAllAnim = false; } } else if (ms > _suppressAllStart) { - _suppressAllGain.update((ms - _suppressAllStart) / st::mediaPlayerSuppressDuration, anim::linear); + _suppressAllGain.update((ms - _suppressAllStart) / float64(st::mediaPlayerSuppressDuration), anim::linear); } suppressAllGain = _suppressAllGain.current(); suppressAudioChanged = (suppressAllGain != wasAudio); } if (_suppressSongAnim) { - if (ms >= _suppressSongStart + AudioFadeDuration) { + if (ms >= _suppressSongStart + kFadeDuration) { _suppressSongGain.finish(); _suppressSongAnim = false; } else { - _suppressSongGain.update((ms - _suppressSongStart) / float64(AudioFadeDuration), anim::linear); + _suppressSongGain.update((ms - _suppressSongStart) / float64(kFadeDuration), anim::linear); } } suppressSongGain = qMin(suppressAllGain, _suppressSongGain.current()); @@ -1058,19 +1294,19 @@ void Fader::onTimer() { bool hasFading = (_suppressAll || _suppressSongAnim); bool hasPlaying = false; - auto updatePlayback = [this, player, &hasPlaying, &hasFading](AudioMsgId::Type type, int index, float64 suppressGain, bool suppressGainChanged) { - auto data = player->dataForType(type, index); - if ((data->playbackState.state & AudioPlayerStoppedMask) || data->playbackState.state == AudioPlayerPaused || !data->source) return; + auto updatePlayback = [this, &hasPlaying, &hasFading](AudioMsgId::Type type, int index, float64 suppressGain, bool suppressGainChanged) { + auto track = mixer()->trackForType(type, index); + if (IsStopped(track->state.state) || track->state.state == State::Paused || !track->isStreamCreated()) return; - int32 emitSignals = updateOnePlayback(data, hasPlaying, hasFading, suppressGain, suppressGainChanged); - if (emitSignals & EmitError) emit error(data->audio); - if (emitSignals & 
EmitStopped) emit audioStopped(data->audio); - if (emitSignals & EmitPositionUpdated) emit playPositionUpdated(data->audio); - if (emitSignals & EmitNeedToPreload) emit needToPreload(data->audio); + int32 emitSignals = updateOnePlayback(track, hasPlaying, hasFading, suppressGain, suppressGainChanged); + if (emitSignals & EmitError) emit error(track->state.id); + if (emitSignals & EmitStopped) emit audioStopped(track->state.id); + if (emitSignals & EmitPositionUpdated) emit playPositionUpdated(track->state.id); + if (emitSignals & EmitNeedToPreload) emit needToPreload(track->state.id); }; auto suppressGainForMusic = suppressSongGain * Global::SongVolume(); auto suppressGainForMusicChanged = suppressSongChanged || _songVolumeChanged; - for (int i = 0; i < AudioSimultaneousLimit; ++i) { + for (auto i = 0; i != kTogetherLimit; ++i) { updatePlayback(AudioMsgId::Type::Voice, i, suppressAllGain, suppressAudioChanged); updatePlayback(AudioMsgId::Type::Song, i, suppressGainForMusic, suppressGainForMusicChanged); } @@ -1084,109 +1320,127 @@ void Fader::onTimer() { hasPlaying = true; } if (hasFading) { - _timer.start(AudioFadeTimeout); - resumeDevice(); + _timer.start(kCheckFadingTimeout); + keepAttachedToDevice(); } else if (hasPlaying) { - _timer.start(AudioCheckPositionTimeout); - resumeDevice(); + _timer.start(kCheckPlaybackPositionTimeout); + keepAttachedToDevice(); } else { - QMutexLocker lock(&_pauseMutex); - _pauseFlag = true; - _pauseTimer.start(AudioPauseDeviceTimeout); + onDetachFromDeviceByTimer(false); } } -int32 Fader::updateOnePlayback(Mixer::AudioMsg *m, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged) { +int32 Fader::updateOnePlayback(Mixer::Track *track, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged) { bool playing = false, fading = false; - ALint pos = 0; + auto errorHappened = [this, track] { + if (PlaybackErrorHappened()) { + setStoppedState(track, State::StoppedAtError); + return true; + } + return false; + }; + + ALint positionInBuffered = 0; ALint state = AL_INITIAL; - alGetSourcei(m->source, AL_SAMPLE_OFFSET, &pos); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } - alGetSourcei(m->source, AL_SOURCE_STATE, &state); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + alGetSourcei(track->stream.source, AL_SAMPLE_OFFSET, &positionInBuffered); + alGetSourcei(track->stream.source, AL_SOURCE_STATE, &state); + if (errorHappened()) return EmitError; int32 emitSignals = 0; - switch (m->playbackState.state) { - case AudioPlayerFinishing: - case AudioPlayerPausing: - case AudioPlayerStarting: - case AudioPlayerResuming: - fading = true; - break; - case AudioPlayerPlaying: - playing = true; - break; + + if (state == AL_STOPPED && positionInBuffered == 0 && !internal::CheckAudioDeviceConnected()) { + return emitSignals; } - if (fading && (state == AL_PLAYING || !m->loading)) { + + switch (track->state.state) { + case State::Finishing: + case State::Pausing: + case State::Starting: + case State::Resuming: { + fading = true; + } break; + case State::Playing: { + playing = true; + } break; + } + + auto fullPosition = track->bufferedPosition + positionInBuffered; + if (fading && (state == AL_PLAYING || !track->loading)) { + auto fadingForSamplesCount = (fullPosition - track->fadeStartPosition); + if (state != AL_PLAYING) { fading = false; - if (m->source) { - alSourceStop(m->source); - if 
(Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } - alSourcef(m->source, AL_GAIN, 1); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (track->stream.source) { + alSourceStop(track->stream.source); + alSourcef(track->stream.source, AL_GAIN, 1); + if (errorHappened()) return EmitError; } - if (m->playbackState.state == AudioPlayerPausing) { - m->playbackState.state = AudioPlayerPausedAtEnd; + if (track->state.state == State::Pausing) { + track->state.state = State::PausedAtEnd; } else { - setStoppedState(m, AudioPlayerStoppedAtEnd); + setStoppedState(track, State::StoppedAtEnd); } emitSignals |= EmitStopped; - } else if (1000 * (pos + m->skipStart - m->started) >= AudioFadeDuration * m->playbackState.frequency) { + } else if (TimeMs(1000) * fadingForSamplesCount >= kFadeDuration * track->state.frequency) { fading = false; - alSourcef(m->source, AL_GAIN, 1. * suppressGain); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } - switch (m->playbackState.state) { - case AudioPlayerFinishing: - alSourceStop(m->source); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } - setStoppedState(m); - state = AL_STOPPED; - break; - case AudioPlayerPausing: - alSourcePause(m->source); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } - m->playbackState.state = AudioPlayerPaused; - break; - case AudioPlayerStarting: - case AudioPlayerResuming: - m->playbackState.state = AudioPlayerPlaying; - playing = true; - break; + alSourcef(track->stream.source, AL_GAIN, 1. * suppressGain); + if (errorHappened()) return EmitError; + + switch (track->state.state) { + case State::Finishing: { + alSourceStop(track->stream.source); + if (errorHappened()) return EmitError; + + setStoppedState(track); + state = AL_STOPPED; + } break; + case State::Pausing: { + alSourcePause(track->stream.source); + if (errorHappened()) return EmitError; + + track->state.state = State::Paused; + } break; + case State::Starting: + case State::Resuming: { + track->state.state = State::Playing; + playing = true; + } break; } } else { - float64 newGain = 1000. * (pos + m->skipStart - m->started) / (AudioFadeDuration * m->playbackState.frequency); - if (m->playbackState.state == AudioPlayerPausing || m->playbackState.state == AudioPlayerFinishing) { + auto newGain = TimeMs(1000) * fadingForSamplesCount / float64(kFadeDuration * track->state.frequency); + if (track->state.state == State::Pausing || track->state.state == State::Finishing) { newGain = 1. 
- newGain; } - alSourcef(m->source, AL_GAIN, newGain * suppressGain); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + alSourcef(track->stream.source, AL_GAIN, newGain * suppressGain); + if (errorHappened()) return EmitError; } - } else if (playing && (state == AL_PLAYING || !m->loading)) { + } else if (playing && (state == AL_PLAYING || !track->loading)) { if (state != AL_PLAYING) { playing = false; - if (m->source) { - alSourceStop(m->source); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } - alSourcef(m->source, AL_GAIN, 1); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + if (track->isStreamCreated()) { + alSourceStop(track->stream.source); + alSourcef(track->stream.source, AL_GAIN, 1); + if (errorHappened()) return EmitError; } - setStoppedState(m, AudioPlayerStoppedAtEnd); + setStoppedState(track, State::StoppedAtEnd); emitSignals |= EmitStopped; } else if (suppressGainChanged) { - alSourcef(m->source, AL_GAIN, suppressGain); - if (Media::Player::PlaybackErrorHappened()) { setStoppedState(m, AudioPlayerStoppedAtError); return EmitError; } + alSourcef(track->stream.source, AL_GAIN, suppressGain); + if (errorHappened()) return EmitError; } } - if (state == AL_PLAYING && pos + m->skipStart - m->playbackState.position >= AudioCheckPositionDelta) { - m->playbackState.position = pos + m->skipStart; + if (state == AL_PLAYING && fullPosition >= track->state.position + kCheckPlaybackPositionDelta) { + track->state.position = fullPosition; emitSignals |= EmitPositionUpdated; } - if (playing || m->playbackState.state == AudioPlayerStarting || m->playbackState.state == AudioPlayerResuming) { - if (!m->loading && m->skipEnd > 0 && m->playbackState.position + AudioPreloadSamples + m->skipEnd > m->playbackState.duration) { - m->loading = true; - emitSignals |= EmitNeedToPreload; + if (playing || track->state.state == State::Starting || track->state.state == State::Resuming) { + if (!track->loaded && !track->loading) { + auto needPreload = (track->state.position + kPreloadSamples > track->bufferedPosition + track->bufferedLength); + if (needPreload) { + track->loading = true; + emitSignals |= EmitNeedToPreload; + } } } if (playing) hasPlaying = true; @@ -1195,21 +1449,17 @@ int32 Fader::updateOnePlayback(Mixer::AudioMsg *m, bool &hasPlaying, bool &hasFa return emitSignals; } -void Fader::setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state) { - m->playbackState.state = state; - m->playbackState.position = 0; +void Fader::setStoppedState(Mixer::Track *track, State state) { + track->state.state = state; + track->state.position = 0; } -void Fader::onPauseTimer() { - QMutexLocker lock(&_pauseMutex); - if (_pauseFlag) { - _paused = true; - alcDevicePauseSOFT(AudioDevice); - } -} +void Fader::onDetachFromDeviceTimer() { + QMutexLocker lock(&_detachFromDeviceMutex); + _detachFromDeviceForce = false; + lock.unlock(); -void Fader::onPauseTimerStop() { - if (_pauseTimer.isActive()) _pauseTimer.stop(); + DetachFromDevice(); } void Fader::onSuppressSong() { @@ -1249,13 +1499,20 @@ void Fader::onVideoVolumeChanged() { onTimer(); } -void Fader::resumeDevice() { - QMutexLocker lock(&_pauseMutex); - _pauseFlag = false; - emit stopPauseDevice(); - if (_paused) { - _paused = false; - alcDeviceResumeSOFT(AudioDevice); +void Fader::keepAttachedToDevice() { + QMutexLocker lock(&_detachFromDeviceMutex); + if 
(!_detachFromDeviceForce) { + _detachFromDeviceTimer.stop(); + } +} + +void Fader::onDetachFromDeviceByTimer(bool force) { + QMutexLocker lock(&_detachFromDeviceMutex); + if (force) { + _detachFromDeviceForce = true; + } + if (!_detachFromDeviceTimer.isActive()) { + _detachFromDeviceTimer.start(kDetachDeviceTimeout); } } @@ -1268,18 +1525,32 @@ QMutex *audioPlayerMutex() { return &AudioMutex; } -float64 audioSuppressGain() { - return suppressAllGain; -} - -float64 audioSuppressSongGain() { - return suppressSongGain; -} - bool audioCheckError() { return !Media::Player::PlaybackErrorHappened(); } +bool audioDeviceIsConnected() { + if (!AudioDevice) { + return false; + } + ALint connected = 0; + alcGetIntegerv(AudioDevice, ALC_CONNECTED, 1, &connected); + if (Media::Player::ContextErrorHappened()) { + return false; + } + return (connected != 0); +} + +bool CheckAudioDeviceConnected() { + if (audioDeviceIsConnected()) { + return true; + } + if (auto mixer = Media::Player::mixer()) { + mixer->detachFromDeviceByTimer(); + } + return false; +} + } // namespace internal class FFMpegAttributesReader : public AbstractFFMpegLoader { diff --git a/Telegram/SourceFiles/media/media_audio.h b/Telegram/SourceFiles/media/media_audio.h index 6bd24ae81..040b96114 100644 --- a/Telegram/SourceFiles/media/media_audio.h +++ b/Telegram/SourceFiles/media/media_audio.h @@ -20,44 +20,73 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org */ #pragma once -enum AudioPlayerState { - AudioPlayerStopped = 0x01, - AudioPlayerStoppedAtEnd = 0x02, - AudioPlayerStoppedAtError = 0x03, - AudioPlayerStoppedAtStart = 0x04, - AudioPlayerStoppedMask = 0x07, - - AudioPlayerStarting = 0x08, - AudioPlayerPlaying = 0x10, - AudioPlayerFinishing = 0x18, - AudioPlayerPausing = 0x20, - AudioPlayerPaused = 0x28, - AudioPlayerPausedAtEnd = 0x30, - AudioPlayerResuming = 0x38, -}; - struct VideoSoundData; struct VideoSoundPart; -struct AudioPlaybackState { - AudioPlayerState state = AudioPlayerStopped; - int64 position = 0; - TimeMs duration = 0; - int32 frequency = 0; -}; namespace Media { namespace Player { +constexpr auto kDefaultFrequency = 48000; // 48 kHz +constexpr auto kTogetherLimit = 4; + +class Fader; +class Loaders; + void InitAudio(); void DeInitAudio(); base::Observable &Updated(); -bool CreateAudioPlaybackDevice(); +void DetachFromDeviceByTimer(); void PlayNotify(); -class Fader; -class Loaders; +float64 ComputeVolume(AudioMsgId::Type type); + +enum class State { + Stopped = 0x01, + StoppedAtEnd = 0x02, + StoppedAtError = 0x03, + StoppedAtStart = 0x04, + + Starting = 0x08, + Playing = 0x10, + Finishing = 0x18, + Pausing = 0x20, + Paused = 0x28, + PausedAtEnd = 0x30, + Resuming = 0x38, +}; + +inline bool IsStopped(State state) { + return (state == State::Stopped) + || (state == State::StoppedAtEnd) + || (state == State::StoppedAtError) + || (state == State::StoppedAtStart); +} + +inline bool IsPaused(State state) { + return (state == State::Paused) + || (state == State::PausedAtEnd); +} + +inline bool IsFading(State state) { + return (state == State::Starting) + || (state == State::Finishing) + || (state == State::Pausing) + || (state == State::Resuming); +} + +inline bool IsActive(State state) { + return !IsStopped(state) && !IsPaused(state); +} + +struct TrackState { + AudioMsgId id; + State state = State::Stopped; + int64 position = 0; + TimeMs duration = 0; + int frequency = kDefaultFrequency; +}; class Mixer : public QObject, private base::Subscriber { Q_OBJECT @@ -67,27 +96,32 @@ public: void play(const 
AudioMsgId &audio, int64 position = 0); void pauseresume(AudioMsgId::Type type, bool fast = false); - void seek(int64 position); // type == AudioMsgId::Type::Song + void seek(AudioMsgId::Type type, int64 position); // type == AudioMsgId::Type::Song void stop(AudioMsgId::Type type); // Video player audio stream interface. void initFromVideo(uint64 videoPlayId, std_::unique_ptr &&data, int64 position); void feedFromVideo(VideoSoundPart &&part); int64 getVideoCorrectedTime(uint64 playId, TimeMs frameMs, TimeMs systemMs); - AudioPlaybackState currentVideoState(uint64 videoPlayId); + TrackState currentVideoState(uint64 videoPlayId); void stopFromVideo(uint64 videoPlayId); void pauseFromVideo(uint64 videoPlayId); void resumeFromVideo(uint64 videoPlayId); void stopAndClear(); - AudioPlaybackState currentState(AudioMsgId *audio, AudioMsgId::Type type); + TrackState currentState(AudioMsgId::Type type); void clearStoppedAtStart(const AudioMsgId &audio); + void detachFromDeviceByTimer(); + void detachTracks(); + void reattachIfNeeded(); + void reattachTracks(); + ~Mixer(); - private slots: +private slots: void onError(const AudioMsgId &audio); void onStopped(const AudioMsgId &audio); @@ -107,55 +141,70 @@ signals: private: bool fadedStop(AudioMsgId::Type type, bool *fadedStart = 0); - bool updateCurrentStarted(AudioMsgId::Type type, int32 pos = -1); + void resetFadeStartPosition(AudioMsgId::Type type, int positionInBuffered = -1); bool checkCurrentALError(AudioMsgId::Type type); void videoSoundProgress(const AudioMsgId &audio); - struct AudioMsg { - void clear(); + class Track { + public: + static constexpr int kBuffersCount = 3; - AudioMsgId audio; + void reattach(AudioMsgId::Type type); + void detach(); + void clear(); + void started(); + + bool isStreamCreated() const; + void ensureStreamCreated(); + + int getNotQueuedBufferIndex(); + + TrackState state; FileLocation file; QByteArray data; - AudioPlaybackState playbackState = defaultState(); - int64 skipStart = 0; - int64 skipEnd = 0; + int64 bufferedPosition = 0; + int64 bufferedLength = 0; bool loading = false; - int64 started = 0; + bool loaded = false; + int64 fadeStartPosition = 0; - uint32 source = 0; - int32 nextBuffer = 0; - uint32 buffers[3] = { 0 }; - int64 samplesCount[3] = { 0 }; + int32 format = 0; + int32 frequency = kDefaultFrequency; + int samplesCount[kBuffersCount] = { 0 }; + QByteArray bufferSamples[kBuffersCount]; + + struct Stream { + uint32 source = 0; + uint32 buffers[kBuffersCount] = { 0 }; + }; + Stream stream; uint64 videoPlayId = 0; std_::unique_ptr videoData; private: - static AudioPlaybackState defaultState() { - AudioPlaybackState result; - result.frequency = AudioVoiceMsgFrequency; - return result; - } + void createStream(); + void destroyStream(); + void resetStream(); }; - void setStoppedState(AudioMsg *current, AudioPlayerState state = AudioPlayerStopped); + void setStoppedState(Track *current, State state = State::Stopped); - AudioMsg *dataForType(AudioMsgId::Type type, int index = -1); // -1 uses currentIndex(type) - const AudioMsg *dataForType(AudioMsgId::Type type, int index = -1) const; + Track *trackForType(AudioMsgId::Type type, int index = -1); // -1 uses currentIndex(type) + const Track *trackForType(AudioMsgId::Type type, int index = -1) const; int *currentIndex(AudioMsgId::Type type); const int *currentIndex(AudioMsgId::Type type) const; int _audioCurrent = 0; - AudioMsg _audioData[AudioSimultaneousLimit]; + Track _audioTracks[kTogetherLimit]; int _songCurrent = 0; - AudioMsg 
_songData[AudioSimultaneousLimit]; + Track _songTracks[kTogetherLimit]; - AudioMsg _videoData; + Track _videoTrack; uint64 _lastVideoPlayId = 0; TimeMs _lastVideoPlaybackWhen = 0; TimeMs _lastVideoPlaybackCorrectedMs = 0; @@ -179,7 +228,7 @@ class Fader : public QObject { public: Fader(QThread *thread); - void resumeDevice(); + void keepAttachedToDevice(); signals: void error(const AudioMsgId &audio); @@ -187,13 +236,12 @@ signals: void audioStopped(const AudioMsgId &audio); void needToPreload(const AudioMsgId &audio); - void stopPauseDevice(); - public slots: + void onDetachFromDeviceByTimer(bool force); + void onInit(); void onTimer(); - void onPauseTimer(); - void onPauseTimerStop(); + void onDetachFromDeviceTimer(); void onSuppressSong(); void onUnsuppressSong(); @@ -208,23 +256,25 @@ private: EmitPositionUpdated = 0x04, EmitNeedToPreload = 0x08, }; - int32 updateOnePlayback(Mixer::AudioMsg *m, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged); - void setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped); + int32 updateOnePlayback(Mixer::Track *track, bool &hasPlaying, bool &hasFading, float64 suppressGain, bool suppressGainChanged); + void setStoppedState(Mixer::Track *track, State state = State::Stopped); - QTimer _timer, _pauseTimer; - QMutex _pauseMutex; - bool _pauseFlag = false; - bool _paused = true; + QTimer _timer; bool _suppressAll = false; bool _suppressAllAnim = false; bool _suppressSong = false; bool _suppressSongAnim = false; - bool _songVolumeChanged, _videoVolumeChanged; + bool _songVolumeChanged = false; + bool _videoVolumeChanged = false; anim::value _suppressAllGain, _suppressSongGain; TimeMs _suppressAllStart = 0; TimeMs _suppressSongStart = 0; + QTimer _detachFromDeviceTimer; + QMutex _detachFromDeviceMutex; + bool _detachFromDeviceForce = false; + }; } // namespace Player @@ -233,10 +283,11 @@ private: namespace internal { QMutex *audioPlayerMutex(); -float64 audioSuppressGain(); -float64 audioSuppressSongGain(); bool audioCheckError(); +// AudioMutex must be locked. 
+bool CheckAudioDeviceConnected(); + } // namespace internal MTPDocumentAttribute audioReadSongAttributes(const QString &fname, const QByteArray &data, QImage &cover, QByteArray &coverBytes, QByteArray &coverFormat); diff --git a/Telegram/SourceFiles/media/media_audio_capture.cpp b/Telegram/SourceFiles/media/media_audio_capture.cpp index d1ad976e1..480541eca 100644 --- a/Telegram/SourceFiles/media/media_audio_capture.cpp +++ b/Telegram/SourceFiles/media/media_audio_capture.cpp @@ -30,10 +30,13 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org #include namespace Media { - namespace Capture { namespace { +constexpr auto kCaptureFrequency = Player::kDefaultFrequency; +constexpr auto kCaptureSkipDuration = TimeMs(400); +constexpr auto kCaptureFadeInDuration = TimeMs(300); + Instance *CaptureInstance = nullptr; bool ErrorHappened(ALCdevice *device) { @@ -72,7 +75,7 @@ Instance::Instance() : _inner(new Inner(&_thread)) { void Instance::check() { _available = false; if (auto defaultDevice = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) { - if (auto device = alcCaptureOpenDevice(defaultDevice, AudioVoiceMsgFrequency, AL_FORMAT_MONO16, AudioVoiceMsgFrequency / 5)) { + if (auto device = alcCaptureOpenDevice(defaultDevice, kCaptureFrequency, AL_FORMAT_MONO16, kCaptureFrequency / 5)) { auto error = ErrorHappened(device); alcCaptureCloseDevice(device); _available = !error; @@ -117,7 +120,7 @@ struct Instance::Inner::Private { int32 dataPos = 0; int64 waveformMod = 0; - int64 waveformEach = (AudioVoiceMsgFrequency / 100); + int64 waveformEach = (kCaptureFrequency / 100); uint16 waveformPeak = 0; QVector waveform; @@ -180,7 +183,7 @@ void Instance::Inner::onStart() { // Start OpenAL Capture const ALCchar *dName = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER); DEBUG_LOG(("Audio Info: Capture device name '%1'").arg(dName)); - d->device = alcCaptureOpenDevice(dName, AudioVoiceMsgFrequency, AL_FORMAT_MONO16, AudioVoiceMsgFrequency / 5); + d->device = alcCaptureOpenDevice(dName, kCaptureFrequency, AL_FORMAT_MONO16, kCaptureFrequency / 5); if (!d->device) { LOG(("Audio Error: capture device not present!")); emit error(); @@ -253,7 +256,7 @@ void Instance::Inner::onStart() { d->codecContext->sample_fmt = AV_SAMPLE_FMT_FLTP; d->codecContext->bit_rate = 64000; d->codecContext->channel_layout = AV_CH_LAYOUT_MONO; - d->codecContext->sample_rate = AudioVoiceMsgFrequency; + d->codecContext->sample_rate = kCaptureFrequency; d->codecContext->channels = 1; if (d->fmtContext->oformat->flags & AVFMT_GLOBALHEADER) { @@ -341,8 +344,9 @@ void Instance::Inner::onStop(bool needResult) { // Write what is left if (!_captured.isEmpty()) { - int32 fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000, capturedSamples = _captured.size() / sizeof(short); - if ((_captured.size() % sizeof(short)) || (d->fullSamples + capturedSamples < AudioVoiceMsgFrequency) || (capturedSamples < fadeSamples)) { + auto fadeSamples = kCaptureFadeInDuration * kCaptureFrequency / 1000; + auto capturedSamples = static_cast(_captured.size() / sizeof(short)); + if ((_captured.size() % sizeof(short)) || (d->fullSamples + capturedSamples < kCaptureFrequency) || (capturedSamples < fadeSamples)) { d->fullSamples = 0; d->dataPos = 0; d->data.clear(); @@ -489,7 +493,8 @@ void Instance::Inner::onTimeout() { } if (samples > 0) { // Get samples from OpenAL - int32 s = _captured.size(), news = s + samples * sizeof(short); + auto s = _captured.size(); + auto news = s + static_cast(samples * sizeof(short)); if (news / 
AudioVoiceMsgBufferSize > s / AudioVoiceMsgBufferSize) { _captured.reserve(((news / AudioVoiceMsgBufferSize) + 1) * AudioVoiceMsgBufferSize); } @@ -502,9 +507,10 @@ void Instance::Inner::onTimeout() { } // Count new recording level and update view - int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000, fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000; - int32 levelindex = d->fullSamples + (s / sizeof(short)); - for (const short *ptr = (const short*)(_captured.constData() + s), *end = (const short*)(_captured.constData() + news); ptr < end; ++ptr, ++levelindex) { + auto skipSamples = kCaptureSkipDuration * kCaptureFrequency / 1000; + auto fadeSamples = kCaptureFadeInDuration * kCaptureFrequency / 1000; + auto levelindex = d->fullSamples + static_cast(s / sizeof(short)); + for (auto ptr = (const short*)(_captured.constData() + s), end = (const short*)(_captured.constData() + news); ptr < end; ++ptr, ++levelindex) { if (levelindex > skipSamples) { uint16 value = qAbs(*ptr); if (levelindex < skipSamples + fadeSamples) { @@ -516,7 +522,7 @@ void Instance::Inner::onTimeout() { } } qint32 samplesFull = d->fullSamples + _captured.size() / sizeof(short), samplesSinceUpdate = samplesFull - d->lastUpdate; - if (samplesSinceUpdate > AudioVoiceMsgUpdateView * AudioVoiceMsgFrequency / 1000) { + if (samplesSinceUpdate > AudioVoiceMsgUpdateView * kCaptureFrequency / 1000) { emit updated(d->levelMax, samplesFull); d->lastUpdate = samplesFull; d->levelMax = 0; @@ -548,7 +554,7 @@ void Instance::Inner::processFrame(int32 offset, int32 framesize) { emit error(); return; } - int32 samplesCnt = framesize / sizeof(short); + auto samplesCnt = static_cast(framesize / sizeof(short)); int res = 0; char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; @@ -557,7 +563,8 @@ void Instance::Inner::processFrame(int32 offset, int32 framesize) { auto srcSamplesData = &srcSamplesDataChannel; // memcpy(d->srcSamplesData[0], _captured.constData() + offset, framesize); - int32 skipSamples = AudioVoiceMsgSkip * AudioVoiceMsgFrequency / 1000, fadeSamples = AudioVoiceMsgFade * AudioVoiceMsgFrequency / 1000; + auto skipSamples = static_cast(kCaptureSkipDuration * kCaptureFrequency / 1000); + auto fadeSamples = static_cast(kCaptureFadeInDuration * kCaptureFrequency / 1000); if (d->fullSamples < skipSamples + fadeSamples) { int32 fadedCnt = qMin(samplesCnt, skipSamples + fadeSamples - d->fullSamples); float64 coef = 1. / fadeSamples, fadedFrom = d->fullSamples - skipSamples; diff --git a/Telegram/SourceFiles/media/media_audio_ffmpeg_loader.cpp b/Telegram/SourceFiles/media/media_audio_ffmpeg_loader.cpp index 2daa041ce..69fafd00c 100644 --- a/Telegram/SourceFiles/media/media_audio_ffmpeg_loader.cpp +++ b/Telegram/SourceFiles/media/media_audio_ffmpeg_loader.cpp @@ -204,7 +204,7 @@ bool FFMpegLoader::open(qint64 &position) { int64_t src_ch_layout = layout, dst_ch_layout = AudioToChannelLayout; srcRate = freq; AVSampleFormat src_sample_fmt = inputFormat, dst_sample_fmt = AudioToFormat; - dstRate = (freq != 44100 && freq != 48000) ? AudioVoiceMsgFrequency : freq; + dstRate = (freq != 44100 && freq != 48000) ? 
Media::Player::kDefaultFrequency : freq; av_opt_set_int(swrContext, "in_channel_layout", src_ch_layout, 0); av_opt_set_int(swrContext, "in_sample_rate", srcRate, 0); diff --git a/Telegram/SourceFiles/media/media_audio_ffmpeg_loader.h b/Telegram/SourceFiles/media/media_audio_ffmpeg_loader.h index 53d22ed92..063ae3e70 100644 --- a/Telegram/SourceFiles/media/media_audio_ffmpeg_loader.h +++ b/Telegram/SourceFiles/media/media_audio_ffmpeg_loader.h @@ -20,6 +20,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org */ #pragma once +#include "media/media_audio.h" #include "media/media_audio_loader.h" extern "C" { @@ -49,7 +50,7 @@ public: ~AbstractFFMpegLoader(); protected: - int32 freq = AudioVoiceMsgFrequency; + int32 freq = Media::Player::kDefaultFrequency; TimeMs len = 0; uchar *ioBuffer = nullptr; @@ -89,8 +90,8 @@ private: ReadResult readFromReadyFrame(QByteArray &result, int64 &samplesAdded); int32 fmt = AL_FORMAT_STEREO16; - int32 srcRate = AudioVoiceMsgFrequency; - int32 dstRate = AudioVoiceMsgFrequency; + int32 srcRate = Media::Player::kDefaultFrequency; + int32 dstRate = Media::Player::kDefaultFrequency; int32 maxResampleSamples = 1024; uint8_t **dstSamplesData = nullptr; diff --git a/Telegram/SourceFiles/media/media_audio_loaders.cpp b/Telegram/SourceFiles/media/media_audio_loaders.cpp index 075a85f41..3c6393f9a 100644 --- a/Telegram/SourceFiles/media/media_audio_loaders.cpp +++ b/Telegram/SourceFiles/media/media_audio_loaders.cpp @@ -98,10 +98,10 @@ void Loaders::onStart(const AudioMsgId &audio, qint64 position) { QMutexLocker lock(internal::audioPlayerMutex()); if (!mixer()) return; - auto data = mixer()->dataForType(type); - if (!data) return; + auto track = mixer()->trackForType(type); + if (!track) return; - data->loading = true; + track->loading = true; } loadData(audio, position); @@ -117,9 +117,9 @@ AudioMsgId Loaders::clear(AudioMsgId::Type type) { return result; } -void Loaders::setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state) { - m->playbackState.state = state; - m->playbackState.position = 0; +void Loaders::setStoppedState(Mixer::Track *track, State state) { + track->state.state = state; + track->state.position = 0; } void Loaders::emitError(AudioMsgId::Type type) { @@ -131,9 +131,9 @@ void Loaders::onLoad(const AudioMsgId &audio) { } void Loaders::loadData(AudioMsgId audio, qint64 position) { - SetupError err = SetupNoErrorStarted; + auto err = SetupNoErrorStarted; auto type = audio.type(); - AudioPlayerLoader *l = setupLoader(audio, err, position); + auto l = setupLoader(audio, err, position); if (!l) { if (err == SetupErrorAtStart) { emitError(type); @@ -141,10 +141,10 @@ void Loaders::loadData(AudioMsgId audio, qint64 position) { return; } - bool started = (err == SetupNoErrorStarted); - bool finished = false; - bool waiting = false; - bool errAtStart = started; + auto started = (err == SetupNoErrorStarted); + auto finished = false; + auto waiting = false; + auto errAtStart = started; QByteArray samples; int64 samplesCount = 0; @@ -158,8 +158,9 @@ void Loaders::loadData(AudioMsgId audio, qint64 position) { if (errAtStart) { { QMutexLocker lock(internal::audioPlayerMutex()); - auto m = checkLoader(type); - if (m) m->playbackState.state = AudioPlayerStoppedAtStart; + if (auto track = checkLoader(type)) { + track->state.state = State::StoppedAtStart; + } } emitError(type); return; @@ -187,93 +188,54 @@ void Loaders::loadData(AudioMsgId audio, qint64 position) { } QMutexLocker lock(internal::audioPlayerMutex()); - auto m = checkLoader(type); - 
if (!m) { + auto track = checkLoader(type); + if (!track) { clear(type); return; } if (started) { - if (m->source) { - alSourceStop(m->source); - for (int32 i = 0; i < 3; ++i) { - if (m->samplesCount[i]) { - ALuint buffer = 0; - alSourceUnqueueBuffers(m->source, 1, &buffer); - m->samplesCount[i] = 0; - } - } - m->nextBuffer = 0; + mixer()->reattachTracks(); + + track->started(); + if (!internal::audioCheckError()) { + setStoppedState(track, State::StoppedAtStart); + emitError(type); + return; } - m->skipStart = position; - m->skipEnd = m->playbackState.duration - position; - m->playbackState.position = position; - m->started = 0; + + track->bufferedPosition = position; + track->state.position = position; + track->fadeStartPosition = position; + + track->format = l->format(); + track->frequency = l->frequency(); } if (samplesCount) { - if (!m->source) { - alGenSources(1, &m->source); - alSourcef(m->source, AL_PITCH, 1.f); - alSource3f(m->source, AL_POSITION, 0, 0, 0); - alSource3f(m->source, AL_VELOCITY, 0, 0, 0); - alSourcei(m->source, AL_LOOPING, 0); - } - if (!m->buffers[m->nextBuffer]) { - alGenBuffers(3, m->buffers); - } + track->ensureStreamCreated(); - // If this buffer is queued, try to unqueue some buffer. - if (m->samplesCount[m->nextBuffer]) { - ALint processed = 0; - alGetSourcei(m->source, AL_BUFFERS_PROCESSED, &processed); - if (processed < 1) { // No processed buffers, wait. - l->saveDecodedSamples(&samples, &samplesCount); - return; - } - - // Unqueue some processed buffer. - ALuint buffer = 0; - alSourceUnqueueBuffers(m->source, 1, &buffer); - if (!internal::audioCheckError()) { - setStoppedState(m, AudioPlayerStoppedAtError); - emitError(type); - return; - } - - // Find it in the list and make it the nextBuffer. - bool found = false; - for (int i = 0; i < 3; ++i) { - if (m->buffers[i] == buffer) { - found = true; - m->nextBuffer = i; - break; - } - } - if (!found) { - LOG(("Audio Error: Could not find the unqueued buffer! Buffer %1 in source %2 with processed count %3").arg(buffer).arg(m->source).arg(processed)); - setStoppedState(m, AudioPlayerStoppedAtError); - emitError(type); - return; - } - - if (m->samplesCount[m->nextBuffer]) { - m->skipStart += m->samplesCount[m->nextBuffer]; - m->samplesCount[m->nextBuffer] = 0; - } - } - - auto frequency = l->frequency(); - auto format = l->format(); - m->samplesCount[m->nextBuffer] = samplesCount; - alBufferData(m->buffers[m->nextBuffer], format, samples.constData(), samples.size(), frequency); - - alSourceQueueBuffers(m->source, 1, m->buffers + m->nextBuffer); - m->skipEnd -= samplesCount; - - m->nextBuffer = (m->nextBuffer + 1) % 3; + auto bufferIndex = track->getNotQueuedBufferIndex(); if (!internal::audioCheckError()) { - setStoppedState(m, AudioPlayerStoppedAtError); + setStoppedState(track, State::StoppedAtError); + emitError(type); + return; + } + + if (bufferIndex < 0) { // No free buffers, wait. 
+ l->saveDecodedSamples(&samples, &samplesCount); + return; + } + + track->bufferSamples[bufferIndex] = samples; + track->samplesCount[bufferIndex] = samplesCount; + track->bufferedLength += samplesCount; + alBufferData(track->stream.buffers[bufferIndex], track->format, samples.constData(), samples.size(), track->frequency); + + alSourceQueueBuffers(track->stream.source, 1, track->stream.buffers + bufferIndex); + + if (!internal::audioCheckError()) { + setStoppedState(track, State::StoppedAtError); emitError(type); return; } @@ -285,31 +247,31 @@ void Loaders::loadData(AudioMsgId audio, qint64 position) { } if (finished) { - m->skipEnd = 0; - m->playbackState.duration = m->skipStart + m->samplesCount[0] + m->samplesCount[1] + m->samplesCount[2]; + track->loaded = true; + track->state.duration = track->bufferedPosition + track->bufferedLength; clear(type); } - m->loading = false; - if (m->playbackState.state == AudioPlayerResuming || m->playbackState.state == AudioPlayerPlaying || m->playbackState.state == AudioPlayerStarting) { + track->loading = false; + if (track->state.state == State::Resuming || track->state.state == State::Playing || track->state.state == State::Starting) { ALint state = AL_INITIAL; - alGetSourcei(m->source, AL_SOURCE_STATE, &state); + alGetSourcei(track->stream.source, AL_SOURCE_STATE, &state); if (internal::audioCheckError()) { if (state != AL_PLAYING) { - switch (type) { - case AudioMsgId::Type::Voice: alSourcef(m->source, AL_GAIN, internal::audioSuppressGain()); break; - case AudioMsgId::Type::Song: alSourcef(m->source, AL_GAIN, internal::audioSuppressSongGain() * Global::SongVolume()); break; - case AudioMsgId::Type::Video: alSourcef(m->source, AL_GAIN, internal::audioSuppressSongGain() * Global::VideoVolume()); break; + if (state == AL_STOPPED && !internal::CheckAudioDeviceConnected()) { + return; } + + alSourcef(track->stream.source, AL_GAIN, ComputeVolume(type)); if (!internal::audioCheckError()) { - setStoppedState(m, AudioPlayerStoppedAtError); + setStoppedState(track, State::StoppedAtError); emitError(type); return; } - alSourcePlay(m->source); + alSourcePlay(track->stream.source); if (!internal::audioCheckError()) { - setStoppedState(m, AudioPlayerStoppedAtError); + setStoppedState(track, State::StoppedAtError); emitError(type); return; } @@ -317,7 +279,7 @@ void Loaders::loadData(AudioMsgId audio, qint64 position) { emit needToCheck(); } } else { - setStoppedState(m, AudioPlayerStoppedAtError); + setStoppedState(track, State::StoppedAtError); emitError(type); } } @@ -328,8 +290,8 @@ AudioPlayerLoader *Loaders::setupLoader(const AudioMsgId &audio, SetupError &err QMutexLocker lock(internal::audioPlayerMutex()); if (!mixer()) return nullptr; - auto data = mixer()->dataForType(audio.type()); - if (!data || data->audio != audio || !data->loading) { + auto track = mixer()->trackForType(audio.type()); + if (!track || track->state.id != audio || !track->loading) { emit error(audio); LOG(("Audio Error: trying to load part of audio, that is not current at the moment")); err = SetupErrorNotPlaying; @@ -344,7 +306,7 @@ AudioPlayerLoader *Loaders::setupLoader(const AudioMsgId &audio, SetupError &err case AudioMsgId::Type::Video: l = _videoLoader.get(); isGoodId = (_video == audio); break; } - if (l && (!isGoodId || !l->check(data->file, data->data))) { + if (l && (!isGoodId || !l->check(track->file, track->data))) { clear(audio.type()); l = nullptr; } @@ -358,61 +320,59 @@ AudioPlayerLoader *Loaders::setupLoader(const AudioMsgId &audio, SetupError &err } if 
(audio.type() == AudioMsgId::Type::Video) { - if (!data->videoData) { - data->playbackState.state = AudioPlayerStoppedAtError; + if (!track->videoData) { + track->state.state = State::StoppedAtError; emit error(audio); LOG(("Audio Error: video sound data not ready")); return nullptr; } - _videoLoader = std_::make_unique(data->videoPlayId, std_::move(data->videoData)); + _videoLoader = std_::make_unique(track->videoPlayId, std_::move(track->videoData)); l = _videoLoader.get(); } else { - *loader = std_::make_unique(data->file, data->data); + *loader = std_::make_unique(track->file, track->data); l = loader->get(); } if (!l->open(position)) { - data->playbackState.state = AudioPlayerStoppedAtStart; + track->state.state = State::StoppedAtStart; return nullptr; } int64 duration = l->duration(); if (duration <= 0) { - data->playbackState.state = AudioPlayerStoppedAtStart; + track->state.state = State::StoppedAtStart; return nullptr; } - data->playbackState.duration = duration; - data->playbackState.frequency = l->frequency(); - if (!data->playbackState.frequency) data->playbackState.frequency = AudioVoiceMsgFrequency; + track->state.duration = duration; + track->state.frequency = l->frequency(); + if (!track->state.frequency) track->state.frequency = kDefaultFrequency; err = SetupNoErrorStarted; - } else { - if (!data->skipEnd) { - err = SetupErrorLoadedFull; - LOG(("Audio Error: trying to load part of audio, that is already loaded to the end")); - return nullptr; - } + } else if (track->loaded) { + err = SetupErrorLoadedFull; + LOG(("Audio Error: trying to load part of audio, that is already loaded to the end")); + return nullptr; } return l; } -Mixer::AudioMsg *Loaders::checkLoader(AudioMsgId::Type type) { +Mixer::Track *Loaders::checkLoader(AudioMsgId::Type type) { if (!mixer()) return nullptr; - auto data = mixer()->dataForType(type); - bool isGoodId = false; + auto track = mixer()->trackForType(type); + auto isGoodId = false; AudioPlayerLoader *l = nullptr; switch (type) { - case AudioMsgId::Type::Voice: l = _audioLoader.get(); isGoodId = (data->audio == _audio); break; - case AudioMsgId::Type::Song: l = _songLoader.get(); isGoodId = (data->audio == _song); break; - case AudioMsgId::Type::Video: l = _videoLoader.get(); isGoodId = (data->audio == _video); break; + case AudioMsgId::Type::Voice: l = _audioLoader.get(); isGoodId = (track->state.id == _audio); break; + case AudioMsgId::Type::Song: l = _songLoader.get(); isGoodId = (track->state.id == _song); break; + case AudioMsgId::Type::Video: l = _videoLoader.get(); isGoodId = (track->state.id == _video); break; } - if (!l || !data) return nullptr; + if (!l || !track) return nullptr; - if (!isGoodId || !data->loading || !l->check(data->file, data->data)) { + if (!isGoodId || !track->loading || !l->check(track->file, track->data)) { LOG(("Audio Error: playing changed while loading")); return nullptr; } - return data; + return track; } void Loaders::onCancel(const AudioMsgId &audio) { @@ -425,10 +385,10 @@ void Loaders::onCancel(const AudioMsgId &audio) { QMutexLocker lock(internal::audioPlayerMutex()); if (!mixer()) return; - for (int i = 0; i < AudioSimultaneousLimit; ++i) { - auto data = mixer()->dataForType(audio.type(), i); - if (data->audio == audio) { - data->loading = false; + for (auto i = 0; i != kTogetherLimit; ++i) { + auto track = mixer()->trackForType(audio.type(), i); + if (track->state.id == audio) { + track->loading = false; } } } diff --git a/Telegram/SourceFiles/media/media_audio_loaders.h 
b/Telegram/SourceFiles/media/media_audio_loaders.h index 4daec6447..5942af274 100644 --- a/Telegram/SourceFiles/media/media_audio_loaders.h +++ b/Telegram/SourceFiles/media/media_audio_loaders.h @@ -68,7 +68,7 @@ private: void emitError(AudioMsgId::Type type); AudioMsgId clear(AudioMsgId::Type type); - void setStoppedState(Mixer::AudioMsg *m, AudioPlayerState state = AudioPlayerStopped); + void setStoppedState(Mixer::Track *m, State state = State::Stopped); enum SetupError { SetupErrorAtStart = 0, @@ -78,7 +78,7 @@ private: }; void loadData(AudioMsgId audio, qint64 position); AudioPlayerLoader *setupLoader(const AudioMsgId &audio, SetupError &err, qint64 &position); - Mixer::AudioMsg *checkLoader(AudioMsgId::Type type); + Mixer::Track *checkLoader(AudioMsgId::Type type); }; diff --git a/Telegram/SourceFiles/media/media_child_ffmpeg_loader.cpp b/Telegram/SourceFiles/media/media_child_ffmpeg_loader.cpp index c6d21ef8d..f103fbb73 100644 --- a/Telegram/SourceFiles/media/media_child_ffmpeg_loader.cpp +++ b/Telegram/SourceFiles/media/media_child_ffmpeg_loader.cpp @@ -83,7 +83,7 @@ bool ChildFFMpegLoader::open(qint64 &position) { int64_t src_ch_layout = layout, dst_ch_layout = AudioToChannelLayout; _srcRate = _parentData->frequency; AVSampleFormat src_sample_fmt = _inputFormat, dst_sample_fmt = AudioToFormat; - _dstRate = (_parentData->frequency != 44100 && _parentData->frequency != 48000) ? AudioVoiceMsgFrequency : _parentData->frequency; + _dstRate = (_parentData->frequency != 44100 && _parentData->frequency != 48000) ? Media::Player::kDefaultFrequency : _parentData->frequency; av_opt_set_int(_swrContext, "in_channel_layout", src_ch_layout, 0); av_opt_set_int(_swrContext, "in_sample_rate", _srcRate, 0); diff --git a/Telegram/SourceFiles/media/media_child_ffmpeg_loader.h b/Telegram/SourceFiles/media/media_child_ffmpeg_loader.h index 3a773e2d1..ba5246c85 100644 --- a/Telegram/SourceFiles/media/media_child_ffmpeg_loader.h +++ b/Telegram/SourceFiles/media/media_child_ffmpeg_loader.h @@ -21,6 +21,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org #pragma once #include "media/media_audio_loader.h" +#include "media/media_audio.h" extern "C" { #include @@ -33,7 +34,7 @@ extern "C" { struct VideoSoundData { AVCodecContext *context = nullptr; - int32 frequency = AudioVoiceMsgFrequency; + int32 frequency = Media::Player::kDefaultFrequency; TimeMs length = 0; ~VideoSoundData(); }; @@ -120,8 +121,8 @@ private: int32 _sampleSize = 2 * sizeof(uint16); int32 _format = AL_FORMAT_STEREO16; - int32 _srcRate = AudioVoiceMsgFrequency; - int32 _dstRate = AudioVoiceMsgFrequency; + int32 _srcRate = Media::Player::kDefaultFrequency; + int32 _dstRate = Media::Player::kDefaultFrequency; int32 _maxResampleSamples = 1024; uint8_t **_dstSamplesData = nullptr; diff --git a/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp b/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp index ffa200749..a325e3a62 100644 --- a/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp +++ b/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp @@ -371,7 +371,7 @@ bool FFMpegReaderImplementation::start(Mode mode, TimeMs &positionMs) { _audioStreamId = av_find_best_stream(_fmtContext, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0); if (_mode == Mode::OnlyGifv) { if (_audioStreamId >= 0) { // should be no audio stream - _audioStreamId = -1; // do not attempt to access mixer() + _audioStreamId = -1; return false; } if (dataSize() > AnimationInMemory) { @@ -380,7 +380,7 @@ bool FFMpegReaderImplementation::start(Mode mode, TimeMs &positionMs) { if 
(_codecContext->codec_id != AV_CODEC_ID_H264) { return false; } - } else if (_mode == Mode::Silent || !Player::mixer() || !_playId) { + } else if (_mode == Mode::Silent || !_playId) { _audioStreamId = -1; } @@ -436,7 +436,7 @@ bool FFMpegReaderImplementation::start(Mode mode, TimeMs &positionMs) { } if (_audioStreamId >= 0) { - int64 position = (positionMs * soundData->frequency) / 1000LL; + auto position = (positionMs * soundData->frequency) / 1000LL; Player::mixer()->initFromVideo(_playId, std_::move(soundData), position); } diff --git a/Telegram/SourceFiles/media/player/media_player_cover.cpp b/Telegram/SourceFiles/media/player/media_player_cover.cpp index 2964782ff..4165bde2d 100644 --- a/Telegram/SourceFiles/media/player/media_player_cover.cpp +++ b/Telegram/SourceFiles/media/player/media_player_cover.cpp @@ -35,13 +35,13 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org namespace Media { namespace Player { -using State = PlayButtonLayout::State; +using ButtonState = PlayButtonLayout::State; class CoverWidget::PlayButton : public Ui::AbstractButton { public: PlayButton(QWidget *parent); - void setState(PlayButtonLayout::State state) { + void setState(ButtonState state) { _layout.setState(state); } void finishTransform() { @@ -114,17 +114,15 @@ CoverWidget::CoverWidget(QWidget *parent) : TWidget(parent) subscribe(instance()->playlistChangedNotifier(), [this] { handlePlaylistUpdate(); }); - subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) { - handleSongUpdate(e); + subscribe(instance()->updatedNotifier(), [this](const TrackState &state) { + handleSongUpdate(state); }); subscribe(instance()->songChangedNotifier(), [this] { handleSongChange(); }); handleSongChange(); - AudioMsgId playing; - auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); - handleSongUpdate(UpdatedEvent(&playing, &playbackState)); + handleSongUpdate(mixer()->currentState(AudioMsgId::Type::Song)); _playPause->finishTransform(); } @@ -153,10 +151,10 @@ void CoverWidget::handleSeekFinished(float64 progress) { auto positionMs = snap(static_cast(progress * _lastDurationMs), 0LL, _lastDurationMs); _seekPositionMs = -1; - AudioMsgId playing; - auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing && playbackState.duration) { - Media::Player::mixer()->seek(qRound(progress * playbackState.duration)); + auto type = AudioMsgId::Type::Song; + auto state = Media::Player::mixer()->currentState(type); + if (state.id && state.duration) { + Media::Player::mixer()->seek(type, qRound(progress * state.duration)); } instance()->stopSeeking(); @@ -228,52 +226,50 @@ void CoverWidget::updateRepeatTrackIcon() { _repeatTrack->setIconOverride(instance()->repeatEnabled() ? 
nullptr : &st::mediaPlayerRepeatInactiveIcon); } -void CoverWidget::handleSongUpdate(const UpdatedEvent &e) { - auto &audioId = *e.audioId; - auto &playbackState = *e.playbackState; - if (!audioId || !audioId.audio()->song()) { +void CoverWidget::handleSongUpdate(const TrackState &state) { + if (!state.id || !state.id.audio()->song()) { return; } - if (audioId.audio()->loading()) { - _playback->updateLoadingState(audioId.audio()->progress()); + if (state.id.audio()->loading()) { + _playback->updateLoadingState(state.id.audio()->progress()); } else { - _playback->updateState(*e.playbackState); + _playback->updateState(state); } - auto stopped = ((playbackState.state & AudioPlayerStoppedMask) || playbackState.state == AudioPlayerFinishing); - auto showPause = !stopped && (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + auto stopped = (IsStopped(state.state) || state.state == State::Finishing); + auto showPause = !stopped && (state.state == State::Playing || state.state == State::Resuming || state.state == State::Starting); if (instance()->isSeeking()) { showPause = true; } - auto state = [audio = audioId.audio(), showPause] { + auto buttonState = [audio = state.id.audio(), showPause] { if (audio->loading()) { - return State::Cancel; + return ButtonState::Cancel; } else if (showPause) { - return State::Pause; + return ButtonState::Pause; } - return State::Play; + return ButtonState::Play; }; - _playPause->setState(state()); + _playPause->setState(buttonState()); - updateTimeText(audioId, playbackState); + updateTimeText(state); } -void CoverWidget::updateTimeText(const AudioMsgId &audioId, const AudioPlaybackState &playbackState) { +void CoverWidget::updateTimeText(const TrackState &state) { QString time; qint64 position = 0, duration = 0, display = 0; - auto frequency = (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - if (!(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - display = position = playbackState.position; - duration = playbackState.duration; + auto frequency = state.frequency; + if (!IsStopped(state.state) && state.state != State::Finishing) { + display = position = state.position; + duration = state.duration; } else { - display = playbackState.duration ? playbackState.duration : (audioId.audio()->song()->duration * frequency); + display = state.duration ? 
state.duration : (state.id.audio()->song()->duration * frequency); } - _lastDurationMs = (playbackState.duration * 1000LL) / frequency; + _lastDurationMs = (state.duration * 1000LL) / frequency; - if (audioId.audio()->loading()) { - _time = QString::number(qRound(audioId.audio()->progress() * 100)) + '%'; + if (state.id.audio()->loading()) { + _time = QString::number(qRound(state.id.audio()->progress() * 100)) + '%'; _playback->setDisabled(true); } else { display = display / frequency; diff --git a/Telegram/SourceFiles/media/player/media_player_cover.h b/Telegram/SourceFiles/media/player/media_player_cover.h index 47ff0af22..e9c6b0c9a 100644 --- a/Telegram/SourceFiles/media/player/media_player_cover.h +++ b/Telegram/SourceFiles/media/player/media_player_cover.h @@ -21,7 +21,6 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org #pragma once class AudioMsgId; -struct AudioPlaybackState; namespace Ui { class FlatLabel; @@ -37,7 +36,7 @@ class Playback; namespace Player { class VolumeController; -struct UpdatedEvent; +struct TrackState; class CoverWidget : public TWidget, private base::Subscriber { public: @@ -66,11 +65,11 @@ private: void updateVolumeToggleIcon(); - void handleSongUpdate(const UpdatedEvent &e); + void handleSongUpdate(const TrackState &state); void handleSongChange(); void handlePlaylistUpdate(); - void updateTimeText(const AudioMsgId &audioId, const AudioPlaybackState &playbackState); + void updateTimeText(const TrackState &state); void updateTimeLabel(); TimeMs _seekPositionMs = -1; diff --git a/Telegram/SourceFiles/media/player/media_player_instance.cpp b/Telegram/SourceFiles/media/player/media_player_instance.cpp index 3e813e100..6b6e0a2a9 100644 --- a/Telegram/SourceFiles/media/player/media_player_instance.cpp +++ b/Telegram/SourceFiles/media/player/media_player_instance.cpp @@ -150,15 +150,12 @@ Instance *instance() { } void Instance::play() { - AudioMsgId playing; - auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing) { - if (playbackState.state & AudioPlayerStoppedMask) { - mixer()->play(playing); - } else { - if (playbackState.state == AudioPlayerPausing || playbackState.state == AudioPlayerPaused || playbackState.state == AudioPlayerPausedAtEnd) { - mixer()->pauseresume(AudioMsgId::Type::Song); - } + auto state = mixer()->currentState(AudioMsgId::Type::Song); + if (state.id) { + if (IsStopped(state.state)) { + mixer()->play(state.id); + } else if (IsPaused(state.state) || state.state == State::Pausing) { + mixer()->pauseresume(AudioMsgId::Type::Song); } } else if (_current) { mixer()->play(_current); @@ -177,11 +174,10 @@ void Instance::play(const AudioMsgId &audioId) { } void Instance::pause() { - AudioMsgId playing; - auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing) { - if (!(playbackState.state & AudioPlayerStoppedMask)) { - if (playbackState.state == AudioPlayerStarting || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerFinishing) { + auto state = mixer()->currentState(AudioMsgId::Type::Song); + if (state.id) { + if (!IsStopped(state.state)) { + if (state.state == State::Starting || state.state == State::Resuming || state.state == State::Playing || state.state == State::Finishing) { mixer()->pauseresume(AudioMsgId::Type::Song); } } @@ -193,11 +189,10 @@ void Instance::stop() { } void Instance::playPause() { - AudioMsgId playing; - auto playbackState = mixer()->currentState(&playing, 
AudioMsgId::Type::Song); - if (playing) { - if (playbackState.state & AudioPlayerStoppedMask) { - mixer()->play(playing); + auto state = mixer()->currentState(AudioMsgId::Type::Song); + if (state.id) { + if (IsStopped(state.state)) { + mixer()->play(state.id); } else { mixer()->pauseresume(AudioMsgId::Type::Song); } @@ -219,11 +214,10 @@ void Instance::playPauseCancelClicked() { return; } - AudioMsgId playing; - auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); - auto stopped = ((playbackState.state & AudioPlayerStoppedMask) || playbackState.state == AudioPlayerFinishing); - auto showPause = !stopped && (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); - auto audio = playing.audio(); + auto state = mixer()->currentState(AudioMsgId::Type::Song); + auto stopped = (IsStopped(state.state) || state.state == State::Finishing); + auto showPause = !stopped && (state.state == State::Playing || state.state == State::Resuming || state.state == State::Starting); + auto audio = state.id.audio(); if (audio && audio->loading()) { audio->cancel(); } else if (showPause) { @@ -252,23 +246,22 @@ void Instance::documentLoadProgress(DocumentData *document) { template void Instance::emitUpdate(CheckCallback check) { - AudioMsgId playing; - auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); - if (!playing || !check(playing)) { + auto state = mixer()->currentState(AudioMsgId::Type::Song); + if (!state.id || !check(state.id)) { return; } - setCurrent(playing); - _updatedNotifier.notify(UpdatedEvent(&playing, &playbackState), true); + setCurrent(state.id); + _updatedNotifier.notify(state, true); - if (_isPlaying && playbackState.state == AudioPlayerStoppedAtEnd) { + if (_isPlaying && state.state == State::StoppedAtEnd) { if (_repeatEnabled) { mixer()->play(_current); } else { next(); } } - auto isPlaying = !(playbackState.state & AudioPlayerStoppedMask); + auto isPlaying = !IsStopped(state.state); if (_isPlaying != isPlaying) { _isPlaying = isPlaying; if (_isPlaying) { diff --git a/Telegram/SourceFiles/media/player/media_player_instance.h b/Telegram/SourceFiles/media/player/media_player_instance.h index d01937cef..059069517 100644 --- a/Telegram/SourceFiles/media/player/media_player_instance.h +++ b/Telegram/SourceFiles/media/player/media_player_instance.h @@ -23,7 +23,6 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org namespace Notify { struct PeerUpdate; } // namespace Notify -struct AudioPlaybackState; class AudioMsgId; namespace Media { @@ -35,12 +34,7 @@ void finish(); class Instance; Instance *instance(); -struct UpdatedEvent { - UpdatedEvent(const AudioMsgId *audioId, const AudioPlaybackState *playbackState) : audioId(audioId), playbackState(playbackState) { - } - const AudioMsgId *audioId; - const AudioPlaybackState *playbackState; -}; +struct TrackState; class Instance : private base::Subscriber { public: @@ -85,7 +79,7 @@ public: base::Observable &playerWidgetOver() { return _playerWidgetOver; } - base::Observable &updatedNotifier() { + base::Observable &updatedNotifier() { return _updatedNotifier; } base::Observable &playlistChangedNotifier() { @@ -131,7 +125,7 @@ private: base::Observable _usePanelPlayer; base::Observable _titleButtonOver; base::Observable _playerWidgetOver; - base::Observable _updatedNotifier; + base::Observable _updatedNotifier; base::Observable _playlistChangedNotifier; base::Observable _songChangedNotifier; 
base::Observable _repeatChangedNotifier; diff --git a/Telegram/SourceFiles/media/player/media_player_widget.cpp b/Telegram/SourceFiles/media/player/media_player_widget.cpp index d37cb07b0..48d84c47a 100644 --- a/Telegram/SourceFiles/media/player/media_player_widget.cpp +++ b/Telegram/SourceFiles/media/player/media_player_widget.cpp @@ -37,7 +37,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org namespace Media { namespace Player { -using State = PlayButtonLayout::State; +using ButtonState = PlayButtonLayout::State; class Widget::PlayButton : public Ui::RippleButton { public: @@ -128,17 +128,15 @@ Widget::Widget(QWidget *parent) : TWidget(parent) subscribe(instance()->playlistChangedNotifier(), [this] { handlePlaylistUpdate(); }); - subscribe(instance()->updatedNotifier(), [this](const UpdatedEvent &e) { - handleSongUpdate(e); + subscribe(instance()->updatedNotifier(), [this](const TrackState &state) { + handleSongUpdate(state); }); subscribe(instance()->songChangedNotifier(), [this] { handleSongChange(); }); handleSongChange(); - AudioMsgId playing; - auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); - handleSongUpdate(UpdatedEvent(&playing, &playbackState)); + handleSongUpdate(mixer()->currentState(AudioMsgId::Type::Song)); _playPause->finishTransform(); } @@ -207,10 +205,10 @@ void Widget::handleSeekFinished(float64 progress) { auto positionMs = snap(static_cast(progress * _lastDurationMs), 0LL, _lastDurationMs); _seekPositionMs = -1; - AudioMsgId playing; - auto playbackState = mixer()->currentState(&playing, AudioMsgId::Type::Song); - if (playing && playbackState.duration) { - mixer()->seek(qRound(progress * playbackState.duration)); + auto type = AudioMsgId::Type::Song; + auto state = mixer()->currentState(type); + if (state.id && state.duration) { + mixer()->seek(type, qRound(progress * state.duration)); } instance()->stopSeeking(); @@ -301,52 +299,50 @@ void Widget::updateRepeatTrackIcon() { _repeatTrack->setRippleColorOverride(repeating ? 
nullptr : &st::mediaPlayerRepeatDisabledRippleBg); } -void Widget::handleSongUpdate(const UpdatedEvent &e) { - auto &audioId = *e.audioId; - auto &playbackState = *e.playbackState; - if (!audioId || !audioId.audio()->song()) { +void Widget::handleSongUpdate(const TrackState &state) { + if (!state.id || !state.id.audio()->song()) { return; } - if (audioId.audio()->loading()) { - _playback->updateLoadingState(audioId.audio()->progress()); + if (state.id.audio()->loading()) { + _playback->updateLoadingState(state.id.audio()->progress()); } else { - _playback->updateState(*e.playbackState); + _playback->updateState(state); } - auto stopped = ((playbackState.state & AudioPlayerStoppedMask) || playbackState.state == AudioPlayerFinishing); - auto showPause = !stopped && (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting); + auto stopped = (IsStopped(state.state) || state.state == State::Finishing); + auto showPause = !stopped && (state.state == State::Playing || state.state == State::Resuming || state.state == State::Starting); if (instance()->isSeeking()) { showPause = true; } - auto state = [audio = audioId.audio(), showPause] { + auto buttonState = [audio = state.id.audio(), showPause] { if (audio->loading()) { - return State::Cancel; + return ButtonState::Cancel; } else if (showPause) { - return State::Pause; + return ButtonState::Pause; } - return State::Play; + return ButtonState::Play; }; - _playPause->setState(state()); + _playPause->setState(buttonState()); - updateTimeText(audioId, playbackState); + updateTimeText(state); } -void Widget::updateTimeText(const AudioMsgId &audioId, const AudioPlaybackState &playbackState) { +void Widget::updateTimeText(const TrackState &state) { QString time; qint64 position = 0, duration = 0, display = 0; - auto frequency = (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency); - if (!(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) { - display = position = playbackState.position; - duration = playbackState.duration; + auto frequency = state.frequency; + if (!IsStopped(state.state) && state.state != State::Finishing) { + display = position = state.position; + duration = state.duration; } else { - display = playbackState.duration ? playbackState.duration : (audioId.audio()->song()->duration * frequency); + display = state.duration ? 
state.duration : (state.id.audio()->song()->duration * frequency);
 	}
-	_lastDurationMs = (playbackState.duration * 1000LL) / frequency;
+	_lastDurationMs = (state.duration * 1000LL) / frequency;
 
-	if (audioId.audio()->loading()) {
-		_time = QString::number(qRound(audioId.audio()->progress() * 100)) + '%';
+	if (state.id.audio()->loading()) {
+		_time = QString::number(qRound(state.id.audio()->progress() * 100)) + '%';
 		_playback->setDisabled(true);
 	} else {
 		display = display / frequency;
diff --git a/Telegram/SourceFiles/media/player/media_player_widget.h b/Telegram/SourceFiles/media/player/media_player_widget.h
index d95d5251c..09911d4e6 100644
--- a/Telegram/SourceFiles/media/player/media_player_widget.h
+++ b/Telegram/SourceFiles/media/player/media_player_widget.h
@@ -21,7 +21,6 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
 #pragma once
 
 class AudioMsgId;
-struct AudioPlaybackState;
 
 namespace Ui {
 class FlatLabel;
@@ -39,7 +38,7 @@ namespace Player {
 
 class PlayButton;
 class VolumeWidget;
-struct UpdatedEvent;
+struct TrackState;
 
 class Widget : public TWidget, private base::Subscriber {
 public:
@@ -81,11 +80,11 @@ private:
 
 	void updateVolumeToggleIcon();
 
-	void handleSongUpdate(const UpdatedEvent &e);
+	void handleSongUpdate(const TrackState &state);
 	void handleSongChange();
 	void handlePlaylistUpdate();
 
-	void updateTimeText(const AudioMsgId &audioId, const AudioPlaybackState &playbackState);
+	void updateTimeText(const TrackState &state);
 	void updateTimeLabel();
 
 	TimeMs _seekPositionMs = -1;
diff --git a/Telegram/SourceFiles/media/view/media_clip_controller.cpp b/Telegram/SourceFiles/media/view/media_clip_controller.cpp
index 2a21594ac..524f555d1 100644
--- a/Telegram/SourceFiles/media/view/media_clip_controller.cpp
+++ b/Telegram/SourceFiles/media/view/media_clip_controller.cpp
@@ -105,14 +105,14 @@ void Controller::fadeUpdated(float64 opacity) {
 	_playback->setFadeOpacity(opacity);
 }
 
-void Controller::updatePlayback(const AudioPlaybackState &playbackState) {
-	updatePlayPauseResumeState(playbackState);
-	_playback->updateState(playbackState);
-	updateTimeTexts(playbackState);
+void Controller::updatePlayback(const Player::TrackState &state) {
+	updatePlayPauseResumeState(state);
+	_playback->updateState(state);
+	updateTimeTexts(state);
 }
 
-void Controller::updatePlayPauseResumeState(const AudioPlaybackState &playbackState) {
-	bool showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || _seekPositionMs >= 0);
+void Controller::updatePlayPauseResumeState(const Player::TrackState &state) {
+	auto showPause = (state.state == Player::State::Playing || state.state == Player::State::Resuming || _seekPositionMs >= 0);
 	if (showPause != _showPause) {
 		disconnect(_playPauseResume, SIGNAL(clicked()), this, _showPause ? SIGNAL(pausePressed()) : SIGNAL(playPressed()));
 		_showPause = showPause;
@@ -122,21 +122,21 @@ void Controller::updatePlayPauseResumeState(const AudioPlaybackSt
 	}
 }
 
-void Controller::updateTimeTexts(const AudioPlaybackState &playbackState) {
-	qint64 position = 0, duration = playbackState.duration;
+void Controller::updateTimeTexts(const Player::TrackState &state) {
+	qint64 position = 0, duration = state.duration;
 
-	if (!(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
-		position = playbackState.position;
-	} else if (playbackState.state == AudioPlayerStoppedAtEnd) {
-		position = playbackState.duration;
+	if (!Player::IsStopped(state.state) && state.state != Player::State::Finishing) {
+		position = state.position;
+	} else if (state.state == Player::State::StoppedAtEnd) {
+		position = state.duration;
 	} else {
 		position = 0;
 	}
 
-	auto playFrequency = (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
+	auto playFrequency = state.frequency;
 	auto playAlready = position / playFrequency;
-	auto playLeft = (playbackState.duration / playFrequency) - playAlready;
+	auto playLeft = (state.duration / playFrequency) - playAlready;
 
-	_lastDurationMs = (playbackState.duration * 1000LL) / playFrequency;
+	_lastDurationMs = (state.duration * 1000LL) / playFrequency;
 	_timeAlready = formatDurationText(playAlready);
 	auto minus = QChar(8722);
diff --git a/Telegram/SourceFiles/media/view/media_clip_controller.h b/Telegram/SourceFiles/media/view/media_clip_controller.h
index c749791ec..b3698b5f5 100644
--- a/Telegram/SourceFiles/media/view/media_clip_controller.h
+++ b/Telegram/SourceFiles/media/view/media_clip_controller.h
@@ -26,9 +26,11 @@ class FadeAnimation;
 class IconButton;
 } // namespace Ui
 
-struct AudioPlaybackState;
-
 namespace Media {
+namespace Player {
+struct TrackState;
+} // namespace Player
+
 namespace Clip {
 
 class Playback;
@@ -43,7 +45,7 @@ public:
 	void showAnimated();
 	void hideAnimated();
 
-	void updatePlayback(const AudioPlaybackState &playbackState);
+	void updatePlayback(const Player::TrackState &state);
 	void setInFullScreen(bool inFullScreen);
 
 	void grabStart() override;
@@ -74,8 +76,8 @@ private:
 	void fadeFinished();
 	void fadeUpdated(float64 opacity);
 
-	void updatePlayPauseResumeState(const AudioPlaybackState &playbackState);
-	void updateTimeTexts(const AudioPlaybackState &playbackState);
+	void updatePlayPauseResumeState(const Player::TrackState &state);
+	void updateTimeTexts(const Player::TrackState &state);
 	void refreshTimeTexts();
 
 	bool _showPause = false;
diff --git a/Telegram/SourceFiles/media/view/media_clip_playback.cpp b/Telegram/SourceFiles/media/view/media_clip_playback.cpp
index c49808a5c..4c9ab4394 100644
--- a/Telegram/SourceFiles/media/view/media_clip_playback.cpp
+++ b/Telegram/SourceFiles/media/view/media_clip_playback.cpp
@@ -30,17 +30,17 @@ namespace Clip {
 Playback::Playback(Ui::ContinuousSlider *slider) : _slider(slider) {
 }
 
-void Playback::updateState(const AudioPlaybackState &playbackState) {
-	qint64 position = 0, duration = playbackState.duration;
+void Playback::updateState(const Player::TrackState &state) {
+	qint64 position = 0, duration = state.duration;
 
 	auto wasDisabled = _slider->isDisabled();
 	if (wasDisabled) setDisabled(false);
-	_playing = !(playbackState.state & AudioPlayerStoppedMask);
-	if (_playing || playbackState.state == AudioPlayerStopped) {
-		position = playbackState.position;
-	} else if (playbackState.state == AudioPlayerStoppedAtEnd) {
-		position = playbackState.duration;
+	_playing = !Player::IsStopped(state.state);
+	if (_playing || state.state == Player::State::Stopped) {
+		position = state.position;
+	} else if (state.state == Player::State::StoppedAtEnd) {
+		position = state.duration;
 	} else {
 		position = 0;
 	}
diff --git a/Telegram/SourceFiles/media/view/media_clip_playback.h b/Telegram/SourceFiles/media/view/media_clip_playback.h
index 40c251d0a..09db0e9c9 100644
--- a/Telegram/SourceFiles/media/view/media_clip_playback.h
+++ b/Telegram/SourceFiles/media/view/media_clip_playback.h
@@ -22,16 +22,18 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
 
 #include "ui/widgets/continuous_sliders.h"
 
-struct AudioPlaybackState;
-
 namespace Media {
+namespace Player {
+struct TrackState;
+} // namespace Player
+
 namespace Clip {
 
 class Playback {
 public:
 	Playback(Ui::ContinuousSlider *slider);
 
-	void updateState(const AudioPlaybackState &playbackState);
+	void updateState(const Player::TrackState &state);
 	void updateLoadingState(float64 progress);
 
 	void setFadeOpacity(float64 opacity) {
diff --git a/Telegram/SourceFiles/mediaview.cpp b/Telegram/SourceFiles/mediaview.cpp
index 4660063a4..ce1f22b47 100644
--- a/Telegram/SourceFiles/mediaview.cpp
+++ b/Telegram/SourceFiles/mediaview.cpp
@@ -1536,8 +1536,8 @@ void MediaView::restartVideoAtSeekPosition(TimeMs positionMs) {
 	_videoPaused = _videoIsSilent = _videoStopped = false;
 	_videoPositionMs = positionMs;
 
-	AudioPlaybackState state;
-	state.state = AudioPlayerPlaying;
+	Media::Player::TrackState state;
+	state.state = Media::Player::State::Playing;
 	state.position = _videoPositionMs;
 	state.duration = _videoDurationMs;
 	state.frequency = _videoFrequencyMs;
@@ -1587,9 +1587,9 @@ void MediaView::onVideoPlayProgress(const AudioMsgId &audioId) {
 	}
 }
 
-void MediaView::updateVideoPlaybackState(const AudioPlaybackState &state) {
+void MediaView::updateVideoPlaybackState(const Media::Player::TrackState &state) {
 	if (state.frequency) {
-		if (state.state & AudioPlayerStoppedMask) {
+		if (Media::Player::IsStopped(state.state)) {
 			_videoStopped = true;
 		}
 		_clipController->updatePlayback(state);
@@ -1600,13 +1600,13 @@ void MediaView::updateVideoPlaybackState(const AudioPlaybackState &state) {
 }
 
 void MediaView::updateSilentVideoPlaybackState() {
-	AudioPlaybackState state;
+	Media::Player::TrackState state;
 	if (_videoPaused) {
-		state.state = AudioPlayerPaused;
+		state.state = Media::Player::State::Paused;
 	} else if (_videoPositionMs == _videoDurationMs) {
-		state.state = AudioPlayerStoppedAtEnd;
+		state.state = Media::Player::State::StoppedAtEnd;
 	} else {
-		state.state = AudioPlayerPlaying;
+		state.state = Media::Player::State::Playing;
 	}
 	state.position = _videoPositionMs;
 	state.duration = _videoDurationMs;
diff --git a/Telegram/SourceFiles/mediaview.h b/Telegram/SourceFiles/mediaview.h
index c4086deaf..4700d0e8e 100644
--- a/Telegram/SourceFiles/mediaview.h
+++ b/Telegram/SourceFiles/mediaview.h
@@ -24,6 +24,9 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
 #include "ui/effects/radial_animation.h"
 
 namespace Media {
+namespace Player {
+struct TrackState;
+} // namespace Player
 namespace Clip {
 class Controller;
 } // namespace Clip
@@ -45,8 +48,6 @@ namespace Notify {
 struct PeerUpdate;
 } // namespace Notify
 
-struct AudioPlaybackState;
-
 class MediaView : public TWidget, private base::Subscriber, public RPCSender, public ClickHandlerHost {
 	Q_OBJECT
 
@@ -170,7 +171,7 @@ private:
 	void updateCursor();
 	void setZoomLevel(int newZoom);
 
-	void updateVideoPlaybackState(const AudioPlaybackState &state);
+	void updateVideoPlaybackState(const Media::Player::TrackState &state);
 	void updateSilentVideoPlaybackState();
 
 	void restartVideoAtSeekPosition(TimeMs positionMs);
diff --git a/Telegram/SourceFiles/overview/overview_layout.cpp b/Telegram/SourceFiles/overview/overview_layout.cpp
index 795696ec6..70f5d6349 100644
--- a/Telegram/SourceFiles/overview/overview_layout.cpp
+++ b/Telegram/SourceFiles/overview/overview_layout.cpp
@@ -649,12 +649,12 @@ bool Voice::updateStatusText() {
 		statusSize = FileStatusSizeFailed;
 	} else if (_data->loaded()) {
 		statusSize = FileStatusSizeLoaded;
-		AudioMsgId playing;
-		auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice);
-		if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
-			statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
-			realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
-			showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
+		using State = Media::Player::State;
+		auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Voice);
+		if (state.id == AudioMsgId(_data, _parent->fullId()) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) {
+			statusSize = -1 - (state.position / state.frequency);
+			realDuration = (state.duration / state.frequency);
+			showPause = (state.state == State::Playing || state.state == State::Resuming || state.state == State::Starting);
 		}
 	} else {
 		statusSize = FileStatusSizeReady;
@@ -933,14 +933,14 @@ bool Document::updateStatusText() {
 	} else if (_data->loaded()) {
 		if (_data->song()) {
 			statusSize = FileStatusSizeLoaded;
-			AudioMsgId playing;
-			auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
-			if (playing == AudioMsgId(_data, _parent->fullId()) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
-				statusSize = -1 - (playbackState.position / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency));
-				realDuration = playbackState.duration / (playbackState.frequency ? playbackState.frequency : AudioVoiceMsgFrequency);
-				showPause = (playbackState.state == AudioPlayerPlaying || playbackState.state == AudioPlayerResuming || playbackState.state == AudioPlayerStarting);
+			using State = Media::Player::State;
+			auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Song);
+			if (state.id == AudioMsgId(_data, _parent->fullId()) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) {
+				statusSize = -1 - (state.position / state.frequency);
+				realDuration = (state.duration / state.frequency);
+				showPause = (state.state == State::Playing || state.state == State::Resuming || state.state == State::Starting);
 			}
-			if (!showPause && (playing == AudioMsgId(_data, _parent->fullId())) && Media::Player::instance()->isSeeking()) {
+			if (!showPause && (state.id == AudioMsgId(_data, _parent->fullId())) && Media::Player::instance()->isSeeking()) {
 				showPause = true;
 			}
 		} else {
diff --git a/Telegram/SourceFiles/overviewwidget.cpp b/Telegram/SourceFiles/overviewwidget.cpp
index 3e46ab964..ea7327b85 100644
--- a/Telegram/SourceFiles/overviewwidget.cpp
+++ b/Telegram/SourceFiles/overviewwidget.cpp
@@ -2092,10 +2092,9 @@ int32 OverviewWidget::lastScrollTop() const {
 
 int32 OverviewWidget::countBestScroll() const {
 	if (type() == OverviewMusicFiles) {
-		AudioMsgId playing;
-		Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
-		if (playing) {
-			int32 top = _inner->itemTop(playing.contextId());
+		auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Song);
+		if (state.id) {
+			int32 top = _inner->itemTop(state.id.contextId());
 			if (top >= 0) {
 				return snap(top - int(_scroll->height() - (st::msgPadding.top() + st::mediaThumbSize + st::msgPadding.bottom())) / 2, 0, _scroll->scrollTopMax());
 			}
diff --git a/Telegram/SourceFiles/platform/platform_audio.h b/Telegram/SourceFiles/platform/platform_audio.h
new file mode 100644
index 000000000..dcdb9a893
--- /dev/null
+++ b/Telegram/SourceFiles/platform/platform_audio.h
@@ -0,0 +1,37 @@
+/*
+This file is part of Telegram Desktop,
+the official desktop version of Telegram messaging app, see https://telegram.org
+
+Telegram Desktop is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+It is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+In addition, as a special exception, the copyright holders give permission
+to link the code of portions of this program with the OpenSSL library.
+
+Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
+Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
+*/
+#pragma once
+
+#if defined Q_OS_MAC || defined Q_OS_LINUX
+namespace Platform {
+namespace Audio {
+
+inline void Init() {
+}
+
+inline void DeInit() {
+}
+
+} // namespace Audio
+} // namespace Platform
+#elif defined Q_OS_WINRT || defined Q_OS_WIN // Q_OS_MAC || Q_OS_LINUX
+#include "platform/win/audio_win.h"
+#endif // Q_OS_MAC || Q_OS_LINUX || Q_OS_WINRT || Q_OS_WIN
diff --git a/Telegram/SourceFiles/platform/win/audio_win.cpp b/Telegram/SourceFiles/platform/win/audio_win.cpp
new file mode 100644
index 000000000..babcf0e14
--- /dev/null
+++ b/Telegram/SourceFiles/platform/win/audio_win.cpp
@@ -0,0 +1,157 @@
+/*
+This file is part of Telegram Desktop,
+the official desktop version of Telegram messaging app, see https://telegram.org
+
+Telegram Desktop is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+It is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+In addition, as a special exception, the copyright holders give permission
+to link the code of portions of this program with the OpenSSL library.
+
+Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
+Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
+*/
+#include "stdafx.h"
+#include "platform/win/audio_win.h"
+
+#include "media/media_audio.h"
+
+#include <mmdeviceapi.h>
+#include <audioclient.h>
+
+#include <wrl/client.h>
+using namespace Microsoft::WRL;
+
+namespace Platform {
+namespace Audio {
+namespace {
+
+// Inspired by Chromium.
+class DeviceListener : public IMMNotificationClient {
+public:
+	DeviceListener() = default;
+	DeviceListener(const DeviceListener &other) = delete;
+	DeviceListener &operator=(const DeviceListener &other) = delete;
+	virtual ~DeviceListener() = default;
+
+private:
+	// IMMNotificationClient implementation.
+	STDMETHOD_(ULONG, AddRef)() override {
+		return 1;
+	}
+	STDMETHOD_(ULONG, Release)() override {
+		return 1;
+	}
+	STDMETHOD(QueryInterface)(REFIID iid, void** object) override;
+	STDMETHOD(OnPropertyValueChanged)(LPCWSTR device_id, const PROPERTYKEY key) override;
+	STDMETHOD(OnDeviceAdded)(LPCWSTR device_id) override {
+		return S_OK;
+	}
+	STDMETHOD(OnDeviceRemoved)(LPCWSTR device_id) override {
+		return S_OK;
+	}
+	STDMETHOD(OnDeviceStateChanged)(LPCWSTR device_id, DWORD new_state) override;
+	STDMETHOD(OnDefaultDeviceChanged)(EDataFlow flow, ERole role, LPCWSTR new_default_device_id) override;
+
+};
+
+STDMETHODIMP DeviceListener::QueryInterface(REFIID iid, void** object) {
+	if (iid == IID_IUnknown || iid == __uuidof(IMMNotificationClient)) {
+		*object = static_cast<IMMNotificationClient*>(this);
+		return S_OK;
+	}
+
+	*object = NULL;
+	return E_NOINTERFACE;
+}
+
+STDMETHODIMP DeviceListener::OnPropertyValueChanged(LPCWSTR device_id, const PROPERTYKEY key) {
+	LOG(("Audio Info: OnPropertyValueChanged() scheduling detach from audio device."));
+	Media::Player::DetachFromDeviceByTimer();
+	return S_OK;
+}
+
+STDMETHODIMP DeviceListener::OnDeviceStateChanged(LPCWSTR device_id, DWORD new_state) {
+	LOG(("Audio Info: OnDeviceStateChanged() scheduling detach from audio device."));
+	Media::Player::DetachFromDeviceByTimer();
+	return S_OK;
+}
+
+STDMETHODIMP DeviceListener::OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR new_default_device_id) {
+	// Only listen for console and communication device changes.
+	if ((role != eConsole && role != eCommunications) || (flow != eRender && flow != eCapture)) {
+		LOG(("Audio Info: skipping OnDefaultDeviceChanged() flow %1, role %2, new_default_device_id: %3").arg(flow).arg(role).arg(new_default_device_id ? '"' + QString::fromWCharArray(new_default_device_id) + '"' : QString("nullptr")));
+		return S_OK;
+	}
+
+	LOG(("Audio Info: OnDefaultDeviceChanged() scheduling detach from audio device, flow %1, role %2, new_default_device_id: %3").arg(flow).arg(role).arg(new_default_device_id ? '"' + QString::fromWCharArray(new_default_device_id) + '"' : QString("nullptr")));
+	Media::Player::DetachFromDeviceByTimer();
+
+	return S_OK;
+}
+
+auto WasCoInitialized = false;
+ComPtr<IMMDeviceEnumerator> Enumerator;
+
+DeviceListener *Listener = nullptr;
+
+} // namespace
+
+void Init() {
+	auto hr = CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&Enumerator));
+	if (FAILED(hr)) {
+		Enumerator.Reset();
+
+		if (hr == CO_E_NOTINITIALIZED) {
+			LOG(("Audio Info: CoCreateInstance fails with CO_E_NOTINITIALIZED"));
+			hr = CoInitialize(nullptr);
+			if (SUCCEEDED(hr)) {
+				WasCoInitialized = true;
+				hr = CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&Enumerator));
+				if (FAILED(hr)) {
+					Enumerator.Reset();
+
+					LOG(("Audio Error: could not CoCreateInstance of MMDeviceEnumerator, HRESULT: %1").arg(hr));
+					return;
+				}
+			}
+		} else {
+			LOG(("Audio Error: could not CoCreateInstance of MMDeviceEnumerator, HRESULT: %1").arg(hr));
+			return;
+		}
+	}
+
+	Listener = new DeviceListener();
+	hr = Enumerator->RegisterEndpointNotificationCallback(Listener);
+	if (FAILED(hr)) {
+		LOG(("Audio Error: RegisterEndpointNotificationCallback failed, HRESULT: %1").arg(hr));
+		delete base::take(Listener);
+	}
+}
+
+void DeInit() {
+	if (Enumerator) {
+		if (Listener) {
+			auto hr = Enumerator->UnregisterEndpointNotificationCallback(Listener);
+			if (FAILED(hr)) {
+				LOG(("Audio Error: UnregisterEndpointNotificationCallback failed, HRESULT: %1").arg(hr));
+			}
+			delete base::take(Listener);
+		}
+		Enumerator.Reset();
+	}
+	if (WasCoInitialized) {
+		CoUninitialize();
+	}
+	AUDCLNT_E_NOT_INITIALIZED;
+}
+
+} // namespace Audio
+} // namespace Platform
diff --git a/Telegram/SourceFiles/platform/win/audio_win.h b/Telegram/SourceFiles/platform/win/audio_win.h
new file mode 100644
index 000000000..ac707ef4b
--- /dev/null
+++ b/Telegram/SourceFiles/platform/win/audio_win.h
@@ -0,0 +1,31 @@
+/*
+This file is part of Telegram Desktop,
+the official desktop version of Telegram messaging app, see https://telegram.org
+
+Telegram Desktop is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+It is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+In addition, as a special exception, the copyright holders give permission
+to link the code of portions of this program with the OpenSSL library.
+
+Full license: https://github.com/telegramdesktop/tdesktop/blob/master/LICENSE
+Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
+*/
+#pragma once
+
+namespace Platform {
+namespace Audio {
+
+void Init();
+void DeInit();
+
+} // namespace Audio
+} // namespace Platform
+
diff --git a/Telegram/SourceFiles/pspecific_mac_p.mm b/Telegram/SourceFiles/pspecific_mac_p.mm
index 86fdf1019..44c55eb02 100644
--- a/Telegram/SourceFiles/pspecific_mac_p.mm
+++ b/Telegram/SourceFiles/pspecific_mac_p.mm
@@ -23,6 +23,7 @@ Copyright (c) 2014-2017 John Preston, https://desktop.telegram.org
 #include "application.h"
 #include "localstorage.h"
 #include "media/player/media_player_instance.h"
+#include "media/media_audio.h"
 #include "platform/mac/mac_utilities.h"
 #include "styles/style_window.h"
 #include "lang.h"
@@ -125,6 +126,9 @@ ApplicationDelegate *_sharedDelegate = nil;
 
 - (void)receiveWakeNote:(NSNotification*)aNotification {
 	if (App::app()) App::app()->checkLocalTime();
+
+	LOG(("Audio Info: -receiveWakeNote: received, scheduling detach from audio device"));
+	Media::Player::DetachFromDeviceByTimer();
 }
 
 - (void)setWatchingMediaKeys:(BOOL)watching {
@@ -216,27 +220,21 @@ bool objc_handleMediaKeyEvent(void *ev) {
 	switch (keyCode) {
 	case NX_KEYTYPE_PLAY:
 		if (keyState == 0) { // Play pressed and released
-			if (Media::Player::exists()) {
-				Media::Player::instance()->playPause();
-			}
+			Media::Player::instance()->playPause();
 			return true;
 		}
 		break;
 
 	case NX_KEYTYPE_FAST:
 		if (keyState == 0) { // Next pressed and released
-			if (Media::Player::exists()) {
-				Media::Player::instance()->next();
-			}
+			Media::Player::instance()->next();
 			return true;
 		}
 		break;
 
 	case NX_KEYTYPE_REWIND:
 		if (keyState == 0) { // Previous pressed and released
-			if (Media::Player::exists()) {
-				Media::Player::instance()->previous();
-			}
+			Media::Player::instance()->previous();
 			return true;
 		}
 		break;
diff --git a/Telegram/SourceFiles/structs.cpp b/Telegram/SourceFiles/structs.cpp
index 22eed9aa8..277b3b259 100644
--- a/Telegram/SourceFiles/structs.cpp
+++ b/Telegram/SourceFiles/structs.cpp
@@ -1174,10 +1174,10 @@ void DocumentOpenClickHandler::doOpen(DocumentData *data, HistoryItem *context,
 		}
 	}
 	if (!location.isEmpty() || (!data->data().isEmpty() && (playVoice || playMusic || playVideo || playAnimation))) {
+		using State = Media::Player::State;
 		if (playVoice) {
-			AudioMsgId playing;
-			auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice);
-			if (playing == AudioMsgId(data, msgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
+			auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Voice);
+			if (state.id == AudioMsgId(data, msgId) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) {
 				Media::Player::mixer()->pauseresume(AudioMsgId::Type::Voice);
 			} else {
 				auto audio = AudioMsgId(data, msgId);
@@ -1188,9 +1188,8 @@ void DocumentOpenClickHandler::doOpen(DocumentData *data, HistoryItem *context,
 				}
 			}
 		} else if (playMusic) {
-			AudioMsgId playing;
-			auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
-			if (playing == AudioMsgId(data, msgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
+			auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Song);
+			if (state.id == AudioMsgId(data, msgId) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) {
 				Media::Player::mixer()->pauseresume(AudioMsgId::Type::Song);
 			} else {
 				auto song = AudioMsgId(data, msgId);
@@ -1480,25 +1479,24 @@ void DocumentData::performActionOnLoad() {
 			return;
 		}
 	}
+	using State = Media::Player::State;
 	if (playVoice) {
 		if (loaded()) {
-			AudioMsgId playing;
-			auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Voice);
-			if (playing == AudioMsgId(this, _actionOnLoadMsgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
+			auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Voice);
+			if (state.id == AudioMsgId(this, _actionOnLoadMsgId) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) {
 				Media::Player::mixer()->pauseresume(AudioMsgId::Type::Voice);
-			} else if (playbackState.state & AudioPlayerStoppedMask) {
+			} else if (Media::Player::IsStopped(state.state)) {
 				Media::Player::mixer()->play(AudioMsgId(this, _actionOnLoadMsgId));
 				if (App::main()) App::main()->mediaMarkRead(this);
 			}
 		}
 	} else if (playMusic) {
 		if (loaded()) {
-			AudioMsgId playing;
-			auto playbackState = Media::Player::mixer()->currentState(&playing, AudioMsgId::Type::Song);
-			if (playing == AudioMsgId(this, _actionOnLoadMsgId) && !(playbackState.state & AudioPlayerStoppedMask) && playbackState.state != AudioPlayerFinishing) {
+			auto state = Media::Player::mixer()->currentState(AudioMsgId::Type::Song);
+			if (state.id == AudioMsgId(this, _actionOnLoadMsgId) && !Media::Player::IsStopped(state.state) && state.state != State::Finishing) {
 				Media::Player::mixer()->pauseresume(AudioMsgId::Type::Song);
-			} else if (playbackState.state & AudioPlayerStoppedMask) {
-				AudioMsgId song(this, _actionOnLoadMsgId);
+			} else if (Media::Player::IsStopped(state.state)) {
+				auto song = AudioMsgId(this, _actionOnLoadMsgId);
 				Media::Player::mixer()->play(song);
 				Media::Player::Updated().notify(song);
 			}
diff --git a/Telegram/SourceFiles/window/window.style b/Telegram/SourceFiles/window/window.style
index d5b830e19..4fdc48e5f 100644
--- a/Telegram/SourceFiles/window/window.style
+++ b/Telegram/SourceFiles/window/window.style
@@ -30,7 +30,7 @@ windowDefaultHeight: 600px;
 windowShadow: icon {{ "window_shadow", windowShadowFg }};
 windowShadowShift: 1px;
 
-adaptiveChatWideWidth: 860px;
+adaptiveChatWideWidth: 880px;
 
 notifyBorder: windowShadowFgFallback;
 notifyBorderWidth: 1px;
diff --git a/Telegram/build/version b/Telegram/build/version
index 0c273e6d1..386a3fbdc 100644
--- a/Telegram/build/version
+++ b/Telegram/build/version
@@ -3,4 +3,4 @@ AppVersionStrMajor 1.0
 AppVersionStrSmall 1.0.2
 AppVersionStr 1.0.2
 AlphaChannel 0
-BetaVersion 0
+BetaVersion 1000002001
diff --git a/Telegram/gyp/Telegram.gyp b/Telegram/gyp/Telegram.gyp
index 60d54c0a3..a1e632ae9 100644
--- a/Telegram/gyp/Telegram.gyp
+++ b/Telegram/gyp/Telegram.gyp
@@ -380,6 +380,8 @@
       '<(src_loc)/platform/mac/notifications_manager_mac.h',
      '<(src_loc)/platform/mac/window_title_mac.mm',
      '<(src_loc)/platform/mac/window_title_mac.h',
+      '<(src_loc)/platform/win/audio_win.cpp',
+      '<(src_loc)/platform/win/audio_win.h',
      '<(src_loc)/platform/win/main_window_win.cpp',
      '<(src_loc)/platform/win/main_window_win.h',
      '<(src_loc)/platform/win/notifications_manager_win.cpp',
@@ -392,6 +394,7 @@
      '<(src_loc)/platform/win/windows_dlls.h',
      '<(src_loc)/platform/win/windows_event_filter.cpp',
      '<(src_loc)/platform/win/windows_event_filter.h',
+      '<(src_loc)/platform/platform_audio.h',
      '<(src_loc)/platform/platform_file_dialog.h',
      '<(src_loc)/platform/platform_main_window.h',
      '<(src_loc)/platform/platform_notifications_manager.h',
@@ -644,12 +647,14 @@
       'sources!': [
        '<(src_loc)/pspecific_win.cpp',
        '<(src_loc)/pspecific_win.h',
+        '<(src_loc)/platform/win/audio_win.cpp',
+        '<(src_loc)/platform/win/audio_win.h',
        '<(src_loc)/platform/win/main_window_win.cpp',
        '<(src_loc)/platform/win/main_window_win.h',
        '<(src_loc)/platform/win/notifications_manager_win.cpp',
        '<(src_loc)/platform/win/notifications_manager_win.h',
-        '<(src_loc)/platform/win/window_title_win.cpp',
-        '<(src_loc)/platform/win/window_title_win.h',
+        '<(src_loc)/platform/win/window_title_win.cpp',
+        '<(src_loc)/platform/win/window_title_win.h',
        '<(src_loc)/platform/win/windows_app_user_model_id.cpp',
        '<(src_loc)/platform/win/windows_app_user_model_id.h',
        '<(src_loc)/platform/win/windows_dlls.cpp',