From 3062a8ef995cfca8a4534e02e9d5a003877e8a06 Mon Sep 17 00:00:00 2001 From: Berkus Decker Date: Wed, 22 Nov 2017 03:52:55 +0200 Subject: [PATCH] git subrepo clone https://github.com/telegramdesktop/libtgvoip Telegram/ThirdParty/libtgvoip subrepo: subdir: "Telegram/ThirdParty/libtgvoip" merged: "651594b3" upstream: origin: "https://github.com/telegramdesktop/libtgvoip" branch: "tdesktop" commit: "651594b3" git-subrepo: version: "0.3.1" origin: "???" commit: "???" --- .gitmodules | 3 - Telegram/ThirdParty/libtgvoip | 1 - Telegram/ThirdParty/libtgvoip/.gitignore | 21 + Telegram/ThirdParty/libtgvoip/.gitrepo | 11 + Telegram/ThirdParty/libtgvoip/Android.mk | 58 + .../ThirdParty/libtgvoip/BlockingQueue.cpp | 10 + Telegram/ThirdParty/libtgvoip/BlockingQueue.h | 94 + .../libtgvoip/BufferInputStream.cpp | 106 + .../ThirdParty/libtgvoip/BufferInputStream.h | 38 + .../libtgvoip/BufferOutputStream.cpp | 99 + .../ThirdParty/libtgvoip/BufferOutputStream.h | 38 + Telegram/ThirdParty/libtgvoip/BufferPool.cpp | 66 + Telegram/ThirdParty/libtgvoip/BufferPool.h | 32 + .../libtgvoip/CongestionControl.cpp | 173 ++ .../ThirdParty/libtgvoip/CongestionControl.h | 71 + .../ThirdParty/libtgvoip/EchoCanceller.cpp | 356 +++ Telegram/ThirdParty/libtgvoip/EchoCanceller.h | 53 + Telegram/ThirdParty/libtgvoip/Info.plist | 24 + .../ThirdParty/libtgvoip/JitterBuffer.cpp | 488 ++++ Telegram/ThirdParty/libtgvoip/JitterBuffer.h | 95 + .../ThirdParty/libtgvoip/MediaStreamItf.cpp | 18 + .../ThirdParty/libtgvoip/MediaStreamItf.h | 29 + .../ThirdParty/libtgvoip/NetworkSocket.cpp | 608 ++++ Telegram/ThirdParty/libtgvoip/NetworkSocket.h | 173 ++ Telegram/ThirdParty/libtgvoip/OpusDecoder.cpp | 257 ++ Telegram/ThirdParty/libtgvoip/OpusDecoder.h | 56 + Telegram/ThirdParty/libtgvoip/OpusEncoder.cpp | 160 ++ Telegram/ThirdParty/libtgvoip/OpusEncoder.h | 59 + Telegram/ThirdParty/libtgvoip/UNLICENSE | 24 + .../ThirdParty/libtgvoip/VoIPController.cpp | 2486 +++++++++++++++++ 
.../ThirdParty/libtgvoip/VoIPController.h | 509 ++++ .../ThirdParty/libtgvoip/VoIPServerConfig.cpp | 101 + .../ThirdParty/libtgvoip/VoIPServerConfig.h | 37 + .../ThirdParty/libtgvoip/audio/AudioInput.cpp | 106 + .../ThirdParty/libtgvoip/audio/AudioInput.h | 42 + .../libtgvoip/audio/AudioOutput.cpp | 121 + .../ThirdParty/libtgvoip/audio/AudioOutput.h | 47 + .../ThirdParty/libtgvoip/audio/Resampler.cpp | 117 + .../ThirdParty/libtgvoip/audio/Resampler.h | 22 + .../libtgvoip/client/android/tg_voip_jni.cpp | 308 ++ .../libtgvoip/libtgvoip.UWP.vcxproj | 509 ++++ .../libtgvoip/libtgvoip.UWP.vcxproj.filters | 492 ++++ .../libtgvoip/libtgvoip.WP81.vcxproj | 420 +++ .../libtgvoip/libtgvoip.WP81.vcxproj.filters | 492 ++++ Telegram/ThirdParty/libtgvoip/libtgvoip.gyp | 398 +++ .../libtgvoip.xcodeproj/project.pbxproj | 1553 ++++++++++ .../contents.xcworkspacedata | 7 + .../UserInterfaceState.xcuserstate | Bin 0 -> 9235 bytes .../xcschemes/libtgvoip.xcscheme | 80 + .../xcschemes/xcschememanagement.plist | 22 + .../xcschemes/xcschememanagement.plist | 14 + .../libtgvoip_osx.xcodeproj/project.pbxproj | 1567 +++++++++++ .../contents.xcworkspacedata | 7 + .../UserInterfaceState.xcuserstate | Bin 0 -> 9235 bytes .../xcschemes/libtgvoip.xcscheme | 80 + .../xcschemes/xcschememanagement.plist | 22 + .../xcschemes/xcschememanagement.plist | 14 + Telegram/ThirdParty/libtgvoip/logging.cpp | 99 + Telegram/ThirdParty/libtgvoip/logging.h | 97 + .../os/android/AudioInputAndroid.cpp | 123 + .../libtgvoip/os/android/AudioInputAndroid.h | 38 + .../os/android/AudioInputOpenSLES.cpp | 137 + .../libtgvoip/os/android/AudioInputOpenSLES.h | 40 + .../os/android/AudioOutputAndroid.cpp | 125 + .../libtgvoip/os/android/AudioOutputAndroid.h | 39 + .../os/android/AudioOutputOpenSLES.cpp | 171 ++ .../os/android/AudioOutputOpenSLES.h | 47 + .../os/android/OpenSLEngineWrapper.cpp | 48 + .../os/android/OpenSLEngineWrapper.h | 26 + .../os/darwin/AudioInputAudioUnit.cpp | 82 + 
.../libtgvoip/os/darwin/AudioInputAudioUnit.h | 37 + .../os/darwin/AudioInputAudioUnitOSX.cpp | 309 ++ .../os/darwin/AudioInputAudioUnitOSX.h | 40 + .../os/darwin/AudioOutputAudioUnit.cpp | 125 + .../os/darwin/AudioOutputAudioUnit.h | 43 + .../os/darwin/AudioOutputAudioUnitOSX.cpp | 368 +++ .../os/darwin/AudioOutputAudioUnitOSX.h | 43 + .../libtgvoip/os/darwin/AudioUnitIO.cpp | 317 +++ .../libtgvoip/os/darwin/AudioUnitIO.h | 59 + .../libtgvoip/os/darwin/DarwinSpecific.h | 19 + .../libtgvoip/os/darwin/DarwinSpecific.mm | 17 + .../libtgvoip/os/darwin/TGLogWrapper.h | 20 + .../libtgvoip/os/darwin/TGLogWrapper.m | 10 + .../libtgvoip/os/linux/AudioInputALSA.cpp | 179 ++ .../libtgvoip/os/linux/AudioInputALSA.h | 48 + .../libtgvoip/os/linux/AudioInputPulse.cpp | 333 +++ .../libtgvoip/os/linux/AudioInputPulse.h | 58 + .../libtgvoip/os/linux/AudioOutputALSA.cpp | 182 ++ .../libtgvoip/os/linux/AudioOutputALSA.h | 48 + .../libtgvoip/os/linux/AudioOutputPulse.cpp | 340 +++ .../libtgvoip/os/linux/AudioOutputPulse.h | 54 + .../libtgvoip/os/linux/PulseAudioLoader.cpp | 120 + .../libtgvoip/os/linux/PulseAudioLoader.h | 109 + .../libtgvoip/os/posix/NetworkSocketPosix.cpp | 522 ++++ .../libtgvoip/os/posix/NetworkSocketPosix.h | 74 + .../libtgvoip/os/windows/AudioInputWASAPI.cpp | 431 +++ .../libtgvoip/os/windows/AudioInputWASAPI.h | 106 + .../libtgvoip/os/windows/AudioInputWave.cpp | 170 ++ .../libtgvoip/os/windows/AudioInputWave.h | 41 + .../os/windows/AudioOutputWASAPI.cpp | 439 +++ .../libtgvoip/os/windows/AudioOutputWASAPI.h | 104 + .../libtgvoip/os/windows/AudioOutputWave.cpp | 165 ++ .../libtgvoip/os/windows/AudioOutputWave.h | 41 + .../libtgvoip/os/windows/CXWrapper.cpp | 437 +++ .../libtgvoip/os/windows/CXWrapper.h | 167 ++ .../os/windows/NetworkSocketWinsock.cpp | 638 +++++ .../os/windows/NetworkSocketWinsock.h | 72 + .../os/windows/WindowsSandboxUtils.cpp | 68 + .../os/windows/WindowsSandboxUtils.h | 38 + Telegram/ThirdParty/libtgvoip/threading.h | 207 ++ 
.../webrtc_dsp/webrtc/base/array_view.h | 157 ++ .../webrtc_dsp/webrtc/base/atomicops.h | 87 + .../webrtc_dsp/webrtc/base/basictypes.h | 70 + .../webrtc_dsp/webrtc/base/checks.cc | 140 + .../libtgvoip/webrtc_dsp/webrtc/base/checks.h | 290 ++ .../webrtc_dsp/webrtc/base/constructormagic.h | 34 + .../webrtc_dsp/webrtc/base/safe_compare.h | 184 ++ .../webrtc_dsp/webrtc/base/safe_conversions.h | 70 + .../webrtc/base/safe_conversions_impl.h | 188 ++ .../webrtc_dsp/webrtc/base/sanitizer.h | 116 + .../webrtc_dsp/webrtc/base/stringutils.cc | 133 + .../webrtc_dsp/webrtc/base/stringutils.h | 318 +++ .../webrtc_dsp/webrtc/base/type_traits.h | 79 + .../webrtc/common_audio/audio_util.cc | 51 + .../webrtc/common_audio/channel_buffer.cc | 79 + .../webrtc/common_audio/channel_buffer.h | 186 ++ .../webrtc_dsp/webrtc/common_audio/fft4g.c | 1332 +++++++++ .../webrtc_dsp/webrtc/common_audio/fft4g.h | 25 + .../webrtc/common_audio/include/audio_util.h | 187 ++ .../webrtc/common_audio/ring_buffer.c | 233 ++ .../webrtc/common_audio/ring_buffer.h | 75 + .../auto_corr_to_refl_coef.c | 103 + .../signal_processing/auto_correlation.c | 65 + .../signal_processing/complex_bit_reverse.c | 108 + .../complex_bit_reverse_arm.S | 119 + .../signal_processing/complex_fft.c | 298 ++ .../signal_processing/complex_fft_tables.h | 148 + .../signal_processing/copy_set_operations.c | 82 + .../signal_processing/cross_correlation.c | 30 + .../cross_correlation_neon.c | 91 + .../signal_processing/division_operations.c | 138 + .../dot_product_with_scale.c | 32 + .../signal_processing/downsample_fast.c | 60 + .../signal_processing/downsample_fast_neon.c | 221 ++ .../common_audio/signal_processing/energy.c | 39 + .../signal_processing/filter_ar.c | 89 + .../signal_processing/filter_ar_fast_q12.c | 42 + .../filter_ar_fast_q12_armv7.S | 218 ++ .../signal_processing/filter_ma_fast_q12.c | 52 + .../signal_processing/get_hanning_window.c | 77 + .../signal_processing/get_scaling_square.c | 46 + 
.../ilbc_specific_functions.c | 90 + .../signal_processing/include/real_fft.h | 97 + .../include/signal_processing_library.h | 1642 +++++++++++ .../signal_processing/include/spl_inl.h | 154 + .../signal_processing/include/spl_inl_armv7.h | 136 + .../signal_processing/include/spl_inl_mips.h | 225 ++ .../signal_processing/levinson_durbin.c | 246 ++ .../signal_processing/lpc_to_refl_coef.c | 56 + .../signal_processing/min_max_operations.c | 224 ++ .../min_max_operations_neon.c | 286 ++ .../randomization_functions.c | 115 + .../common_audio/signal_processing/real_fft.c | 102 + .../signal_processing/refl_coef_to_lpc.c | 59 + .../common_audio/signal_processing/resample.c | 505 ++++ .../signal_processing/resample_48khz.c | 186 ++ .../signal_processing/resample_by_2.c | 183 ++ .../resample_by_2_internal.c | 679 +++++ .../resample_by_2_internal.h | 47 + .../signal_processing/resample_fractional.c | 239 ++ .../common_audio/signal_processing/spl_init.c | 133 + .../common_audio/signal_processing/spl_inl.c | 24 + .../common_audio/signal_processing/spl_sqrt.c | 194 ++ .../signal_processing/spl_sqrt_floor.c | 77 + .../signal_processing/spl_sqrt_floor_arm.S | 110 + .../signal_processing/splitting_filter_impl.c | 207 ++ .../sqrt_of_one_minus_x_squared.c | 35 + .../vector_scaling_operations.c | 165 ++ .../webrtc/common_audio/sparse_fir_filter.cc | 62 + .../webrtc/common_audio/sparse_fir_filter.h | 53 + .../webrtc/common_audio/wav_file.cc | 205 ++ .../webrtc_dsp/webrtc/common_audio/wav_file.h | 118 + .../webrtc/common_audio/wav_header.cc | 243 ++ .../webrtc/common_audio/wav_header.h | 64 + .../modules/audio_processing/aec/aec_common.h | 39 + .../modules/audio_processing/aec/aec_core.cc | 2047 ++++++++++++++ .../modules/audio_processing/aec/aec_core.h | 335 +++ .../audio_processing/aec/aec_core_neon.cc | 741 +++++ .../aec/aec_core_optimized_methods.h | 80 + .../audio_processing/aec/aec_core_sse2.cc | 754 +++++ .../audio_processing/aec/aec_resampler.cc | 207 ++ 
.../audio_processing/aec/aec_resampler.h | 39 + .../audio_processing/aec/echo_cancellation.cc | 863 ++++++ .../audio_processing/aec/echo_cancellation.h | 299 ++ .../audio_processing/aecm/aecm_core.cc | 1231 ++++++++ .../modules/audio_processing/aecm/aecm_core.h | 436 +++ .../audio_processing/aecm/aecm_core_c.cc | 769 +++++ .../audio_processing/aecm/aecm_core_neon.cc | 203 ++ .../audio_processing/aecm/aecm_defines.h | 87 + .../aecm/echo_control_mobile.cc | 648 +++++ .../aecm/echo_control_mobile.h | 209 ++ .../audio_processing/agc/legacy/analog_agc.c | 1390 +++++++++ .../audio_processing/agc/legacy/analog_agc.h | 132 + .../audio_processing/agc/legacy/digital_agc.c | 688 +++++ .../audio_processing/agc/legacy/digital_agc.h | 79 + .../agc/legacy/gain_control.h | 247 ++ .../logging/apm_data_dumper.cc | 74 + .../logging/apm_data_dumper.h | 151 + .../modules/audio_processing/ns/defines.h | 49 + .../audio_processing/ns/noise_suppression.c | 71 + .../audio_processing/ns/noise_suppression.h | 135 + .../audio_processing/ns/noise_suppression_x.c | 61 + .../audio_processing/ns/noise_suppression_x.h | 113 + .../modules/audio_processing/ns/ns_core.c | 1416 ++++++++++ .../modules/audio_processing/ns/ns_core.h | 190 ++ .../modules/audio_processing/ns/nsx_core.c | 2107 ++++++++++++++ .../modules/audio_processing/ns/nsx_core.h | 263 ++ .../modules/audio_processing/ns/nsx_core_c.c | 260 ++ .../audio_processing/ns/nsx_core_neon.c | 610 ++++ .../modules/audio_processing/ns/nsx_defines.h | 64 + .../audio_processing/ns/windows_private.h | 574 ++++ .../audio_processing/splitting_filter.cc | 108 + .../audio_processing/splitting_filter.h | 69 + .../three_band_filter_bank.cc | 216 ++ .../audio_processing/three_band_filter_bank.h | 69 + .../utility/block_mean_calculator.cc | 53 + .../utility/block_mean_calculator.h | 52 + .../utility/delay_estimator.cc | 703 +++++ .../utility/delay_estimator.h | 251 ++ .../utility/delay_estimator_internal.h | 48 + .../utility/delay_estimator_wrapper.cc | 486 
++++ .../utility/delay_estimator_wrapper.h | 244 ++ .../audio_processing/utility/ooura_fft.cc | 543 ++++ .../audio_processing/utility/ooura_fft.h | 60 + .../utility/ooura_fft_neon.cc | 354 +++ .../utility/ooura_fft_sse2.cc | 440 +++ .../utility/ooura_fft_tables_common.h | 54 + .../utility/ooura_fft_tables_neon_sse2.h | 94 + .../system_wrappers/include/asm_defines.h | 66 + .../include/compile_assert_c.h | 21 + .../include/cpu_features_wrapper.h | 51 + .../webrtc/system_wrappers/include/metrics.h | 245 ++ .../system_wrappers/source/cpu_features.cc | 72 + .../libtgvoip/webrtc_dsp/webrtc/typedefs.h | 115 + 244 files changed, 55588 insertions(+), 4 deletions(-) delete mode 160000 Telegram/ThirdParty/libtgvoip create mode 100644 Telegram/ThirdParty/libtgvoip/.gitignore create mode 100644 Telegram/ThirdParty/libtgvoip/.gitrepo create mode 100644 Telegram/ThirdParty/libtgvoip/Android.mk create mode 100644 Telegram/ThirdParty/libtgvoip/BlockingQueue.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/BlockingQueue.h create mode 100644 Telegram/ThirdParty/libtgvoip/BufferInputStream.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/BufferInputStream.h create mode 100644 Telegram/ThirdParty/libtgvoip/BufferOutputStream.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/BufferOutputStream.h create mode 100644 Telegram/ThirdParty/libtgvoip/BufferPool.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/BufferPool.h create mode 100644 Telegram/ThirdParty/libtgvoip/CongestionControl.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/CongestionControl.h create mode 100644 Telegram/ThirdParty/libtgvoip/EchoCanceller.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/EchoCanceller.h create mode 100644 Telegram/ThirdParty/libtgvoip/Info.plist create mode 100644 Telegram/ThirdParty/libtgvoip/JitterBuffer.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/JitterBuffer.h create mode 100644 Telegram/ThirdParty/libtgvoip/MediaStreamItf.cpp create mode 100644 
Telegram/ThirdParty/libtgvoip/MediaStreamItf.h create mode 100644 Telegram/ThirdParty/libtgvoip/NetworkSocket.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/NetworkSocket.h create mode 100644 Telegram/ThirdParty/libtgvoip/OpusDecoder.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/OpusDecoder.h create mode 100644 Telegram/ThirdParty/libtgvoip/OpusEncoder.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/OpusEncoder.h create mode 100644 Telegram/ThirdParty/libtgvoip/UNLICENSE create mode 100644 Telegram/ThirdParty/libtgvoip/VoIPController.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/VoIPController.h create mode 100644 Telegram/ThirdParty/libtgvoip/VoIPServerConfig.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/VoIPServerConfig.h create mode 100644 Telegram/ThirdParty/libtgvoip/audio/AudioInput.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/audio/AudioInput.h create mode 100644 Telegram/ThirdParty/libtgvoip/audio/AudioOutput.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/audio/AudioOutput.h create mode 100644 Telegram/ThirdParty/libtgvoip/audio/Resampler.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/audio/Resampler.h create mode 100644 Telegram/ThirdParty/libtgvoip/client/android/tg_voip_jni.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.UWP.vcxproj create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.UWP.vcxproj.filters create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.WP81.vcxproj create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.WP81.vcxproj.filters create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.gyp create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.pbxproj create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.xcworkspace/contents.xcworkspacedata create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.xcworkspace/xcuserdata/grishka.xcuserdatad/UserInterfaceState.xcuserstate create mode 100644 
Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/libtgvoip.xcscheme create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/xcschememanagement.plist create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.pbxproj create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.xcworkspace/contents.xcworkspacedata create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.xcworkspace/xcuserdata/grishka.xcuserdatad/UserInterfaceState.xcuserstate create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/libtgvoip.xcscheme create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/xcschememanagement.plist create mode 100644 Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist create mode 100644 Telegram/ThirdParty/libtgvoip/logging.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/logging.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/android/AudioInputAndroid.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/android/AudioInputAndroid.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/android/AudioInputOpenSLES.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/android/AudioInputOpenSLES.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/android/AudioOutputAndroid.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/android/AudioOutputAndroid.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/android/AudioOutputOpenSLES.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/android/AudioOutputOpenSLES.h create mode 100644 
Telegram/ThirdParty/libtgvoip/os/android/OpenSLEngineWrapper.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/android/OpenSLEngineWrapper.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnit.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnit.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnitOSX.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnitOSX.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnit.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnit.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnitOSX.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnitOSX.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioUnitIO.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/AudioUnitIO.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/DarwinSpecific.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/DarwinSpecific.mm create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/TGLogWrapper.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/darwin/TGLogWrapper.m create mode 100644 Telegram/ThirdParty/libtgvoip/os/linux/AudioInputALSA.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/linux/AudioInputALSA.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/linux/AudioInputPulse.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/linux/AudioInputPulse.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputALSA.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputALSA.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputPulse.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputPulse.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/linux/PulseAudioLoader.cpp create mode 100644 
Telegram/ThirdParty/libtgvoip/os/linux/PulseAudioLoader.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/posix/NetworkSocketPosix.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/posix/NetworkSocketPosix.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWASAPI.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWASAPI.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWave.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWave.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWASAPI.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWASAPI.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWave.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWave.h create mode 100755 Telegram/ThirdParty/libtgvoip/os/windows/CXWrapper.cpp create mode 100755 Telegram/ThirdParty/libtgvoip/os/windows/CXWrapper.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/NetworkSocketWinsock.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/NetworkSocketWinsock.h create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/WindowsSandboxUtils.cpp create mode 100644 Telegram/ThirdParty/libtgvoip/os/windows/WindowsSandboxUtils.h create mode 100644 Telegram/ThirdParty/libtgvoip/threading.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/array_view.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/atomicops.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/basictypes.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/checks.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/checks.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/constructormagic.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_compare.h create 
mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_conversions.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_conversions_impl.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/sanitizer.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/stringutils.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/stringutils.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/type_traits.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/audio_util.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/channel_buffer.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/channel_buffer.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/fft4g.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/fft4g.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/include/audio_util.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/ring_buffer.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/ring_buffer.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/auto_corr_to_refl_coef.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/auto_correlation.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse_arm.S create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft_tables.h create 
mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/copy_set_operations.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation_neon.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/division_operations.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/dot_product_with_scale.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast_neon.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/energy.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12_armv7.S create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ma_fast_q12.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/get_hanning_window.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/get_scaling_square.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/ilbc_specific_functions.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/real_fft.h create mode 100644 
Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/signal_processing_library.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_armv7.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_mips.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/levinson_durbin.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/lpc_to_refl_coef.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations_neon.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/randomization_functions.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/real_fft.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/refl_coef_to_lpc.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_48khz.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.h create mode 100644 
Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_fractional.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_init.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_inl.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor_arm.S create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/splitting_filter_impl.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/vector_scaling_operations.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_file.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_file.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_header.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_header.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_common.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.h create mode 100644 
Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_neon.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_optimized_methods.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_sse2.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_c.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_neon.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_defines.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.c create mode 100644 
Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/gain_control.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/defines.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core_c.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core_neon.c create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_defines.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/windows_private.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.cc 
create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_internal.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_neon.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_sse2.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_common.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h create mode 100644 
Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/asm_defines.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/compile_assert_c.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/cpu_features_wrapper.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/metrics.h create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/source/cpu_features.cc create mode 100644 Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/typedefs.h diff --git a/.gitmodules b/.gitmodules index 288741058..b33017585 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,3 @@ -[submodule "Telegram/ThirdParty/libtgvoip"] - path = Telegram/ThirdParty/libtgvoip - url = https://github.com/telegramdesktop/libtgvoip [submodule "Telegram/ThirdParty/variant"] path = Telegram/ThirdParty/variant url = https://github.com/mapbox/variant diff --git a/Telegram/ThirdParty/libtgvoip b/Telegram/ThirdParty/libtgvoip deleted file mode 160000 index 9f78cb85e..000000000 --- a/Telegram/ThirdParty/libtgvoip +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 9f78cb85e5db9b39a6c4c3f7847ea3e98c9f544a diff --git a/Telegram/ThirdParty/libtgvoip/.gitignore b/Telegram/ThirdParty/libtgvoip/.gitignore new file mode 100644 index 000000000..729a0d62a --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/.gitignore @@ -0,0 +1,21 @@ +bin +.idea +build +*/Debug/* +*/Release/* + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +[Pp]review/ +[Pp]roduction/ +x64/ +x86/ +bld/ +[Bb]in/ +[Oo]bj/ + +# Visual Studio 2015 cache/options directory +.vs/ diff --git a/Telegram/ThirdParty/libtgvoip/.gitrepo b/Telegram/ThirdParty/libtgvoip/.gitrepo new file mode 100644 index 000000000..f8f0b7223 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/.gitrepo @@ -0,0 +1,11 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", 
and this file is maintained by the +; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = https://github.com/telegramdesktop/libtgvoip + branch = tdesktop + commit = 651594b3cef23051ca7370ddd14d7c23377d6341 + parent = 5e31fcb2406d6d8ce1d6d61199bacbc06014c2d3 + cmdver = 0.3.1 diff --git a/Telegram/ThirdParty/libtgvoip/Android.mk b/Telegram/ThirdParty/libtgvoip/Android.mk new file mode 100644 index 000000000..23ef1c6ec --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/Android.mk @@ -0,0 +1,58 @@ +LOCAL_MODULE := WebRtcAec + +LOCAL_SRC_FILES := ./libtgvoip/external/libWebRtcAec_android_$(TARGET_ARCH_ABI).a + +include $(PREBUILT_STATIC_LIBRARY) + +include $(CLEAR_VARS) + +LOCAL_MODULE := voip +LOCAL_CPPFLAGS := -Wall -std=c++11 -DANDROID -finline-functions -ffast-math -Os -fno-strict-aliasing -O3 +LOCAL_CFLAGS := -O3 -DUSE_KISS_FFT -fexceptions + +ifeq ($(TARGET_ARCH_ABI),armeabi-v7a) +# LOCAL_CPPFLAGS += -mfloat-abi=softfp -mfpu=neon +# LOCAL_CFLAGS += -mfloat-abi=softfp -mfpu=neon -DFLOATING_POINT +# LOCAL_ARM_NEON := true +else + LOCAL_CFLAGS += -DFIXED_POINT + ifeq ($(TARGET_ARCH_ABI),armeabi) +# LOCAL_CPPFLAGS += -mfloat-abi=softfp -mfpu=neon +# LOCAL_CFLAGS += -mfloat-abi=softfp -mfpu=neon + else + ifeq ($(TARGET_ARCH_ABI),x86) + + endif + endif +endif + +MY_DIR := libtgvoip + +LOCAL_C_INCLUDES := jni/opus/include jni/boringssl/include/ + +LOCAL_SRC_FILES := \ +./libtgvoip/logging.cpp \ +./libtgvoip/VoIPController.cpp \ +./libtgvoip/BufferInputStream.cpp \ +./libtgvoip/BufferOutputStream.cpp \ +./libtgvoip/BlockingQueue.cpp \ +./libtgvoip/audio/AudioInput.cpp \ +./libtgvoip/os/android/AudioInputOpenSLES.cpp \ +./libtgvoip/MediaStreamItf.cpp \ +./libtgvoip/audio/AudioOutput.cpp \ +./libtgvoip/OpusEncoder.cpp \ +./libtgvoip/os/android/AudioOutputOpenSLES.cpp \ +./libtgvoip/JitterBuffer.cpp \ +./libtgvoip/OpusDecoder.cpp \ +./libtgvoip/BufferPool.cpp \ +./libtgvoip/os/android/OpenSLEngineWrapper.cpp \ 
+./libtgvoip/os/android/AudioInputAndroid.cpp \ +./libtgvoip/os/android/AudioOutputAndroid.cpp \ +./libtgvoip/EchoCanceller.cpp \ +./libtgvoip/CongestionControl.cpp \ +./libtgvoip/VoIPServerConfig.cpp \ +./libtgvoip/NetworkSocket.cpp + +include $(BUILD_STATIC_LIBRARY) + +include $(CLEAR_VARS) diff --git a/Telegram/ThirdParty/libtgvoip/BlockingQueue.cpp b/Telegram/ThirdParty/libtgvoip/BlockingQueue.cpp new file mode 100644 index 000000000..4a02d4762 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/BlockingQueue.cpp @@ -0,0 +1,10 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#include "BlockingQueue.h" + +using namespace tgvoip; + diff --git a/Telegram/ThirdParty/libtgvoip/BlockingQueue.h b/Telegram/ThirdParty/libtgvoip/BlockingQueue.h new file mode 100644 index 000000000..212b86113 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/BlockingQueue.h @@ -0,0 +1,94 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_BLOCKINGQUEUE_H +#define LIBTGVOIP_BLOCKINGQUEUE_H + +#include +#include +#include "threading.h" + +using namespace std; + +namespace tgvoip{ + +template +class BlockingQueue{ +public: + BlockingQueue(size_t capacity) : semaphore(capacity, 0){ + this->capacity=capacity; + overflowCallback=NULL; + init_mutex(mutex); + }; + + ~BlockingQueue(){ + semaphore.Release(); + free_mutex(mutex); + } + + void Put(T thing){ + MutexGuard sync(mutex); + queue.push_back(thing); + bool didOverflow=false; + while(queue.size()>capacity){ + didOverflow=true; + if(overflowCallback){ + overflowCallback(queue.front()); + queue.pop_front(); + }else{ + abort(); + } + } + if(!didOverflow) + semaphore.Release(); + } + + T GetBlocking(){ + semaphore.Acquire(); + MutexGuard sync(mutex); + T r=GetInternal(); + return r; + } + + T Get(){ + MutexGuard sync(mutex); + if(queue.size()>0) + semaphore.Acquire(); + T r=GetInternal(); + return r; + } + + unsigned int Size(){ + return queue.size(); + } + + void PrepareDealloc(){ + + } + + void SetOverflowCallback(void (*overflowCallback)(T)){ + this->overflowCallback=overflowCallback; + } + +private: + T GetInternal(){ + //if(queue.size()==0) + // return NULL; + T r=queue.front(); + queue.pop_front(); + return r; + } + + list queue; + size_t capacity; + //tgvoip_lock_t lock; + Semaphore semaphore; + tgvoip_mutex_t mutex; + void (*overflowCallback)(T); +}; +} + +#endif //LIBTGVOIP_BLOCKINGQUEUE_H diff --git a/Telegram/ThirdParty/libtgvoip/BufferInputStream.cpp b/Telegram/ThirdParty/libtgvoip/BufferInputStream.cpp new file mode 100644 index 000000000..f4858bf45 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/BufferInputStream.cpp @@ -0,0 +1,106 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "BufferInputStream.h" +#include +#include +#include +#include + +using namespace tgvoip; + +BufferInputStream::BufferInputStream(unsigned char* data, size_t length){ + this->buffer=data; + this->length=length; + offset=0; +} + +BufferInputStream::~BufferInputStream(){ + +} + + +void BufferInputStream::Seek(size_t offset){ + if(offset>length){ + throw std::out_of_range("Not enough bytes in buffer"); + } + this->offset=offset; +} + +size_t BufferInputStream::GetLength(){ + return length; +} + +size_t BufferInputStream::GetOffset(){ + return offset; +} + +size_t BufferInputStream::Remaining(){ + return length-offset; +} + +unsigned char BufferInputStream::ReadByte(){ + EnsureEnoughRemaining(1); + return (unsigned char)buffer[offset++]; +} + +int32_t BufferInputStream::ReadInt32(){ + EnsureEnoughRemaining(4); + int32_t res=((int32_t)buffer[offset] & 0xFF) | + (((int32_t)buffer[offset+1] & 0xFF) << 8) | + (((int32_t)buffer[offset+2] & 0xFF) << 16) | + (((int32_t)buffer[offset+3] & 0xFF) << 24); + offset+=4; + return res; +} + +int64_t BufferInputStream::ReadInt64(){ + EnsureEnoughRemaining(8); + int64_t res=((int64_t)buffer[offset] & 0xFF) | + (((int64_t)buffer[offset+1] & 0xFF) << 8) | + (((int64_t)buffer[offset+2] & 0xFF) << 16) | + (((int64_t)buffer[offset+3] & 0xFF) << 24) | + (((int64_t)buffer[offset+4] & 0xFF) << 32) | + (((int64_t)buffer[offset+5] & 0xFF) << 40) | + (((int64_t)buffer[offset+6] & 0xFF) << 48) | + (((int64_t)buffer[offset+7] & 0xFF) << 56); + offset+=8; + return res; +} + +int16_t BufferInputStream::ReadInt16(){ + EnsureEnoughRemaining(2); + int16_t res=(uint16_t)buffer[offset] | ((uint16_t)buffer[offset+1] << 8); + offset+=2; + return res; +} + + +int32_t BufferInputStream::ReadTlLength(){ + unsigned char l=ReadByte(); + if(l<254) + return l; + assert(length-offset>=3); + EnsureEnoughRemaining(3); + int32_t res=((int32_t)buffer[offset] & 0xFF) | + (((int32_t)buffer[offset+1] & 0xFF) << 8) | + (((int32_t)buffer[offset+2] & 0xFF) << 
16); + offset+=3; + return res; +} + +void BufferInputStream::ReadBytes(unsigned char *to, size_t count){ + EnsureEnoughRemaining(count); + memcpy(to, buffer+offset, count); + offset+=count; +} + + +void BufferInputStream::EnsureEnoughRemaining(size_t need){ + if(length-offset +#include + +namespace tgvoip{ +class BufferInputStream{ + +public: + BufferInputStream(unsigned char* data, size_t length); + ~BufferInputStream(); + void Seek(size_t offset); + size_t GetLength(); + size_t GetOffset(); + size_t Remaining(); + unsigned char ReadByte(); + int64_t ReadInt64(); + int32_t ReadInt32(); + int16_t ReadInt16(); + int32_t ReadTlLength(); + void ReadBytes(unsigned char* to, size_t count); + +private: + void EnsureEnoughRemaining(size_t need); + unsigned char* buffer; + size_t length; + size_t offset; +}; +} + +#endif //LIBTGVOIP_BUFFERINPUTSTREAM_H diff --git a/Telegram/ThirdParty/libtgvoip/BufferOutputStream.cpp b/Telegram/ThirdParty/libtgvoip/BufferOutputStream.cpp new file mode 100644 index 000000000..e3d38729a --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/BufferOutputStream.cpp @@ -0,0 +1,99 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "BufferOutputStream.h" +#include +#include + +using namespace tgvoip; + +BufferOutputStream::BufferOutputStream(size_t size){ + buffer=(unsigned char*) malloc(size); + offset=0; + this->size=size; + bufferProvided=false; +} + +BufferOutputStream::BufferOutputStream(unsigned char *buffer, size_t size){ + this->buffer=buffer; + this->size=size; + offset=0; + bufferProvided=true; +} + +BufferOutputStream::~BufferOutputStream(){ + if(!bufferProvided) + free(buffer); +} + +void BufferOutputStream::WriteByte(unsigned char byte){ + this->ExpandBufferIfNeeded(1); + buffer[offset++]=byte; +} + +void BufferOutputStream::WriteInt32(int32_t i){ + this->ExpandBufferIfNeeded(4); + buffer[offset+3]=(unsigned char)((i >> 24) & 0xFF); + buffer[offset+2]=(unsigned char)((i >> 16) & 0xFF); + buffer[offset+1]=(unsigned char)((i >> 8) & 0xFF); + buffer[offset]=(unsigned char)(i & 0xFF); + offset+=4; +} + +void BufferOutputStream::WriteInt64(int64_t i){ + this->ExpandBufferIfNeeded(8); + buffer[offset+7]=(unsigned char)((i >> 56) & 0xFF); + buffer[offset+6]=(unsigned char)((i >> 48) & 0xFF); + buffer[offset+5]=(unsigned char)((i >> 40) & 0xFF); + buffer[offset+4]=(unsigned char)((i >> 32) & 0xFF); + buffer[offset+3]=(unsigned char)((i >> 24) & 0xFF); + buffer[offset+2]=(unsigned char)((i >> 16) & 0xFF); + buffer[offset+1]=(unsigned char)((i >> 8) & 0xFF); + buffer[offset]=(unsigned char)(i & 0xFF); + offset+=8; +} + +void BufferOutputStream::WriteInt16(int16_t i){ + this->ExpandBufferIfNeeded(2); + buffer[offset+1]=(unsigned char)((i >> 8) & 0xFF); + buffer[offset]=(unsigned char)(i & 0xFF); + offset+=2; +} + +void BufferOutputStream::WriteBytes(unsigned char *bytes, size_t count){ + this->ExpandBufferIfNeeded(count); + memcpy(buffer+offset, bytes, count); + offset+=count; +} + +unsigned char *BufferOutputStream::GetBuffer(){ + return buffer; +} + +size_t BufferOutputStream::GetLength(){ + return offset; +} + +void BufferOutputStream::ExpandBufferIfNeeded(size_t need){ + 
if(offset+need>size){ + if(bufferProvided){ + throw std::out_of_range("buffer overflow"); + } + if(need<1024){ + buffer=(unsigned char *) realloc(buffer, size+1024); + size+=1024; + }else{ + buffer=(unsigned char *) realloc(buffer, size+need); + size+=need; + } + } +} + + +void BufferOutputStream::Reset(){ + offset=0; +} + diff --git a/Telegram/ThirdParty/libtgvoip/BufferOutputStream.h b/Telegram/ThirdParty/libtgvoip/BufferOutputStream.h new file mode 100644 index 000000000..a9eaccf67 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/BufferOutputStream.h @@ -0,0 +1,38 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef LIBTGVOIP_BUFFEROUTPUTSTREAM_H +#define LIBTGVOIP_BUFFEROUTPUTSTREAM_H + +#include +#include + +namespace tgvoip{ +class BufferOutputStream{ + +public: + BufferOutputStream(size_t size); + BufferOutputStream(unsigned char* buffer, size_t size); + ~BufferOutputStream(); + void WriteByte(unsigned char byte); + void WriteInt64(int64_t i); + void WriteInt32(int32_t i); + void WriteInt16(int16_t i); + void WriteBytes(unsigned char* bytes, size_t count); + unsigned char* GetBuffer(); + size_t GetLength(); + void Reset(); + +private: + void ExpandBufferIfNeeded(size_t need); + unsigned char* buffer; + size_t size; + size_t offset; + bool bufferProvided; +}; +} + +#endif //LIBTGVOIP_BUFFEROUTPUTSTREAM_H diff --git a/Telegram/ThirdParty/libtgvoip/BufferPool.cpp b/Telegram/ThirdParty/libtgvoip/BufferPool.cpp new file mode 100644 index 000000000..9dc6b488e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/BufferPool.cpp @@ -0,0 +1,66 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "BufferPool.h" +#include "logging.h" +#include +#include + +using namespace tgvoip; + +BufferPool::BufferPool(unsigned int size, unsigned int count){ + assert(count<=64); + init_mutex(mutex); + buffers[0]=(unsigned char*) malloc(size*count); + bufferCount=count; + int i; + for(i=1;isize=size; +} + +BufferPool::~BufferPool(){ + free_mutex(mutex); + free(buffers[0]); +} + +unsigned char* BufferPool::Get(){ + lock_mutex(mutex); + int i; + for(i=0;i> i) & 1)){ + usedBuffers|=(1LL << i); + unlock_mutex(mutex); + return buffers[i]; + } + } + unlock_mutex(mutex); + return NULL; +} + +void BufferPool::Reuse(unsigned char* buffer){ + lock_mutex(mutex); + int i; + for(i=0;i +#include "threading.h" + +namespace tgvoip{ +class BufferPool{ +public: + BufferPool(unsigned int size, unsigned int count); + ~BufferPool(); + unsigned char* Get(); + void Reuse(unsigned char* buffer); + size_t GetSingleBufferSize(); + size_t GetBufferCount(); + +private: + uint64_t usedBuffers; + int bufferCount; + size_t size; + unsigned char* buffers[64]; + tgvoip_mutex_t mutex; +}; +} + +#endif //LIBTGVOIP_BUFFERPOOL_H diff --git a/Telegram/ThirdParty/libtgvoip/CongestionControl.cpp b/Telegram/ThirdParty/libtgvoip/CongestionControl.cpp new file mode 100644 index 000000000..0bed069d3 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/CongestionControl.cpp @@ -0,0 +1,173 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "CongestionControl.h" +#include "VoIPController.h" +#include "logging.h" +#include "VoIPServerConfig.h" +#include +#include + +using namespace tgvoip; + +CongestionControl::CongestionControl(){ + memset(rttHistory, 0, sizeof(rttHistory)); + memset(inflightPackets, 0, sizeof(inflightPackets)); + memset(inflightHistory, 0, sizeof(inflightHistory)); + tmpRtt=0; + tmpRttCount=0; + rttHistorySize=0; + rttHistoryTop=0; + lastSentSeq=0; + inflightHistoryTop=0; + state=TGVOIP_CONCTL_STARTUP; + lastActionTime=0; + lastActionRtt=0; + stateTransitionTime=0; + inflightDataSize=0; + lossCount=0; + cwnd=(size_t) ServerConfig::GetSharedInstance()->GetInt("audio_congestion_window", 1024); + init_mutex(mutex); +} + +CongestionControl::~CongestionControl(){ + free_mutex(mutex); +} + +size_t CongestionControl::GetAcknowledgedDataSize(){ + return 0; +} + +double CongestionControl::GetAverageRTT(){ + if(rttHistorySize==0) + return 0; + double avg=0; + int i; + for(i=0;i<30 && i=0 ? x : (100+x)]; + //LOGV("adding [%d] %f", x>=0 ? x : (100+x), rttHistory[x>=0 ? 
x : (100+x)]); + } + return avg/i; +} + +size_t CongestionControl::GetInflightDataSize(){ + size_t avg=0; + int i; + for(i=0;i<30;i++){ + avg+=inflightHistory[i]; + } + return avg/30; +} + + +size_t CongestionControl::GetCongestionWindow(){ + return cwnd; +} + +double CongestionControl::GetMinimumRTT(){ + int i; + double min=INFINITY; + for(i=0;i<100;i++){ + if(rttHistory[i]>0 && rttHistory[i]0){ + tmpRtt+=(VoIPController::GetCurrentTime()-inflightPackets[i].sendTime); + tmpRttCount++; + inflightPackets[i].sendTime=0; + inflightDataSize-=inflightPackets[i].size; + break; + } + } +} + +void CongestionControl::PacketSent(uint32_t seq, size_t size){ + if(!seqgt(seq, lastSentSeq) || seq==lastSentSeq){ + LOGW("Duplicate outgoing seq %u", seq); + return; + } + lastSentSeq=seq; + MutexGuard sync(mutex); + double smallestSendTime=INFINITY; + tgvoip_congestionctl_packet_t* slot=NULL; + int i; + for(i=0;i<100;i++){ + if(inflightPackets[i].sendTime==0){ + slot=&inflightPackets[i]; + break; + } + if(smallestSendTime>inflightPackets[i].sendTime){ + slot=&inflightPackets[i]; + smallestSendTime=slot->sendTime; + } + } + assert(slot!=NULL); + if(slot->sendTime>0){ + inflightDataSize-=slot->size; + lossCount++; + LOGD("Packet with seq %u was not acknowledged", slot->seq); + } + slot->seq=seq; + slot->size=size; + slot->sendTime=VoIPController::GetCurrentTime(); + inflightDataSize+=size; +} + + +void CongestionControl::Tick(){ + tickCount++; + MutexGuard sync(mutex); + if(tmpRttCount>0){ + rttHistory[rttHistoryTop]=tmpRtt/tmpRttCount; + rttHistoryTop=(rttHistoryTop+1)%100; + if(rttHistorySize<100) + rttHistorySize++; + tmpRtt=0; + tmpRttCount=0; + } + int i; + for(i=0;i<100;i++){ + if(inflightPackets[i].sendTime!=0 && VoIPController::GetCurrentTime()-inflightPackets[i].sendTime>2){ + inflightPackets[i].sendTime=0; + inflightDataSize-=inflightPackets[i].size; + lossCount++; + LOGD("Packet with seq %u was not acknowledged", inflightPackets[i].seq); + } + } + 
inflightHistory[inflightHistoryTop]=inflightDataSize; + inflightHistoryTop=(inflightHistoryTop+1)%30; +} + + +int CongestionControl::GetBandwidthControlAction(){ + if(VoIPController::GetCurrentTime()-lastActionTime<1) + return TGVOIP_CONCTL_ACT_NONE; + size_t inflightAvg=GetInflightDataSize(); + size_t max=cwnd+cwnd/10; + size_t min=cwnd-cwnd/10; + if(inflightAvgmax){ + lastActionTime=VoIPController::GetCurrentTime(); + return TGVOIP_CONCTL_ACT_DECREASE; + } + return TGVOIP_CONCTL_ACT_NONE; +} + + +uint32_t CongestionControl::GetSendLossCount(){ + return lossCount; +} diff --git a/Telegram/ThirdParty/libtgvoip/CongestionControl.h b/Telegram/ThirdParty/libtgvoip/CongestionControl.h new file mode 100644 index 000000000..2890f0a65 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/CongestionControl.h @@ -0,0 +1,71 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_CONGESTIONCONTROL_H +#define LIBTGVOIP_CONGESTIONCONTROL_H + +#include +#include +#include "threading.h" + +#define TGVOIP_CONCTL_STARTUP 0 +#define TGVOIP_CONCTL_DRAIN 1 +#define TGVOIP_CONCTL_PROBE_BW 2 +#define TGVOIP_CONCTL_PROBE_RTT 3 + +#define TGVOIP_CONCTL_ACT_INCREASE 1 +#define TGVOIP_CONCTL_ACT_DECREASE 2 +#define TGVOIP_CONCTL_ACT_NONE 0 + +namespace tgvoip{ + +struct tgvoip_congestionctl_packet_t{ + uint32_t seq; + double sendTime; + size_t size; +}; +typedef struct tgvoip_congestionctl_packet_t tgvoip_congestionctl_packet_t; + +class CongestionControl{ +public: + CongestionControl(); + ~CongestionControl(); + + void PacketSent(uint32_t seq, size_t size); + void PacketAcknowledged(uint32_t seq); + + double GetAverageRTT(); + double GetMinimumRTT(); + size_t GetInflightDataSize(); + size_t GetCongestionWindow(); + size_t GetAcknowledgedDataSize(); + void Tick(); + int GetBandwidthControlAction(); + uint32_t GetSendLossCount(); + +private: + double rttHistory[100]; + tgvoip_congestionctl_packet_t inflightPackets[100]; + size_t inflightHistory[30]; + int state; + uint32_t lossCount; + double tmpRtt; + double lastActionTime; + double lastActionRtt; + double stateTransitionTime; + int tmpRttCount; + char rttHistorySize; + unsigned int rttHistoryTop; + unsigned int inflightHistoryTop; + uint32_t lastSentSeq; + uint32_t tickCount; + size_t inflightDataSize; + size_t cwnd; + tgvoip_mutex_t mutex; +}; +} + +#endif //LIBTGVOIP_CONGESTIONCONTROL_H diff --git a/Telegram/ThirdParty/libtgvoip/EchoCanceller.cpp b/Telegram/ThirdParty/libtgvoip/EchoCanceller.cpp new file mode 100644 index 000000000..2106532d2 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/EchoCanceller.cpp @@ -0,0 +1,356 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "EchoCanceller.h" +#include "audio/AudioOutput.h" +#include "audio/AudioInput.h" +#include "logging.h" +#include +#include + +#ifndef TGVOIP_NO_DSP +#ifndef TGVOIP_USE_DESKTOP_DSP +#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h" +#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h" +#else +#include "webrtc/modules/audio_processing/aec/echo_cancellation.h" +//#include "webrtc/modules/audio_processing/ns/noise_suppression.h" +#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h" +#endif +#include "webrtc/modules/audio_processing/splitting_filter.h" +#include "webrtc/common_audio/channel_buffer.h" +#include "webrtc/modules/audio_processing/agc/legacy/gain_control.h" +#endif + +#define AEC_FRAME_SIZE 160 +#define OFFSET_STEP AEC_FRAME_SIZE*2 + +//#define CLAMP(x, min, max) (xmin ? x : min) : max) +#define CLAMP(x, min, max) x + +using namespace tgvoip; + +#ifdef TGVOIP_USE_DESKTOP_DSP +namespace webrtc{ + void WebRtcAec_enable_delay_agnostic(AecCore* self, int enable); +} +#endif + +EchoCanceller::EchoCanceller(bool enableAEC, bool enableNS, bool enableAGC){ + this->enableAEC=enableAEC; + this->enableAGC=enableAGC; + this->enableNS=enableNS; + +#ifndef TGVOIP_NO_DSP + + splittingFilter=new webrtc::SplittingFilter(1, 3, 960); + splittingFilterFarend=new webrtc::SplittingFilter(1, 3, 960); + + splittingFilterIn=new webrtc::IFChannelBuffer(960, 1, 1); + splittingFilterFarendIn=new webrtc::IFChannelBuffer(960, 1, 1); + splittingFilterOut=new webrtc::IFChannelBuffer(960, 1, 3); + splittingFilterFarendOut=new webrtc::IFChannelBuffer(960, 1, 3); + + if(enableAEC){ + init_mutex(aecMutex); +#ifndef TGVOIP_USE_DESKTOP_DSP + aec=WebRtcAecm_Create(); + WebRtcAecm_Init(aec, 16000); + AecmConfig cfg; + cfg.cngMode=AecmFalse; + cfg.echoMode=1; + WebRtcAecm_set_config(aec, cfg); +#else + aec=webrtc::WebRtcAec_Create(); + webrtc::WebRtcAec_Init(aec, 48000, 48000); + 
//webrtc::WebRtcAec_enable_delay_agnostic(webrtc::WebRtcAec_aec_core(aec), 1); + webrtc::AecConfig config; + config.metricsMode=webrtc::kAecFalse; + config.nlpMode=webrtc::kAecNlpAggressive; + config.skewMode=webrtc::kAecFalse; + config.delay_logging=webrtc::kAecFalse; + webrtc::WebRtcAec_set_config(aec, config); +#endif + + farendQueue=new BlockingQueue(11); + farendBufferPool=new BufferPool(960*2, 10); + running=true; + + start_thread(bufferFarendThread, EchoCanceller::StartBufferFarendThread, this); + } + + if(enableNS){ +//#ifndef TGVOIP_USE_DESKTOP_DSP + ns=WebRtcNsx_Create(); + WebRtcNsx_Init((NsxHandle*)ns, 48000); + WebRtcNsx_set_policy((NsxHandle*)ns, 1); +/*#else + ns=WebRtcNs_Create(); + WebRtcNs_Init((NsHandle*)ns, 48000); + WebRtcNs_set_policy((NsHandle*)ns, 1); +#endif*/ + } + + if(enableAGC){ + agc=WebRtcAgc_Create(); + WebRtcAgcConfig agcConfig; + agcConfig.compressionGaindB = 9; + agcConfig.limiterEnable = 1; + agcConfig.targetLevelDbfs = 3; + WebRtcAgc_Init(agc, 0, 255, kAgcModeAdaptiveAnalog, 48000); + WebRtcAgc_set_config(agc, agcConfig); + agcMicLevel=128; + } +#endif +} + +EchoCanceller::~EchoCanceller(){ + if(enableAEC){ + running=false; + farendQueue->Put(NULL); + join_thread(bufferFarendThread); + delete farendQueue; + delete farendBufferPool; +#ifndef TGVOIP_USE_DESKTOP_DSP + WebRtcAecm_Free(aec); +#else + webrtc::WebRtcAec_Free(aec); +#endif + } + if(enableNS){ +//#ifndef TGVOIP_USE_DESKTOP_DSP + WebRtcNsx_Free((NsxHandle*)ns); +/*#else + WebRtcNs_Free((NsHandle*)ns); +#endif*/ + } + if(enableAGC){ + WebRtcAgc_Free(agc); + } + //webrtc::WebRtcAec_Free(state); + + delete (webrtc::SplittingFilter*)splittingFilter; + delete (webrtc::SplittingFilter*)splittingFilterFarend; + + delete (webrtc::IFChannelBuffer*)splittingFilterIn; + delete (webrtc::IFChannelBuffer*)splittingFilterOut; + delete (webrtc::IFChannelBuffer*)splittingFilterFarendIn; + delete (webrtc::IFChannelBuffer*)splittingFilterFarendOut; + + if (this->enableAEC) { + 
free_mutex(aecMutex); + } +} + +void EchoCanceller::Start(){ + +} + +void EchoCanceller::Stop(){ + +} + + +void EchoCanceller::SpeakerOutCallback(unsigned char* data, size_t len){ + if(len!=960*2 || !enableAEC) + return; + /*size_t offset=0; + while(offsetGet(); + if(buf){ + memcpy(buf, data, 960*2); + farendQueue->Put(buf); + } +} + +void *EchoCanceller::StartBufferFarendThread(void *arg){ + ((EchoCanceller*)arg)->RunBufferFarendThread(); + return NULL; +} + +void EchoCanceller::RunBufferFarendThread(){ + while(running){ + int16_t* samplesIn=farendQueue->GetBlocking(); + if(samplesIn){ + webrtc::IFChannelBuffer* bufIn=(webrtc::IFChannelBuffer*) splittingFilterFarendIn; + webrtc::IFChannelBuffer* bufOut=(webrtc::IFChannelBuffer*) splittingFilterFarendOut; + memcpy(bufIn->ibuf()->bands(0)[0], samplesIn, 960*2); + farendBufferPool->Reuse((unsigned char *) samplesIn); + ((webrtc::SplittingFilter*)splittingFilterFarend)->Analysis(bufIn, bufOut); + lock_mutex(aecMutex); +#ifndef TGVOIP_USE_DESKTOP_DSP + WebRtcAecm_BufferFarend(aec, bufOut->ibuf_const()->bands(0)[0], 160); + WebRtcAecm_BufferFarend(aec, bufOut->ibuf_const()->bands(0)[0]+160, 160); +#else + webrtc::WebRtcAec_BufferFarend(aec, bufOut->fbuf_const()->bands(0)[0], 160); + webrtc::WebRtcAec_BufferFarend(aec, bufOut->fbuf_const()->bands(0)[0]+160, 160); +#endif + unlock_mutex(aecMutex); + didBufferFarend=true; + } + } +} + +void EchoCanceller::Enable(bool enabled){ + //isOn=enabled; +} + +void EchoCanceller::ProcessInput(unsigned char* data, unsigned char* out, size_t len){ + int i; + if(!enableAEC && !enableAGC && !enableNS){ + memcpy(out, data, len); + return; + } + int16_t* samplesIn=(int16_t*)data; + int16_t* samplesOut=(int16_t*)out; + + webrtc::IFChannelBuffer* bufIn=(webrtc::IFChannelBuffer*) splittingFilterIn; + webrtc::IFChannelBuffer* bufOut=(webrtc::IFChannelBuffer*) splittingFilterOut; + + memcpy(bufIn->ibuf()->bands(0)[0], samplesIn, 960*2); + + 
((webrtc::SplittingFilter*)splittingFilter)->Analysis(bufIn, bufOut); + +#ifndef TGVOIP_USE_DESKTOP_DSP + if(enableAEC && enableNS){ + int16_t _nsOut[3][320]; + int16_t* nsIn[3]; + int16_t* nsOut[3]; + for(i=0;i<3;i++){ + nsIn[i]=(int16_t*)bufOut->ibuf_const()->bands(0)[i]; + nsOut[i]=_nsOut[i]; + } + WebRtcNsx_Process((NsxHandle*)ns, (const short *const *) nsIn, 3, nsOut); + for(i=0;i<3;i++){ + nsOut[i]+=160; + nsIn[i]+=160; + } + WebRtcNsx_Process((NsxHandle*)ns, (const short *const *) nsIn, 3, nsOut); + + memcpy(bufOut->ibuf()->bands(0)[1], _nsOut[1], 320*2*2); + + lock_mutex(aecMutex); + WebRtcAecm_Process(aec, bufOut->ibuf()->bands(0)[0], _nsOut[0], samplesOut, AEC_FRAME_SIZE, (int16_t) tgvoip::audio::AudioOutput::GetEstimatedDelay()); + WebRtcAecm_Process(aec, bufOut->ibuf()->bands(0)[0]+160, _nsOut[0]+160, samplesOut+160, AEC_FRAME_SIZE, (int16_t) (tgvoip::audio::AudioOutput::GetEstimatedDelay()+audio::AudioInput::GetEstimatedDelay())); + unlock_mutex(aecMutex); + memcpy(bufOut->ibuf()->bands(0)[0], samplesOut, 320*2); + }else if(enableAEC){ + lock_mutex(aecMutex); + WebRtcAecm_Process(aec, bufOut->ibuf()->bands(0)[0], NULL, samplesOut, AEC_FRAME_SIZE, (int16_t) tgvoip::audio::AudioOutput::GetEstimatedDelay()); + WebRtcAecm_Process(aec, bufOut->ibuf()->bands(0)[0]+160, NULL, samplesOut+160, AEC_FRAME_SIZE, (int16_t) (tgvoip::audio::AudioOutput::GetEstimatedDelay()+audio::AudioInput::GetEstimatedDelay())); + unlock_mutex(aecMutex); + memcpy(bufOut->ibuf()->bands(0)[0], samplesOut, 320*2); + }else if(enableNS){ + int16_t _nsOut[3][320]; + int16_t* nsIn[3]; + int16_t* nsOut[3]; + for(i=0;i<3;i++){ + nsIn[i]=(int16_t*)bufOut->ibuf_const()->bands(0)[i]; + nsOut[i]=_nsOut[i]; + } + WebRtcNsx_Process((NsxHandle*)ns, (const short *const *) nsIn, 3, nsOut); + for(i=0;i<3;i++){ + nsOut[i]+=160; + nsIn[i]+=160; + } + WebRtcNsx_Process((NsxHandle*)ns, (const short *const *) nsIn, 3, nsOut); + + memcpy(bufOut->ibuf()->bands(0)[0], _nsOut[0], 320*2); + 
memcpy(bufOut->ibuf()->bands(0)[1], _nsOut[1], 320*2); + memcpy(bufOut->ibuf()->bands(0)[2], _nsOut[2], 320*2); + } +#else + /*if(enableNS){ + float _nsOut[3][320]; + const float* nsIn[3]; + float* nsOut[3]; + for(i=0;i<3;i++){ + nsIn[i]=bufOut->fbuf_const()->bands(0)[i]; + nsOut[i]=_nsOut[i]; + } + WebRtcNs_Process((NsHandle*)ns, nsIn, 3, nsOut); + for(i=0;i<3;i++){ + nsOut[i]+=160; + nsIn[i]+=160; + } + WebRtcNs_Process((NsHandle*)ns, nsIn, 3, nsOut); + + memcpy(bufOut->fbuf()->bands(0)[0], _nsOut[0], 320*4); + memcpy(bufOut->fbuf()->bands(0)[1], _nsOut[1], 320*4); + memcpy(bufOut->fbuf()->bands(0)[2], _nsOut[2], 320*4); + }*/ + if(enableNS){ + int16_t _nsOut[3][320]; + int16_t* nsIn[3]; + int16_t* nsOut[3]; + for(i=0;i<3;i++){ + nsIn[i]=(int16_t*)bufOut->ibuf_const()->bands(0)[i]; + nsOut[i]=_nsOut[i]; + } + WebRtcNsx_Process((NsxHandle*)ns, (const short *const *)nsIn, 3, nsOut); + for(i=0;i<3;i++){ + nsOut[i]+=160; + nsIn[i]+=160; + } + WebRtcNsx_Process((NsxHandle*)ns, (const short *const *)nsIn, 3, nsOut); + + memcpy(bufOut->ibuf()->bands(0)[0], _nsOut[0], 320*2); + memcpy(bufOut->ibuf()->bands(0)[1], _nsOut[1], 320*2); + memcpy(bufOut->ibuf()->bands(0)[2], _nsOut[2], 320*2); + } + + if(enableAEC){ + const float* aecIn[3]; + float* aecOut[3]; + float _aecOut[3][320]; + for(i=0;i<3;i++){ + aecIn[i]=bufOut->fbuf_const()->bands(0)[i]; + aecOut[i]=_aecOut[i]; + } + webrtc::WebRtcAec_Process(aec, aecIn, 3, aecOut, AEC_FRAME_SIZE, audio::AudioOutput::GetEstimatedDelay()+audio::AudioInput::GetEstimatedDelay(), 0); + for(i=0;i<3;i++){ + aecOut[i]+=160; + aecIn[i]+=160; + } + webrtc::WebRtcAec_Process(aec, aecIn, 3, aecOut, AEC_FRAME_SIZE, audio::AudioOutput::GetEstimatedDelay()+audio::AudioInput::GetEstimatedDelay(), 0); + + memcpy(bufOut->fbuf()->bands(0)[0], _aecOut[0], 320*4); + memcpy(bufOut->fbuf()->bands(0)[1], _aecOut[1], 320*4); + memcpy(bufOut->fbuf()->bands(0)[2], _aecOut[2], 320*4); + } +#endif + + if(enableAGC){ + int16_t _agcOut[3][320]; + int16_t* 
agcIn[3]; + int16_t* agcOut[3]; + for(i=0;i<3;i++){ + agcIn[i]=(int16_t*)bufOut->ibuf_const()->bands(0)[i]; + agcOut[i]=_agcOut[i]; + } + uint8_t saturation; + WebRtcAgc_AddMic(agc, agcIn, 3, 160); + WebRtcAgc_Process(agc, (const int16_t *const *) agcIn, 3, 160, agcOut, agcMicLevel, &agcMicLevel, 0, &saturation); + for(i=0;i<3;i++){ + agcOut[i]+=160; + agcIn[i]+=160; + } + WebRtcAgc_AddMic(agc, agcIn, 3, 160); + WebRtcAgc_Process(agc, (const int16_t *const *) agcIn, 3, 160, agcOut, agcMicLevel, &agcMicLevel, 0, &saturation); + //LOGV("AGC mic level %d", agcMicLevel); + memcpy(bufOut->ibuf()->bands(0)[0], _agcOut[0], 320*2); + memcpy(bufOut->ibuf()->bands(0)[1], _agcOut[1], 320*2); + memcpy(bufOut->ibuf()->bands(0)[2], _agcOut[2], 320*2); + } + + ((webrtc::SplittingFilter*)splittingFilter)->Synthesis(bufOut, bufIn); + + memcpy(samplesOut, bufIn->ibuf_const()->bands(0)[0], 960*2); +} + diff --git a/Telegram/ThirdParty/libtgvoip/EchoCanceller.h b/Telegram/ThirdParty/libtgvoip/EchoCanceller.h new file mode 100644 index 000000000..982bd4abb --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/EchoCanceller.h @@ -0,0 +1,53 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_ECHOCANCELLER_H +#define LIBTGVOIP_ECHOCANCELLER_H + +#include "threading.h" +#include "BufferPool.h" +#include "BlockingQueue.h" + +namespace tgvoip{ +class EchoCanceller{ + +public: + EchoCanceller(bool enableAEC, bool enableNS, bool enableAGC); + virtual ~EchoCanceller(); + virtual void Start(); + virtual void Stop(); + void SpeakerOutCallback(unsigned char* data, size_t len); + void Enable(bool enabled); + void ProcessInput(unsigned char* data, unsigned char* out, size_t len); + +private: + bool enableAEC; + bool enableAGC; + bool enableNS; +#ifndef TGVOIP_NO_DSP + static void* StartBufferFarendThread(void* arg); + void RunBufferFarendThread(); + bool didBufferFarend; + tgvoip_mutex_t aecMutex; + void* aec; + void* splittingFilter; // webrtc::SplittingFilter + void* splittingFilterIn; // webrtc::IFChannelBuffer + void* splittingFilterOut; // webrtc::IFChannelBuffer + void* splittingFilterFarend; // webrtc::SplittingFilter + void* splittingFilterFarendIn; // webrtc::IFChannelBuffer + void* splittingFilterFarendOut; // webrtc::IFChannelBuffer + tgvoip_thread_t bufferFarendThread; + BlockingQueue* farendQueue; + BufferPool* farendBufferPool; + bool running; + void* ns; // NsxHandle + void* agc; + int32_t agcMicLevel; +#endif +}; +} + +#endif //LIBTGVOIP_ECHOCANCELLER_H diff --git a/Telegram/ThirdParty/libtgvoip/Info.plist b/Telegram/ThirdParty/libtgvoip/Info.plist new file mode 100644 index 000000000..fbe1e6b31 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/Info.plist @@ -0,0 +1,24 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + FMWK + CFBundleShortVersionString + 1.0 + CFBundleVersion + $(CURRENT_PROJECT_VERSION) + NSPrincipalClass + + + diff --git a/Telegram/ThirdParty/libtgvoip/JitterBuffer.cpp b/Telegram/ThirdParty/libtgvoip/JitterBuffer.cpp new file 
mode 100644 index 000000000..e589389d7 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/JitterBuffer.cpp @@ -0,0 +1,488 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#include "VoIPController.h" +#include "JitterBuffer.h" +#include "logging.h" +#include "VoIPServerConfig.h" +#include + +using namespace tgvoip; + +JitterBuffer::JitterBuffer(MediaStreamItf *out, uint32_t step):bufferPool(JITTER_SLOT_SIZE, JITTER_SLOT_COUNT){ + if(out) + out->SetCallback(JitterBuffer::CallbackOut, this); + this->step=step; + memset(slots, 0, sizeof(jitter_packet_t)*JITTER_SLOT_COUNT); + minDelay=6; + lostCount=0; + needBuffering=true; + tickCount=0; + dontIncMinDelay=0; + dontDecMinDelay=0; + lostPackets=0; + outstandingDelayChange=0; + if(step<30){ + minMinDelay=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_min_delay_20", 6); + maxMinDelay=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_max_delay_20", 25); + maxUsedSlots=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_max_slots_20", 50); + }else if(step<50){ + minMinDelay=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_min_delay_40", 4); + maxMinDelay=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_max_delay_40", 15); + maxUsedSlots=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_max_slots_40", 30); + }else{ + minMinDelay=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_min_delay_60", 1); + maxMinDelay=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_max_delay_60", 10); + maxUsedSlots=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_max_slots_60", 20); + } + lossesToReset=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_losses_to_reset", 20); + resyncThreshold=ServerConfig::GetSharedInstance()->GetDouble("jitter_resync_threshold", 1.0); + 
//dump=fopen("/sdcard/tgvoip_jitter_dump.txt", "a"); + //fprintf(dump, "==================================\n"); + Reset(); + init_mutex(mutex); +} + +JitterBuffer::~JitterBuffer(){ + Reset(); + free_mutex(mutex); +} + +void JitterBuffer::SetMinPacketCount(uint32_t count){ + if(minDelay==count) + return; + minDelay=count; + Reset(); +} + +int JitterBuffer::GetMinPacketCount(){ + return minDelay; +} + +size_t JitterBuffer::CallbackIn(unsigned char *data, size_t len, void *param){ + //((JitterBuffer*)param)->HandleInput(data, len); + return 0; +} + +size_t JitterBuffer::CallbackOut(unsigned char *data, size_t len, void *param){ + return 0; //((JitterBuffer*)param)->HandleOutput(data, len, 0, NULL); +} + +void JitterBuffer::HandleInput(unsigned char *data, size_t len, uint32_t timestamp){ + jitter_packet_t pkt; + pkt.size=len; + pkt.buffer=data; + pkt.timestamp=timestamp; + lock_mutex(mutex); + PutInternal(&pkt); + unlock_mutex(mutex); + //LOGV("in, ts=%d", timestamp); +} + +void JitterBuffer::Reset(){ + wasReset=true; + needBuffering=true; + lastPutTimestamp=0; + int i; + for(i=0;isizesize)); + }else{ + if(pkt) { + pkt->size = slots[i].size; + pkt->timestamp = slots[i].timestamp; + memcpy(pkt->buffer, slots[i].buffer, slots[i].size); + } + } + bufferPool.Reuse(slots[i].buffer); + slots[i].buffer=NULL; + if(offset==0) + Advance(); + lostCount=0; + needBuffering=false; + return JR_OK; + } + + LOGW("jitter: found no packet for timestamp %lld (last put = %d, lost = %d)", (long long int)timestampToGet, lastPutTimestamp, lostCount); + + if(offset==0) + Advance(); + + if(!needBuffering){ + lostCount++; + if(offset==0){ + lostPackets++; + lostSinceReset++; + } + if(lostCount>=lossesToReset || (gotSinceReset>minDelay*25 && lostSinceReset>gotSinceReset/2)){ + LOGW("jitter: lost %d packets in a row, resetting", lostCount); + //minDelay++; + dontIncMinDelay=16; + dontDecMinDelay+=128; + if(GetCurrentDelay()size>JITTER_SLOT_SIZE){ + LOGE("The packet is too big to fit into the 
jitter buffer"); + return; + } + gotSinceReset++; + int i; + if(wasReset){ + wasReset=false; + outstandingDelayChange=0; + nextTimestamp=((int64_t)pkt->timestamp)-step*minDelay; + LOGI("jitter: resyncing, next timestamp = %lld (step=%d, minDelay=%d)", (long long int)nextTimestamp, step, minDelay); + } + + for(i=0;itimestamp-slots[i].timestamptimestamp-closestTime){ + closestTime=slots[i].timestamp; + prevTime=slots[i].recvTime; + } + }*/ + double time=VoIPController::GetCurrentTime(); + if(expectNextAtTime!=0){ + double dev=expectNextAtTime-time; + //LOGV("packet dev %f", dev); + deviationHistory[deviationPtr]=dev; + deviationPtr=(deviationPtr+1)%64; + expectNextAtTime+=step/1000.0; + }else{ + expectNextAtTime=time+step/1000.0; + } + + if(pkt->timestamptimestamp); + latePacketCount++; + lostPackets--; + }else if(pkt->timestamptimestamp); + latePacketCount++; + return; + } + + if(pkt->timestamp>lastPutTimestamp) + lastPutTimestamp=pkt->timestamp; + + for(i=0;i=maxUsedSlots){ + int toRemove=JITTER_SLOT_COUNT; + uint32_t bestTimestamp=0xFFFFFFFF; + for(i=0;itimestamp; + slots[i].size=pkt->size; + slots[i].buffer=bufferPool.Get(); + slots[i].recvTimeDiff=time-prevRecvTime; + if(slots[i].buffer) + memcpy(slots[i].buffer, pkt->buffer, pkt->size); + else + LOGE("WTF!!"); + //fprintf(dump, "%f %d\n", time-prevRecvTime, GetCurrentDelay()); + prevRecvTime=time; +} + + +void JitterBuffer::Advance(){ + nextTimestamp+=step; +} + + +int JitterBuffer::GetCurrentDelay(){ + int delay=0; + int i; + for(i=0;i0) + absolutelyNoLatePackets=false; + } + avgLate64/=64; + avgLate32/=32; + avgLate16/=16; + //LOGV("jitter: avg late=%.1f, %.1f, %.1f", avgLate16, avgLate32, avgLate64); + if(avgLate16>=resyncThreshold){ + wasReset=true; + } + /*if(avgLate16>=0.3){ + if(dontIncMinDelay==0 && minDelay<15){ + minDelay++; + if(GetCurrentDelay()0) + dontDecMinDelay--; + if(dontDecMinDelay==0 && minDelay>minMinDelay){ + minDelay--; + dontDecMinDelay=64; + dontIncMinDelay+=16; + } + } + + 
if(dontIncMinDelay>0) + dontIncMinDelay--;*/ + + if(absolutelyNoLatePackets){ + if(dontDecMinDelay>0) + dontDecMinDelay--; + } + + memmove(&delayHistory[1], delayHistory, 63*sizeof(int)); + delayHistory[0]=GetCurrentDelay(); + + avgDelay=0; + int min=100; + for(i=0;i<32;i++){ + avgDelay+=delayHistory[i]; + if(delayHistory[i]maxMinDelay) + stddevDelay=maxMinDelay; + if(stddevDelay!=minDelay){ + int32_t diff=stddevDelay-minDelay; + if(diff>0){ + dontDecMinDelay=100; + } + if(diff<-1) + diff=-1; + if(diff>1) + diff=1; + if((diff>0 && dontIncMinDelay==0) || (diff<0 && dontDecMinDelay==0)){ + //nextTimestamp+=diff*(int32_t)step; + minDelay+=diff; + outstandingDelayChange+=diff*60; + dontChangeDelay+=32; + LOGD("new delay from stddev %d", minDelay); + if(diff<0){ + dontDecMinDelay+=25; + } + if(diff>0){ + dontIncMinDelay=25; + } + } + } + lastMeasuredJitter=stddev; + lastMeasuredDelay=stddevDelay; + //LOGV("stddev=%.3f, avg=%.3f, ndelay=%d, dontDec=%u", stddev, avgdev, stddevDelay, dontDecMinDelay); + if(dontChangeDelay==0){ + if(avgDelay>minDelay+0.5){ + outstandingDelayChange-=avgDelay>minDelay+2 ? 60 : 20; + dontChangeDelay+=10; + }else if(avgDelay0) + dontChangeDelay--; + + //LOGV("jitter: avg delay=%d, delay=%d, late16=%.1f, dontDecMinDelay=%d", avgDelay, delayHistory[0], avgLate16, dontDecMinDelay); + /*if(!adjustingDelay) { + if (((minDelay==1 ? 
(avgDelay>=3) : (avgDelay>=minDelay/2)) && delayHistory[0]>minDelay && avgLate16<=0.1 && absolutelyNoLatePackets && dontDecMinDelay<32 && min>minDelay)) { + LOGI("jitter: need adjust"); + adjustingDelay=true; + } + }else{ + if(!absolutelyNoLatePackets){ + LOGI("jitter: done adjusting because we're losing packets"); + adjustingDelay=false; + }else if(tickCount%5==0){ + LOGD("jitter: removing a packet to reduce delay"); + GetInternal(NULL, 0); + expectNextAtTime=0; + if(GetCurrentDelay()<=minDelay || min<=minDelay){ + adjustingDelay = false; + LOGI("jitter: done adjusting"); + } + } + }*/ + + tickCount++; + + unlock_mutex(mutex); +} + + +void JitterBuffer::GetAverageLateCount(double *out){ + double avgLate64=0, avgLate32=0, avgLate16=0; + int i; + for(i=0;i<64;i++){ + avgLate64+=lateHistory[i]; + if(i<32) + avgLate32+=lateHistory[i]; + if(i<16) + avgLate16+=lateHistory[i]; + } + avgLate64/=64; + avgLate32/=32; + avgLate16/=16; + out[0]=avgLate16; + out[1]=avgLate32; + out[2]=avgLate64; +} + + +int JitterBuffer::GetAndResetLostPacketCount(){ + lock_mutex(mutex); + int r=lostPackets; + lostPackets=0; + unlock_mutex(mutex); + return r; +} + +double JitterBuffer::GetLastMeasuredJitter(){ + return lastMeasuredJitter; +} + +double JitterBuffer::GetLastMeasuredDelay(){ + return lastMeasuredDelay; +} + +double JitterBuffer::GetAverageDelay(){ + return avgDelay; +} diff --git a/Telegram/ThirdParty/libtgvoip/JitterBuffer.h b/Telegram/ThirdParty/libtgvoip/JitterBuffer.h new file mode 100644 index 000000000..8785ec56d --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/JitterBuffer.h @@ -0,0 +1,95 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_JITTERBUFFER_H +#define LIBTGVOIP_JITTERBUFFER_H + +#include +#include +#include +#include "MediaStreamItf.h" +#include "BlockingQueue.h" +#include "BufferPool.h" +#include "threading.h" + +#define JITTER_SLOT_COUNT 64 +#define JITTER_SLOT_SIZE 1024 +#define JR_OK 1 +#define JR_MISSING 2 +#define JR_BUFFERING 3 + +struct jitter_packet_t{ + unsigned char* buffer; + size_t size; + uint32_t timestamp; + double recvTimeDiff; +}; +typedef struct jitter_packet_t jitter_packet_t; + +namespace tgvoip{ +class JitterBuffer{ +public: + JitterBuffer(MediaStreamItf* out, uint32_t step); + ~JitterBuffer(); + void SetMinPacketCount(uint32_t count); + int GetMinPacketCount(); + int GetCurrentDelay(); + double GetAverageDelay(); + void Reset(); + void HandleInput(unsigned char* data, size_t len, uint32_t timestamp); + size_t HandleOutput(unsigned char* buffer, size_t len, int offsetInSteps, int* playbackScaledDuration); + void Tick(); + void GetAverageLateCount(double* out); + int GetAndResetLostPacketCount(); + double GetLastMeasuredJitter(); + double GetLastMeasuredDelay(); + +private: + static size_t CallbackIn(unsigned char* data, size_t len, void* param); + static size_t CallbackOut(unsigned char* data, size_t len, void* param); + void PutInternal(jitter_packet_t* pkt); + int GetInternal(jitter_packet_t* pkt, int offset); + void Advance(); + + BufferPool bufferPool; + tgvoip_mutex_t mutex; + jitter_packet_t slots[JITTER_SLOT_COUNT]; + int64_t nextTimestamp; + uint32_t step; + uint32_t minDelay; + uint32_t minMinDelay; + uint32_t maxMinDelay; + uint32_t maxUsedSlots; + uint32_t lastPutTimestamp; + uint32_t lossesToReset; + double resyncThreshold; + int lostCount; + int lostSinceReset; + int gotSinceReset; + bool wasReset; + bool needBuffering; + int delayHistory[64]; + int lateHistory[64]; + bool adjustingDelay; + unsigned int tickCount; + unsigned int latePacketCount; + unsigned int dontIncMinDelay; + unsigned int dontDecMinDelay; + int lostPackets; + 
double prevRecvTime; + double expectNextAtTime; + double deviationHistory[64]; + int deviationPtr; + double lastMeasuredJitter; + double lastMeasuredDelay; + int outstandingDelayChange; + unsigned int dontChangeDelay; + double avgDelay; + //FILE* dump; +}; +} + +#endif //LIBTGVOIP_JITTERBUFFER_H diff --git a/Telegram/ThirdParty/libtgvoip/MediaStreamItf.cpp b/Telegram/ThirdParty/libtgvoip/MediaStreamItf.cpp new file mode 100644 index 000000000..6e8b72953 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/MediaStreamItf.cpp @@ -0,0 +1,18 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#include "MediaStreamItf.h" + +using namespace tgvoip; + +void MediaStreamItf::SetCallback(size_t (*f)(unsigned char *, size_t, void*), void* param){ + callback=f; + callbackParam=param; +} + +size_t MediaStreamItf::InvokeCallback(unsigned char *data, size_t length){ + return (*callback)(data, length, callbackParam); +} diff --git a/Telegram/ThirdParty/libtgvoip/MediaStreamItf.h b/Telegram/ThirdParty/libtgvoip/MediaStreamItf.h new file mode 100644 index 000000000..97e236805 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/MediaStreamItf.h @@ -0,0 +1,29 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_MEDIASTREAMINPUT_H +#define LIBTGVOIP_MEDIASTREAMINPUT_H + +#include + +namespace tgvoip{ +class MediaStreamItf{ +public: + virtual void Start()=0; + virtual void Stop()=0; + void SetCallback(size_t (*f)(unsigned char*, size_t, void*), void* param); + +//protected: + size_t InvokeCallback(unsigned char* data, size_t length); + +private: + size_t (*callback)(unsigned char*, size_t, void*); + void* callbackParam; +}; +} + + +#endif //LIBTGVOIP_MEDIASTREAMINPUT_H diff --git a/Telegram/ThirdParty/libtgvoip/NetworkSocket.cpp b/Telegram/ThirdParty/libtgvoip/NetworkSocket.cpp new file mode 100644 index 000000000..3f7f02e63 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/NetworkSocket.cpp @@ -0,0 +1,608 @@ +// +// Created by Grishka on 29.03.17. +// + +#include "NetworkSocket.h" +#include +#include +#include +#include +#if defined(_WIN32) +#include "os/windows/NetworkSocketWinsock.h" +#include +#else +#include "os/posix/NetworkSocketPosix.h" +#endif +#include "logging.h" +#include "VoIPServerConfig.h" +#include "VoIPController.h" +#include "BufferInputStream.h" + +#define MIN_UDP_PORT 16384 +#define MAX_UDP_PORT 32768 + +using namespace tgvoip; + +NetworkSocket::NetworkSocket(NetworkProtocol protocol) : protocol(protocol){ + ipv6Timeout=ServerConfig::GetSharedInstance()->GetDouble("nat64_fallback_timeout", 3); + failed=false; + + proxyAddress=NULL; + proxyPort=0; + proxyUsername=NULL; + proxyPassword=NULL; +} + +NetworkSocket::~NetworkSocket(){ + +} + +std::string NetworkSocket::GetLocalInterfaceInfo(IPv4Address *inet4addr, IPv6Address *inet6addr){ + std::string r="not implemented"; + return r; +} + +uint16_t NetworkSocket::GenerateLocalPort(){ + return (uint16_t) ((rand()%(MAX_UDP_PORT-MIN_UDP_PORT))+MIN_UDP_PORT); +} + +void NetworkSocket::SetMaxPriority(){ +} + +bool NetworkSocket::IsFailed(){ + return failed; +} + +NetworkSocket *NetworkSocket::Create(NetworkProtocol protocol){ +#ifndef _WIN32 + return new NetworkSocketPosix(protocol); +#else 
+ return new NetworkSocketWinsock(protocol); +#endif +} + +IPv4Address *NetworkSocket::ResolveDomainName(std::string name){ +#ifndef _WIN32 + return NetworkSocketPosix::ResolveDomainName(name); +#else + return NetworkSocketWinsock::ResolveDomainName(name); +#endif +} + +void NetworkSocket::SetSocksProxy(IPv4Address *addr, uint16_t port, char *username, char *password){ + proxyAddress=addr; + proxyPort=port; + proxyUsername=username; + proxyPassword=password; +} + +void NetworkSocket::GenerateTCPO2States(unsigned char* buffer, TCPO2State* recvState, TCPO2State* sendState){ + memset(recvState, 0, sizeof(TCPO2State)); + memset(sendState, 0, sizeof(TCPO2State)); + unsigned char nonce[64]; + uint32_t *first = reinterpret_cast(nonce), *second = first + 1; + uint32_t first1 = 0x44414548U, first2 = 0x54534f50U, first3 = 0x20544547U, first4 = 0x20544547U, first5 = 0xeeeeeeeeU; + uint32_t second1 = 0; + do { + VoIPController::crypto.rand_bytes(nonce, sizeof(nonce)); + } while (*first == first1 || *first == first2 || *first == first3 || *first == first4 || *first == first5 || *second == second1 || *reinterpret_cast(nonce) == 0xef); + + // prepare encryption key/iv + memcpy(sendState->key, nonce + 8, 32); + memcpy(sendState->iv, nonce + 8 + 32, 16); + + // prepare decryption key/iv + char reversed[48]; + memcpy(reversed, nonce + 8, sizeof(reversed)); + std::reverse(reversed, reversed + sizeof(reversed)); + memcpy(recvState->key, reversed, 32); + memcpy(recvState->iv, reversed + 32, 16); + + // write protocol identifier + *reinterpret_cast(nonce + 56) = 0xefefefefU; + memcpy(buffer, nonce, 56); + EncryptForTCPO2(nonce, sizeof(nonce), sendState); + memcpy(buffer+56, nonce+56, 8); +} + +void NetworkSocket::EncryptForTCPO2(unsigned char *buffer, size_t len, TCPO2State *state){ + VoIPController::crypto.aes_ctr_encrypt(buffer, len, state->key, state->iv, state->ecount, &state->num); +} + +size_t NetworkSocket::Receive(unsigned char *buffer, size_t len){ + NetworkPacket pkt; + 
pkt.data=buffer; + pkt.length=len; + Receive(&pkt); + return pkt.length; +} + +size_t NetworkSocket::Send(unsigned char *buffer, size_t len){ + NetworkPacket pkt; + pkt.data=buffer; + pkt.length=len; + Send(&pkt); + return pkt.length; +} + +bool NetworkAddress::operator==(const NetworkAddress &other){ + IPv4Address* self4=dynamic_cast(this); + IPv4Address* other4=dynamic_cast((NetworkAddress*)&other); + if(self4 && other4){ + return self4->GetAddress()==other4->GetAddress(); + } + IPv6Address* self6=dynamic_cast(this); + IPv6Address* other6=dynamic_cast((NetworkAddress*)&other); + if(self6 && other6){ + return memcmp(self6->GetAddress(), other6->GetAddress(), 16)==0; + } + return false; +} + +bool NetworkAddress::operator!=(const NetworkAddress &other){ + return !(*this == other); +} + +IPv4Address::IPv4Address(std::string addr){ +#ifndef _WIN32 + this->address=NetworkSocketPosix::StringToV4Address(addr); +#else + this->address=NetworkSocketWinsock::StringToV4Address(addr); +#endif +} + +IPv4Address::IPv4Address(uint32_t addr){ + this->address=addr; +} + +IPv4Address::IPv4Address(){ + this->address=0; +} + + +std::string IPv4Address::ToString(){ +#ifndef _WIN32 + return NetworkSocketPosix::V4AddressToString(address); +#else + return NetworkSocketWinsock::V4AddressToString(address); +#endif +} + +/*sockaddr &IPv4Address::ToSockAddr(uint16_t port){ + sockaddr_in sa; + sa.sin_family=AF_INET; + sa.sin_addr=addr; + sa.sin_port=port; + return *((sockaddr *) &sa); +}*/ + +uint32_t IPv4Address::GetAddress(){ + return address; +} + +IPv6Address::IPv6Address(std::string addr){ +#ifndef _WIN32 + NetworkSocketPosix::StringToV6Address(addr, this->address); +#else + NetworkSocketWinsock::StringToV6Address(addr, this->address); +#endif +} + +IPv6Address::IPv6Address(uint8_t addr[16]){ + memcpy(address, addr, 16); +} + +IPv6Address::IPv6Address(){ + memset(address, 0, 16); +} + +std::string IPv6Address::ToString(){ + return ""; +} + +/*sockaddr &IPv6Address::ToSockAddr(uint16_t 
port){ + sockaddr_in6 sa; + sa.sin6_family=AF_INET6; + sa.sin6_addr=addr; + sa.sin6_port=port; + return *((sockaddr *) &sa); +}*/ + +const uint8_t *IPv6Address::GetAddress(){ + return address; +} + +bool NetworkSocket::Select(std::vector &readFds, std::vector &errorFds, SocketSelectCanceller *canceller){ +#ifndef _WIN32 + return NetworkSocketPosix::Select(readFds, errorFds, canceller); +#else + return NetworkSocketWinsock::Select(readFds, errorFds, canceller); +#endif +} + +SocketSelectCanceller::~SocketSelectCanceller(){ + +} + +SocketSelectCanceller *SocketSelectCanceller::Create(){ +#ifndef _WIN32 + return new SocketSelectCancellerPosix(); +#else + return new SocketSelectCancellerWin32(); +#endif +} + + + +NetworkSocketTCPObfuscated::NetworkSocketTCPObfuscated(NetworkSocket *wrapped) : NetworkSocketWrapper(PROTO_TCP){ + this->wrapped=wrapped; +} + +NetworkSocketTCPObfuscated::~NetworkSocketTCPObfuscated(){ + if(wrapped) + delete wrapped; +} + +NetworkSocket *NetworkSocketTCPObfuscated::GetWrapped(){ + return wrapped; +} + +void NetworkSocketTCPObfuscated::InitConnection(){ + unsigned char buf[64]; + GenerateTCPO2States(buf, &recvState, &sendState); + wrapped->Send(buf, 64); +} + +void NetworkSocketTCPObfuscated::Send(NetworkPacket *packet){ + BufferOutputStream os(packet->length+4); + size_t len=packet->length/4; + if(len<0x7F){ + os.WriteByte((unsigned char)len); + }else{ + os.WriteByte(0x7F); + os.WriteByte((unsigned char)(len & 0xFF)); + os.WriteByte((unsigned char)((len >> 8) & 0xFF)); + os.WriteByte((unsigned char)((len >> 16) & 0xFF)); + } + os.WriteBytes(packet->data, packet->length); + EncryptForTCPO2(os.GetBuffer(), os.GetLength(), &sendState); + wrapped->Send(os.GetBuffer(), os.GetLength()); + //LOGD("Sent %u bytes", os.GetLength()); +} + +void NetworkSocketTCPObfuscated::Receive(NetworkPacket *packet){ + unsigned char len1; + size_t packetLen=0; + size_t offset=0; + size_t len; + wrapped->Receive(&len1, 1); + /*if(len<=0) + goto failed;*/ + 
EncryptForTCPO2(&len1, 1, &recvState); + + if(len1<0x7F){ + packetLen=(size_t)len1*4; + }else{ + unsigned char len2[3]; + len=wrapped->Receive(len2, 3); + /*if(len<=0) + goto failed;*/ + EncryptForTCPO2(len2, 3, &recvState); + packetLen=((size_t)len2[0] | ((size_t)len2[1] << 8) | ((size_t)len2[2] << 16))*4; + } + + if(packetLen>packet->length){ + LOGW("packet too big to fit into buffer (%u vs %u)", (unsigned int)packetLen, (unsigned int)packet->length); + packet->length=0; + return; + } + + while(offsetReceive(packet->data+offset, packetLen-offset); + /*if(len<=0) + goto failed;*/ + offset+=len; + } + EncryptForTCPO2(packet->data, packetLen, &recvState); + //packet->address=&itr->address; + packet->length=packetLen; + //packet->port=itr->port; + packet->protocol=PROTO_TCP; + packet->address=wrapped->GetConnectedAddress(); + packet->port=wrapped->GetConnectedPort(); +} + +void NetworkSocketTCPObfuscated::Open(){ + +} + +void NetworkSocketTCPObfuscated::Close(){ + wrapped->Close(); +} + +void NetworkSocketTCPObfuscated::Connect(NetworkAddress *address, uint16_t port){ + +} + +bool NetworkSocketTCPObfuscated::IsFailed(){ + return wrapped->IsFailed(); +} + +NetworkSocketSOCKS5Proxy::NetworkSocketSOCKS5Proxy(NetworkSocket *tcp, NetworkSocket *udp, std::string username, std::string password) : NetworkSocketWrapper(udp ? 
PROTO_UDP : PROTO_TCP){ + this->tcp=tcp; + this->udp=udp; + this->username=username; + this->password=password; + connectedAddress=NULL; +} + +NetworkSocketSOCKS5Proxy::~NetworkSocketSOCKS5Proxy(){ + delete tcp; + if(connectedAddress) + delete connectedAddress; +} + +void NetworkSocketSOCKS5Proxy::Send(NetworkPacket *packet){ + if(protocol==PROTO_TCP){ + tcp->Send(packet); + }else if(protocol==PROTO_UDP){ + unsigned char buf[1500]; + BufferOutputStream out(buf, sizeof(buf)); + out.WriteInt16(0); // RSV + out.WriteByte(0); // FRAG + IPv4Address* v4=dynamic_cast(packet->address); + IPv6Address* v6=dynamic_cast(packet->address); + if(v4){ + out.WriteByte(1); // ATYP (IPv4) + out.WriteInt32(v4->GetAddress()); + }else{ + out.WriteByte(4); // ATYP (IPv6) + out.WriteBytes((unsigned char *) v6->GetAddress(), 16); + } + out.WriteInt16(htons(packet->port)); + out.WriteBytes(packet->data, packet->length); + NetworkPacket p; + p.data=buf; + p.length=out.GetLength(); + p.address=connectedAddress; + p.port=connectedPort; + p.protocol=PROTO_UDP; + udp->Send(&p); + } +} + +void NetworkSocketSOCKS5Proxy::Receive(NetworkPacket *packet){ + if(protocol==PROTO_TCP){ + tcp->Receive(packet); + }else if(protocol==PROTO_UDP){ + unsigned char buf[1500]; + NetworkPacket p; + p.data=buf; + p.length=sizeof(buf); + udp->Receive(&p); + if(p.length && p.address && *p.address==*connectedAddress && p.port==connectedPort){ + BufferInputStream in(buf, p.length); + in.ReadInt16(); // RSV + in.ReadByte(); // FRAG + unsigned char atyp=in.ReadByte(); + if(atyp==1){ // IPv4 + lastRecvdV4=IPv4Address((uint32_t) in.ReadInt32()); + packet->address=&lastRecvdV4; + }else if(atyp==4){ // IPv6 + unsigned char addr[16]; + in.ReadBytes(addr, 16); + lastRecvdV6=IPv6Address(addr); + packet->address=&lastRecvdV6; + } + packet->port=ntohs(in.ReadInt16()); + if(packet->length>=in.Remaining()){ + packet->length=in.Remaining(); + in.ReadBytes(packet->data, in.Remaining()); + }else{ + packet->length=0; + LOGW("socks5: 
received packet too big"); + } + } + } +} + +void NetworkSocketSOCKS5Proxy::Open(){ + if(protocol==PROTO_UDP){ + unsigned char buf[1024]; + BufferOutputStream out(buf, sizeof(buf)); + out.WriteByte(5); // VER + out.WriteByte(3); // CMD (UDP ASSOCIATE) + out.WriteByte(0); // RSV + out.WriteByte(1); // ATYP (IPv4) + out.WriteInt32(0); // DST.ADDR + out.WriteInt16(0); // DST.PORT + tcp->Send(buf, out.GetLength()); + size_t l=tcp->Receive(buf, sizeof(buf)); + if(l<2 || tcp->IsFailed()){ + LOGW("socks5: udp associate failed"); + failed=true; + return; + } + try{ + BufferInputStream in(buf, l); + unsigned char ver=in.ReadByte(); + unsigned char rep=in.ReadByte(); + if(ver!=5){ + LOGW("socks5: udp associate: wrong ver in response"); + failed=true; + return; + } + if(rep!=0){ + LOGW("socks5: udp associate failed with error %02X", rep); + failed=true; + return; + } + in.ReadByte(); // RSV + unsigned char atyp=in.ReadByte(); + if(atyp==1){ + uint32_t addr=(uint32_t) in.ReadInt32(); + connectedAddress=new IPv4Address(addr); + }else if(atyp==3){ + unsigned char len=in.ReadByte(); + char domain[256]; + memset(domain, 0, sizeof(domain)); + in.ReadBytes((unsigned char*)domain, len); + LOGD("address type is domain, address=%s", domain); + connectedAddress=ResolveDomainName(std::string(domain)); + if(!connectedAddress){ + LOGW("socks5: failed to resolve domain name '%s'", domain); + failed=true; + return; + } + }else if(atyp==4){ + unsigned char addr[16]; + in.ReadBytes(addr, 16); + connectedAddress=new IPv6Address(addr); + }else{ + LOGW("socks5: unknown address type %d", atyp); + failed=true; + return; + } + connectedPort=(uint16_t)ntohs(in.ReadInt16()); + tcp->SetTimeouts(0, 0); + LOGV("socks5: udp associate successful, given endpoint %s:%d", connectedAddress->ToString().c_str(), connectedPort); + }catch(std::out_of_range& x){ + LOGW("socks5: udp associate response parse failed"); + failed=true; + } + } +} + +void NetworkSocketSOCKS5Proxy::Close(){ + tcp->Close(); +} + +void 
NetworkSocketSOCKS5Proxy::Connect(NetworkAddress *address, uint16_t port){ + if(!failed){ + tcp->SetTimeouts(1, 2); + unsigned char buf[1024]; + BufferOutputStream out(buf, sizeof(buf)); + out.WriteByte(5); // VER + out.WriteByte(1); // CMD (CONNECT) + out.WriteByte(0); // RSV + IPv4Address* v4=dynamic_cast(address); + IPv6Address* v6=dynamic_cast(address); + if(v4){ + out.WriteByte(1); // ATYP (IPv4) + out.WriteInt32(v4->GetAddress()); + }else if(v6){ + out.WriteByte(4); // ATYP (IPv6) + out.WriteBytes((unsigned char*)v6->GetAddress(), 16); + }else{ + LOGW("socks5: unknown address type"); + failed=true; + return; + } + out.WriteInt16(htons(port)); // DST.PORT + tcp->Send(buf, out.GetLength()); + size_t l=tcp->Receive(buf, sizeof(buf)); + if(l<2 || tcp->IsFailed()){ + LOGW("socks5: connect failed") + failed=true; + return; + } + BufferInputStream in(buf, l); + unsigned char ver=in.ReadByte(); + if(ver!=5){ + LOGW("socks5: connect: wrong ver in response"); + failed=true; + return; + } + unsigned char rep=in.ReadByte(); + if(rep!=0){ + LOGW("socks5: connect: failed with error %02X", rep); + failed=true; + return; + } + connectedAddress=v4 ? (NetworkAddress*)new IPv4Address(*v4) : (NetworkAddress*)new IPv6Address(*v6); + connectedPort=port; + LOGV("socks5: connect succeeded"); + tcp->SetTimeouts(5, 60); + } +} + +NetworkSocket *NetworkSocketSOCKS5Proxy::GetWrapped(){ + return protocol==PROTO_TCP ? 
tcp : udp; +} + +void NetworkSocketSOCKS5Proxy::InitConnection(){ + unsigned char buf[1024]; + tcp->SetTimeouts(1, 2); + BufferOutputStream p(buf, sizeof(buf)); + p.WriteByte(5); // VER + if(!username.empty()){ + p.WriteByte(2); // NMETHODS + p.WriteByte(0); // no auth + p.WriteByte(2); // user/pass + }else{ + p.WriteByte(1); // NMETHODS + p.WriteByte(0); // no auth + } + tcp->Send(buf, p.GetLength()); + size_t l=tcp->Receive(buf, sizeof(buf)); + if(l<2 || tcp->IsFailed()){ + failed=true; + return; + } + BufferInputStream in(buf, l); + unsigned char ver=in.ReadByte(); + unsigned char chosenMethod=in.ReadByte(); + LOGV("socks5: VER=%02X, METHOD=%02X", ver, chosenMethod); + if(ver!=5){ + LOGW("socks5: incorrect VER in response"); + failed=true; + return; + } + if(chosenMethod==0){ + // connected, no further auth needed + }else if(chosenMethod==2 && !username.empty()){ + p.Reset(); + p.WriteByte(1); // VER + p.WriteByte((unsigned char)(username.length()>255 ? 255 : username.length())); // ULEN + p.WriteBytes((unsigned char*)username.c_str(), username.length()>255 ? 255 : username.length()); // UNAME + p.WriteByte((unsigned char)(password.length()>255 ? 255 : password.length())); // PLEN + p.WriteBytes((unsigned char*)password.c_str(), password.length()>255 ? 
255 : password.length()); // PASSWD + tcp->Send(buf, p.GetLength()); + l=tcp->Receive(buf, sizeof(buf)); + if(l<2 || tcp->IsFailed()){ + failed=true; + return; + } + in=BufferInputStream(buf, l); + ver=in.ReadByte(); + unsigned char status=in.ReadByte(); + LOGV("socks5: auth response VER=%02X, STATUS=%02X", ver, status); + if(ver!=1){ + LOGW("socks5: auth response VER is incorrect"); + failed=true; + return; + } + if(status!=0){ + LOGW("socks5: username/password auth failed"); + failed=true; + return; + } + }else{ + LOGW("socks5: unsupported auth method"); + failed=true; + return; + } + tcp->SetTimeouts(5, 60); +} + +bool NetworkSocketSOCKS5Proxy::IsFailed(){ + return NetworkSocket::IsFailed() || tcp->IsFailed(); +} + +NetworkAddress *NetworkSocketSOCKS5Proxy::GetConnectedAddress(){ + return connectedAddress; +} + +uint16_t NetworkSocketSOCKS5Proxy::GetConnectedPort(){ + return connectedPort; +} diff --git a/Telegram/ThirdParty/libtgvoip/NetworkSocket.h b/Telegram/ThirdParty/libtgvoip/NetworkSocket.h new file mode 100644 index 000000000..6c89d747e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/NetworkSocket.h @@ -0,0 +1,173 @@ +// +// Created by Grishka on 29.03.17. 
+// + +#ifndef LIBTGVOIP_NETWORKSOCKET_H +#define LIBTGVOIP_NETWORKSOCKET_H + +#include <stdint.h> +#include <string> +#include <vector> + +namespace tgvoip { + + enum NetworkProtocol{ + PROTO_UDP=0, + PROTO_TCP + }; + + struct TCPO2State{ + unsigned char key[32]; + unsigned char iv[16]; + unsigned char ecount[16]; + uint32_t num; + }; + + class NetworkAddress{ + public: + virtual std::string ToString()=0; + bool operator==(const NetworkAddress& other); + bool operator!=(const NetworkAddress& other); + virtual ~NetworkAddress()=default; + }; + + class IPv4Address : public NetworkAddress{ + public: + IPv4Address(std::string addr); + IPv4Address(uint32_t addr); + IPv4Address(); + virtual std::string ToString(); + //virtual sockaddr& ToSockAddr(uint16_t port); + uint32_t GetAddress(); + + private: + uint32_t address; + }; + + class IPv6Address : public NetworkAddress{ + public: + IPv6Address(std::string addr); + IPv6Address(uint8_t addr[16]); + IPv6Address(); + virtual std::string ToString(); + //virtual sockaddr& ToSockAddr(uint16_t port); + const uint8_t* GetAddress(); + private: + uint8_t address[16]; + }; + + struct NetworkPacket{ + unsigned char* data; + size_t length; + NetworkAddress* address; + uint16_t port; + NetworkProtocol protocol; + }; + typedef struct NetworkPacket NetworkPacket; + + class SocketSelectCanceller{ + public: + virtual ~SocketSelectCanceller(); + virtual void CancelSelect()=0; + static SocketSelectCanceller* Create(); + }; + + class NetworkSocket{ + public: + NetworkSocket(NetworkProtocol protocol); + virtual ~NetworkSocket(); + virtual void Send(NetworkPacket* packet)=0; + virtual void Receive(NetworkPacket* packet)=0; + size_t Receive(unsigned char* buffer, size_t len); + size_t Send(unsigned char* buffer, size_t len); + virtual void Open()=0; + virtual void Close()=0; + virtual uint16_t GetLocalPort(){ return 0; }; + virtual void Connect(NetworkAddress* address, uint16_t port)=0; + virtual std::string GetLocalInterfaceInfo(IPv4Address* inet4addr, IPv6Address*
inet6addr); + virtual void OnActiveInterfaceChanged(){}; + virtual NetworkAddress* GetConnectedAddress(){ return NULL; }; + virtual uint16_t GetConnectedPort(){ return 0; }; + virtual void SetTimeouts(int sendTimeout, int recvTimeout){}; + + virtual bool IsFailed(); + void SetSocksProxy(IPv4Address* addr, uint16_t port, char* username, char* password); + + static NetworkSocket* Create(NetworkProtocol protocol); + static IPv4Address* ResolveDomainName(std::string name); + static bool Select(std::vector<NetworkSocket*>& readFds, std::vector<NetworkSocket*>& errorFds, SocketSelectCanceller* canceller); + + protected: + virtual uint16_t GenerateLocalPort(); + virtual void SetMaxPriority(); + static void GenerateTCPO2States(unsigned char* buffer, TCPO2State* recvState, TCPO2State* sendState); + static void EncryptForTCPO2(unsigned char* buffer, size_t len, TCPO2State* state); + double ipv6Timeout; + unsigned char nat64Prefix[12]; + bool failed; + NetworkProtocol protocol; + + IPv4Address* proxyAddress; + uint16_t proxyPort; + char* proxyUsername; + char* proxyPassword; + }; + + class NetworkSocketWrapper : public NetworkSocket{ + public: + NetworkSocketWrapper(NetworkProtocol protocol) : NetworkSocket(protocol){}; + virtual ~NetworkSocketWrapper(){}; + virtual NetworkSocket* GetWrapped()=0; + virtual void InitConnection()=0; + }; + + class NetworkSocketTCPObfuscated : public NetworkSocketWrapper{ + public: + NetworkSocketTCPObfuscated(NetworkSocket* wrapped); + virtual ~NetworkSocketTCPObfuscated(); + virtual NetworkSocket* GetWrapped(); + virtual void InitConnection(); + virtual void Send(NetworkPacket *packet); + virtual void Receive(NetworkPacket *packet); + virtual void Open(); + virtual void Close(); + virtual void Connect(NetworkAddress *address, uint16_t port); + + virtual bool IsFailed(); + + private: + NetworkSocket* wrapped; + TCPO2State recvState; + TCPO2State sendState; + }; + + class NetworkSocketSOCKS5Proxy : public NetworkSocketWrapper{ + public: + 
NetworkSocketSOCKS5Proxy(NetworkSocket* tcp, NetworkSocket* udp, std::string username, std::string password); + virtual ~NetworkSocketSOCKS5Proxy(); + virtual void Send(NetworkPacket *packet); + virtual void Receive(NetworkPacket *packet); + virtual void Open(); + virtual void Close(); + virtual void Connect(NetworkAddress *address, uint16_t port); + virtual NetworkSocket *GetWrapped(); + virtual void InitConnection(); + virtual bool IsFailed(); + virtual NetworkAddress *GetConnectedAddress(); + virtual uint16_t GetConnectedPort(); + + private: + NetworkSocket* tcp; + NetworkSocket* udp; + std::string username; + std::string password; + NetworkAddress* connectedAddress; + uint16_t connectedPort; + + IPv4Address lastRecvdV4; + IPv6Address lastRecvdV6; + }; + +} + +#endif //LIBTGVOIP_NETWORKSOCKET_H diff --git a/Telegram/ThirdParty/libtgvoip/OpusDecoder.cpp b/Telegram/ThirdParty/libtgvoip/OpusDecoder.cpp new file mode 100644 index 000000000..6f6626aff --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/OpusDecoder.cpp @@ -0,0 +1,257 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "OpusDecoder.h" +#include "audio/Resampler.h" +#include "logging.h" +#include <assert.h> + +#define PACKET_SIZE (960*2) + +using namespace tgvoip; + +tgvoip::OpusDecoder::OpusDecoder(MediaStreamItf *dst) : semaphore(32, 0){ + //this->source=source; + dst->SetCallback(OpusDecoder::Callback, this); + dec=opus_decoder_create(48000, 1, NULL); + //test=fopen("/sdcard/test.raw", "wb"); + buffer=(unsigned char *) malloc(8192); + //lastDecoded=(unsigned char*) malloc(960*2); + lastDecoded=NULL; + lastDecodedLen=0; + outputBufferSize=0; + lastDecodedOffset=0; + decodedQueue=new BlockingQueue<unsigned char*>(33); + bufferPool=new BufferPool(PACKET_SIZE, 32); + echoCanceller=NULL; + frameDuration=20; +} + +tgvoip::OpusDecoder::~OpusDecoder(){ + opus_decoder_destroy(dec); + free(buffer); + delete bufferPool; + delete decodedQueue; +} + + +void tgvoip::OpusDecoder::SetEchoCanceller(EchoCanceller* canceller){ + echoCanceller=canceller; +} + +size_t tgvoip::OpusDecoder::Callback(unsigned char *data, size_t len, void *param){ + ((OpusDecoder*)param)->HandleCallback(data, len); + return 0; +} + +void tgvoip::OpusDecoder::HandleCallback(unsigned char *data, size_t len){ + if(!running){ + memset(data, 0, len); + return; + } + if(outputBufferSize==0){ + outputBufferSize=len; + int packetsNeeded; + if(len>PACKET_SIZE) + packetsNeeded=len/PACKET_SIZE; + else + packetsNeeded=1; + packetsNeeded*=2; + semaphore.Release(packetsNeeded); + } + assert(outputBufferSize==len && "output buffer size is supposed to be the same throughout callbacks"); + if(len>PACKET_SIZE){ + int count=len/PACKET_SIZE; + int i; + for(i=0;i<count;i++){ + lastDecoded=(unsigned char*) decodedQueue->GetBlocking(); + if(!lastDecoded) + return; + memcpy(data+(i*PACKET_SIZE), lastDecoded, PACKET_SIZE); + if(echoCanceller) + echoCanceller->SpeakerOutCallback(data, PACKET_SIZE); + bufferPool->Reuse(lastDecoded); + } + semaphore.Release(count); + }else if(len==PACKET_SIZE){ + lastDecoded=(unsigned char*) decodedQueue->GetBlocking(); + if(!lastDecoded) + return; + memcpy(data, lastDecoded, 
PACKET_SIZE); + bufferPool->Reuse(lastDecoded); + semaphore.Release(); + lock_mutex(mutex); + if(echoCanceller) + echoCanceller->SpeakerOutCallback(data, PACKET_SIZE); + unlock_mutex(mutex); + }else if(len<PACKET_SIZE){ + if(lastDecodedOffset==0){ + lastDecoded=(unsigned char*) decodedQueue->GetBlocking(); + } + if(!lastDecoded) + return; + + memcpy(data, lastDecoded+lastDecodedOffset, len); + lastDecodedOffset+=len; + + if(lastDecodedOffset>=PACKET_SIZE){ + if(echoCanceller) + echoCanceller->SpeakerOutCallback(lastDecoded, PACKET_SIZE); + lastDecodedOffset=0; + bufferPool->Reuse(lastDecoded); + //LOGV("before req packet, qsize=%d", decodedQueue->Size()); + if(decodedQueue->Size()==0) + semaphore.Release(2); + else + semaphore.Release(); + } + } + /*if(lastDecodedLen){ + LOGV("ldl=%d, l=%d", lastDecodedLen, len); + if(len==PACKET_SIZE){ + memcpy(data, lastDecoded, len); + packetsNeeded=1; + }else if(len>PACKET_SIZE){ + memcpy(data, lastDecoded, len); + //LOGV("ldl=%d, l=%d", lastDecodedLen, len); + packetsNeeded=len/PACKET_SIZE; + }else if(len=PACKET_SIZE){ + packetsNeeded=1; + lastDecodedOffset=0; + } + } + }else{ + LOGW("skipping callback"); + if(len>PACKET_SIZE) + packetsNeeded=len/PACKET_SIZE; + else + packetsNeeded=1; + }*/ + /*if(packetsNeeded>0){ + lock_mutex(mutex); + notify_lock(lock); + unlock_mutex(mutex); + }*/ +} + + +void tgvoip::OpusDecoder::Start(){ + init_mutex(mutex); + running=true; + start_thread(thread, OpusDecoder::StartThread, this); + set_thread_priority(thread, get_thread_max_priority()); + set_thread_name(thread, "opus_decoder"); +} + +void tgvoip::OpusDecoder::Stop(){ + if(!running) + return; + running=false; + semaphore.Release(); + join_thread(thread); + free_mutex(mutex); +} + + +void* tgvoip::OpusDecoder::StartThread(void *param){ + ((tgvoip::OpusDecoder*)param)->RunThread(); + return NULL; +} + +void tgvoip::OpusDecoder::RunThread(){ + unsigned char nextBuffer[8192]; + unsigned char decodeBuffer[8192]; + int i; + int packetsPerFrame=frameDuration/20; + bool first=true; + LOGI("decoder: packets per frame %d", 
packetsPerFrame); + size_t nextLen=0; + while(running){ + //LOGV("after wait, running=%d", running); + //LOGD("Will get %d packets", packetsNeeded); + //lastDecodedLen=0; + memcpy(buffer, nextBuffer, nextLen); + size_t inLen=nextLen; + //nextLen=InvokeCallback(nextBuffer, 8192); + int playbackDuration=0; + nextLen=jitterBuffer->HandleOutput(nextBuffer, 8192, 0, &playbackDuration); + if(first){ + first=false; + continue; + } + //LOGV("Before decode, len=%d", inLen); + if(!inLen){ + LOGV("Trying to recover late packet"); + inLen=jitterBuffer->HandleOutput(buffer, 8192, -2, &playbackDuration); + if(inLen) + LOGV("Decoding late packet"); + } + int size; + if(inLen || nextLen) + size=opus_decode(dec, inLen ? buffer : nextBuffer, inLen ? inLen : nextLen, (opus_int16*) decodeBuffer, packetsPerFrame*960, inLen ? 0 : 1); + else{ // do packet loss concealment + size=opus_decode(dec, NULL, 0, (opus_int16 *) decodeBuffer, packetsPerFrame*960, 0); + LOGV("PLC"); + } + if(size<0) + LOGW("decoder: opus_decode error %d", size); + //LOGV("After decode, size=%d", size); + //LOGD("playbackDuration=%d", playbackDuration); + unsigned char* processedBuffer; + if(playbackDuration==80){ + processedBuffer=buffer; + audio::Resampler::Rescale60To80((int16_t*) decodeBuffer, (int16_t*) processedBuffer); + }else if(playbackDuration==40){ + processedBuffer=buffer; + audio::Resampler::Rescale60To40((int16_t*) decodeBuffer, (int16_t*) processedBuffer); + }else{ + processedBuffer=decodeBuffer; + } + for(i=0;i<packetsPerFrame;i++){ + unsigned char* buf=bufferPool->Get(); + if(buf){ + if(size>0){ + memcpy(buf, processedBuffer+(PACKET_SIZE*i), PACKET_SIZE); + }else{ + LOGE("Error decoding, result=%d", size); + memset(buf, 0, PACKET_SIZE); + } + decodedQueue->Put(buf); + }else{ + LOGW("decoder: no buffers left!"); + } + //LOGD("packets needed: %d", packetsNeeded); + } + } +} + + +void tgvoip::OpusDecoder::SetFrameDuration(uint32_t duration){ + frameDuration=duration; +} + + +void tgvoip::OpusDecoder::ResetQueue(){ + /*lock_mutex(mutex); + packetsNeeded=0; + 
unlock_mutex(mutex); + while(decodedQueue->Size()>0){ + bufferPool->Reuse((unsigned char *) decodedQueue->Get()); + }*/ +} + + +void tgvoip::OpusDecoder::SetJitterBuffer(JitterBuffer* jitterBuffer){ + this->jitterBuffer=jitterBuffer; +} diff --git a/Telegram/ThirdParty/libtgvoip/OpusDecoder.h b/Telegram/ThirdParty/libtgvoip/OpusDecoder.h new file mode 100644 index 000000000..51bbbacde --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/OpusDecoder.h @@ -0,0 +1,56 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef LIBTGVOIP_OPUSDECODER_H +#define LIBTGVOIP_OPUSDECODER_H + + +#include "MediaStreamItf.h" +#include "opus.h" +#include "threading.h" +#include "BlockingQueue.h" +#include "BufferPool.h" +#include "EchoCanceller.h" +#include "JitterBuffer.h" +#include <stdint.h> + +namespace tgvoip{ +class OpusDecoder { +public: + virtual void Start(); + + virtual void Stop(); + + OpusDecoder(MediaStreamItf* dst); + virtual ~OpusDecoder(); + void HandleCallback(unsigned char* data, size_t len); + void SetEchoCanceller(EchoCanceller* canceller); + void SetFrameDuration(uint32_t duration); + void ResetQueue(); + void SetJitterBuffer(JitterBuffer* jitterBuffer); + +private: + static size_t Callback(unsigned char* data, size_t len, void* param); + static void* StartThread(void* param); + void RunThread(); + ::OpusDecoder* dec; + BlockingQueue<unsigned char*>* decodedQueue; + BufferPool* bufferPool; + unsigned char* buffer; + unsigned char* lastDecoded; + size_t lastDecodedLen, lastDecodedOffset; + size_t outputBufferSize; + bool running; + tgvoip_thread_t thread; + Semaphore semaphore; + tgvoip_mutex_t mutex; + uint32_t frameDuration; + EchoCanceller* echoCanceller; + JitterBuffer* jitterBuffer; +}; +} + +#endif //LIBTGVOIP_OPUSDECODER_H diff --git a/Telegram/ThirdParty/libtgvoip/OpusEncoder.cpp 
b/Telegram/ThirdParty/libtgvoip/OpusEncoder.cpp new file mode 100644 index 000000000..bf42cd792 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/OpusEncoder.cpp @@ -0,0 +1,160 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#include "OpusEncoder.h" +#include <assert.h> +#include "logging.h" +#include "VoIPServerConfig.h" + +tgvoip::OpusEncoder::OpusEncoder(MediaStreamItf *source):queue(11), bufferPool(960*2, 10){ + this->source=source; + source->SetCallback(tgvoip::OpusEncoder::Callback, this); + enc=opus_encoder_create(48000, 1, OPUS_APPLICATION_VOIP, NULL); + opus_encoder_ctl(enc, OPUS_SET_COMPLEXITY(10)); + opus_encoder_ctl(enc, OPUS_SET_PACKET_LOSS_PERC(15)); + opus_encoder_ctl(enc, OPUS_SET_INBAND_FEC(1)); + opus_encoder_ctl(enc, OPUS_SET_SIGNAL(OPUS_SIGNAL_VOICE)); + opus_encoder_ctl(enc, OPUS_SET_BANDWIDTH(OPUS_BANDWIDTH_FULLBAND)); + requestedBitrate=32000; + currentBitrate=0; + running=false; + echoCanceller=NULL; + complexity=10; + frameDuration=20; + mediumCorrectionBitrate=ServerConfig::GetSharedInstance()->GetInt("audio_medium_fec_bitrate", 10000); + strongCorrectionBitrate=ServerConfig::GetSharedInstance()->GetInt("audio_strong_fec_bitrate", 8000); + mediumCorrectionMultiplier=ServerConfig::GetSharedInstance()->GetDouble("audio_medium_fec_multiplier", 1.5); + strongCorrectionMultiplier=ServerConfig::GetSharedInstance()->GetDouble("audio_strong_fec_multiplier", 2.0); +} + +tgvoip::OpusEncoder::~OpusEncoder(){ + opus_encoder_destroy(enc); +} + +void tgvoip::OpusEncoder::Start(){ + if(running) + return; + running=true; + start_thread(thread, StartThread, this); + set_thread_priority(thread, get_thread_max_priority()); + set_thread_name(thread, "opus_encoder"); +} + +void tgvoip::OpusEncoder::Stop(){ + if(!running) + return; + running=false; + queue.Put(NULL); + join_thread(thread); +} + + +void 
tgvoip::OpusEncoder::SetBitrate(uint32_t bitrate){ + requestedBitrate=bitrate; +} + +void tgvoip::OpusEncoder::Encode(unsigned char *data, size_t len){ + if(requestedBitrate!=currentBitrate){ + opus_encoder_ctl(enc, OPUS_SET_BITRATE(requestedBitrate)); + currentBitrate=requestedBitrate; + LOGV("opus_encoder: setting bitrate to %u", currentBitrate); + } + int32_t r=opus_encode(enc, (int16_t*)data, len/2, buffer, 4096); + if(r<=0){ + LOGE("Error encoding: %d", r); + }else if(r==1){ + LOGW("DTX"); + }else if(running){ + //LOGV("Packet size = %d", r); + InvokeCallback(buffer, (size_t)r); + } +} + +size_t tgvoip::OpusEncoder::Callback(unsigned char *data, size_t len, void* param){ + OpusEncoder* e=(OpusEncoder*)param; + unsigned char* buf=e->bufferPool.Get(); + if(buf){ + assert(len==960*2); + memcpy(buf, data, 960*2); + e->queue.Put(buf); + }else{ + LOGW("opus_encoder: no buffer slots left"); + if(e->complexity>1){ + e->complexity--; + opus_encoder_ctl(e->enc, OPUS_SET_COMPLEXITY(e->complexity)); + } + } + return 0; +} + + +uint32_t tgvoip::OpusEncoder::GetBitrate(){ + return requestedBitrate; +} + +void tgvoip::OpusEncoder::SetEchoCanceller(EchoCanceller* aec){ + echoCanceller=aec; +} + +void* tgvoip::OpusEncoder::StartThread(void* arg){ + ((OpusEncoder*)arg)->RunThread(); + return NULL; +} + +void tgvoip::OpusEncoder::RunThread(){ + unsigned char buf[960*2]; + uint32_t bufferedCount=0; + uint32_t packetsPerFrame=frameDuration/20; + LOGV("starting encoder, packets per frame=%d", packetsPerFrame); + unsigned char* frame; + if(packetsPerFrame>1) + frame=(unsigned char *) malloc(960*2*packetsPerFrame); + else + frame=NULL; + while(running){ + unsigned char* packet=(unsigned char*)queue.GetBlocking(); + if(packet){ + if(echoCanceller) + echoCanceller->ProcessInput(packet, buf, 960*2); + else + memcpy(buf, packet, 960*2); + if(packetsPerFrame==1){ + Encode(buf, 960*2); + }else{ + memcpy(frame+(960*2*bufferedCount), buf, 960*2); + bufferedCount++; + 
if(bufferedCount==packetsPerFrame){ + Encode(frame, 960*2*packetsPerFrame); + bufferedCount=0; + } + } + bufferPool.Reuse(packet); + } + } + if(frame) + free(frame); +} + + +void tgvoip::OpusEncoder::SetOutputFrameDuration(uint32_t duration){ + frameDuration=duration; +} + + +void tgvoip::OpusEncoder::SetPacketLoss(int percent){ + packetLossPercent=percent; + double multiplier=1; + if(currentBitrate<=strongCorrectionBitrate) + multiplier=strongCorrectionMultiplier; + else if(currentBitrate<=mediumCorrectionBitrate) + multiplier=mediumCorrectionMultiplier; + opus_encoder_ctl(enc, OPUS_SET_PACKET_LOSS_PERC((int)(percent*multiplier))); + opus_encoder_ctl(enc, OPUS_SET_BANDWIDTH(percent>17 ? OPUS_AUTO : OPUS_BANDWIDTH_FULLBAND)); +} + +int tgvoip::OpusEncoder::GetPacketLoss(){ + return packetLossPercent; +} diff --git a/Telegram/ThirdParty/libtgvoip/OpusEncoder.h b/Telegram/ThirdParty/libtgvoip/OpusEncoder.h new file mode 100644 index 000000000..9329f89ae --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/OpusEncoder.h @@ -0,0 +1,59 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_OPUSENCODER_H +#define LIBTGVOIP_OPUSENCODER_H + + +#include "MediaStreamItf.h" +#include "opus.h" +#include "threading.h" +#include "BlockingQueue.h" +#include "BufferPool.h" +#include "EchoCanceller.h" + +#include <stdint.h> + +namespace tgvoip{ +class OpusEncoder : public MediaStreamItf{ +public: + OpusEncoder(MediaStreamItf* source); + virtual ~OpusEncoder(); + virtual void Start(); + virtual void Stop(); + void SetBitrate(uint32_t bitrate); + void SetEchoCanceller(EchoCanceller* aec); + void SetOutputFrameDuration(uint32_t duration); + void SetPacketLoss(int percent); + int GetPacketLoss(); + uint32_t GetBitrate(); + +private: + static size_t Callback(unsigned char* data, size_t len, void* param); + static void* StartThread(void* arg); + void RunThread(); + void Encode(unsigned char* data, size_t len); + MediaStreamItf* source; + ::OpusEncoder* enc; + unsigned char buffer[4096]; + uint32_t requestedBitrate; + uint32_t currentBitrate; + tgvoip_thread_t thread; + BlockingQueue<unsigned char*> queue; + BufferPool bufferPool; + EchoCanceller* echoCanceller; + int complexity; + bool running; + uint32_t frameDuration; + int packetLossPercent; + uint32_t mediumCorrectionBitrate; + uint32_t strongCorrectionBitrate; + double mediumCorrectionMultiplier; + double strongCorrectionMultiplier; +}; +} + +#endif //LIBTGVOIP_OPUSENCODER_H diff --git a/Telegram/ThirdParty/libtgvoip/UNLICENSE b/Telegram/ThirdParty/libtgvoip/UNLICENSE new file mode 100644 index 000000000..00d2e135a --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/UNLICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. 
+ +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to <http://unlicense.org/> \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/VoIPController.cpp b/Telegram/ThirdParty/libtgvoip/VoIPController.cpp new file mode 100644 index 000000000..fd1a16d82 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/VoIPController.cpp @@ -0,0 +1,2486 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef _WIN32 +#include <unistd.h> +#include <sys/time.h> +#endif +#include <errno.h> +#include <string.h> +#include <wchar.h> +#include "VoIPController.h" +#include "logging.h" +#include "threading.h" +#include "BufferOutputStream.h" +#include "BufferInputStream.h" +#include "OpusEncoder.h" +#include "OpusDecoder.h" +#include "VoIPServerConfig.h" +#include <assert.h> +#include <time.h> +#include <math.h> +#include <exception> +#include <stdexcept> +#include <algorithm> + + +#define PKT_INIT 1 +#define PKT_INIT_ACK 2 +#define PKT_STREAM_STATE 3 +#define PKT_STREAM_DATA 4 +#define PKT_UPDATE_STREAMS 5 +#define PKT_PING 6 +#define PKT_PONG 7 +#define PKT_STREAM_DATA_X2 8 +#define PKT_STREAM_DATA_X3 9 +#define PKT_LAN_ENDPOINT 10 +#define PKT_NETWORK_CHANGED 11 +#define PKT_SWITCH_PREF_RELAY 12 +#define PKT_SWITCH_TO_P2P 13 +#define PKT_NOP 14 + +#define IS_MOBILE_NETWORK(x) (x==NET_TYPE_GPRS || x==NET_TYPE_EDGE || x==NET_TYPE_3G || x==NET_TYPE_HSPA || x==NET_TYPE_LTE || x==NET_TYPE_OTHER_MOBILE) + +#define PROTOCOL_NAME 0x50567247 // "GrVP" in little endian (reversed here) +#define PROTOCOL_VERSION 3 +#define MIN_PROTOCOL_VERSION 3 + +#define STREAM_DATA_FLAG_LEN16 0x40 +#define STREAM_DATA_FLAG_HAS_MORE_FLAGS 0x80 + +#define STREAM_TYPE_AUDIO 1 +#define STREAM_TYPE_VIDEO 2 + +#define CODEC_OPUS 1 + +/*flags:# voice_call_id:flags.2?int128 in_seq_no:flags.4?int out_seq_no:flags.4?int + * recent_received_mask:flags.5?int proto:flags.3?int extra:flags.1?string raw_data:flags.0?string*/ +#define PFLAG_HAS_DATA 1 +#define PFLAG_HAS_EXTRA 2 +#define PFLAG_HAS_CALL_ID 4 +#define PFLAG_HAS_PROTO 8 +#define PFLAG_HAS_SEQ 16 +#define PFLAG_HAS_RECENT_RECV 32 + +#define INIT_FLAG_DATA_SAVING_ENABLED 1 + +#define TLID_DECRYPTED_AUDIO_BLOCK 0xDBF948C1 +#define TLID_SIMPLE_AUDIO_BLOCK 0xCC0D0E76 +#define TLID_UDP_REFLECTOR_PEER_INFO 0x27D9371C +#define TLID_UDP_REFLECTOR_PEER_INFO_IPV6 0x83fc73b1 +#define TLID_UDP_REFLECTOR_SELF_INFO 0xc01572c7 +#define PAD4(x) (4-(x+(x<=253 ? 1 : 0))%4) + +#define MAX(a,b) (a>b ? 
a : b) +#define MIN(a,b) (a<b ? a : b) + +#ifdef __APPLE__ +double VoIPController::machTimebase=0; +uint64_t VoIPController::machTimestart=0; +#endif + +#ifdef _WIN32 +int64_t VoIPController::win32TimeScale = 0; +bool VoIPController::didInitWin32TimeScale = false; +#endif + +#define SHA1_LENGTH 20 +#define SHA256_LENGTH 32 + +#ifndef TGVOIP_USE_CUSTOM_CRYPTO +#include <openssl/sha.h> +#include <openssl/aes.h> +#include <openssl/rand.h> + +void tgvoip_openssl_aes_ige_encrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){ + AES_KEY akey; + AES_set_encrypt_key(key, 32*8, &akey); + AES_ige_encrypt(in, out, length, &akey, iv, AES_ENCRYPT); +} + +void tgvoip_openssl_aes_ige_decrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){ + AES_KEY akey; + AES_set_decrypt_key(key, 32*8, &akey); + AES_ige_encrypt(in, out, length, &akey, iv, AES_DECRYPT); +} + +void tgvoip_openssl_rand_bytes(uint8_t* buffer, size_t len){ + RAND_bytes(buffer, len); +} + +void tgvoip_openssl_sha1(uint8_t* msg, size_t len, uint8_t* output){ + SHA1(msg, len, output); +} + +void tgvoip_openssl_sha256(uint8_t* msg, size_t len, uint8_t* output){ + SHA256(msg, len, output); +} + +void tgvoip_openssl_aes_ctr_encrypt(uint8_t* inout, size_t length, uint8_t* key, uint8_t* iv, uint8_t* ecount, uint32_t* num){ + AES_KEY akey; + AES_set_encrypt_key(key, 32*8, &akey); + AES_ctr128_encrypt(inout, inout, length, &akey, iv, ecount, num); +} + +voip_crypto_functions_t VoIPController::crypto={ + tgvoip_openssl_rand_bytes, + tgvoip_openssl_sha1, + tgvoip_openssl_sha256, + tgvoip_openssl_aes_ige_encrypt, + tgvoip_openssl_aes_ige_decrypt, + tgvoip_openssl_aes_ctr_encrypt + +}; +#else +voip_crypto_functions_t VoIPController::crypto; // set it yourself upon initialization +#endif + +#ifdef _MSC_VER +#define MSC_STACK_FALLBACK(a, b) (b) +#else +#define MSC_STACK_FALLBACK(a, b) (a) +#endif + +extern FILE* tgvoipLogFile; + +VoIPController::VoIPController() : activeNetItfName(""), + currentAudioInput("default"), + currentAudioOutput("default"), + 
proxyAddress(""), + proxyUsername(""), + proxyPassword(""), + outgoingPacketsBufferPool(1024, 20){ + seq=1; + lastRemoteSeq=0; + state=STATE_WAIT_INIT; + audioInput=NULL; + audioOutput=NULL; + decoder=NULL; + encoder=NULL; + jitterBuffer=NULL; + audioOutStarted=false; + audioTimestampIn=0; + audioTimestampOut=0; + stopping=false; + int i; + sendQueue=new BlockingQueue<PendingOutgoingPacket>(21); + init_mutex(sendBufferMutex); + memset(remoteAcks, 0, sizeof(double)*32); + memset(sentPacketTimes, 0, sizeof(double)*32); + memset(recvPacketTimes, 0, sizeof(double)*32); + memset(rttHistory, 0, sizeof(double)*32); + memset(sendLossCountHistory, 0, sizeof(uint32_t)*32); + memset(&stats, 0, sizeof(voip_stats_t)); + lastRemoteAckSeq=0; + lastSentSeq=0; + recvLossCount=0; + packetsRecieved=0; + waitingForAcks=false; + networkType=NET_TYPE_UNKNOWN; + stateCallback=NULL; + echoCanceller=NULL; + dontSendPackets=0; + micMuted=false; + currentEndpoint=NULL; + waitingForRelayPeerInfo=false; + allowP2p=true; + dataSavingMode=false; + publicEndpointsReqTime=0; + init_mutex(queuedPacketsMutex); + init_mutex(endpointsMutex); + connectionInitTime=0; + lastRecvPacketTime=0; + dataSavingRequestedByPeer=false; + peerVersion=0; + conctl=new CongestionControl(); + prevSendLossCount=0; + receivedInit=false; + receivedInitAck=false; + peerPreferredRelay=NULL; + statsDump=NULL; + useTCP=false; + useUDP=true; + didAddTcpRelays=false; + setEstablishedAt=0; + udpPingCount=0; + lastUdpPingTime=0; + openingTcpSocket=NULL; + + proxyProtocol=PROXY_NONE; + proxyPort=0; + resolvedProxyAddress=NULL; + + signalBarCount=0; + signalBarCountCallback=NULL; + + selectCanceller=SocketSelectCanceller::Create(); + udpSocket=NetworkSocket::Create(PROTO_UDP); + realUdpSocket=udpSocket; + udpConnectivityState=UDP_UNKNOWN; + + maxAudioBitrate=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_max_bitrate", 20000); + maxAudioBitrateGPRS=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_max_bitrate_gprs", 8000); + 
maxAudioBitrateEDGE=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_max_bitrate_edge", 16000); + maxAudioBitrateSaving=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_max_bitrate_saving", 8000); + initAudioBitrate=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_init_bitrate", 16000); + initAudioBitrateGPRS=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_init_bitrate_gprs", 8000); + initAudioBitrateEDGE=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_init_bitrate_edge", 8000); + initAudioBitrateSaving=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_init_bitrate_saving", 8000); + audioBitrateStepIncr=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_bitrate_step_incr", 1000); + audioBitrateStepDecr=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_bitrate_step_decr", 1000); + minAudioBitrate=(uint32_t) ServerConfig::GetSharedInstance()->GetInt("audio_min_bitrate", 8000); + relaySwitchThreshold=ServerConfig::GetSharedInstance()->GetDouble("relay_switch_threshold", 0.8); + p2pToRelaySwitchThreshold=ServerConfig::GetSharedInstance()->GetDouble("p2p_to_relay_switch_threshold", 0.6); + relayToP2pSwitchThreshold=ServerConfig::GetSharedInstance()->GetDouble("relay_to_p2p_switch_threshold", 0.8); + reconnectingTimeout=ServerConfig::GetSharedInstance()->GetDouble("reconnecting_state_timeout", 2.0); + +#ifdef __APPLE__ + machTimestart=0; +#ifdef TGVOIP_USE_AUDIO_SESSION + needNotifyAcquiredAudioSession=false; +#endif +#endif + + voip_stream_t* stm=(voip_stream_t *) malloc(sizeof(voip_stream_t)); + stm->id=1; + stm->type=STREAM_TYPE_AUDIO; + stm->codec=CODEC_OPUS; + stm->enabled=1; + stm->frameDuration=60; + outgoingStreams.push_back(stm); +} + +VoIPController::~VoIPController(){ + LOGD("Entered VoIPController::~VoIPController"); + if(audioInput) + audioInput->Stop(); + if(audioOutput) + audioOutput->Stop(); + stopping=true; + runReceiver=false; + LOGD("before shutdown socket"); + 
if(udpSocket) + udpSocket->Close(); + if(realUdpSocket!=udpSocket) + realUdpSocket->Close(); + selectCanceller->CancelSelect(); + sendQueue->Put(PendingOutgoingPacket{0}); + if(openingTcpSocket) + openingTcpSocket->Close(); + LOGD("before join sendThread"); + join_thread(sendThread); + LOGD("before join recvThread"); + join_thread(recvThread); + LOGD("before join tickThread"); + join_thread(tickThread); + free_mutex(sendBufferMutex); + LOGD("before close socket"); + if(udpSocket) + delete udpSocket; + if(udpSocket!=realUdpSocket) + delete realUdpSocket; + LOGD("before delete jitter buffer"); + if(jitterBuffer){ + delete jitterBuffer; + } + LOGD("before stop decoder"); + if(decoder){ + decoder->Stop(); + } + LOGD("before delete audio input"); + if(audioInput){ + delete audioInput; + } + LOGD("before delete encoder"); + if(encoder){ + encoder->Stop(); + delete encoder; + } + LOGD("before delete audio output"); + if(audioOutput){ + delete audioOutput; + } + LOGD("before delete decoder"); + if(decoder){ + delete decoder; + } + LOGD("before delete echo canceller"); + if(echoCanceller){ + echoCanceller->Stop(); + delete echoCanceller; + } + delete sendQueue; + unsigned int i; + for(i=0;i<queuedPackets.size();i++){ + if(queuedPackets[i]->data) + free(queuedPackets[i]->data); + free(queuedPackets[i]); + } + delete conctl; + for(std::vector<Endpoint*>::iterator itr=endpoints.begin();itr!=endpoints.end();++itr){ + if((*itr)->socket){ + (*itr)->socket->Close(); + delete (*itr)->socket; + } + delete *itr; + } + if(tgvoipLogFile){ + FILE* log=tgvoipLogFile; + tgvoipLogFile=NULL; + fclose(log); + } + if(statsDump) + fclose(statsDump); + if(resolvedProxyAddress) + delete resolvedProxyAddress; + delete selectCanceller; + LOGD("Left VoIPController::~VoIPController"); +} + +void VoIPController::SetRemoteEndpoints(std::vector<Endpoint> endpoints, bool allowP2p){ + LOGW("Set remote endpoints"); + preferredRelay=NULL; + size_t i; + lock_mutex(endpointsMutex); + this->endpoints.clear(); + didAddTcpRelays=false; + useTCP=true; + for(std::vector<Endpoint>::iterator 
itrtr=endpoints.begin();itrtr!=endpoints.end();++itrtr){ + this->endpoints.push_back(new Endpoint(*itrtr)); + if(itrtr->type==EP_TYPE_TCP_RELAY) + didAddTcpRelays=true; + if(itrtr->type==EP_TYPE_UDP_RELAY) + useTCP=false; + LOGV("Adding endpoint: %s:%d, %s", itrtr->address.ToString().c_str(), itrtr->port, itrtr->type==EP_TYPE_UDP_RELAY ? "UDP" : "TCP"); + } + unlock_mutex(endpointsMutex); + currentEndpoint=this->endpoints[0]; + preferredRelay=currentEndpoint; + this->allowP2p=allowP2p; +} + +void* VoIPController::StartRecvThread(void* controller){ + ((VoIPController*)controller)->RunRecvThread(); + return NULL; +} + +void* VoIPController::StartSendThread(void* controller){ + ((VoIPController*)controller)->RunSendThread(); + return NULL; +} + + +void* VoIPController::StartTickThread(void* controller){ + ((VoIPController*) controller)->RunTickThread(); + return NULL; +} + + +void VoIPController::Start(){ + int res; + LOGW("Starting voip controller"); + int32_t cfgFrameSize=60; //ServerConfig::GetSharedInstance()->GetInt("audio_frame_size", 60); + if(cfgFrameSize==20 || cfgFrameSize==40 || cfgFrameSize==60) + outgoingStreams[0]->frameDuration=(uint16_t) cfgFrameSize; + udpSocket->Open(); + if(udpSocket->IsFailed()){ + SetState(STATE_FAILED); + return; + } + + //SendPacket(NULL, 0, currentEndpoint); + + runReceiver=true; + start_thread(recvThread, StartRecvThread, this); + set_thread_priority(recvThread, get_thread_max_priority()); + set_thread_name(recvThread, "voip-recv"); + start_thread(sendThread, StartSendThread, this); + set_thread_priority(sendThread, get_thread_max_priority()); + set_thread_name(sendThread, "voip-send"); + start_thread(tickThread, StartTickThread, this); + set_thread_priority(tickThread, get_thread_max_priority()); + set_thread_name(tickThread, "voip-tick"); +} + +size_t VoIPController::AudioInputCallback(unsigned char* data, size_t length, void* param){ + ((VoIPController*)param)->HandleAudioInput(data, length); + return 0; +} + +void 
VoIPController::HandleAudioInput(unsigned char *data, size_t len){ + if(stopping) + return; + if(waitingForAcks || dontSendPackets>0){ + LOGV("waiting for RLC, dropping outgoing audio packet"); + return; + } + + unsigned char* buf=outgoingPacketsBufferPool.Get(); + if(buf){ + BufferOutputStream pkt(buf, outgoingPacketsBufferPool.GetSingleBufferSize()); + + unsigned char flags=(unsigned char) (len>255 ? STREAM_DATA_FLAG_LEN16 : 0); + pkt.WriteByte((unsigned char) (1 | flags)); // streamID + flags + if(len>255) + pkt.WriteInt16((int16_t) len); + else + pkt.WriteByte((unsigned char) len); + pkt.WriteInt32(audioTimestampOut); + pkt.WriteBytes(data, len); + + PendingOutgoingPacket p{ + /*.seq=*/GenerateOutSeq(), + /*.type=*/PKT_STREAM_DATA, + /*.len=*/pkt.GetLength(), + /*.data=*/buf, + /*.endpoint=*/NULL, + }; + sendQueue->Put(p); + } + + audioTimestampOut+=outgoingStreams[0]->frameDuration; +} + +void VoIPController::Connect(){ + assert(state!=STATE_WAIT_INIT_ACK); + if(proxyProtocol==PROXY_SOCKS5){ + resolvedProxyAddress=NetworkSocket::ResolveDomainName(proxyAddress); + if(!resolvedProxyAddress){ + LOGW("Error resolving proxy address %s", proxyAddress.c_str()); + SetState(STATE_FAILED); + return; + } + InitUDPProxy(); + } + connectionInitTime=GetCurrentTime(); + SendInit(); +} + + +void VoIPController::SetEncryptionKey(char *key, bool isOutgoing){ + memcpy(encryptionKey, key, 256); + uint8_t sha1[SHA1_LENGTH]; + crypto.sha1((uint8_t*) encryptionKey, 256, sha1); + memcpy(keyFingerprint, sha1+(SHA1_LENGTH-8), 8); + uint8_t sha256[SHA256_LENGTH]; + crypto.sha256((uint8_t*) encryptionKey, 256, sha256); + memcpy(callID, sha256+(SHA256_LENGTH-16), 16); + this->isOutgoing=isOutgoing; +} + +uint32_t VoIPController::GenerateOutSeq(){ + return seq++; +} + +void VoIPController::WritePacketHeader(uint32_t pseq, BufferOutputStream *s, unsigned char type, uint32_t length){ + uint32_t acks=0; + int i; + for(i=0;i<32;i++){ + if(recvPacketTimes[i]>0) + acks|=1; + if(i<31) + acks<<=1; 
+ } + + if(state==STATE_WAIT_INIT || state==STATE_WAIT_INIT_ACK){ + s->WriteInt32(TLID_DECRYPTED_AUDIO_BLOCK); + int64_t randomID; + crypto.rand_bytes((uint8_t *) &randomID, 8); + s->WriteInt64(randomID); + unsigned char randBytes[7]; + crypto.rand_bytes(randBytes, 7); + s->WriteByte(7); + s->WriteBytes(randBytes, 7); + uint32_t pflags=PFLAG_HAS_RECENT_RECV | PFLAG_HAS_SEQ; + if(length>0) + pflags|=PFLAG_HAS_DATA; + if(state==STATE_WAIT_INIT || state==STATE_WAIT_INIT_ACK){ + pflags|=PFLAG_HAS_CALL_ID | PFLAG_HAS_PROTO; + } + pflags|=((uint32_t) type) << 24; + s->WriteInt32(pflags); + + if(pflags & PFLAG_HAS_CALL_ID){ + s->WriteBytes(callID, 16); + } + s->WriteInt32(lastRemoteSeq); + s->WriteInt32(pseq); + s->WriteInt32(acks); + if(pflags & PFLAG_HAS_PROTO){ + s->WriteInt32(PROTOCOL_NAME); + } + if(length>0){ + if(length<=253){ + s->WriteByte((unsigned char) length); + }else{ + s->WriteByte(254); + s->WriteByte((unsigned char) (length & 0xFF)); + s->WriteByte((unsigned char) ((length >> 8) & 0xFF)); + s->WriteByte((unsigned char) ((length >> 16) & 0xFF)); + } + } + }else{ + s->WriteInt32(TLID_SIMPLE_AUDIO_BLOCK); + int64_t randomID; + crypto.rand_bytes((uint8_t *) &randomID, 8); + s->WriteInt64(randomID); + unsigned char randBytes[7]; + crypto.rand_bytes(randBytes, 7); + s->WriteByte(7); + s->WriteBytes(randBytes, 7); + uint32_t lenWithHeader=length+13; + if(lenWithHeader>0){ + if(lenWithHeader<=253){ + s->WriteByte((unsigned char) lenWithHeader); + }else{ + s->WriteByte(254); + s->WriteByte((unsigned char) (lenWithHeader & 0xFF)); + s->WriteByte((unsigned char) ((lenWithHeader >> 8) & 0xFF)); + s->WriteByte((unsigned char) ((lenWithHeader >> 16) & 0xFF)); + } + } + s->WriteByte(type); + s->WriteInt32(lastRemoteSeq); + s->WriteInt32(pseq); + s->WriteInt32(acks); + } + + if(type==PKT_STREAM_DATA || type==PKT_STREAM_DATA_X2 || type==PKT_STREAM_DATA_X3) + conctl->PacketSent(pseq, length); + + memmove(&sentPacketTimes[1], sentPacketTimes, 31*sizeof(double)); + 
sentPacketTimes[0]=GetCurrentTime(); + lastSentSeq=pseq; + //LOGI("packet header size %d", s->GetLength()); +} + + +void VoIPController::UpdateAudioBitrate(){ + if(encoder){ + if(dataSavingMode || dataSavingRequestedByPeer){ + maxBitrate=maxAudioBitrateSaving; + encoder->SetBitrate(initAudioBitrateSaving); + }else if(networkType==NET_TYPE_GPRS){ + maxBitrate=maxAudioBitrateGPRS; + encoder->SetBitrate(initAudioBitrateGPRS); + }else if(networkType==NET_TYPE_EDGE){ + maxBitrate=maxAudioBitrateEDGE; + encoder->SetBitrate(initAudioBitrateEDGE); + }else{ + maxBitrate=maxAudioBitrate; + encoder->SetBitrate(initAudioBitrate); + } + } +} + + +void VoIPController::SendInit(){ + lock_mutex(endpointsMutex); + uint32_t initSeq=GenerateOutSeq(); + for(std::vector::iterator itr=endpoints.begin();itr!=endpoints.end();++itr){ + if((*itr)->type==EP_TYPE_TCP_RELAY && !useTCP) + continue; + unsigned char* pkt=outgoingPacketsBufferPool.Get(); + if(!pkt){ + LOGW("can't send init, queue overflow"); + continue; + } + BufferOutputStream out(pkt, outgoingPacketsBufferPool.GetSingleBufferSize()); + //WritePacketHeader(out, PKT_INIT, 15); + out.WriteInt32(PROTOCOL_VERSION); + out.WriteInt32(MIN_PROTOCOL_VERSION); + uint32_t flags=0; + if(dataSavingMode) + flags|=INIT_FLAG_DATA_SAVING_ENABLED; + out.WriteInt32(flags); + out.WriteByte(1); // audio codecs count + out.WriteByte(CODEC_OPUS); + out.WriteByte(0); // video codecs count + sendQueue->Put(PendingOutgoingPacket{ + /*.seq=*/initSeq, + /*.type=*/PKT_INIT, + /*.len=*/out.GetLength(), + /*.data=*/pkt, + /*.endpoint=*/*itr + }); + } + unlock_mutex(endpointsMutex); + SetState(STATE_WAIT_INIT_ACK); +} + +void VoIPController::InitUDPProxy(){ + if(realUdpSocket!=udpSocket){ + udpSocket->Close(); + delete udpSocket; + udpSocket=realUdpSocket; + } + NetworkSocket* tcp=NetworkSocket::Create(PROTO_TCP); + tcp->Connect(resolvedProxyAddress, proxyPort); + if(tcp->IsFailed()){ + SetState(STATE_FAILED); + tcp->Close(); + delete tcp; + return; + } + 
NetworkSocketSOCKS5Proxy* udpProxy=new NetworkSocketSOCKS5Proxy(tcp, udpSocket, proxyUsername, proxyPassword); + udpProxy->InitConnection(); + udpProxy->Open(); + if(udpProxy->IsFailed()){ + udpProxy->Close(); + delete udpProxy; + useTCP=true; + useUDP=false; + udpConnectivityState=UDP_NOT_AVAILABLE; + }else{ + udpSocket=udpProxy; + } +} + +void VoIPController::RunRecvThread(){ + LOGI("Receive thread starting"); + unsigned char buffer[1024]; + NetworkPacket packet; + while(runReceiver){ + packet.data=buffer; + packet.length=1024; + + std::vector readSockets; + std::vector errorSockets; + readSockets.push_back(realUdpSocket); + errorSockets.push_back(realUdpSocket); + + //if(useTCP){ + for(std::vector::iterator itr=endpoints.begin();itr!=endpoints.end();++itr){ + if((*itr)->type==EP_TYPE_TCP_RELAY){ + if((*itr)->socket){ + readSockets.push_back((*itr)->socket); + errorSockets.push_back((*itr)->socket); + } + } + } + //} + + bool selRes=NetworkSocket::Select(readSockets, errorSockets, selectCanceller); + if(!selRes){ + LOGV("Select canceled"); + continue; + } + if(!runReceiver) + return; + + if(!errorSockets.empty()){ + if(std::find(errorSockets.begin(), errorSockets.end(), realUdpSocket)!=errorSockets.end()){ + LOGW("UDP socket failed"); + SetState(STATE_FAILED); + return; + } + } + + NetworkSocket* socket=NULL; + + if(std::find(readSockets.begin(), readSockets.end(), realUdpSocket)!=readSockets.end()){ + socket=udpSocket; + }else if(readSockets.size()>0){ + socket=readSockets[0]; + }else{ + LOGI("no sockets to read from"); + lock_mutex(endpointsMutex); + for(std::vector::iterator itr=errorSockets.begin();itr!=errorSockets.end();++itr){ + for(std::vector::iterator e=endpoints.begin();e!=endpoints.end();++e){ + if((*e)->socket && (*e)->socket==*itr){ + (*e)->socket->Close(); + delete (*e)->socket; + (*e)->socket=NULL; + LOGI("Closing failed TCP socket for %s:%u", (*e)->address.ToString().c_str(), (*e)->port); + break; + } + } + } + unlock_mutex(endpointsMutex); + 
continue; + } + + socket->Receive(&packet); + if(!packet.address){ + LOGE("Packet has null address. This shouldn't happen."); + continue; + } + size_t len=packet.length; + if(!len){ + LOGE("Packet has zero length."); + continue; + } + //LOGV("Received %d bytes from %s:%d at %.5lf", len, packet.address->ToString().c_str(), packet.port, GetCurrentTime()); + Endpoint* srcEndpoint=NULL; + + IPv4Address* src4=dynamic_cast(packet.address); + if(src4){ + lock_mutex(endpointsMutex); + for(std::vector::iterator itrtr=endpoints.begin();itrtr!=endpoints.end();++itrtr){ + if((*itrtr)->address==*src4 && (*itrtr)->port==packet.port){ + if(((*itrtr)->type!=EP_TYPE_TCP_RELAY && packet.protocol==PROTO_UDP) || ((*itrtr)->type==EP_TYPE_TCP_RELAY && packet.protocol==PROTO_TCP)){ + srcEndpoint=*itrtr; + break; + } + } + } + unlock_mutex(endpointsMutex); + } + + if(!srcEndpoint){ + LOGW("Received a packet from unknown source %s:%u", packet.address->ToString().c_str(), packet.port); + continue; + } + if(len<=0){ + //LOGW("error receiving: %d / %s", errno, strerror(errno)); + continue; + } + if(IS_MOBILE_NETWORK(networkType)) + stats.bytesRecvdMobile+=(uint64_t)len; + else + stats.bytesRecvdWifi+=(uint64_t)len; + BufferInputStream in(buffer, (size_t)len); + try{ + if(memcmp(buffer, srcEndpoint->type==EP_TYPE_UDP_RELAY || srcEndpoint->type==EP_TYPE_TCP_RELAY ? 
(void*)srcEndpoint->peerTag : (void*)callID, 16)!=0){ + LOGW("Received packet has wrong peerTag"); + + continue; + } + in.Seek(16); + if(in.Remaining()>=16 && (srcEndpoint->type==EP_TYPE_UDP_RELAY || srcEndpoint->type==EP_TYPE_TCP_RELAY) + && *reinterpret_cast(buffer+16)==0xFFFFFFFFFFFFFFFFLL && *reinterpret_cast(buffer+24)==0xFFFFFFFF){ + // relay special request response + in.Seek(16+12); + uint32_t tlid=(uint32_t) in.ReadInt32(); + + if(tlid==TLID_UDP_REFLECTOR_SELF_INFO){ + if(srcEndpoint->type==EP_TYPE_UDP_RELAY && udpConnectivityState==UDP_PING_SENT && in.Remaining()>=32){ + int32_t date=in.ReadInt32(); + int64_t queryID=in.ReadInt64(); + unsigned char myIP[16]; + in.ReadBytes(myIP, 16); + int32_t myPort=in.ReadInt32(); + udpConnectivityState=UDP_AVAILABIE; + //LOGV("Received UDP ping reply from %s:%d: date=%d, queryID=%lld, my IP=%s, my port=%d", srcEndpoint->address.ToString().c_str(), srcEndpoint->port, date, queryID, IPv6Address(myIP).ToString().c_str(), myPort); + } + }else if(tlid==TLID_UDP_REFLECTOR_PEER_INFO){ + if(waitingForRelayPeerInfo && in.Remaining()>=16){ + lock_mutex(endpointsMutex); + uint32_t myAddr=(uint32_t) in.ReadInt32(); + uint32_t myPort=(uint32_t) in.ReadInt32(); + uint32_t peerAddr=(uint32_t) in.ReadInt32(); + uint32_t peerPort=(uint32_t) in.ReadInt32(); + for(std::vector::iterator itrtr=endpoints.begin(); itrtr!=endpoints.end(); ++itrtr){ + Endpoint *ep=*itrtr; + if(ep->type==EP_TYPE_UDP_P2P_INET){ + if(currentEndpoint==ep) + currentEndpoint=preferredRelay; + delete ep; + endpoints.erase(itrtr); + break; + } + } + for(std::vector::iterator itrtr=endpoints.begin(); itrtr!=endpoints.end(); ++itrtr){ + Endpoint *ep=*itrtr; + if(ep->type==EP_TYPE_UDP_P2P_LAN){ + if(currentEndpoint==ep) + currentEndpoint=preferredRelay; + delete ep; + endpoints.erase(itrtr); + break; + } + } + IPv4Address _peerAddr(peerAddr); + IPv6Address emptyV6("::0"); + unsigned char peerTag[16]; + endpoints.push_back(new Endpoint(0, (uint16_t) peerPort, _peerAddr, 
emptyV6, EP_TYPE_UDP_P2P_INET, peerTag)); + LOGW("Received reflector peer info, my=%08X:%u, peer=%08X:%u", myAddr, myPort, peerAddr, peerPort); + if(myAddr==peerAddr){ + LOGW("Detected LAN"); + IPv4Address lanAddr(0); + udpSocket->GetLocalInterfaceInfo(&lanAddr, NULL); + + BufferOutputStream pkt(8); + pkt.WriteInt32(lanAddr.GetAddress()); + pkt.WriteInt32(udpSocket->GetLocalPort()); + SendPacketReliably(PKT_LAN_ENDPOINT, pkt.GetBuffer(), pkt.GetLength(), 0.5, 10); + } + unlock_mutex(endpointsMutex); + waitingForRelayPeerInfo=false; + } + }else{ + LOGV("Received relay response with unknown tl id: 0x%08X", tlid); + } + continue; + } + if(in.Remaining()<40){ + LOGV("Received packet is too small"); + continue; + } + + unsigned char fingerprint[8], msgHash[16]; + in.ReadBytes(fingerprint, 8); + in.ReadBytes(msgHash, 16); + if(memcmp(fingerprint, keyFingerprint, 8)!=0){ + LOGW("Received packet has wrong key fingerprint"); + + continue; + } + unsigned char key[32], iv[32]; + KDF(msgHash, isOutgoing ? 
8 : 0, key, iv); + unsigned char aesOut[MSC_STACK_FALLBACK(in.Remaining(), 1024)]; + crypto.aes_ige_decrypt((unsigned char *) buffer+in.GetOffset(), aesOut, in.Remaining(), key, iv); + memcpy(buffer+in.GetOffset(), aesOut, in.Remaining()); + unsigned char sha[SHA1_LENGTH]; + uint32_t _len=(uint32_t) in.ReadInt32(); + if(_len>in.Remaining()) + _len=in.Remaining(); + crypto.sha1((uint8_t *) (buffer+in.GetOffset()-4), (size_t) (_len+4), sha); + if(memcmp(msgHash, sha+(SHA1_LENGTH-16), 16)!=0){ + LOGW("Received packet has wrong hash after decryption"); + + continue; + } + + lastRecvPacketTime=GetCurrentTime(); + + + /*decryptedAudioBlock random_id:long random_bytes:string flags:# voice_call_id:flags.2?int128 in_seq_no:flags.4?int out_seq_no:flags.4?int + * recent_received_mask:flags.5?int proto:flags.3?int extra:flags.1?string raw_data:flags.0?string = DecryptedAudioBlock +simpleAudioBlock random_id:long random_bytes:string raw_data:string = DecryptedAudioBlock; +*/ + uint32_t ackId, pseq, acks; + unsigned char type; + uint32_t tlid=(uint32_t) in.ReadInt32(); + uint32_t packetInnerLen; + if(tlid==TLID_DECRYPTED_AUDIO_BLOCK){ + in.ReadInt64(); // random id + uint32_t randLen=(uint32_t) in.ReadTlLength(); + in.Seek(in.GetOffset()+randLen+pad4(randLen)); + uint32_t flags=(uint32_t) in.ReadInt32(); + type=(unsigned char) ((flags >> 24) & 0xFF); + if(!(flags & PFLAG_HAS_SEQ && flags & PFLAG_HAS_RECENT_RECV)){ + LOGW("Received packet doesn't have PFLAG_HAS_SEQ, PFLAG_HAS_RECENT_RECV, or both"); + + continue; + } + if(flags & PFLAG_HAS_CALL_ID){ + unsigned char pktCallID[16]; + in.ReadBytes(pktCallID, 16); + if(memcmp(pktCallID, callID, 16)!=0){ + LOGW("Received packet has wrong call id"); + + lastError=TGVOIP_ERROR_UNKNOWN; + SetState(STATE_FAILED); + return; + } + } + ackId=(uint32_t) in.ReadInt32(); + pseq=(uint32_t) in.ReadInt32(); + acks=(uint32_t) in.ReadInt32(); + if(flags & PFLAG_HAS_PROTO){ + uint32_t proto=(uint32_t) in.ReadInt32(); + if(proto!=PROTOCOL_NAME){ + 
LOGW("Received packet uses wrong protocol"); + + lastError=TGVOIP_ERROR_INCOMPATIBLE; + SetState(STATE_FAILED); + return; + } + } + if(flags & PFLAG_HAS_EXTRA){ + uint32_t extraLen=(uint32_t) in.ReadTlLength(); + in.Seek(in.GetOffset()+extraLen+pad4(extraLen)); + } + if(flags & PFLAG_HAS_DATA){ + packetInnerLen=in.ReadTlLength(); + } + }else if(tlid==TLID_SIMPLE_AUDIO_BLOCK){ + in.ReadInt64(); // random id + uint32_t randLen=(uint32_t) in.ReadTlLength(); + in.Seek(in.GetOffset()+randLen+pad4(randLen)); + packetInnerLen=in.ReadTlLength(); + type=in.ReadByte(); + ackId=(uint32_t) in.ReadInt32(); + pseq=(uint32_t) in.ReadInt32(); + acks=(uint32_t) in.ReadInt32(); + }else{ + LOGW("Received a packet of unknown type %08X", tlid); + + continue; + } + packetsRecieved++; + if(seqgt(pseq, lastRemoteSeq)){ + uint32_t diff=pseq-lastRemoteSeq; + if(diff>31){ + memset(recvPacketTimes, 0, 32*sizeof(double)); + }else{ + memmove(&recvPacketTimes[diff], recvPacketTimes, (32-diff)*sizeof(double)); + if(diff>1){ + memset(recvPacketTimes, 0, diff*sizeof(double)); + } + recvPacketTimes[0]=GetCurrentTime(); + } + lastRemoteSeq=pseq; + }else if(!seqgt(pseq, lastRemoteSeq) && lastRemoteSeq-pseq<32){ + if(recvPacketTimes[lastRemoteSeq-pseq]!=0){ + LOGW("Received duplicated packet for seq %u", pseq); + + continue; + } + recvPacketTimes[lastRemoteSeq-pseq]=GetCurrentTime(); + }else if(lastRemoteSeq-pseq>=32){ + LOGW("Packet %u is out of order and too late", pseq); + + continue; + } + if(seqgt(ackId, lastRemoteAckSeq)){ + uint32_t diff=ackId-lastRemoteAckSeq; + if(diff>31){ + memset(remoteAcks, 0, 32*sizeof(double)); + }else{ + memmove(&remoteAcks[diff], remoteAcks, (32-diff)*sizeof(double)); + if(diff>1){ + memset(remoteAcks, 0, diff*sizeof(double)); + } + remoteAcks[0]=GetCurrentTime(); + } + if(waitingForAcks && lastRemoteAckSeq>=firstSentPing){ + memset(rttHistory, 0, 32*sizeof(double)); + waitingForAcks=false; + dontSendPackets=10; + LOGI("resuming sending"); + } + lastRemoteAckSeq=ackId; 
+ conctl->PacketAcknowledged(ackId); + int i; + for(i=0;i<31;i++){ + if(remoteAcks[i+1]==0){ + if((acks >> (31-i)) & 1){ + remoteAcks[i+1]=GetCurrentTime(); + conctl->PacketAcknowledged(ackId-(i+1)); + } + } + } + lock_mutex(queuedPacketsMutex); + for(i=0;iseqs[j]); + if(qp->seqs[j]==0) + break; + int remoteAcksIndex=lastRemoteAckSeq-qp->seqs[j]; + LOGV("remote acks index %u, value %f", remoteAcksIndex, remoteAcksIndex>=0 && remoteAcksIndex<32 ? remoteAcks[remoteAcksIndex] : -1); + if(seqgt(lastRemoteAckSeq, qp->seqs[j]) && remoteAcksIndex>=0 && remoteAcksIndex<32 && remoteAcks[remoteAcksIndex]>0){ + LOGD("did ack seq %u, removing", qp->seqs[j]); + didAck=true; + break; + } + } + if(didAck){ + if(qp->data) + free(qp->data); + free(qp); + queuedPackets.erase(queuedPackets.begin()+i); + i--; + continue; + } + } + unlock_mutex(queuedPacketsMutex); + } + + if(srcEndpoint!=currentEndpoint && (srcEndpoint->type==EP_TYPE_UDP_RELAY || srcEndpoint->type==EP_TYPE_TCP_RELAY) && ((currentEndpoint->type!=EP_TYPE_UDP_RELAY && currentEndpoint->type!=EP_TYPE_TCP_RELAY) || currentEndpoint->averageRTT==0)){ + if(seqgt(lastSentSeq-32, lastRemoteAckSeq)){ + currentEndpoint=srcEndpoint; + LOGI("Peer network address probably changed, switching to relay"); + if(allowP2p) + SendPublicEndpointsRequest(); + } + } + //LOGV("acks: %u -> %.2lf, %.2lf, %.2lf, %.2lf, %.2lf, %.2lf, %.2lf, %.2lf", lastRemoteAckSeq, remoteAcks[0], remoteAcks[1], remoteAcks[2], remoteAcks[3], remoteAcks[4], remoteAcks[5], remoteAcks[6], remoteAcks[7]); + //LOGD("recv: %u -> %.2lf, %.2lf, %.2lf, %.2lf, %.2lf, %.2lf, %.2lf, %.2lf", lastRemoteSeq, recvPacketTimes[0], recvPacketTimes[1], recvPacketTimes[2], recvPacketTimes[3], recvPacketTimes[4], recvPacketTimes[5], recvPacketTimes[6], recvPacketTimes[7]); + //LOGI("RTT = %.3lf", GetAverageRTT()); + //LOGV("Packet %u type is %d", pseq, type); + if(type==PKT_INIT){ + LOGD("Received init"); + if(!receivedInit){ + receivedInit=true; + currentEndpoint=srcEndpoint; + 
if(srcEndpoint->type==EP_TYPE_UDP_RELAY || (useTCP && srcEndpoint->type==EP_TYPE_TCP_RELAY)) + preferredRelay=srcEndpoint; + LogDebugInfo(); + } + peerVersion=(uint32_t) in.ReadInt32(); + LOGI("Peer version is %d", peerVersion); + uint32_t minVer=(uint32_t) in.ReadInt32(); + if(minVer>PROTOCOL_VERSION || peerVersion=2 ? 10 : 2)+(peerVersion>=2 ? 6 : 4)*outgoingStreams.size()); + + out.WriteInt32(PROTOCOL_VERSION); + out.WriteInt32(MIN_PROTOCOL_VERSION); + + out.WriteByte((unsigned char) outgoingStreams.size()); + for(i=0; iid); + out.WriteByte(outgoingStreams[i]->type); + out.WriteByte(outgoingStreams[i]->codec); + out.WriteInt16(outgoingStreams[i]->frameDuration); + out.WriteByte((unsigned char) (outgoingStreams[i]->enabled ? 1 : 0)); + } + sendQueue->Put(PendingOutgoingPacket{ + /*.seq=*/GenerateOutSeq(), + /*.type=*/PKT_INIT_ACK, + /*.len=*/out.GetLength(), + /*.data=*/buf, + /*.endpoint=*/NULL + }); + } + } + if(type==PKT_INIT_ACK){ + LOGD("Received init ack"); + + if(!receivedInitAck){ + receivedInitAck=true; + if(packetInnerLen>10){ + peerVersion=in.ReadInt32(); + uint32_t minVer=(uint32_t) in.ReadInt32(); + if(minVer>PROTOCOL_VERSION || peerVersionid=in.ReadByte(); + stm->type=in.ReadByte(); + stm->codec=in.ReadByte(); + if(peerVersion>=2) + stm->frameDuration=(uint16_t) in.ReadInt16(); + else + stm->frameDuration=20; + stm->enabled=in.ReadByte()==1; + incomingStreams.push_back(stm); + if(stm->type==STREAM_TYPE_AUDIO && !incomingAudioStream) + incomingAudioStream=stm; + } + if(!incomingAudioStream) + continue; + + voip_stream_t *outgoingAudioStream=outgoingStreams[0]; + + if(!audioInput){ + LOGI("before create audio io"); + audioInput=tgvoip::audio::AudioInput::Create(currentAudioInput); + audioInput->Configure(48000, 16, 1); + audioOutput=tgvoip::audio::AudioOutput::Create(currentAudioOutput); + audioOutput->Configure(48000, 16, 1); + echoCanceller=new EchoCanceller(config.enableAEC, config.enableNS, config.enableAGC); + encoder=new OpusEncoder(audioInput); 
+ encoder->SetCallback(AudioInputCallback, this); + encoder->SetOutputFrameDuration(outgoingAudioStream->frameDuration); + encoder->SetEchoCanceller(echoCanceller); + encoder->Start(); + if(!micMuted){ + audioInput->Start(); + if(!audioInput->IsInitialized()){ + LOGE("Erorr initializing audio capture"); + lastError=TGVOIP_ERROR_AUDIO_IO; + + SetState(STATE_FAILED); + return; + } + } + if(!audioOutput->IsInitialized()){ + LOGE("Erorr initializing audio playback"); + lastError=TGVOIP_ERROR_AUDIO_IO; + + SetState(STATE_FAILED); + return; + } + UpdateAudioBitrate(); + + jitterBuffer=new JitterBuffer(NULL, incomingAudioStream->frameDuration); + decoder=new OpusDecoder(audioOutput); + decoder->SetEchoCanceller(echoCanceller); + decoder->SetJitterBuffer(jitterBuffer); + decoder->SetFrameDuration(incomingAudioStream->frameDuration); + decoder->Start(); + if(incomingAudioStream->frameDuration>50) + jitterBuffer->SetMinPacketCount((uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_initial_delay_60", 3)); + else if(incomingAudioStream->frameDuration>30) + jitterBuffer->SetMinPacketCount((uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_initial_delay_40", 4)); + else + jitterBuffer->SetMinPacketCount((uint32_t) ServerConfig::GetSharedInstance()->GetInt("jitter_initial_delay_20", 6)); + //audioOutput->Start(); +#ifdef TGVOIP_USE_AUDIO_SESSION +#ifdef __APPLE__ + if(acquireAudioSession){ + acquireAudioSession(^(){ + LOGD("Audio session acquired"); + needNotifyAcquiredAudioSession=true; + }); + }else{ + audio::AudioUnitIO::AudioSessionAcquired(); + } +#endif +#endif + } + setEstablishedAt=GetCurrentTime()+ServerConfig::GetSharedInstance()->GetDouble("established_delay_if_no_stream_data", 1.5); + if(allowP2p) + SendPublicEndpointsRequest(); + } + } + if(type==PKT_STREAM_DATA || type==PKT_STREAM_DATA_X2 || type==PKT_STREAM_DATA_X3){ + if(state!=STATE_ESTABLISHED && receivedInitAck) + SetState(STATE_ESTABLISHED); + int count; + switch(type){ + case 
PKT_STREAM_DATA_X2: + count=2; + break; + case PKT_STREAM_DATA_X3: + count=3; + break; + case PKT_STREAM_DATA: + default: + count=1; + break; + } + int i; + if(srcEndpoint->type==EP_TYPE_UDP_RELAY && srcEndpoint!=peerPreferredRelay){ + peerPreferredRelay=srcEndpoint; + } + for(i=0;iStart(); + audioOutStarted=true; + } + if(jitterBuffer) + jitterBuffer->HandleInput((unsigned char*) (buffer+in.GetOffset()), sdlen, pts); + if(iToString().c_str(), srcEndpoint->port); + if(srcEndpoint->type!=EP_TYPE_UDP_RELAY && srcEndpoint->type!=EP_TYPE_TCP_RELAY && !allowP2p){ + LOGW("Received p2p ping but p2p is disabled by manual override"); + continue; + } + unsigned char* buf=outgoingPacketsBufferPool.Get(); + if(!buf){ + LOGW("Dropping pong packet, queue overflow"); + continue; + } + BufferOutputStream pkt(buf, outgoingPacketsBufferPool.GetSingleBufferSize()); + pkt.WriteInt32(pseq); + sendQueue->Put(PendingOutgoingPacket{ + /*.seq=*/GenerateOutSeq(), + /*.type=*/PKT_PONG, + /*.len=*/pkt.GetLength(), + /*.data=*/buf, + /*.endpoint=*/srcEndpoint, + }); + } + if(type==PKT_PONG){ + if(packetInnerLen>=4){ + uint32_t pingSeq=(uint32_t) in.ReadInt32(); + if(pingSeq==srcEndpoint->lastPingSeq){ + memmove(&srcEndpoint->rtts[1], srcEndpoint->rtts, sizeof(double)*5); + srcEndpoint->rtts[0]=GetCurrentTime()-srcEndpoint->lastPingTime; + int i; + srcEndpoint->averageRTT=0; + for(i=0;i<6;i++){ + if(srcEndpoint->rtts[i]==0) + break; + srcEndpoint->averageRTT+=srcEndpoint->rtts[i]; + } + srcEndpoint->averageRTT/=i; + LOGD("Current RTT via %s: %.3f, average: %.3f", packet.address->ToString().c_str(), srcEndpoint->rtts[0], srcEndpoint->averageRTT); + } + } + /*if(currentEndpoint!=srcEndpoint && (srcEndpoint->type==EP_TYPE_UDP_P2P_INET || srcEndpoint->type==EP_TYPE_UDP_P2P_LAN)){ + LOGI("Switching to P2P now!"); + currentEndpoint=srcEndpoint; + needSendP2pPing=false; + }*/ + } + if(type==PKT_STREAM_STATE){ + unsigned char id=in.ReadByte(); + unsigned char enabled=in.ReadByte(); + int i; + 
for(i=0;iid==id){ + incomingStreams[i]->enabled=enabled==1; + UpdateAudioOutputState(); + break; + } + } + } + if(type==PKT_LAN_ENDPOINT){ + LOGV("received lan endpoint"); + uint32_t peerAddr=(uint32_t) in.ReadInt32(); + uint16_t peerPort=(uint16_t) in.ReadInt32(); + lock_mutex(endpointsMutex); + for(std::vector::iterator itrtr=endpoints.begin();itrtr!=endpoints.end();++itrtr){ + if((*itrtr)->type==EP_TYPE_UDP_P2P_LAN){ + if(currentEndpoint==*itrtr) + currentEndpoint=preferredRelay; + delete *itrtr; + endpoints.erase(itrtr); + break; + } + } + IPv4Address v4addr(peerAddr); + IPv6Address v6addr("::0"); + unsigned char peerTag[16]; + endpoints.push_back(new Endpoint(0, peerPort, v4addr, v6addr, EP_TYPE_UDP_P2P_LAN, peerTag)); + unlock_mutex(endpointsMutex); + } + if(type==PKT_NETWORK_CHANGED && currentEndpoint->type!=EP_TYPE_UDP_RELAY && currentEndpoint->type!=EP_TYPE_TCP_RELAY){ + currentEndpoint=preferredRelay; + if(allowP2p) + SendPublicEndpointsRequest(); + if(peerVersion>=2){ + uint32_t flags=(uint32_t) in.ReadInt32(); + dataSavingRequestedByPeer=(flags & INIT_FLAG_DATA_SAVING_ENABLED)==INIT_FLAG_DATA_SAVING_ENABLED; + UpdateDataSavingState(); + UpdateAudioBitrate(); + } + } + }catch(std::out_of_range x){ + LOGW("Error parsing packet: %s", x.what()); + } + } + LOGI("=== recv thread exiting ==="); +} + +void VoIPController::RunSendThread(){ + unsigned char buf[1500]; + while(runReceiver){ + PendingOutgoingPacket pkt=sendQueue->GetBlocking(); + if(pkt.data){ + lock_mutex(endpointsMutex); + Endpoint *endpoint=pkt.endpoint ? 
pkt.endpoint : currentEndpoint; + if((endpoint->type==EP_TYPE_TCP_RELAY && useTCP) || (endpoint->type!=EP_TYPE_TCP_RELAY && useUDP)){ + BufferOutputStream p(buf, sizeof(buf)); + WritePacketHeader(pkt.seq, &p, pkt.type, pkt.len); + p.WriteBytes(pkt.data, pkt.len); + SendPacket(p.GetBuffer(), p.GetLength(), endpoint); + } + unlock_mutex(endpointsMutex); + outgoingPacketsBufferPool.Reuse(pkt.data); + }else{ + LOGE("tried to send null packet"); + } + } + LOGI("=== send thread exiting ==="); +} + + +void VoIPController::RunTickThread(){ + uint32_t tickCount=0; + bool wasWaitingForAcks=false; + double startTime=GetCurrentTime(); + while(runReceiver){ +#ifndef _WIN32 + usleep(100000); +#else + Sleep(100); +#endif + int prevSignalBarCount=signalBarCount; + signalBarCount=4; + tickCount++; + if(connectionInitTime==0) + continue; + double time=GetCurrentTime(); + if(state==STATE_RECONNECTING) + signalBarCount=1; + if(tickCount%5==0 && (state==STATE_ESTABLISHED || state==STATE_RECONNECTING)){ + memmove(&rttHistory[1], rttHistory, 31*sizeof(double)); + rttHistory[0]=GetAverageRTT(); + /*if(rttHistory[16]>0){ + LOGI("rtt diff: %.3lf", rttHistory[0]-rttHistory[16]); + }*/ + int i; + double v=0; + for(i=1;i<32;i++){ + v+=rttHistory[i-1]-rttHistory[i]; + } + v=v/32; + if(rttHistory[0]>10.0 && rttHistory[8]>10.0 && (networkType==NET_TYPE_EDGE || networkType==NET_TYPE_GPRS)){ + waitingForAcks=true; + signalBarCount=1; + }else{ + waitingForAcks=false; + } + if(waitingForAcks) + wasWaitingForAcks=false; + //LOGI("%.3lf/%.3lf, rtt diff %.3lf, waiting=%d, queue=%d", rttHistory[0], rttHistory[8], v, waitingForAcks, sendQueue->Size()); + if(jitterBuffer){ + int lostCount=jitterBuffer->GetAndResetLostPacketCount(); + if(lostCount>0 || (lostCount<0 && recvLossCount>((uint32_t)-lostCount))) + recvLossCount+=lostCount; + } + } + if(dontSendPackets>0) + dontSendPackets--; + + int i; + + conctl->Tick(); + + if(useTCP && !didAddTcpRelays){ + std::vector relays; + for(std::vector::iterator 
itr=endpoints.begin(); itr!=endpoints.end(); ++itr){ + if((*itr)->type!=EP_TYPE_UDP_RELAY) + continue; + Endpoint *tcpRelay=new Endpoint(**itr); + tcpRelay->type=EP_TYPE_TCP_RELAY; + tcpRelay->averageRTT=0; + tcpRelay->lastPingSeq=0; + tcpRelay->lastPingTime=0; + memset(tcpRelay->rtts, 0, sizeof(tcpRelay->rtts)); + relays.push_back(tcpRelay); + } + endpoints.insert(endpoints.end(), relays.begin(), relays.end()); + didAddTcpRelays=true; + } + + if(state==STATE_ESTABLISHED && encoder && conctl){ + if((audioInput && !audioInput->IsInitialized()) || (audioOutput && !audioOutput->IsInitialized())){ + LOGE("Audio I/O failed"); + lastError=TGVOIP_ERROR_AUDIO_IO; + SetState(STATE_FAILED); + } + + int act=conctl->GetBandwidthControlAction(); + if(act==TGVOIP_CONCTL_ACT_DECREASE){ + uint32_t bitrate=encoder->GetBitrate(); + if(bitrate>8000) + encoder->SetBitrate(bitrate<(minAudioBitrate+audioBitrateStepDecr) ? minAudioBitrate : (bitrate-audioBitrateStepDecr)); + }else if(act==TGVOIP_CONCTL_ACT_INCREASE){ + uint32_t bitrate=encoder->GetBitrate(); + if(bitrateSetBitrate(bitrate+audioBitrateStepIncr); + } + + if(tickCount%10==0 && encoder){ + uint32_t sendLossCount=conctl->GetSendLossCount(); + memmove(sendLossCountHistory+1, sendLossCountHistory, 31*sizeof(uint32_t)); + sendLossCountHistory[0]=sendLossCount-prevSendLossCount; + prevSendLossCount=sendLossCount; + double avgSendLossCount=0; + for(i=0;i<10;i++){ + avgSendLossCount+=sendLossCountHistory[i]; + } + double packetsPerSec=1000/(double)outgoingStreams[0]->frameDuration; + avgSendLossCount=avgSendLossCount/10/packetsPerSec; + //LOGV("avg send loss: %.1f%%", avgSendLossCount*100); + + if(avgSendLossCount>0.1){ + encoder->SetPacketLoss(40); + }else if(avgSendLossCount>0.075){ + encoder->SetPacketLoss(35); + }else if(avgSendLossCount>0.0625){ + encoder->SetPacketLoss(30); + }else if(avgSendLossCount>0.05){ + encoder->SetPacketLoss(25); + }else if(avgSendLossCount>0.025){ + encoder->SetPacketLoss(20); + }else 
if(avgSendLossCount>0.01){ + encoder->SetPacketLoss(17); + }else{ + encoder->SetPacketLoss(15); + } + + if(encoder->GetPacketLoss()>30) + signalBarCount=MIN(signalBarCount, 2); + else if(encoder->GetPacketLoss()>20) + signalBarCount=MIN(signalBarCount, 3); + } + } + + bool areThereAnyEnabledStreams=false; + + for(i=0;ienabled) + areThereAnyEnabledStreams=true; + } + + if((waitingForAcks && tickCount%10==0) || (!areThereAnyEnabledStreams && tickCount%2==0)){ + unsigned char* buf=outgoingPacketsBufferPool.Get(); + if(buf){ + sendQueue->Put(PendingOutgoingPacket{ + /*.seq=*/(firstSentPing=GenerateOutSeq()), + /*.type=*/PKT_NOP, + /*.len=*/0, + /*.data=*/buf, + /*.endpoint=*/NULL + }); + } + } + + if(state==STATE_WAIT_INIT_ACK && GetCurrentTime()-stateChangeTime>.5){ + SendInit(); + } + + if(waitingForRelayPeerInfo && GetCurrentTime()-publicEndpointsReqTime>5){ + LOGD("Resending peer relay info request"); + SendPublicEndpointsRequest(); + } + + lock_mutex(queuedPacketsMutex); + for(i=0;itimeout>0 && qp->firstSentTime>0 && GetCurrentTime()-qp->firstSentTime>=qp->timeout){ + LOGD("Removing queued packet because of timeout"); + if(qp->data) + free(qp->data); + free(qp); + queuedPackets.erase(queuedPackets.begin()+i); + i--; + continue; + } + if(GetCurrentTime()-qp->lastSentTime>=qp->retryInterval){ + unsigned char* buf=outgoingPacketsBufferPool.Get(); + if(buf){ + uint32_t seq=GenerateOutSeq(); + memmove(&qp->seqs[1], qp->seqs, 4*9); + qp->seqs[0]=seq; + qp->lastSentTime=GetCurrentTime(); + LOGD("Sending queued packet, seq=%u, type=%u, len=%u", seq, qp->type, unsigned(qp->length)); + if(qp->firstSentTime==0) + qp->firstSentTime=qp->lastSentTime; + if(qp->length) + memcpy(buf, qp->data, qp->length); + sendQueue->Put(PendingOutgoingPacket{ + /*.seq=*/seq, + /*.type=*/qp->type, + /*.len=*/qp->length, + /*.data=*/buf, + /*.endpoint=*/NULL + }); + } + } + } + unlock_mutex(queuedPacketsMutex); + + if(jitterBuffer){ + jitterBuffer->Tick(); + double 
avgDelay=jitterBuffer->GetAverageDelay(); + double avgLateCount[3]; + jitterBuffer->GetAverageLateCount(avgLateCount); + if(avgDelay>=5) + signalBarCount=1; + else if(avgDelay>=4) + signalBarCount=MIN(signalBarCount, 2); + else if(avgDelay>=3) + signalBarCount=MIN(signalBarCount, 3); + + if(avgLateCount[2]>=0.2) + signalBarCount=1; + else if(avgLateCount[2]>=0.1) + signalBarCount=MIN(signalBarCount, 2); + + } + + lock_mutex(endpointsMutex); + if(state==STATE_ESTABLISHED || state==STATE_RECONNECTING){ + Endpoint* minPingRelay=preferredRelay; + double minPing=preferredRelay->averageRTT; + for(std::vector::iterator e=endpoints.begin();e!=endpoints.end();++e){ + Endpoint* endpoint=*e; + if(endpoint->type==EP_TYPE_TCP_RELAY && !useTCP) + continue; + if(GetCurrentTime()-endpoint->lastPingTime>=10){ + LOGV("Sending ping to %s", endpoint->address.ToString().c_str()); + unsigned char* buf=outgoingPacketsBufferPool.Get(); + if(buf){ + sendQueue->Put(PendingOutgoingPacket{ + /*.seq=*/(endpoint->lastPingSeq=GenerateOutSeq()), + /*.type=*/PKT_PING, + /*.len=*/0, + /*.data=*/buf, + /*.endpoint=*/endpoint + }); + } + endpoint->lastPingTime=GetCurrentTime(); + } + if(endpoint->type==EP_TYPE_UDP_RELAY || (useTCP && endpoint->type==EP_TYPE_TCP_RELAY)){ + double k=endpoint->type==EP_TYPE_UDP_RELAY ? 
1 : 2; + if(endpoint->averageRTT>0 && endpoint->averageRTT*kaverageRTT*k; + minPingRelay=endpoint; + } + } + } + if(minPingRelay!=preferredRelay){ + preferredRelay=minPingRelay; + LOGV("set preferred relay to %s", preferredRelay->address.ToString().c_str()); + if(currentEndpoint->type==EP_TYPE_UDP_RELAY || currentEndpoint->type==EP_TYPE_TCP_RELAY) + currentEndpoint=preferredRelay; + LogDebugInfo(); + /*BufferOutputStream pkt(32); + pkt.WriteInt64(preferredRelay->id); + SendPacketReliably(PKT_SWITCH_PREF_RELAY, pkt.GetBuffer(), pkt.GetLength(), 1, 9);*/ + } + if(currentEndpoint->type==EP_TYPE_UDP_RELAY){ + Endpoint* p2p=GetEndpointByType(EP_TYPE_UDP_P2P_INET); + if(p2p){ + Endpoint* lan=GetEndpointByType(EP_TYPE_UDP_P2P_LAN); + if(lan && lan->averageRTT>0 && lan->averageRTTaverageRTT>0 && p2p->averageRTT0 && minPingaverageRTT*p2pToRelaySwitchThreshold){ + LOGI("Switching to relay"); + currentEndpoint=preferredRelay; + LogDebugInfo(); + } + } + } + if(udpConnectivityState==UDP_UNKNOWN){ + for(std::vector::iterator itr=endpoints.begin();itr!=endpoints.end();++itr){ + if((*itr)->type==EP_TYPE_UDP_RELAY){ + SendUdpPing(*itr); + } + } + udpConnectivityState=UDP_PING_SENT; + lastUdpPingTime=time; + udpPingCount=1; + }else if(udpConnectivityState==UDP_PING_SENT){ + if(time-lastUdpPingTime>=0.5){ + if(udpPingCount<4){ + for(std::vector::iterator itr=endpoints.begin();itr!=endpoints.end();++itr){ + if((*itr)->type==EP_TYPE_UDP_RELAY){ + SendUdpPing(*itr); + } + } + udpPingCount++; + lastUdpPingTime=time; + }else{ + LOGW("No UDP ping replies received; assuming no connectivity and trying TCP") + udpConnectivityState=UDP_NOT_AVAILABLE; + useTCP=true; + } + } + } + unlock_mutex(endpointsMutex); + + if(state==STATE_ESTABLISHED || state==STATE_RECONNECTING){ + if(time-lastRecvPacketTime>=config.recv_timeout){ + if(currentEndpoint && currentEndpoint->type!=EP_TYPE_UDP_RELAY && currentEndpoint->type!=EP_TYPE_TCP_RELAY){ + LOGW("Packet receive timeout, switching to relay"); + 
currentEndpoint=preferredRelay; + for(std::vector::iterator itrtr=endpoints.begin();itrtr!=endpoints.end();++itrtr){ + Endpoint* e=*itrtr; + if(e->type==EP_TYPE_UDP_P2P_INET || e->type==EP_TYPE_UDP_P2P_LAN){ + e->averageRTT=0; + memset(e->rtts, 0, sizeof(e->rtts)); + } + } + if(allowP2p){ + SendPublicEndpointsRequest(); + } + UpdateDataSavingState(); + UpdateAudioBitrate(); + BufferOutputStream s(4); + s.WriteInt32(dataSavingMode ? INIT_FLAG_DATA_SAVING_ENABLED : 0); + SendPacketReliably(PKT_NETWORK_CHANGED, s.GetBuffer(), s.GetLength(), 1, 20); + lastRecvPacketTime=time; + }else{ + LOGW("Packet receive timeout, disconnecting"); + lastError=TGVOIP_ERROR_TIMEOUT; + SetState(STATE_FAILED); + } + } + }else if(state==STATE_WAIT_INIT || state==STATE_WAIT_INIT_ACK){ + if(GetCurrentTime()-connectionInitTime>=config.init_timeout){ + LOGW("Init timeout, disconnecting"); + lastError=TGVOIP_ERROR_TIMEOUT; + SetState(STATE_FAILED); + } + } + + if(state==STATE_ESTABLISHED && time-lastRecvPacketTime>=reconnectingTimeout){ + SetState(STATE_RECONNECTING); + } + + if(state!=STATE_ESTABLISHED && setEstablishedAt>0 && time>=setEstablishedAt){ + SetState(STATE_ESTABLISHED); + setEstablishedAt=0; + } + + if(signalBarCount!=prevSignalBarCount){ + LOGD("SIGNAL BAR COUNT CHANGED: %d", signalBarCount); + if(signalBarCountCallback) + signalBarCountCallback(this, signalBarCount); + } + + + if(statsDump){ + //fprintf(statsDump, "Time\tRTT\tLISeq\tLASeq\tCWnd\tBitrate\tJitter\tJDelay\tAJDelay\n"); + fprintf(statsDump, "%.3f\t%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%.3f\t%.3f\t%.3f\n", + GetCurrentTime()-startTime, + currentEndpoint->rtts[0], + lastRemoteSeq, + seq, + lastRemoteAckSeq, + recvLossCount, + conctl ? conctl->GetSendLossCount() : 0, + conctl ? (int)conctl->GetInflightDataSize() : 0, + encoder ? encoder->GetBitrate() : 0, + encoder ? encoder->GetPacketLoss() : 0, + jitterBuffer ? jitterBuffer->GetLastMeasuredJitter() : 0, + jitterBuffer ? 
jitterBuffer->GetLastMeasuredDelay()*0.06 : 0, + jitterBuffer ? jitterBuffer->GetAverageDelay()*0.06 : 0); + } + +#if defined(__APPLE__) && defined(TGVOIP_USE_AUDIO_SESSION) + if(needNotifyAcquiredAudioSession){ + needNotifyAcquiredAudioSession=false; + audio::AudioUnitIO::AudioSessionAcquired(); + } +#endif + } + LOGI("=== tick thread exiting ==="); +} + + +Endpoint& VoIPController::GetRemoteEndpoint(){ + //return useLan ? &remoteLanEp : &remotePublicEp; + return *currentEndpoint; +} + + +void VoIPController::SendPacket(unsigned char *data, size_t len, Endpoint* ep){ + if(stopping) + return; + if(ep->type==EP_TYPE_TCP_RELAY && !useTCP) + return; + //dst.sin_addr=ep->address; + //dst.sin_port=htons(ep->port); + //dst.sin_family=AF_INET; + BufferOutputStream out(len+128); + if(ep->type==EP_TYPE_UDP_RELAY || ep->type==EP_TYPE_TCP_RELAY) + out.WriteBytes((unsigned char*)ep->peerTag, 16); + else + out.WriteBytes(callID, 16); + if(len>0){ + BufferOutputStream inner(len+128); + inner.WriteInt32(len); + inner.WriteBytes(data, len); + if(inner.GetLength()%16!=0){ + size_t padLen=16-inner.GetLength()%16; + unsigned char padding[16]; + crypto.rand_bytes((uint8_t *) padding, padLen); + inner.WriteBytes(padding, padLen); + } + assert(inner.GetLength()%16==0); + unsigned char key[32], iv[32], msgHash[SHA1_LENGTH]; + crypto.sha1((uint8_t *) inner.GetBuffer(), len+4, msgHash); + out.WriteBytes(keyFingerprint, 8); + out.WriteBytes((msgHash+(SHA1_LENGTH-16)), 16); + KDF(msgHash+(SHA1_LENGTH-16), isOutgoing ? 
0 : 8, key, iv); + unsigned char aesOut[MSC_STACK_FALLBACK(inner.GetLength(), 1500)]; + crypto.aes_ige_encrypt(inner.GetBuffer(), aesOut, inner.GetLength(), key, iv); + out.WriteBytes(aesOut, inner.GetLength()); + } + //LOGV("Sending %d bytes to %s:%d", out.GetLength(), ep->address.ToString().c_str(), ep->port); + if(IS_MOBILE_NETWORK(networkType)) + stats.bytesSentMobile+=(uint64_t)out.GetLength(); + else + stats.bytesSentWifi+=(uint64_t)out.GetLength(); + + NetworkPacket pkt; + pkt.address=(NetworkAddress*)&ep->address; + pkt.port=ep->port; + pkt.length=out.GetLength(); + pkt.data=out.GetBuffer(); + pkt.protocol=ep->type==EP_TYPE_TCP_RELAY ? PROTO_TCP : PROTO_UDP; + //socket->Send(&pkt); + if(ep->type==EP_TYPE_TCP_RELAY){ + if(ep->socket){ + ep->socket->Send(&pkt); + }else{ + LOGI("connecting to tcp: %s:%u", ep->address.ToString().c_str(), ep->port); + NetworkSocket* s; + if(proxyProtocol==PROXY_NONE){ + s=NetworkSocket::Create(PROTO_TCP); + }else if(proxyProtocol==PROXY_SOCKS5){ + NetworkSocket* rawTcp=NetworkSocket::Create(PROTO_TCP); + openingTcpSocket=rawTcp; + rawTcp->Connect(resolvedProxyAddress, proxyPort); + if(rawTcp->IsFailed()){ + openingTcpSocket=NULL; + rawTcp->Close(); + delete rawTcp; + LOGW("Error connecting to SOCKS5 proxy"); + return; + } + NetworkSocketSOCKS5Proxy* proxy=new NetworkSocketSOCKS5Proxy(rawTcp, NULL, proxyUsername, proxyPassword); + openingTcpSocket=rawTcp; + proxy->InitConnection(); + if(proxy->IsFailed()){ + openingTcpSocket=NULL; + LOGW("Proxy initialization failed"); + proxy->Close(); + delete proxy; + return; + } + s=proxy; + }/*else if(proxyProtocol==PROXY_HTTP){ + s=NetworkSocket::Create(PROTO_TCP); + }*/else{ + LOGE("Unsupported proxy protocol %d", proxyProtocol); + SetState(STATE_FAILED); + return; + } + s->Connect(&ep->address, ep->port); + if(s->IsFailed()){ + s->Close(); + delete s; + LOGW("Error connecting to %s:%u", ep->address.ToString().c_str(), ep->port); + }else{ + NetworkSocketTCPObfuscated* tcpWrapper=new 
NetworkSocketTCPObfuscated(s); + openingTcpSocket=tcpWrapper; + tcpWrapper->InitConnection(); + openingTcpSocket=NULL; + if(tcpWrapper->IsFailed()){ + tcpWrapper->Close(); + delete tcpWrapper; + LOGW("Error initializing connection to %s:%u", ep->address.ToString().c_str(), ep->port); + }else{ + tcpWrapper->Send(&pkt); + ep->socket=tcpWrapper; + selectCanceller->CancelSelect(); + } + } + } + }else{ + udpSocket->Send(&pkt); + } +} + + +void VoIPController::SetNetworkType(int type){ + networkType=type; + UpdateDataSavingState(); + UpdateAudioBitrate(); + std::string itfName=udpSocket->GetLocalInterfaceInfo(NULL, NULL); + if(itfName!=activeNetItfName){ + udpSocket->OnActiveInterfaceChanged(); + LOGI("Active network interface changed: %s -> %s", activeNetItfName.c_str(), itfName.c_str()); + bool isFirstChange=activeNetItfName.length()==0; + activeNetItfName=itfName; + if(isFirstChange) + return; + if(currentEndpoint && currentEndpoint->type!=EP_TYPE_UDP_RELAY){ + if(preferredRelay->type==EP_TYPE_UDP_RELAY) + currentEndpoint=preferredRelay; + lock_mutex(endpointsMutex); + for(std::vector::iterator itr=endpoints.begin();itr!=endpoints.end();){ + Endpoint* endpoint=*itr; + if(endpoint->type==EP_TYPE_UDP_RELAY && useTCP){ + useTCP=false; + if(preferredRelay->type==EP_TYPE_TCP_RELAY){ + preferredRelay=endpoint; + currentEndpoint=endpoint; + } + }else if(endpoint->type==EP_TYPE_TCP_RELAY && endpoint->socket){ + endpoint->socket->Close(); + } + //if(endpoint->type==EP_TYPE_UDP_P2P_INET){ + endpoint->averageRTT=0; + memset(endpoint->rtts, 0, sizeof(endpoint->rtts)); + //} + if(endpoint->type==EP_TYPE_UDP_P2P_LAN){ + delete endpoint; + itr=endpoints.erase(itr); + }else{ + ++itr; + } + } + unlock_mutex(endpointsMutex); + } + udpConnectivityState=UDP_UNKNOWN; + udpPingCount=0; + lastUdpPingTime=0; + if(proxyProtocol==PROXY_SOCKS5) + InitUDPProxy(); + if(allowP2p && currentEndpoint){ + SendPublicEndpointsRequest(); + } + BufferOutputStream s(4); + s.WriteInt32(dataSavingMode ? 
INIT_FLAG_DATA_SAVING_ENABLED : 0); + SendPacketReliably(PKT_NETWORK_CHANGED, s.GetBuffer(), s.GetLength(), 1, 20); + selectCanceller->CancelSelect(); + } + LOGI("set network type: %d, active interface %s", type, activeNetItfName.c_str()); + /*if(type==NET_TYPE_GPRS || type==NET_TYPE_EDGE) + audioPacketGrouping=2; + else + audioPacketGrouping=1;*/ +} + + +double VoIPController::GetAverageRTT(){ + if(lastSentSeq>=lastRemoteAckSeq){ + uint32_t diff=lastSentSeq-lastRemoteAckSeq; + //LOGV("rtt diff=%u", diff); + if(diff<32){ + int i; + double res=0; + int count=0; + for(i=diff;i<32;i++){ + if(remoteAcks[i-diff]>0){ + res+=(remoteAcks[i-diff]-sentPacketTimes[i]); + count++; + } + } + if(count>0) + res/=count; + return res; + } + } + return 999; +} + +#if defined(__APPLE__) +static void initMachTimestart() { + mach_timebase_info_data_t tb = { 0, 0 }; + mach_timebase_info(&tb); + VoIPController::machTimebase = tb.numer; + VoIPController::machTimebase /= tb.denom; + VoIPController::machTimestart = mach_absolute_time(); +} +#endif + +double VoIPController::GetCurrentTime(){ +#if defined(__linux__) + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec+(double)ts.tv_nsec/1000000000.0; +#elif defined(__APPLE__) + static pthread_once_t token = PTHREAD_ONCE_INIT; + pthread_once(&token, &initMachTimestart); + return (mach_absolute_time() - machTimestart) * machTimebase / 1000000000.0f; +#elif defined(_WIN32) + if(!didInitWin32TimeScale){ + LARGE_INTEGER scale; + QueryPerformanceFrequency(&scale); + win32TimeScale=scale.QuadPart; + didInitWin32TimeScale=true; + } + LARGE_INTEGER t; + QueryPerformanceCounter(&t); + return (double)t.QuadPart/(double)win32TimeScale; +#endif +} + +void VoIPController::SetStateCallback(void (* f)(VoIPController*, int)){ + stateCallback=f; + if(stateCallback){ + stateCallback(this, state); + } +} + + +void VoIPController::SetState(int state){ + this->state=state; + LOGV("Call state changed to %d", state); + 
stateChangeTime=GetCurrentTime(); + if(stateCallback){ + stateCallback(this, state); + } +} + + +void VoIPController::SetMicMute(bool mute){ + micMuted=mute; + if(audioInput){ + if(mute) + audioInput->Stop(); + else + audioInput->Start(); + if(!audioInput->IsInitialized()){ + lastError=TGVOIP_ERROR_AUDIO_IO; + SetState(STATE_FAILED); + return; + } + } + if(echoCanceller) + echoCanceller->Enable(!mute); + int i; + for(i=0;itype==STREAM_TYPE_AUDIO){ + unsigned char buf[2]; + buf[0]=outgoingStreams[i]->id; + buf[1]=(char) (mute ? 0 : 1); + SendPacketReliably(PKT_STREAM_STATE, buf, 2, .5f, 20); + outgoingStreams[i]->enabled=!mute; + } + } +} + + +void VoIPController::UpdateAudioOutputState(){ + bool areAnyAudioStreamsEnabled=false; + int i; + for(i=0;itype==STREAM_TYPE_AUDIO && incomingStreams[i]->enabled) + areAnyAudioStreamsEnabled=true; + } + if(jitterBuffer){ + jitterBuffer->Reset(); + } + if(decoder){ + decoder->ResetQueue(); + } + if(audioOutput){ + if(audioOutput->IsPlaying()!=areAnyAudioStreamsEnabled){ + if(areAnyAudioStreamsEnabled) + audioOutput->Start(); + else + audioOutput->Stop(); + } + } +} + +void VoIPController::KDF(unsigned char* msgKey, size_t x, unsigned char* aesKey, unsigned char* aesIv){ + uint8_t sA[SHA1_LENGTH], sB[SHA1_LENGTH], sC[SHA1_LENGTH], sD[SHA1_LENGTH]; + BufferOutputStream buf(128); + buf.WriteBytes(msgKey, 16); + buf.WriteBytes(encryptionKey+x, 32); + crypto.sha1(buf.GetBuffer(), buf.GetLength(), sA); + buf.Reset(); + buf.WriteBytes(encryptionKey+32+x, 16); + buf.WriteBytes(msgKey, 16); + buf.WriteBytes(encryptionKey+48+x, 16); + crypto.sha1(buf.GetBuffer(), buf.GetLength(), sB); + buf.Reset(); + buf.WriteBytes(encryptionKey+64+x, 32); + buf.WriteBytes(msgKey, 16); + crypto.sha1(buf.GetBuffer(), buf.GetLength(), sC); + buf.Reset(); + buf.WriteBytes(msgKey, 16); + buf.WriteBytes(encryptionKey+96+x, 32); + crypto.sha1(buf.GetBuffer(), buf.GetLength(), sD); + buf.Reset(); + buf.WriteBytes(sA, 8); + buf.WriteBytes(sB+8, 12); + 
buf.WriteBytes(sC+4, 12); + assert(buf.GetLength()==32); + memcpy(aesKey, buf.GetBuffer(), 32); + buf.Reset(); + buf.WriteBytes(sA+8, 12); + buf.WriteBytes(sB, 8); + buf.WriteBytes(sC+16, 4); + buf.WriteBytes(sD, 8); + assert(buf.GetLength()==32); + memcpy(aesIv, buf.GetBuffer(), 32); +} + +void VoIPController::GetDebugString(char *buffer, size_t len){ + char endpointsBuf[10240]; + memset(endpointsBuf, 0, 10240); + int i; + for(std::vector::iterator itrtr=endpoints.begin();itrtr!=endpoints.end();++itrtr){ + const char* type; + Endpoint* endpoint=*itrtr; + switch(endpoint->type){ + case EP_TYPE_UDP_P2P_INET: + type="UDP_P2P_INET"; + break; + case EP_TYPE_UDP_P2P_LAN: + type="UDP_P2P_LAN"; + break; + case EP_TYPE_UDP_RELAY: + type="UDP_RELAY"; + break; + case EP_TYPE_TCP_RELAY: + type="TCP_RELAY"; + break; + default: + type="UNKNOWN"; + break; + } + if(strlen(endpointsBuf)>10240-1024) + break; + sprintf(endpointsBuf+strlen(endpointsBuf), "%s:%u %dms [%s%s]\n", endpoint->address.ToString().c_str(), endpoint->port, (int)(endpoint->averageRTT*1000), type, currentEndpoint==endpoint ? ", IN_USE" : ""); + } + double avgLate[3]; + if(jitterBuffer) + jitterBuffer->GetAverageLateCount(avgLate); + else + memset(avgLate, 0, 3*sizeof(double)); + snprintf(buffer, len, + "Remote endpoints: \n%s" + "Jitter buffer: %d/%.2f | %.1f, %.1f, %.1f\n" + "RTT avg/min: %d/%d\n" + "Congestion window: %d/%d bytes\n" + "Key fingerprint: %02hhX%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX\n" + "Last sent/ack'd seq: %u/%u\n" + "Last recvd seq: %u\n" + "Send/recv losses: %u/%u (%d%%)\n" + "Audio bitrate: %d kbit\n" +// "Packet grouping: %d\n" + "Frame size out/in: %d/%d\n" + "Bytes sent/recvd: %llu/%llu", + endpointsBuf, + jitterBuffer ? jitterBuffer->GetMinPacketCount() : 0, jitterBuffer ? 
jitterBuffer->GetAverageDelay() : 0, avgLate[0], avgLate[1], avgLate[2], + // (int)(GetAverageRTT()*1000), 0, + (int)(conctl->GetAverageRTT()*1000), (int)(conctl->GetMinimumRTT()*1000), + int(conctl->GetInflightDataSize()), int(conctl->GetCongestionWindow()), + keyFingerprint[0],keyFingerprint[1],keyFingerprint[2],keyFingerprint[3],keyFingerprint[4],keyFingerprint[5],keyFingerprint[6],keyFingerprint[7], + lastSentSeq, lastRemoteAckSeq, lastRemoteSeq, + conctl->GetSendLossCount(), recvLossCount, encoder ? encoder->GetPacketLoss() : 0, + encoder ? (encoder->GetBitrate()/1000) : 0, +// audioPacketGrouping, + outgoingStreams[0]->frameDuration, incomingStreams.size()>0 ? incomingStreams[0]->frameDuration : 0, + (long long unsigned int)(stats.bytesSentMobile+stats.bytesSentWifi), + (long long unsigned int)(stats.bytesRecvdMobile+stats.bytesRecvdWifi)); +} + + +void VoIPController::SendPublicEndpointsRequest(){ + LOGI("Sending public endpoints request"); + if(preferredRelay){ + SendPublicEndpointsRequest(*preferredRelay); + } + if(peerPreferredRelay && peerPreferredRelay!=preferredRelay){ + SendPublicEndpointsRequest(*peerPreferredRelay); + } +} + +void VoIPController::SendPublicEndpointsRequest(Endpoint& relay){ + if(!useUDP) + return; + LOGD("Sending public endpoints request to %s:%d", relay.address.ToString().c_str(), relay.port); + publicEndpointsReqTime=GetCurrentTime(); + waitingForRelayPeerInfo=true; + unsigned char buf[32]; + memcpy(buf, relay.peerTag, 16); + memset(buf+16, 0xFF, 16); + NetworkPacket pkt; + pkt.data=buf; + pkt.length=32; + pkt.address=(NetworkAddress*)&relay.address; + pkt.port=relay.port; + pkt.protocol=PROTO_UDP; + udpSocket->Send(&pkt); +} + +Endpoint* VoIPController::GetEndpointByType(int type){ + if(type==EP_TYPE_UDP_RELAY && preferredRelay) + return preferredRelay; + for(std::vector::iterator itrtr=endpoints.begin();itrtr!=endpoints.end();++itrtr){ + if((*itrtr)->type==type) + return *itrtr; + } + return NULL; +} + + +float 
VoIPController::GetOutputLevel(){ + if(!audioOutput || !audioOutStarted){ + return 0.0; + } + return audioOutput->GetLevel(); +} + + +void VoIPController::SendPacketReliably(unsigned char type, unsigned char *data, size_t len, double retryInterval, double timeout){ + LOGD("Send reliably, type=%u, len=%u, retry=%.3f, timeout=%.3f", type, unsigned(len), retryInterval, timeout); + voip_queued_packet_t* pkt=(voip_queued_packet_t *) malloc(sizeof(voip_queued_packet_t)); + memset(pkt, 0, sizeof(voip_queued_packet_t)); + pkt->type=type; + if(data){ + pkt->data=(unsigned char *) malloc(len); + memcpy(pkt->data, data, len); + pkt->length=len; + } + pkt->retryInterval=retryInterval; + pkt->timeout=timeout; + pkt->firstSentTime=0; + pkt->lastSentTime=0; + lock_mutex(queuedPacketsMutex); + queuedPackets.push_back(pkt); + unlock_mutex(queuedPacketsMutex); +} + + +void VoIPController::SetConfig(voip_config_t *cfg){ + memcpy(&config, cfg, sizeof(voip_config_t)); + if(tgvoipLogFile){ + fclose(tgvoipLogFile); + } + if(strlen(cfg->logFilePath)){ + tgvoipLogFile=fopen(cfg->logFilePath, "a"); + tgvoip_log_file_write_header(); + } + if(statsDump) + fclose(statsDump); + if(strlen(cfg->statsDumpFilePath)){ + statsDump=fopen(cfg->statsDumpFilePath, "w"); + if(statsDump) + fprintf(statsDump, "Time\tRTT\tLRSeq\tLSSeq\tLASeq\tLostR\tLostS\tCWnd\tBitrate\tLoss%%\tJitter\tJDelay\tAJDelay\n"); + else + LOGW("Failed to open stats dump file %s for writing", cfg->statsDumpFilePath); + } + UpdateDataSavingState(); + UpdateAudioBitrate(); +} + + +void VoIPController::UpdateDataSavingState(){ + if(config.data_saving==DATA_SAVING_ALWAYS){ + dataSavingMode=true; + }else if(config.data_saving==DATA_SAVING_MOBILE){ + dataSavingMode=networkType==NET_TYPE_GPRS || networkType==NET_TYPE_EDGE || + networkType==NET_TYPE_3G || networkType==NET_TYPE_HSPA || networkType==NET_TYPE_LTE || networkType==NET_TYPE_OTHER_MOBILE; + }else{ + dataSavingMode=false; + } + LOGI("update data saving mode, config %d, enabled %d, 
reqd by peer %d", config.data_saving, dataSavingMode, dataSavingRequestedByPeer); +} + + +void VoIPController::DebugCtl(int request, int param){ + if(request==1){ // set bitrate + maxBitrate=param; + if(encoder){ + encoder->SetBitrate(maxBitrate); + } + }else if(request==2){ // set packet loss + if(encoder){ + encoder->SetPacketLoss(param); + } + }else if(request==3){ // force enable/disable p2p + allowP2p=param==1; + if(!allowP2p && currentEndpoint && currentEndpoint->type!=EP_TYPE_UDP_RELAY){ + currentEndpoint=preferredRelay; + }else if(allowP2p){ + SendPublicEndpointsRequest(); + } + BufferOutputStream s(4); + s.WriteInt32(dataSavingMode ? INIT_FLAG_DATA_SAVING_ENABLED : 0); + SendPacketReliably(PKT_NETWORK_CHANGED, s.GetBuffer(), s.GetLength(), 1, 20); + }else if(request==4){ + if(echoCanceller) + echoCanceller->Enable(param==1); + } +} + + +const char* VoIPController::GetVersion(){ + return LIBTGVOIP_VERSION; +} + + +int64_t VoIPController::GetPreferredRelayID(){ + if(preferredRelay) + return preferredRelay->id; + return 0; +} + + +int VoIPController::GetLastError(){ + return lastError; +} + + +void VoIPController::GetStats(voip_stats_t *stats){ + memcpy(stats, &this->stats, sizeof(voip_stats_t)); +} + +#ifdef TGVOIP_USE_AUDIO_SESSION +void VoIPController::SetAcquireAudioSession(void (^completion)(void (^)())) { + this->acquireAudioSession = [completion copy]; +} + +void VoIPController::ReleaseAudioSession(void (^completion)()) { + completion(); +} +#endif + +void VoIPController::LogDebugInfo(){ + std::string json="{\"endpoints\":["; + for(std::vector::iterator itr=endpoints.begin();itr!=endpoints.end();++itr){ + Endpoint* e=*itr; + char buffer[1024]; + const char* typeStr="unknown"; + switch(e->type){ + case EP_TYPE_UDP_RELAY: + typeStr="udp_relay"; + break; + case EP_TYPE_UDP_P2P_INET: + typeStr="udp_p2p_inet"; + break; + case EP_TYPE_UDP_P2P_LAN: + typeStr="udp_p2p_lan"; + break; + case EP_TYPE_TCP_RELAY: + typeStr="tcp_relay"; + break; + } + 
snprintf(buffer, 1024, "{\"address\":\"%s\",\"port\":%u,\"type\":\"%s\",\"rtt\":%u%s%s}", e->address.ToString().c_str(), e->port, typeStr, (unsigned int)round(e->averageRTT*1000), currentEndpoint==&*e ? ",\"in_use\":true" : "", preferredRelay==&*e ? ",\"preferred\":true" : ""); + json+=buffer; + if(itr!=endpoints.end()-1) + json+=","; + } + json+="],"; + char buffer[1024]; + const char* netTypeStr; + switch(networkType){ + case NET_TYPE_WIFI: + netTypeStr="wifi"; + break; + case NET_TYPE_GPRS: + netTypeStr="gprs"; + break; + case NET_TYPE_EDGE: + netTypeStr="edge"; + break; + case NET_TYPE_3G: + netTypeStr="3g"; + break; + case NET_TYPE_HSPA: + netTypeStr="hspa"; + break; + case NET_TYPE_LTE: + netTypeStr="lte"; + break; + case NET_TYPE_ETHERNET: + netTypeStr="ethernet"; + break; + case NET_TYPE_OTHER_HIGH_SPEED: + netTypeStr="other_high_speed"; + break; + case NET_TYPE_OTHER_LOW_SPEED: + netTypeStr="other_low_speed"; + break; + case NET_TYPE_DIALUP: + netTypeStr="dialup"; + break; + case NET_TYPE_OTHER_MOBILE: + netTypeStr="other_mobile"; + break; + default: + netTypeStr="unknown"; + break; + } + snprintf(buffer, 1024, "\"time\":%u,\"network_type\":\"%s\"}", (unsigned int)time(NULL), netTypeStr); + json+=buffer; + debugLogs.push_back(json); +} + +std::string VoIPController::GetDebugLog(){ + std::string log="{\"events\":["; + + for(std::vector::iterator itr=debugLogs.begin();itr!=debugLogs.end();++itr){ + log+=(*itr); + if((itr+1)!=debugLogs.end()) + log+=","; + } + log+="],\"libtgvoip_version\":\"" LIBTGVOIP_VERSION "\"}"; + return log; +} + +void VoIPController::GetDebugLog(char *buffer){ + strcpy(buffer, GetDebugLog().c_str()); +} + +size_t VoIPController::GetDebugLogLength(){ + size_t len=128; + for(std::vector::iterator itr=debugLogs.begin();itr!=debugLogs.end();++itr){ + len+=(*itr).length()+1; + } + return len; +} + +std::vector VoIPController::EnumerateAudioInputs(){ + vector devs; + audio::AudioInput::EnumerateDevices(devs); + return devs; +} + 
+std::vector VoIPController::EnumerateAudioOutputs(){ + vector devs; + audio::AudioOutput::EnumerateDevices(devs); + return devs; +} + +void VoIPController::SetCurrentAudioInput(std::string id){ + currentAudioInput=id; + if(audioInput) + audioInput->SetCurrentDevice(id); +} + +void VoIPController::SetCurrentAudioOutput(std::string id){ + currentAudioOutput=id; + if(audioOutput) + audioOutput->SetCurrentDevice(id); +} + +std::string VoIPController::GetCurrentAudioInputID(){ + return currentAudioInput; +} + +std::string VoIPController::GetCurrentAudioOutputID(){ + return currentAudioOutput; +} + +void VoIPController::SetProxy(int protocol, std::string address, uint16_t port, std::string username, std::string password){ + proxyProtocol=protocol; + proxyAddress=address; + proxyPort=port; + proxyUsername=username; + proxyPassword=password; +} + +void VoIPController::SendUdpPing(Endpoint *endpoint){ + if(endpoint->type!=EP_TYPE_UDP_RELAY) + return; + LOGV("Sending UDP ping to %s:%d", endpoint->address.ToString().c_str(), endpoint->port); + BufferOutputStream p(1024); + p.WriteBytes(endpoint->peerTag, 16); + p.WriteInt32(-1); + p.WriteInt32(-1); + p.WriteInt32(-1); + p.WriteInt32(-2); + p.WriteInt64(12345); + NetworkPacket pkt; + pkt.address=&endpoint->address; + pkt.port=endpoint->port; + pkt.protocol=PROTO_UDP; + pkt.data=p.GetBuffer(); + pkt.length=p.GetLength(); + udpSocket->Send(&pkt); +} + +int VoIPController::GetSignalBarsCount(){ + return signalBarCount; +} + +void VoIPController::SetSignalBarsCountCallback(void (*f)(VoIPController *, int)){ + signalBarCountCallback=f; +} + +Endpoint::Endpoint(int64_t id, uint16_t port, IPv4Address& _address, IPv6Address& _v6address, char type, unsigned char peerTag[16]) : address(_address), v6address(_v6address){ + this->id=id; + this->port=port; + this->type=type; + memcpy(this->peerTag, peerTag, 16); + LOGV("new endpoint %lld: %s:%u", (long long int)id, address.ToString().c_str(), port); + + lastPingSeq=0; + lastPingTime=0; + 
averageRTT=0; + memset(rtts, 0, sizeof(rtts)); + socket=NULL; +} + +Endpoint::Endpoint() : address(0), v6address("::0") { + lastPingSeq=0; + lastPingTime=0; + averageRTT=0; + memset(rtts, 0, sizeof(rtts)); + socket=NULL; +} + +#if defined(__APPLE__) && TARGET_OS_IPHONE +void VoIPController::SetRemoteEndpoints(voip_legacy_endpoint_t* buffer, size_t count, bool allowP2P){ + std::vector endpoints; + for(size_t i=0;iSetRemoteEndpoints(endpoints, allowP2P); +} +#endif diff --git a/Telegram/ThirdParty/libtgvoip/VoIPController.h b/Telegram/ThirdParty/libtgvoip/VoIPController.h new file mode 100644 index 000000000..40feb58ba --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/VoIPController.h @@ -0,0 +1,509 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef __VOIPCONTROLLER_H +#define __VOIPCONTROLLER_H + +#ifndef _WIN32 +#include +#include +#endif +#ifdef __APPLE__ +#include +#endif +#include +#include +#include +#include +#include "audio/AudioInput.h" +#include "BlockingQueue.h" +#include "BufferOutputStream.h" +#include "audio/AudioOutput.h" +#include "JitterBuffer.h" +#include "OpusDecoder.h" +#include "OpusEncoder.h" +#include "EchoCanceller.h" +#include "CongestionControl.h" +#include "NetworkSocket.h" + +#define LIBTGVOIP_VERSION "1.0" + +#define STATE_WAIT_INIT 1 +#define STATE_WAIT_INIT_ACK 2 +#define STATE_ESTABLISHED 3 +#define STATE_FAILED 4 +#define STATE_RECONNECTING 5 + +#define TGVOIP_ERROR_UNKNOWN 0 +#define TGVOIP_ERROR_INCOMPATIBLE 1 +#define TGVOIP_ERROR_TIMEOUT 2 +#define TGVOIP_ERROR_AUDIO_IO 3 + +#define NET_TYPE_UNKNOWN 0 +#define NET_TYPE_GPRS 1 +#define NET_TYPE_EDGE 2 +#define NET_TYPE_3G 3 +#define NET_TYPE_HSPA 4 +#define NET_TYPE_LTE 5 +#define NET_TYPE_WIFI 6 +#define NET_TYPE_ETHERNET 7 +#define NET_TYPE_OTHER_HIGH_SPEED 8 +#define NET_TYPE_OTHER_LOW_SPEED 9 +#define 
NET_TYPE_DIALUP 10 +#define NET_TYPE_OTHER_MOBILE 11 + +#define EP_TYPE_UDP_P2P_INET 1 +#define EP_TYPE_UDP_P2P_LAN 2 +#define EP_TYPE_UDP_RELAY 3 +#define EP_TYPE_TCP_RELAY 4 + +#define DATA_SAVING_NEVER 0 +#define DATA_SAVING_MOBILE 1 +#define DATA_SAVING_ALWAYS 2 + +#ifdef _WIN32 +#undef GetCurrentTime +#endif + +struct voip_stream_t{ + int32_t userID; + unsigned char id; + unsigned char type; + unsigned char codec; + bool enabled; + uint16_t frameDuration; +}; +typedef struct voip_stream_t voip_stream_t; + +struct voip_queued_packet_t{ + unsigned char type; + unsigned char* data; + size_t length; + uint32_t seqs[16]; + double firstSentTime; + double lastSentTime; + double retryInterval; + double timeout; +}; +typedef struct voip_queued_packet_t voip_queued_packet_t; + +struct voip_config_t{ + double init_timeout; + double recv_timeout; + int data_saving; + char logFilePath[256]; + char statsDumpFilePath[256]; + + bool enableAEC; + bool enableNS; + bool enableAGC; +}; +typedef struct voip_config_t voip_config_t; + +#if defined(__APPLE__) && TARGET_OS_IPHONE +// temporary fix for nasty linking errors +struct voip_legacy_endpoint_t{ + const char* address; + const char* address6; + uint16_t port; + int64_t id; + unsigned char peerTag[16]; +}; +typedef struct voip_legacy_endpoint_t voip_legacy_endpoint_t; +#endif + +struct voip_stats_t{ + uint64_t bytesSentWifi; + uint64_t bytesRecvdWifi; + uint64_t bytesSentMobile; + uint64_t bytesRecvdMobile; +}; +typedef struct voip_stats_t voip_stats_t; + +struct voip_crypto_functions_t{ + void (*rand_bytes)(uint8_t* buffer, size_t length); + void (*sha1)(uint8_t* msg, size_t length, uint8_t* output); + void (*sha256)(uint8_t* msg, size_t length, uint8_t* output); + void (*aes_ige_encrypt)(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv); + void (*aes_ige_decrypt)(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv); + void (*aes_ctr_encrypt)(uint8_t* inout, size_t length, uint8_t* key, 
uint8_t* iv, uint8_t* ecount, uint32_t* num); +}; +typedef struct voip_crypto_functions_t voip_crypto_functions_t; + +#define SEQ_MAX 0xFFFFFFFF + +inline bool seqgt(uint32_t s1, uint32_t s2){ + return ((s1>s2) && (s1-s2<=SEQ_MAX/2)) || ((s1SEQ_MAX/2)); +} + +namespace tgvoip{ + + enum{ + PROXY_NONE=0, + PROXY_SOCKS5, + //PROXY_HTTP + }; + +class Endpoint{ + friend class VoIPController; +public: + Endpoint(int64_t id, uint16_t port, IPv4Address& address, IPv6Address& v6address, char type, unsigned char* peerTag); + Endpoint(); + int64_t id; + uint16_t port; + IPv4Address address; + IPv6Address v6address; + char type; + unsigned char peerTag[16]; + +private: + double lastPingTime; + uint32_t lastPingSeq; + double rtts[6]; + double averageRTT; + NetworkSocket* socket; +}; + +class AudioDevice{ +public: + std::string id; + std::string displayName; +}; + +class AudioOutputDevice : public AudioDevice{ + +}; + +class AudioInputDevice : public AudioDevice{ + +}; + +class VoIPController +{ +public: + VoIPController(); + ~VoIPController(); + + /** + * Set the initial endpoints (relays) + * @param endpoints Endpoints converted from phone.PhoneConnection TL objects + * @param allowP2p Whether p2p connectivity is allowed + */ + void SetRemoteEndpoints(std::vector endpoints, bool allowP2p); + /** + * Initialize and start all the internal threads + */ + void Start(); + /** + * Initiate connection + */ + void Connect(); + Endpoint& GetRemoteEndpoint(); + /** + * Get the debug info string to be displayed in client UI + * @param buffer The buffer to put the string into + * @param len The length of the buffer + */ + void GetDebugString(char* buffer, size_t len); + /** + * Notify the library of network type change + * @param type The new network type + */ + void SetNetworkType(int type); + /** + * Get the average round-trip time for network packets + * @return + */ + double GetAverageRTT(); + /** + * Set the function to be called whenever the connection state changes + * @param f + 
*/ + void SetStateCallback(void (*f)(VoIPController*, int)); + static double GetCurrentTime(); + /** + * Use this field to store any of your context data associated with this call + */ + void* implData; + /** + * + * @param mute + */ + void SetMicMute(bool mute); + /** + * + * @param key + * @param isOutgoing + */ + void SetEncryptionKey(char* key, bool isOutgoing); + /** + * + * @param cfg + */ + void SetConfig(voip_config_t* cfg); + float GetOutputLevel(); + void DebugCtl(int request, int param); + /** + * + * @param stats + */ + void GetStats(voip_stats_t* stats); + /** + * + * @return + */ + int64_t GetPreferredRelayID(); + /** + * + * @return + */ + int GetLastError(); + /** + * + */ + static voip_crypto_functions_t crypto; + /** + * + * @return + */ + static const char* GetVersion(); +#ifdef TGVOIP_USE_AUDIO_SESSION + void SetAcquireAudioSession(void (^)(void (^)())); + void ReleaseAudioSession(void (^completion)()); +#endif + /** + * + * @return + */ + std::string GetDebugLog(); + /** + * + * @param buffer + */ + void GetDebugLog(char* buffer); + size_t GetDebugLogLength(); + /** + * + * @return + */ + static std::vector EnumerateAudioInputs(); + /** + * + * @return + */ + static std::vector EnumerateAudioOutputs(); + /** + * + * @param id + */ + void SetCurrentAudioInput(std::string id); + /** + * + * @param id + */ + void SetCurrentAudioOutput(std::string id); + /** + * + * @return + */ + std::string GetCurrentAudioInputID(); + /** + * + * @return + */ + std::string GetCurrentAudioOutputID(); + /** + * Set the proxy server to route the data through. Call this before connecting. 
+ * @param protocol PROXY_NONE, PROXY_SOCKS4, or PROXY_SOCKS5 + * @param address IP address or domain name of the server + * @param port Port of the server + * @param username Username; empty string for anonymous + * @param password Password; empty string if none + */ + void SetProxy(int protocol, std::string address, uint16_t port, std::string username, std::string password); + /** + * Get the number of signal bars to display in the client UI. + * @return the number of signal bars, from 1 to 4 + */ + int GetSignalBarsCount(); + /** + * Set the callback to be called when the signal bar count changes. + * @param f + */ + void SetSignalBarsCountCallback(void (*f)(VoIPController*, int)); + +private: + struct PendingOutgoingPacket{ + uint32_t seq; + unsigned char type; + size_t len; + unsigned char* data; + Endpoint* endpoint; + }; + enum{ + UDP_UNKNOWN=0, + UDP_PING_SENT, + UDP_AVAILABIE, + UDP_NOT_AVAILABLE + }; + + static void* StartRecvThread(void* arg); + static void* StartSendThread(void* arg); + static void* StartTickThread(void* arg); + void RunRecvThread(); + void RunSendThread(); + void RunTickThread(); + void SendPacket(unsigned char* data, size_t len, Endpoint* ep); + void HandleAudioInput(unsigned char* data, size_t len); + void UpdateAudioBitrate(); + void SetState(int state); + void UpdateAudioOutputState(); + void SendInit(); + void InitUDPProxy(); + void UpdateDataSavingState(); + void KDF(unsigned char* msgKey, size_t x, unsigned char* aesKey, unsigned char* aesIv); + void WritePacketHeader(uint32_t seq, BufferOutputStream* s, unsigned char type, uint32_t length); + static size_t AudioInputCallback(unsigned char* data, size_t length, void* param); + void SendPublicEndpointsRequest(); + void SendPublicEndpointsRequest(Endpoint& relay); + Endpoint* GetEndpointByType(int type); + void SendPacketReliably(unsigned char type, unsigned char* data, size_t len, double retryInterval, double timeout); + uint32_t GenerateOutSeq(); + void LogDebugInfo(); + void 
SendUdpPing(Endpoint* endpoint); + int state; + std::vector endpoints; + Endpoint* currentEndpoint; + Endpoint* preferredRelay; + Endpoint* peerPreferredRelay; + bool runReceiver; + uint32_t seq; + uint32_t lastRemoteSeq; + uint32_t lastRemoteAckSeq; + uint32_t lastSentSeq; + double remoteAcks[32]; + double sentPacketTimes[32]; + double recvPacketTimes[32]; + uint32_t sendLossCountHistory[32]; + uint32_t audioTimestampIn; + uint32_t audioTimestampOut; + tgvoip::audio::AudioInput* audioInput; + tgvoip::audio::AudioOutput* audioOutput; + JitterBuffer* jitterBuffer; + OpusDecoder* decoder; + OpusEncoder* encoder; + BlockingQueue* sendQueue; + EchoCanceller* echoCanceller; + tgvoip_mutex_t sendBufferMutex; + tgvoip_mutex_t endpointsMutex; + bool stopping; + bool audioOutStarted; + tgvoip_thread_t recvThread; + tgvoip_thread_t sendThread; + tgvoip_thread_t tickThread; + uint32_t packetsRecieved; + uint32_t recvLossCount; + uint32_t prevSendLossCount; + uint32_t firstSentPing; + double rttHistory[32]; + bool waitingForAcks; + int networkType; + int dontSendPackets; + int lastError; + bool micMuted; + uint32_t maxBitrate; + void (*stateCallback)(VoIPController*, int); + std::vector outgoingStreams; + std::vector incomingStreams; + unsigned char encryptionKey[256]; + unsigned char keyFingerprint[8]; + unsigned char callID[16]; + double stateChangeTime; + bool waitingForRelayPeerInfo; + bool allowP2p; + bool dataSavingMode; + bool dataSavingRequestedByPeer; + std::string activeNetItfName; + double publicEndpointsReqTime; + std::vector queuedPackets; + tgvoip_mutex_t queuedPacketsMutex; + double connectionInitTime; + double lastRecvPacketTime; + voip_config_t config; + int32_t peerVersion; + CongestionControl* conctl; + voip_stats_t stats; + bool receivedInit; + bool receivedInitAck; + std::vector debugLogs; + bool isOutgoing; + NetworkSocket* udpSocket; + NetworkSocket* realUdpSocket; + FILE* statsDump; + std::string currentAudioInput; + std::string currentAudioOutput; + 
bool useTCP; + bool useUDP; + bool didAddTcpRelays; + double setEstablishedAt; + SocketSelectCanceller* selectCanceller; + NetworkSocket* openingTcpSocket; + + BufferPool outgoingPacketsBufferPool; + int udpConnectivityState; + double lastUdpPingTime; + int udpPingCount; + + int proxyProtocol; + std::string proxyAddress; + uint16_t proxyPort; + std::string proxyUsername; + std::string proxyPassword; + IPv4Address* resolvedProxyAddress; + + int signalBarCount; + void (*signalBarCountCallback)(VoIPController*, int); + + /*** server config values ***/ + uint32_t maxAudioBitrate; + uint32_t maxAudioBitrateEDGE; + uint32_t maxAudioBitrateGPRS; + uint32_t maxAudioBitrateSaving; + uint32_t initAudioBitrate; + uint32_t initAudioBitrateEDGE; + uint32_t initAudioBitrateGPRS; + uint32_t initAudioBitrateSaving; + uint32_t minAudioBitrate; + uint32_t audioBitrateStepIncr; + uint32_t audioBitrateStepDecr; + double relaySwitchThreshold; + double p2pToRelaySwitchThreshold; + double relayToP2pSwitchThreshold; + double reconnectingTimeout; + +#ifdef TGVOIP_USE_AUDIO_SESSION +void (^acquireAudioSession)(void (^)()); +bool needNotifyAcquiredAudioSession; +#endif + +public: +#ifdef __APPLE__ +static double machTimebase; +static uint64_t machTimestart; +#if TARGET_OS_IPHONE +// temporary fix for nasty linking errors +void SetRemoteEndpoints(voip_legacy_endpoint_t* buffer, size_t count, bool allowP2P); +#endif +#endif +#ifdef _WIN32 +static int64_t win32TimeScale; +static bool didInitWin32TimeScale; +#endif +}; + +} + +#endif diff --git a/Telegram/ThirdParty/libtgvoip/VoIPServerConfig.cpp b/Telegram/ThirdParty/libtgvoip/VoIPServerConfig.cpp new file mode 100644 index 000000000..dee561dc6 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/VoIPServerConfig.cpp @@ -0,0 +1,101 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "VoIPServerConfig.h" +#include +#include "logging.h" + +using namespace tgvoip; + +ServerConfig* ServerConfig::sharedInstance=NULL; + +ServerConfig::ServerConfig(){ + init_mutex(mutex); +} + +ServerConfig::~ServerConfig(){ + free_mutex(mutex); +} + +ServerConfig *ServerConfig::GetSharedInstance(){ + if(!sharedInstance) + sharedInstance=new ServerConfig(); + return sharedInstance; +} + +bool ServerConfig::GetBoolean(std::string name, bool fallback){ + MutexGuard sync(mutex); + if(ContainsKey(name)){ + std::string val=config[name]; + if(val=="true") + return true; + if(val=="false") + return false; + } + return fallback; +} + +double ServerConfig::GetDouble(std::string name, double fallback){ + MutexGuard sync(mutex); + if(ContainsKey(name)){ + std::string val=config[name]; + char* end; + const char* start=val.c_str(); + double d=strtod(start, &end); + if(end!=start){ + return d; + } + } + return fallback; +} + +int32_t ServerConfig::GetInt(std::string name, int32_t fallback){ + MutexGuard sync(mutex); + if(ContainsKey(name)){ + std::string val=config[name]; + char* end; + const char* start=val.c_str(); + int32_t d=strtol(start, &end, 0); + if(end!=start){ + return d; + } + } + return fallback; +} + +std::string ServerConfig::GetString(std::string name, std::string fallback){ + MutexGuard sync(mutex); + if(ContainsKey(name)) + return config[name]; + return fallback; +} + +void ServerConfig::Update(std::map newValues){ + MutexGuard sync(mutex); + LOGD("=== Updating voip config ==="); + config.clear(); + for(std::map::iterator itr=newValues.begin();itr!=newValues.end();++itr){ + std::string key=itr->first; + std::string val=itr->second; + LOGV("%s -> %s", key.c_str(), val.c_str()); + config[key]=val; + } +} + +void ServerConfig::Update(const char **values, int count) { + std::map result; + for (int i = 0; i < count / 2; i++) { + result[values[i * 2 + 0]] = std::string(values[i * 2 + 1]); + } + Update(result); +} + + +bool 
ServerConfig::ContainsKey(std::string key){ + return config.find(key)!=config.end(); +} + + diff --git a/Telegram/ThirdParty/libtgvoip/VoIPServerConfig.h b/Telegram/ThirdParty/libtgvoip/VoIPServerConfig.h new file mode 100644 index 000000000..dc20318d7 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/VoIPServerConfig.h @@ -0,0 +1,37 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef TGVOIP_VOIPSERVERCONFIG_H +#define TGVOIP_VOIPSERVERCONFIG_H + +#include +#include +#include +#include "threading.h" + +namespace tgvoip{ + +class ServerConfig{ +public: + ServerConfig(); + ~ServerConfig(); + static ServerConfig* GetSharedInstance(); + int32_t GetInt(std::string name, int32_t fallback); + double GetDouble(std::string name, double fallback); + std::string GetString(std::string name, std::string fallback); + bool GetBoolean(std::string name, bool fallback); + void Update(std::map newValues); + void Update(const char **values, int count); + +private: + static ServerConfig* sharedInstance; + bool ContainsKey(std::string key); + std::map config; + tgvoip_mutex_t mutex; +}; +} + +#endif //TGVOIP_VOIPSERVERCONFIG_H diff --git a/Telegram/ThirdParty/libtgvoip/audio/AudioInput.cpp b/Telegram/ThirdParty/libtgvoip/audio/AudioInput.cpp new file mode 100644 index 000000000..cc552abbb --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/audio/AudioInput.cpp @@ -0,0 +1,106 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "AudioInput.h" +#include "../logging.h" +#if defined(__ANDROID__) +#include "../os/android/AudioInputAndroid.h" +#elif defined(__APPLE__) +#include +#include "../os/darwin/AudioInputAudioUnit.h" +#if TARGET_OS_OSX +#include "../os/darwin/AudioInputAudioUnitOSX.h" +#endif +#elif defined(_WIN32) +#ifdef TGVOIP_WINXP_COMPAT +#include "../os/windows/AudioInputWave.h" +#endif +#include "../os/windows/AudioInputWASAPI.h" +#elif defined(__linux__) +#include "../os/linux/AudioInputALSA.h" +#include "../os/linux/AudioInputPulse.h" +#else +#error "Unsupported operating system" +#endif + +using namespace tgvoip; +using namespace tgvoip::audio; + +int32_t AudioInput::estimatedDelay=60; + +AudioInput::AudioInput() : currentDevice("default"){ + failed=false; +} + +AudioInput::AudioInput(std::string deviceID) : currentDevice(deviceID){ + failed=false; +} + +AudioInput *AudioInput::Create(std::string deviceID){ +#if defined(__ANDROID__) + return new AudioInputAndroid(); +#elif defined(__APPLE__) +#if TARGET_OS_OSX + if(kCFCoreFoundationVersionNumberIsInitialized()) + delete aip; + else + return aip; + LOGW("in: PulseAudio available but not working; trying ALSA"); + } + return new AudioInputALSA(deviceID); +#endif +} + + +AudioInput::~AudioInput(){ + +} + +bool AudioInput::IsInitialized(){ + return !failed; +} + +void AudioInput::EnumerateDevices(std::vector& devs){ +#if defined(__APPLE__) && TARGET_OS_OSX + AudioInputAudioUnitLegacy::EnumerateDevices(devs); +#elif defined(_WIN32) +#ifdef TGVOIP_WINXP_COMPAT + if(LOBYTE(LOWORD(GetVersion()))<6){ + AudioInputWave::EnumerateDevices(devs); + return; + } +#endif + AudioInputWASAPI::EnumerateDevices(devs); +#elif defined(__linux__) && !defined(__ANDROID__) + if(!AudioInputPulse::IsAvailable() || !AudioInputPulse::EnumerateDevices(devs)) + AudioInputALSA::EnumerateDevices(devs); +#endif +} + +std::string AudioInput::GetCurrentDevice(){ + return currentDevice; +} + +void AudioInput::SetCurrentDevice(std::string deviceID){ 
+ +} + +int32_t AudioInput::GetEstimatedDelay(){ + return estimatedDelay; +} diff --git a/Telegram/ThirdParty/libtgvoip/audio/AudioInput.h b/Telegram/ThirdParty/libtgvoip/audio/AudioInput.h new file mode 100644 index 000000000..18420c94f --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/audio/AudioInput.h @@ -0,0 +1,42 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef LIBTGVOIP_AUDIOINPUT_H +#define LIBTGVOIP_AUDIOINPUT_H + +#include +#include +#include +#include "../MediaStreamItf.h" + +namespace tgvoip{ + +class AudioInputDevice; +class AudioOutputDevice; + +namespace audio{ +class AudioInput : public MediaStreamItf{ +public: + AudioInput(); + AudioInput(std::string deviceID); + virtual ~AudioInput(); + + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels)=0; + bool IsInitialized(); + virtual std::string GetCurrentDevice(); + virtual void SetCurrentDevice(std::string deviceID); + static AudioInput* Create(std::string deviceID); + static void EnumerateDevices(std::vector& devs); + static int32_t GetEstimatedDelay(); + +protected: + std::string currentDevice; + bool failed; + static int32_t estimatedDelay; +}; +}} + +#endif //LIBTGVOIP_AUDIOINPUT_H diff --git a/Telegram/ThirdParty/libtgvoip/audio/AudioOutput.cpp b/Telegram/ThirdParty/libtgvoip/audio/AudioOutput.cpp new file mode 100644 index 000000000..3e3dbd7af --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/audio/AudioOutput.cpp @@ -0,0 +1,121 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "AudioOutput.h" +#include "../logging.h" +#if defined(__ANDROID__) +#include "../os/android/AudioOutputOpenSLES.h" +#include "../os/android/AudioOutputAndroid.h" +#elif defined(__APPLE__) +#include +#include "../os/darwin/AudioOutputAudioUnit.h" +#if TARGET_OS_OSX +#include "../os/darwin/AudioOutputAudioUnitOSX.h" +#endif +#elif defined(_WIN32) +#ifdef TGVOIP_WINXP_COMPAT +#include "../os/windows/AudioOutputWave.h" +#endif +#include "../os/windows/AudioOutputWASAPI.h" +#elif defined(__linux__) +#include "../os/linux/AudioOutputALSA.h" +#include "../os/linux/AudioOutputPulse.h" +#else +#error "Unsupported operating system" +#endif + +using namespace tgvoip; +using namespace tgvoip::audio; + +#if defined(__ANDROID__) +int AudioOutput::systemVersion; +#endif +int32_t AudioOutput::estimatedDelay=60; + +AudioOutput *AudioOutput::Create(std::string deviceID){ +#if defined(__ANDROID__) + if(systemVersion<21) + return new AudioOutputAndroid(); + return new AudioOutputOpenSLES(); +#elif defined(__APPLE__) +#if TARGET_OS_OSX + if(kCFCoreFoundationVersionNumberIsInitialized()) + delete aop; + else + return aop; + LOGW("out: PulseAudio available but not working; trying ALSA"); + } + return new AudioOutputALSA(deviceID); +#endif +} + +AudioOutput::AudioOutput() : currentDevice("default"){ + failed=false; +} + +AudioOutput::AudioOutput(std::string deviceID) : currentDevice(deviceID){ + failed=false; +} + +AudioOutput::~AudioOutput(){ + +} + + +int32_t AudioOutput::GetEstimatedDelay(){ +#if defined(__ANDROID__) + return systemVersion<21 ? 
150 : 50; +#endif + return estimatedDelay; +} + +float AudioOutput::GetLevel(){ + return 0; +} + + +void AudioOutput::EnumerateDevices(std::vector& devs){ +#if defined(__APPLE__) && TARGET_OS_OSX + AudioOutputAudioUnitLegacy::EnumerateDevices(devs); +#elif defined(_WIN32) +#ifdef TGVOIP_WINXP_COMPAT + if(LOBYTE(LOWORD(GetVersion()))<6){ + AudioOutputWave::EnumerateDevices(devs); + return; + } +#endif + AudioOutputWASAPI::EnumerateDevices(devs); +#elif defined(__linux__) && !defined(__ANDROID__) + if(!AudioOutputPulse::IsAvailable() || !AudioOutputPulse::EnumerateDevices(devs)) + AudioOutputALSA::EnumerateDevices(devs); +#endif +} + + +std::string AudioOutput::GetCurrentDevice(){ + return currentDevice; +} + +void AudioOutput::SetCurrentDevice(std::string deviceID){ + +} + +bool AudioOutput::IsInitialized(){ + return !failed; +} diff --git a/Telegram/ThirdParty/libtgvoip/audio/AudioOutput.h b/Telegram/ThirdParty/libtgvoip/audio/AudioOutput.h new file mode 100644 index 000000000..02225b4f3 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/audio/AudioOutput.h @@ -0,0 +1,47 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOOUTPUT_H +#define LIBTGVOIP_AUDIOOUTPUT_H + +#include +#include +#include +#include "../MediaStreamItf.h" + +namespace tgvoip{ + +class AudioInputDevice; +class AudioOutputDevice; + +namespace audio{ +class AudioOutput : public MediaStreamItf{ +public: + AudioOutput(); + AudioOutput(std::string deviceID); + virtual ~AudioOutput(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels)=0; + virtual bool IsPlaying()=0; + virtual float GetLevel(); + static int32_t GetEstimatedDelay(); + virtual std::string GetCurrentDevice(); + virtual void SetCurrentDevice(std::string deviceID); + static AudioOutput* Create(std::string deviceID); + static void EnumerateDevices(std::vector& devs); + bool IsInitialized(); + +#if defined(__ANDROID__) + static int systemVersion; +#endif + +protected: + std::string currentDevice; + bool failed; + static int32_t estimatedDelay; +}; +}} + +#endif //LIBTGVOIP_AUDIOOUTPUT_H diff --git a/Telegram/ThirdParty/libtgvoip/audio/Resampler.cpp b/Telegram/ThirdParty/libtgvoip/audio/Resampler.cpp new file mode 100644 index 000000000..076d4ca78 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/audio/Resampler.cpp @@ -0,0 +1,117 @@ +// +// Created by Grishka on 01.04.17. 
+// + +#include +#include +#include "Resampler.h" + +using namespace tgvoip::audio; +static const int16_t hann[960]={ + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0002, 0x0003, 0x0004, 0x0006, 0x0007, 0x0009, 0x000B, 0x000D, 0x000F, 0x0011, 0x0014, 0x0016, 0x0019, 0x001C, 0x0020, + 0x0023, 0x0027, 0x002A, 0x002E, 0x0033, 0x0037, 0x003B, 0x0040, 0x0045, 0x004A, 0x004F, 0x0054, 0x005A, 0x0060, 0x0065, 0x006B, 0x0072, 0x0078, 0x007F, 0x0085, + 0x008C, 0x0093, 0x009B, 0x00A2, 0x00AA, 0x00B2, 0x00B9, 0x00C2, 0x00CA, 0x00D2, 0x00DB, 0x00E4, 0x00ED, 0x00F6, 0x00FF, 0x0109, 0x0113, 0x011C, 0x0127, 0x0131, + 0x013B, 0x0146, 0x0150, 0x015B, 0x0166, 0x0172, 0x017D, 0x0189, 0x0194, 0x01A0, 0x01AC, 0x01B9, 0x01C5, 0x01D2, 0x01DF, 0x01EC, 0x01F9, 0x0206, 0x0213, 0x0221, + 0x022F, 0x023D, 0x024B, 0x0259, 0x0268, 0x0276, 0x0285, 0x0294, 0x02A3, 0x02B3, 0x02C2, 0x02D2, 0x02E2, 0x02F2, 0x0302, 0x0312, 0x0323, 0x0333, 0x0344, 0x0355, + 0x0366, 0x0378, 0x0389, 0x039B, 0x03AD, 0x03BF, 0x03D1, 0x03E3, 0x03F6, 0x0408, 0x041B, 0x042E, 0x0441, 0x0455, 0x0468, 0x047C, 0x0490, 0x04A4, 0x04B8, 0x04CC, + 0x04E0, 0x04F5, 0x050A, 0x051F, 0x0534, 0x0549, 0x055F, 0x0574, 0x058A, 0x05A0, 0x05B6, 0x05CC, 0x05E2, 0x05F9, 0x0610, 0x0627, 0x063E, 0x0655, 0x066C, 0x0684, + 0x069B, 0x06B3, 0x06CB, 0x06E3, 0x06FC, 0x0714, 0x072D, 0x0745, 0x075E, 0x0777, 0x0791, 0x07AA, 0x07C3, 0x07DD, 0x07F7, 0x0811, 0x082B, 0x0845, 0x0860, 0x087A, + 0x0895, 0x08B0, 0x08CB, 0x08E6, 0x0902, 0x091D, 0x0939, 0x0955, 0x0971, 0x098D, 0x09A9, 0x09C6, 0x09E2, 0x09FF, 0x0A1C, 0x0A39, 0x0A56, 0x0A73, 0x0A91, 0x0AAE, + 0x0ACC, 0x0AEA, 0x0B08, 0x0B26, 0x0B44, 0x0B63, 0x0B81, 0x0BA0, 0x0BBF, 0x0BDE, 0x0BFD, 0x0C1D, 0x0C3C, 0x0C5C, 0x0C7B, 0x0C9B, 0x0CBB, 0x0CDC, 0x0CFC, 0x0D1C, + 0x0D3D, 0x0D5E, 0x0D7F, 0x0DA0, 0x0DC1, 0x0DE2, 0x0E04, 0x0E25, 0x0E47, 0x0E69, 0x0E8B, 0x0EAD, 0x0ECF, 0x0EF1, 0x0F14, 0x0F37, 0x0F59, 0x0F7C, 0x0F9F, 0x0FC2, + 0x0FE6, 0x1009, 0x102D, 0x1051, 0x1074, 0x1098, 0x10BC, 0x10E1, 0x1105, 0x112A, 0x114E, 0x1173, 
0x1198, 0x11BD, 0x11E2, 0x1207, 0x122D, 0x1252, 0x1278, 0x129D, + 0x12C3, 0x12E9, 0x130F, 0x1336, 0x135C, 0x1383, 0x13A9, 0x13D0, 0x13F7, 0x141E, 0x1445, 0x146C, 0x1494, 0x14BB, 0x14E3, 0x150A, 0x1532, 0x155A, 0x1582, 0x15AA, + 0x15D3, 0x15FB, 0x1623, 0x164C, 0x1675, 0x169E, 0x16C7, 0x16F0, 0x1719, 0x1742, 0x176C, 0x1795, 0x17BF, 0x17E9, 0x1813, 0x183D, 0x1867, 0x1891, 0x18BB, 0x18E6, + 0x1910, 0x193B, 0x1965, 0x1990, 0x19BB, 0x19E6, 0x1A11, 0x1A3D, 0x1A68, 0x1A93, 0x1ABF, 0x1AEB, 0x1B17, 0x1B42, 0x1B6E, 0x1B9A, 0x1BC7, 0x1BF3, 0x1C1F, 0x1C4C, + 0x1C78, 0x1CA5, 0x1CD2, 0x1CFF, 0x1D2C, 0x1D59, 0x1D86, 0x1DB3, 0x1DE0, 0x1E0E, 0x1E3B, 0x1E69, 0x1E97, 0x1EC4, 0x1EF2, 0x1F20, 0x1F4E, 0x1F7C, 0x1FAB, 0x1FD9, + 0x2007, 0x2036, 0x2065, 0x2093, 0x20C2, 0x20F1, 0x2120, 0x214F, 0x217E, 0x21AD, 0x21DD, 0x220C, 0x223B, 0x226B, 0x229A, 0x22CA, 0x22FA, 0x232A, 0x235A, 0x238A, + 0x23BA, 0x23EA, 0x241A, 0x244B, 0x247B, 0x24AB, 0x24DC, 0x250D, 0x253D, 0x256E, 0x259F, 0x25D0, 0x2601, 0x2632, 0x2663, 0x2694, 0x26C5, 0x26F7, 0x2728, 0x275A, + 0x278B, 0x27BD, 0x27EE, 0x2820, 0x2852, 0x2884, 0x28B6, 0x28E8, 0x291A, 0x294C, 0x297E, 0x29B0, 0x29E3, 0x2A15, 0x2A47, 0x2A7A, 0x2AAC, 0x2ADF, 0x2B12, 0x2B44, + 0x2B77, 0x2BAA, 0x2BDD, 0x2C10, 0x2C43, 0x2C76, 0x2CA9, 0x2CDC, 0x2D0F, 0x2D43, 0x2D76, 0x2DA9, 0x2DDD, 0x2E10, 0x2E44, 0x2E77, 0x2EAB, 0x2EDF, 0x2F12, 0x2F46, + 0x2F7A, 0x2FAE, 0x2FE2, 0x3016, 0x304A, 0x307E, 0x30B2, 0x30E6, 0x311A, 0x314E, 0x3182, 0x31B7, 0x31EB, 0x321F, 0x3254, 0x3288, 0x32BD, 0x32F1, 0x3326, 0x335A, + 0x338F, 0x33C3, 0x33F8, 0x342D, 0x3461, 0x3496, 0x34CB, 0x3500, 0x3535, 0x356A, 0x359F, 0x35D4, 0x3608, 0x363D, 0x3673, 0x36A8, 0x36DD, 0x3712, 0x3747, 0x377C, + 0x37B1, 0x37E6, 0x381C, 0x3851, 0x3886, 0x38BB, 0x38F1, 0x3926, 0x395B, 0x3991, 0x39C6, 0x39FC, 0x3A31, 0x3A66, 0x3A9C, 0x3AD1, 0x3B07, 0x3B3C, 0x3B72, 0x3BA7, + 0x3BDD, 0x3C12, 0x3C48, 0x3C7D, 0x3CB3, 0x3CE9, 0x3D1E, 0x3D54, 0x3D89, 0x3DBF, 0x3DF5, 0x3E2A, 0x3E60, 0x3E95, 0x3ECB, 0x3F01, 0x3F36, 0x3F6C, 0x3FA2, 
0x3FD7, + 0x400D, 0x4043, 0x4078, 0x40AE, 0x40E3, 0x4119, 0x414F, 0x4184, 0x41BA, 0x41F0, 0x4225, 0x425B, 0x4290, 0x42C6, 0x42FC, 0x4331, 0x4367, 0x439C, 0x43D2, 0x4407, + 0x443D, 0x4472, 0x44A8, 0x44DD, 0x4513, 0x4548, 0x457E, 0x45B3, 0x45E9, 0x461E, 0x4654, 0x4689, 0x46BE, 0x46F4, 0x4729, 0x475E, 0x4793, 0x47C9, 0x47FE, 0x4833, + 0x4868, 0x489E, 0x48D3, 0x4908, 0x493D, 0x4972, 0x49A7, 0x49DC, 0x4A11, 0x4A46, 0x4A7B, 0x4AB0, 0x4AE5, 0x4B1A, 0x4B4E, 0x4B83, 0x4BB8, 0x4BED, 0x4C21, 0x4C56, + 0x4C8B, 0x4CBF, 0x4CF4, 0x4D28, 0x4D5D, 0x4D91, 0x4DC6, 0x4DFA, 0x4E2E, 0x4E63, 0x4E97, 0x4ECB, 0x4EFF, 0x4F33, 0x4F67, 0x4F9B, 0x4FCF, 0x5003, 0x5037, 0x506B, + 0x509F, 0x50D3, 0x5106, 0x513A, 0x516E, 0x51A1, 0x51D5, 0x5208, 0x523C, 0x526F, 0x52A3, 0x52D6, 0x5309, 0x533C, 0x536F, 0x53A3, 0x53D6, 0x5409, 0x543B, 0x546E, + 0x54A1, 0x54D4, 0x5507, 0x5539, 0x556C, 0x559E, 0x55D1, 0x5603, 0x5636, 0x5668, 0x569A, 0x56CC, 0x56FE, 0x5730, 0x5762, 0x5794, 0x57C6, 0x57F8, 0x5829, 0x585B, + 0x588D, 0x58BE, 0x58F0, 0x5921, 0x5952, 0x5984, 0x59B5, 0x59E6, 0x5A17, 0x5A48, 0x5A79, 0x5AA9, 0x5ADA, 0x5B0B, 0x5B3B, 0x5B6C, 0x5B9C, 0x5BCD, 0x5BFD, 0x5C2D, + 0x5C5D, 0x5C8D, 0x5CBD, 0x5CED, 0x5D1D, 0x5D4D, 0x5D7C, 0x5DAC, 0x5DDB, 0x5E0B, 0x5E3A, 0x5E69, 0x5E99, 0x5EC8, 0x5EF7, 0x5F26, 0x5F54, 0x5F83, 0x5FB2, 0x5FE0, + 0x600F, 0x603D, 0x606B, 0x609A, 0x60C8, 0x60F6, 0x6124, 0x6152, 0x617F, 0x61AD, 0x61DB, 0x6208, 0x6235, 0x6263, 0x6290, 0x62BD, 0x62EA, 0x6317, 0x6344, 0x6370, + 0x639D, 0x63CA, 0x63F6, 0x6422, 0x644E, 0x647B, 0x64A7, 0x64D3, 0x64FE, 0x652A, 0x6556, 0x6581, 0x65AD, 0x65D8, 0x6603, 0x662E, 0x6659, 0x6684, 0x66AF, 0x66DA, + 0x6704, 0x672F, 0x6759, 0x6783, 0x67AD, 0x67D7, 0x6801, 0x682B, 0x6855, 0x687E, 0x68A8, 0x68D1, 0x68FB, 0x6924, 0x694D, 0x6976, 0x699F, 0x69C7, 0x69F0, 0x6A18, + 0x6A41, 0x6A69, 0x6A91, 0x6AB9, 0x6AE1, 0x6B09, 0x6B30, 0x6B58, 0x6B7F, 0x6BA6, 0x6BCE, 0x6BF5, 0x6C1C, 0x6C42, 0x6C69, 0x6C90, 0x6CB6, 0x6CDC, 0x6D03, 0x6D29, + 0x6D4F, 0x6D74, 0x6D9A, 0x6DC0, 0x6DE5, 
0x6E0A, 0x6E30, 0x6E55, 0x6E7A, 0x6E9E, 0x6EC3, 0x6EE8, 0x6F0C, 0x6F30, 0x6F55, 0x6F79, 0x6F9D, 0x6FC0, 0x6FE4, 0x7008, + 0x702B, 0x704E, 0x7071, 0x7094, 0x70B7, 0x70DA, 0x70FC, 0x711F, 0x7141, 0x7163, 0x7185, 0x71A7, 0x71C9, 0x71EB, 0x720C, 0x722E, 0x724F, 0x7270, 0x7291, 0x72B2, + 0x72D2, 0x72F3, 0x7313, 0x7333, 0x7354, 0x7374, 0x7393, 0x73B3, 0x73D3, 0x73F2, 0x7411, 0x7430, 0x744F, 0x746E, 0x748D, 0x74AB, 0x74CA, 0x74E8, 0x7506, 0x7524, + 0x7542, 0x7560, 0x757D, 0x759B, 0x75B8, 0x75D5, 0x75F2, 0x760F, 0x762B, 0x7648, 0x7664, 0x7680, 0x769C, 0x76B8, 0x76D4, 0x76F0, 0x770B, 0x7726, 0x7741, 0x775C, + 0x7777, 0x7792, 0x77AC, 0x77C7, 0x77E1, 0x77FB, 0x7815, 0x782F, 0x7848, 0x7862, 0x787B, 0x7894, 0x78AD, 0x78C6, 0x78DF, 0x78F7, 0x7910, 0x7928, 0x7940, 0x7958, + 0x7970, 0x7987, 0x799F, 0x79B6, 0x79CD, 0x79E4, 0x79FB, 0x7A11, 0x7A28, 0x7A3E, 0x7A54, 0x7A6A, 0x7A80, 0x7A96, 0x7AAB, 0x7AC1, 0x7AD6, 0x7AEB, 0x7B00, 0x7B14, + 0x7B29, 0x7B3D, 0x7B51, 0x7B65, 0x7B79, 0x7B8D, 0x7BA1, 0x7BB4, 0x7BC7, 0x7BDA, 0x7BED, 0x7C00, 0x7C13, 0x7C25, 0x7C37, 0x7C49, 0x7C5B, 0x7C6D, 0x7C7F, 0x7C90, + 0x7CA1, 0x7CB2, 0x7CC3, 0x7CD4, 0x7CE5, 0x7CF5, 0x7D05, 0x7D15, 0x7D25, 0x7D35, 0x7D45, 0x7D54, 0x7D63, 0x7D72, 0x7D81, 0x7D90, 0x7D9F, 0x7DAD, 0x7DBB, 0x7DC9, + 0x7DD7, 0x7DE5, 0x7DF2, 0x7E00, 0x7E0D, 0x7E1A, 0x7E27, 0x7E34, 0x7E40, 0x7E4C, 0x7E59, 0x7E65, 0x7E71, 0x7E7C, 0x7E88, 0x7E93, 0x7E9E, 0x7EA9, 0x7EB4, 0x7EBF, + 0x7EC9, 0x7ED3, 0x7EDE, 0x7EE7, 0x7EF1, 0x7EFB, 0x7F04, 0x7F0E, 0x7F17, 0x7F20, 0x7F28, 0x7F31, 0x7F39, 0x7F41, 0x7F4A, 0x7F51, 0x7F59, 0x7F61, 0x7F68, 0x7F6F, + 0x7F76, 0x7F7D, 0x7F84, 0x7F8A, 0x7F90, 0x7F97, 0x7F9D, 0x7FA2, 0x7FA8, 0x7FAD, 0x7FB3, 0x7FB8, 0x7FBD, 0x7FC1, 0x7FC6, 0x7FCA, 0x7FCF, 0x7FD3, 0x7FD6, 0x7FDA, + 0x7FDE, 0x7FE1, 0x7FE4, 0x7FE7, 0x7FEA, 0x7FED, 0x7FEF, 0x7FF1, 0x7FF3, 0x7FF5, 0x7FF7, 0x7FF9, 0x7FFA, 0x7FFB, 0x7FFC, 0x7FFD, 0x7FFE, 0x7FFE, 0x7FFF, 0x7FFF +}; + +#define MIN(a, b) (a> 15) + (int16_t)(((int32_t)in[480+i]*hann[i]) >> 15); + 
out[1920+i]=(int16_t)(((int32_t)in[960+480+i]*hann[959-i]) >> 15) + (int16_t)(((int32_t)in[960+i]*hann[i]) >> 15); + } +} + +void Resampler::Rescale60To40(int16_t *in, int16_t *out){ + for(int i=0;i<960;i++){ + out[i]=(int16_t)(((int32_t)in[i]*hann[959-i]) >> 15) + (int16_t)(((int32_t)in[480+i]*hann[i]) >> 15); + out[960+i]=(int16_t)(((int32_t)in[1920+i]*hann[i]) >> 15) + (int16_t)(((int32_t)in[1440+i]*hann[959-i]) >> 15); + } +} diff --git a/Telegram/ThirdParty/libtgvoip/audio/Resampler.h b/Telegram/ThirdParty/libtgvoip/audio/Resampler.h new file mode 100644 index 000000000..b1fb7e477 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/audio/Resampler.h @@ -0,0 +1,22 @@ +// +// Created by Grishka on 01.04.17. +// + +#ifndef LIBTGVOIP_RESAMPLER_H +#define LIBTGVOIP_RESAMPLER_H + +#include +#include + +namespace tgvoip{ namespace audio{ + class Resampler{ + public: + static size_t Convert48To44(int16_t* from, int16_t* to, size_t fromLen, size_t toLen); + static size_t Convert44To48(int16_t* from, int16_t* to, size_t fromLen, size_t toLen); + static size_t Convert(int16_t* from, int16_t* to, size_t fromLen, size_t toLen, int num, int denom); + static void Rescale60To80(int16_t* in, int16_t* out); + static void Rescale60To40(int16_t* in, int16_t* out); + }; +}} + +#endif //LIBTGVOIP_RESAMPLER_H diff --git a/Telegram/ThirdParty/libtgvoip/client/android/tg_voip_jni.cpp b/Telegram/ThirdParty/libtgvoip/client/android/tg_voip_jni.cpp new file mode 100644 index 000000000..dcb18dff4 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/client/android/tg_voip_jni.cpp @@ -0,0 +1,308 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include +#include +#include +#include +#include +#include +#include "../../VoIPController.h" +#include "../../os/android/AudioOutputOpenSLES.h" +#include "../../os/android/AudioInputOpenSLES.h" +#include "../../os/android/AudioInputAndroid.h" +#include "../../os/android/AudioOutputAndroid.h" +#include "../../audio/Resampler.h" + +JavaVM* sharedJVM; +jfieldID audioRecordInstanceFld=NULL; +jfieldID audioTrackInstanceFld=NULL; +jmethodID setStateMethod=NULL; +jmethodID setSignalBarsMethod=NULL; + +struct impl_data_android_t{ + jobject javaObject; +}; + +using namespace tgvoip; +using namespace tgvoip::audio; + +void updateConnectionState(VoIPController* cntrlr, int state){ + impl_data_android_t* impl=(impl_data_android_t*) cntrlr->implData; + if(!impl->javaObject) + return; + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + if(setStateMethod) + env->CallVoidMethod(impl->javaObject, setStateMethod, state); + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } +} + +void updateSignalBarCount(VoIPController* cntrlr, int count){ + impl_data_android_t* impl=(impl_data_android_t*) cntrlr->implData; + if(!impl->javaObject) + return; + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + if(setSignalBarsMethod) + env->CallVoidMethod(impl->javaObject, setSignalBarsMethod, count); + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } +} + +extern "C" JNIEXPORT jlong Java_org_telegram_messenger_voip_VoIPController_nativeInit(JNIEnv* env, jobject thiz, jint systemVersion){ + AudioOutputAndroid::systemVersion=systemVersion; + + env->GetJavaVM(&sharedJVM); + if(!AudioInputAndroid::jniClass){ + jclass cls=env->FindClass("org/telegram/messenger/voip/AudioRecordJNI"); + AudioInputAndroid::jniClass=(jclass) 
env->NewGlobalRef(cls); + AudioInputAndroid::initMethod=env->GetMethodID(cls, "init", "(IIII)V"); + AudioInputAndroid::releaseMethod=env->GetMethodID(cls, "release", "()V"); + AudioInputAndroid::startMethod=env->GetMethodID(cls, "start", "()Z"); + AudioInputAndroid::stopMethod=env->GetMethodID(cls, "stop", "()V"); + + cls=env->FindClass("org/telegram/messenger/voip/AudioTrackJNI"); + AudioOutputAndroid::jniClass=(jclass) env->NewGlobalRef(cls); + AudioOutputAndroid::initMethod=env->GetMethodID(cls, "init", "(IIII)V"); + AudioOutputAndroid::releaseMethod=env->GetMethodID(cls, "release", "()V"); + AudioOutputAndroid::startMethod=env->GetMethodID(cls, "start", "()V"); + AudioOutputAndroid::stopMethod=env->GetMethodID(cls, "stop", "()V"); + } + + setStateMethod=env->GetMethodID(env->GetObjectClass(thiz), "handleStateChange", "(I)V"); + setSignalBarsMethod=env->GetMethodID(env->GetObjectClass(thiz), "handleSignalBarsChange", "(I)V"); + + impl_data_android_t* impl=(impl_data_android_t*) malloc(sizeof(impl_data_android_t)); + impl->javaObject=env->NewGlobalRef(thiz); + VoIPController* cntrlr=new VoIPController(); + cntrlr->implData=impl; + cntrlr->SetStateCallback(updateConnectionState); + cntrlr->SetSignalBarsCountCallback(updateSignalBarCount); + return (jlong)(intptr_t)cntrlr; +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeStart(JNIEnv* env, jobject thiz, jlong inst){ + ((VoIPController*)(intptr_t)inst)->Start(); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeConnect(JNIEnv* env, jobject thiz, jlong inst){ + ((VoIPController*)(intptr_t)inst)->Connect(); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeSetProxy(JNIEnv* env, jobject thiz, jlong inst, jstring _address, jint port, jstring _username, jstring _password){ + const char* address=env->GetStringUTFChars(_address, NULL); + const char* username=_username ? 
env->GetStringUTFChars(_username, NULL) : NULL; + const char* password=_password ? env->GetStringUTFChars(_password, NULL) : NULL; + ((VoIPController*)(intptr_t)inst)->SetProxy(PROXY_SOCKS5, address, (uint16_t)port, username ? username : "", password ? password : ""); + env->ReleaseStringUTFChars(_address, address); + if(username) + env->ReleaseStringUTFChars(_username, username); + if(password) + env->ReleaseStringUTFChars(_password, password); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeSetEncryptionKey(JNIEnv* env, jobject thiz, jlong inst, jbyteArray key, jboolean isOutgoing){ + jbyte* akey=env->GetByteArrayElements(key, NULL); + ((VoIPController*)(intptr_t)inst)->SetEncryptionKey((char *) akey, isOutgoing); + env->ReleaseByteArrayElements(key, akey, JNI_ABORT); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeSetRemoteEndpoints(JNIEnv* env, jobject thiz, jlong inst, jobjectArray endpoints, jboolean allowP2p){ + size_t len=(size_t) env->GetArrayLength(endpoints); +// voip_endpoint_t* eps=(voip_endpoint_t *) malloc(sizeof(voip_endpoint_t)*len); + std::vector eps; + /*public String ip; + public String ipv6; + public int port; + public byte[] peer_tag;*/ + jclass epClass=env->GetObjectClass(env->GetObjectArrayElement(endpoints, 0)); + jfieldID ipFld=env->GetFieldID(epClass, "ip", "Ljava/lang/String;"); + jfieldID ipv6Fld=env->GetFieldID(epClass, "ipv6", "Ljava/lang/String;"); + jfieldID portFld=env->GetFieldID(epClass, "port", "I"); + jfieldID peerTagFld=env->GetFieldID(epClass, "peer_tag", "[B"); + jfieldID idFld=env->GetFieldID(epClass, "id", "J"); + int i; + for(i=0;iGetObjectArrayElement(endpoints, i); + jstring ip=(jstring) env->GetObjectField(endpoint, ipFld); + jstring ipv6=(jstring) env->GetObjectField(endpoint, ipv6Fld); + jint port=env->GetIntField(endpoint, portFld); + jlong id=env->GetLongField(endpoint, idFld); + jbyteArray peerTag=(jbyteArray) env->GetObjectField(endpoint, 
peerTagFld); + const char* ipChars=env->GetStringUTFChars(ip, NULL); + std::string ipLiteral(ipChars); + IPv4Address v4addr(ipLiteral); + IPv6Address v6addr("::0"); + env->ReleaseStringUTFChars(ip, ipChars); + if(ipv6 && env->GetStringLength(ipv6)){ + const char* ipv6Chars=env->GetStringUTFChars(ipv6, NULL); + v6addr=IPv6Address(ipv6Chars); + env->ReleaseStringUTFChars(ipv6, ipv6Chars); + } + unsigned char pTag[16]; + if(peerTag && env->GetArrayLength(peerTag)){ + jbyte* peerTagBytes=env->GetByteArrayElements(peerTag, NULL); + memcpy(pTag, peerTagBytes, 16); + env->ReleaseByteArrayElements(peerTag, peerTagBytes, JNI_ABORT); + } + eps.push_back(Endpoint((int64_t)id, (uint16_t)port, v4addr, v6addr, EP_TYPE_UDP_RELAY, pTag)); + } + ((VoIPController*)(intptr_t)inst)->SetRemoteEndpoints(eps, allowP2p); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeSetNativeBufferSize(JNIEnv* env, jclass thiz, jint size){ + AudioOutputOpenSLES::nativeBufferSize=size; + AudioInputOpenSLES::nativeBufferSize=size; +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeRelease(JNIEnv* env, jobject thiz, jlong inst){ + //env->DeleteGlobalRef(AudioInputAndroid::jniClass); + + VoIPController* ctlr=((VoIPController*)(intptr_t)inst); + impl_data_android_t* impl=(impl_data_android_t*)ctlr->implData; + delete ctlr; + env->DeleteGlobalRef(impl->javaObject); + ((impl_data_android_t*)ctlr->implData)->javaObject=NULL; + free(impl); +} + + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_AudioRecordJNI_nativeCallback(JNIEnv* env, jobject thiz, jobject buffer){ + if(!audioRecordInstanceFld) + audioRecordInstanceFld=env->GetFieldID(env->GetObjectClass(thiz), "nativeInst", "J"); + + jlong inst=env->GetLongField(thiz, audioRecordInstanceFld); + AudioInputAndroid* in=(AudioInputAndroid*)(intptr_t)inst; + in->HandleCallback(env, buffer); +} + +extern "C" JNIEXPORT void 
Java_org_telegram_messenger_voip_AudioTrackJNI_nativeCallback(JNIEnv* env, jobject thiz, jbyteArray buffer){ + if(!audioTrackInstanceFld) + audioTrackInstanceFld=env->GetFieldID(env->GetObjectClass(thiz), "nativeInst", "J"); + + jlong inst=env->GetLongField(thiz, audioTrackInstanceFld); + AudioOutputAndroid* in=(AudioOutputAndroid*)(intptr_t)inst; + in->HandleCallback(env, buffer); +} + +extern "C" JNIEXPORT jstring Java_org_telegram_messenger_voip_VoIPController_nativeGetDebugString(JNIEnv* env, jobject thiz, jlong inst){ + char buf[10240]; + ((VoIPController*)(intptr_t)inst)->GetDebugString(buf, 10240); + return env->NewStringUTF(buf); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeSetNetworkType(JNIEnv* env, jobject thiz, jlong inst, jint type){ + ((VoIPController*)(intptr_t)inst)->SetNetworkType(type); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeSetMicMute(JNIEnv* env, jobject thiz, jlong inst, jboolean mute){ + ((VoIPController*)(intptr_t)inst)->SetMicMute(mute); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeSetConfig(JNIEnv* env, jobject thiz, jlong inst, jdouble recvTimeout, jdouble initTimeout, jint dataSavingMode, jboolean enableAEC, jboolean enableNS, jboolean enableAGC, jstring logFilePath, jstring statsDumpPath){ + voip_config_t cfg; + cfg.init_timeout=initTimeout; + cfg.recv_timeout=recvTimeout; + cfg.data_saving=dataSavingMode; + cfg.enableAEC=enableAEC; + cfg.enableNS=enableNS; + cfg.enableAGC=enableAGC; + if(logFilePath){ + char* path=(char *) env->GetStringUTFChars(logFilePath, NULL); + strncpy(cfg.logFilePath, path, sizeof(cfg.logFilePath)); + cfg.logFilePath[sizeof(cfg.logFilePath)-1]=0; + env->ReleaseStringUTFChars(logFilePath, path); + }else{ + memset(cfg.logFilePath, 0, sizeof(cfg.logFilePath)); + } + if(statsDumpPath){ + char* path=(char *) env->GetStringUTFChars(statsDumpPath, NULL); + strncpy(cfg.statsDumpFilePath, path, 
sizeof(cfg.statsDumpFilePath)); + cfg.statsDumpFilePath[sizeof(cfg.logFilePath)-1]=0; + env->ReleaseStringUTFChars(logFilePath, path); + }else{ + memset(cfg.statsDumpFilePath, 0, sizeof(cfg.statsDumpFilePath)); + } + ((VoIPController*)(intptr_t)inst)->SetConfig(&cfg); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeDebugCtl(JNIEnv* env, jobject thiz, jlong inst, jint request, jint param){ + ((VoIPController*)(intptr_t)inst)->DebugCtl(request, param); +} + +extern "C" JNIEXPORT jstring Java_org_telegram_messenger_voip_VoIPController_nativeGetVersion(JNIEnv* env, jclass clasz){ + return env->NewStringUTF(VoIPController::GetVersion()); +} + +extern "C" JNIEXPORT jlong Java_org_telegram_messenger_voip_VoIPController_nativeGetPreferredRelayID(JNIEnv* env, jclass clasz, jlong inst){ + return ((VoIPController*)(intptr_t)inst)->GetPreferredRelayID(); +} + +extern "C" JNIEXPORT jint Java_org_telegram_messenger_voip_VoIPController_nativeGetLastError(JNIEnv* env, jclass clasz, jlong inst){ + return ((VoIPController*)(intptr_t)inst)->GetLastError(); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPController_nativeGetStats(JNIEnv* env, jclass clasz, jlong inst, jobject stats){ + voip_stats_t _stats; + ((VoIPController*)(intptr_t)inst)->GetStats(&_stats); + jclass cls=env->GetObjectClass(stats); + env->SetLongField(stats, env->GetFieldID(cls, "bytesSentWifi", "J"), _stats.bytesSentWifi); + env->SetLongField(stats, env->GetFieldID(cls, "bytesSentMobile", "J"), _stats.bytesSentMobile); + env->SetLongField(stats, env->GetFieldID(cls, "bytesRecvdWifi", "J"), _stats.bytesRecvdWifi); + env->SetLongField(stats, env->GetFieldID(cls, "bytesRecvdMobile", "J"), _stats.bytesRecvdMobile); +} + +extern "C" JNIEXPORT void Java_org_telegram_messenger_voip_VoIPServerConfig_nativeSetConfig(JNIEnv* env, jclass clasz, jobjectArray keys, jobjectArray values){ + std::map config; + int len=env->GetArrayLength(keys); + int i; + 
for(i=0;iGetObjectArrayElement(keys, i); + jstring jval=(jstring)env->GetObjectArrayElement(values, i); + if(jkey==NULL|| jval==NULL) + continue; + const char* ckey=env->GetStringUTFChars(jkey, NULL); + const char* cval=env->GetStringUTFChars(jval, NULL); + std::string key(ckey); + std::string val(cval); + env->ReleaseStringUTFChars(jkey, ckey); + env->ReleaseStringUTFChars(jval, cval); + config[key]=val; + } + ServerConfig::GetSharedInstance()->Update(config); +} + +extern "C" JNIEXPORT jstring Java_org_telegram_messenger_voip_VoIPController_nativeGetDebugLog(JNIEnv* env, jobject thiz, jlong inst){ + VoIPController* ctlr=((VoIPController*)(intptr_t)inst); + std::string log=ctlr->GetDebugLog(); + return env->NewStringUTF(log.c_str()); +} + +extern "C" JNIEXPORT jint Java_org_telegram_messenger_voip_Resampler_convert44to48(JNIEnv* env, jclass cls, jobject from, jobject to){ + return tgvoip::audio::Resampler::Convert44To48((int16_t *) env->GetDirectBufferAddress(from), (int16_t *) env->GetDirectBufferAddress(to), (size_t) (env->GetDirectBufferCapacity(from)/2), (size_t) (env->GetDirectBufferCapacity(to)/2)); +} + +extern "C" JNIEXPORT jint Java_org_telegram_messenger_voip_Resampler_convert48to44(JNIEnv* env, jclass cls, jobject from, jobject to){ + return tgvoip::audio::Resampler::Convert48To44((int16_t *) env->GetDirectBufferAddress(from), (int16_t *) env->GetDirectBufferAddress(to), (size_t) (env->GetDirectBufferCapacity(from)/2), (size_t) (env->GetDirectBufferCapacity(to)/2)); +} diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip.UWP.vcxproj b/Telegram/ThirdParty/libtgvoip/libtgvoip.UWP.vcxproj new file mode 100644 index 000000000..aa4c104d4 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.UWP.vcxproj @@ -0,0 +1,509 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + ARM + + + Release + ARM + + + Debug + x64 + + + Release + x64 + + + + {88803693-7606-484b-9d2f-4bb789d57c29} + WindowsRuntimeComponent + libtgvoip + en + 14.0 + true + Windows 
Store + 10.0.15063.0 + 10.0.10240.0 + 10.0 + libtgvoip + + + + DynamicLibrary + true + v141 + + + DynamicLibrary + true + v141 + + + DynamicLibrary + true + v141 + + + DynamicLibrary + false + true + v141 + + + DynamicLibrary + false + true + v141 + + + DynamicLibrary + false + true + v141 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + false + Debug + + + false + ARM\Debug + + + false + x64\Debug + + + false + Release + + + false + ARM\Release + + + false + x64\Release + + + + NotUsing + _WINRT_DLL;_CRT_SECURE_NO_WARNINGS;_WINSOCK_DEPRECATED_NO_WARNINGS;NOMINMAX;WEBRTC_APM_DEBUG_DUMP=0;TGVOIP_USE_CUSTOM_CRYPTO;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + 28204 + webrtc_dsp;../libopus/include;%(AdditionalIncludeDirectories) + true + + + Console + false + libopus.lib;ws2_32.lib;mmdevapi.lib;%(AdditionalDependencies) + $(SolutionDir)$(MappedPlatform)\libopus\;%(AdditionalLibraryDirectories) + + + + + NotUsing + _WINRT_DLL;_CRT_SECURE_NO_WARNINGS;_WINSOCK_DEPRECATED_NO_WARNINGS;NOMINMAX;WEBRTC_APM_DEBUG_DUMP=0;TGVOIP_USE_CUSTOM_CRYPTO;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + 28204 + webrtc_dsp;../libopus/include;%(AdditionalIncludeDirectories) + + + Console + false + libopus.lib;ws2_32.lib;mmdevapi.lib;%(AdditionalDependencies) + $(SolutionDir)$(MappedPlatform)\libopus\;%(AdditionalLibraryDirectories) + + + + + NotUsing + _WINRT_DLL;_CRT_SECURE_NO_WARNINGS;_WINSOCK_DEPRECATED_NO_WARNINGS;NOMINMAX;WEBRTC_APM_DEBUG_DUMP=0;TGVOIP_USE_CUSTOM_CRYPTO;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + 28204 + webrtc_dsp;../libopus/include;%(AdditionalIncludeDirectories) + + + Console + false + libopus.lib;ws2_32.lib;mmdevapi.lib;%(AdditionalDependencies) + $(SolutionDir)$(MappedPlatform)\libopus\;%(AdditionalLibraryDirectories) + + + + + NotUsing + 
_WINRT_DLL;_CRT_SECURE_NO_WARNINGS;_WINSOCK_DEPRECATED_NO_WARNINGS;NOMINMAX;WEBRTC_APM_DEBUG_DUMP=0;NDEBUG;TGVOIP_USE_CUSTOM_CRYPTO;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + 28204 + webrtc_dsp;../libopus/include;%(AdditionalIncludeDirectories) + + + Console + false + libopus.lib;ws2_32.lib;mmdevapi.lib;%(AdditionalDependencies) + $(SolutionDir)$(MappedPlatform)\libopus\;%(AdditionalLibraryDirectories) + + + + + NotUsing + _WINRT_DLL;_CRT_SECURE_NO_WARNINGS;_WINSOCK_DEPRECATED_NO_WARNINGS;NOMINMAX;WEBRTC_APM_DEBUG_DUMP=0;NDEBUG;TGVOIP_USE_CUSTOM_CRYPTO;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + 28204 + webrtc_dsp;../libopus/include;%(AdditionalIncludeDirectories) + + + Console + false + libopus.lib;ws2_32.lib;mmdevapi.lib;%(AdditionalDependencies) + $(SolutionDir)$(MappedPlatform)\libopus\;%(AdditionalLibraryDirectories) + + + + + NotUsing + _WINRT_DLL;_CRT_SECURE_NO_WARNINGS;_WINSOCK_DEPRECATED_NO_WARNINGS;NOMINMAX;WEBRTC_APM_DEBUG_DUMP=0;NDEBUG;TGVOIP_USE_CUSTOM_CRYPTO;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + 28204 + webrtc_dsp;../libopus/include;%(AdditionalIncludeDirectories) + + + Console + false + libopus.lib;ws2_32.lib;mmdevapi.lib;%(AdditionalDependencies) + $(SolutionDir)$(MappedPlatform)\libopus\;%(AdditionalLibraryDirectories) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + 
+ + false + + + false + + + false + + + false + + + false + + + false + + + false + + + + + + + + + + + + + + + false + + + false + + + + false + + + false + + + false + + + false + + + false + + + false + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip.UWP.vcxproj.filters b/Telegram/ThirdParty/libtgvoip/libtgvoip.UWP.vcxproj.filters new file mode 100644 index 000000000..18a0832be --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.UWP.vcxproj.filters @@ -0,0 +1,492 @@ + + + + + 11199e80-17a0-460f-a780-9bfde20eb11c + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tga;tiff;tif;png;wav;mfcribbon-ms + + + {c5b75146-c75a-4c56-aeb2-2781658d7b0a} + + + {de1527d9-7564-4e96-9653-6e023b90d2bc} + + + {3b15701a-65dd-4d52-92d4-a7b64a73b293} + + + + + + + + + + + + + + + + + + + audio + + + audio + + + audio + + + windows + + + windows + + + windows + + + windows + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp 
+ + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + windows + + + + + + + + + + + + + + + + + + + + audio + + + audio + + + audio + + + windows + + + windows + + + windows + + + windows + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + windows + + + \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip.WP81.vcxproj b/Telegram/ThirdParty/libtgvoip/libtgvoip.WP81.vcxproj new file mode 100644 index 000000000..4c4993e14 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.WP81.vcxproj @@ -0,0 +1,420 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + ARM + + + Release + ARM + + + + {21f10158-c078-4bd7-a82a-9c4aeb8e2f8e} + Win32Proj + libtgvoip + libtgvoip + en-US + 12.0 + true + Windows Phone Silverlight + 8.1 + + + + DynamicLibrary + true + v120 + + + DynamicLibrary + false + true + v120 + + + DynamicLibrary + true + v120 + + + DynamicLibrary + false + true + 
v120 + + + + + + + + + + + + + + + + + + + + + + + + + + + NotUsing + _WINRT_DLL;TGVOIP_WP_SILVERLIGHT;_CRT_SECURE_NO_WARNINGS;NOMINMAX;WEBRTC_APM_DEBUG_DUMP=0;TGVOIP_USE_CUSTOM_CRYPTO;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + true + webrtc_dsp;../TelegramClient/TelegramClient.Opus/opus/include;%(AdditionalIncludeDirectories) + + + Console + false + ws2_32.lib;phoneaudioses.lib;../TelegramClient/$(Platform)/$(Configuration)/TelegramClient.Opus/TelegramClient.Opus.lib;%(AdditionalDependencies) + + + + + NotUsing + _WINRT_DLL;TGVOIP_WP_SILVERLIGHT;NDEBUG;_CRT_SECURE_NO_WARNINGS;NOMINMAX;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + true + webrtc_dsp;../TelegramClient/TelegramClient.Opus/opus/include;%(AdditionalIncludeDirectories) + + + Console + false + ws2_32.lib;phoneaudioses.lib;../TelegramClient/$(Platform)/$(Configuration)/TelegramClient.Opus/TelegramClient.Opus.lib;%(AdditionalDependencies) + + + + + NotUsing + _WINRT_DLL;TGVOIP_WP_SILVERLIGHT;_CRT_SECURE_NO_WARNINGS;NOMINMAX;WEBRTC_APM_DEBUG_DUMP=0;TGVOIP_USE_CUSTOM_CRYPTO;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + true + webrtc_dsp;../TelegramClient/TelegramClient.Opus/opus/include;%(AdditionalIncludeDirectories) + + + Console + false + ws2_32.lib;phoneaudioses.lib;../TelegramClient/$(Platform)/$(Configuration)/TelegramClient.Opus/TelegramClient.Opus.lib;%(AdditionalDependencies) + + + + + NotUsing + _WINRT_DLL;TGVOIP_WP_SILVERLIGHT;NDEBUG;_CRT_SECURE_NO_WARNINGS;NOMINMAX;WEBRTC_APM_DEBUG_DUMP=0;TGVOIP_USE_CUSTOM_CRYPTO;%(PreprocessorDefinitions) + pch.h + $(IntDir)pch.pch + /bigobj %(AdditionalOptions) + true + webrtc_dsp;../TelegramClient/TelegramClient.Opus/opus/include;%(AdditionalIncludeDirectories) + true + + + Console + false + ws2_32.lib;phoneaudioses.lib;../TelegramClient/$(Platform)/$(Configuration)/TelegramClient.Opus/TelegramClient.Opus.lib;%(AdditionalDependencies) + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + + + + + + + + + + + + + false + + + false + + + + false + + + false + + + false + + + false + + + false + + + false + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip.WP81.vcxproj.filters b/Telegram/ThirdParty/libtgvoip/libtgvoip.WP81.vcxproj.filters new file mode 100644 index 000000000..c35dfba8c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.WP81.vcxproj.filters @@ -0,0 +1,492 @@ + + + + + 11199e80-17a0-460f-a780-9bfde20eb11c + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tga;tiff;tif;png;wav;mfcribbon-ms + + + {c5b75146-c75a-4c56-aeb2-2781658d7b0a} + + + {de1527d9-7564-4e96-9653-6e023b90d2bc} + + + {3b15701a-65dd-4d52-92d4-a7b64a73b293} + + + + + + + + + + + + + + + + + + + audio + + + audio + + + audio + + + windows + + + windows + + + windows + + + windows + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + 
webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + windows + + + + + + + + + + + + + + + + + + + + audio + + + audio + + + audio + + + windows + + + windows + + + windows + + + windows + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + webrtc_dsp + + + windows + + + \ No newline at end of file diff --git 
a/Telegram/ThirdParty/libtgvoip/libtgvoip.gyp b/Telegram/ThirdParty/libtgvoip/libtgvoip.gyp new file mode 100644 index 000000000..863984837 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.gyp @@ -0,0 +1,398 @@ +# GYP project file for TDesktop + +{ + 'targets': [ + { + 'target_name': 'libtgvoip', + 'type': 'static_library', + 'dependencies': [], + 'defines': [ + 'WEBRTC_APM_DEBUG_DUMP=0', + 'TGVOIP_USE_DESKTOP_DSP', + ], + 'variables': { + 'tgvoip_src_loc': '.', + 'official_build_target%': '', + 'linux_path_opus_include%': '<(DEPTH)/../../../Libraries/opus/include', + }, + 'include_dirs': [ + '<(tgvoip_src_loc)/webrtc_dsp', + '<(linux_path_opus_include)', + ], + 'direct_dependent_settings': { + 'include_dirs': [ + '<(tgvoip_src_loc)', + ], + }, + 'export_dependent_settings': [], + 'sources': [ + '<(tgvoip_src_loc)/BlockingQueue.cpp', + '<(tgvoip_src_loc)/BlockingQueue.h', + '<(tgvoip_src_loc)/BufferInputStream.cpp', + '<(tgvoip_src_loc)/BufferInputStream.h', + '<(tgvoip_src_loc)/BufferOutputStream.cpp', + '<(tgvoip_src_loc)/BufferOutputStream.h', + '<(tgvoip_src_loc)/BufferPool.cpp', + '<(tgvoip_src_loc)/BufferPool.h', + '<(tgvoip_src_loc)/CongestionControl.cpp', + '<(tgvoip_src_loc)/CongestionControl.h', + '<(tgvoip_src_loc)/EchoCanceller.cpp', + '<(tgvoip_src_loc)/EchoCanceller.h', + '<(tgvoip_src_loc)/JitterBuffer.cpp', + '<(tgvoip_src_loc)/JitterBuffer.h', + '<(tgvoip_src_loc)/logging.cpp', + '<(tgvoip_src_loc)/logging.h', + '<(tgvoip_src_loc)/MediaStreamItf.cpp', + '<(tgvoip_src_loc)/MediaStreamItf.h', + '<(tgvoip_src_loc)/OpusDecoder.cpp', + '<(tgvoip_src_loc)/OpusDecoder.h', + '<(tgvoip_src_loc)/OpusEncoder.cpp', + '<(tgvoip_src_loc)/OpusEncoder.h', + '<(tgvoip_src_loc)/threading.h', + '<(tgvoip_src_loc)/VoIPController.cpp', + '<(tgvoip_src_loc)/VoIPController.h', + '<(tgvoip_src_loc)/VoIPServerConfig.cpp', + '<(tgvoip_src_loc)/VoIPServerConfig.h', + '<(tgvoip_src_loc)/audio/AudioInput.cpp', + '<(tgvoip_src_loc)/audio/AudioInput.h', + 
'<(tgvoip_src_loc)/audio/AudioOutput.cpp', + '<(tgvoip_src_loc)/audio/AudioOutput.h', + '<(tgvoip_src_loc)/audio/Resampler.cpp', + '<(tgvoip_src_loc)/audio/Resampler.h', + '<(tgvoip_src_loc)/NetworkSocket.cpp', + '<(tgvoip_src_loc)/NetworkSocket.h', + + # Windows + '<(tgvoip_src_loc)/os/windows/NetworkSocketWinsock.cpp', + '<(tgvoip_src_loc)/os/windows/NetworkSocketWinsock.h', + '<(tgvoip_src_loc)/os/windows/AudioInputWave.cpp', + '<(tgvoip_src_loc)/os/windows/AudioInputWave.h', + '<(tgvoip_src_loc)/os/windows/AudioOutputWave.cpp', + '<(tgvoip_src_loc)/os/windows/AudioOutputWave.h', + '<(tgvoip_src_loc)/os/windows/AudioOutputWASAPI.cpp', + '<(tgvoip_src_loc)/os/windows/AudioOutputWASAPI.h', + '<(tgvoip_src_loc)/os/windows/AudioInputWASAPI.cpp', + '<(tgvoip_src_loc)/os/windows/AudioInputWASAPI.h', + + # macOS + '<(tgvoip_src_loc)/os/darwin/AudioInputAudioUnit.cpp', + '<(tgvoip_src_loc)/os/darwin/AudioInputAudioUnit.h', + '<(tgvoip_src_loc)/os/darwin/AudioOutputAudioUnit.cpp', + '<(tgvoip_src_loc)/os/darwin/AudioOutputAudioUnit.h', + '<(tgvoip_src_loc)/os/darwin/AudioInputAudioUnitOSX.cpp', + '<(tgvoip_src_loc)/os/darwin/AudioInputAudioUnitOSX.h', + '<(tgvoip_src_loc)/os/darwin/AudioOutputAudioUnitOSX.cpp', + '<(tgvoip_src_loc)/os/darwin/AudioOutputAudioUnitOSX.h', + '<(tgvoip_src_loc)/os/darwin/AudioUnitIO.cpp', + '<(tgvoip_src_loc)/os/darwin/AudioUnitIO.h', + '<(tgvoip_src_loc)/os/darwin/DarwinSpecific.mm', + '<(tgvoip_src_loc)/os/darwin/DarwinSpecific.h', + + # Linux + '<(tgvoip_src_loc)/os/linux/AudioInputALSA.cpp', + '<(tgvoip_src_loc)/os/linux/AudioInputALSA.h', + '<(tgvoip_src_loc)/os/linux/AudioOutputALSA.cpp', + '<(tgvoip_src_loc)/os/linux/AudioOutputALSA.h', + '<(tgvoip_src_loc)/os/linux/AudioOutputPulse.cpp', + '<(tgvoip_src_loc)/os/linux/AudioOutputPulse.h', + '<(tgvoip_src_loc)/os/linux/AudioInputPulse.cpp', + '<(tgvoip_src_loc)/os/linux/AudioInputPulse.h', + '<(tgvoip_src_loc)/os/linux/PulseAudioLoader.cpp', + 
'<(tgvoip_src_loc)/os/linux/PulseAudioLoader.h', + + # POSIX + '<(tgvoip_src_loc)/os/posix/NetworkSocketPosix.cpp', + '<(tgvoip_src_loc)/os/posix/NetworkSocketPosix.h', + + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/array_view.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/atomicops.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/basictypes.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/checks.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/checks.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/constructormagic.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/safe_compare.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/safe_conversions.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/safe_conversions_impl.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/sanitizer.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/stringutils.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/stringutils.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/base/type_traits.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/audio_util.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/channel_buffer.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/channel_buffer.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/fft4g.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/fft4g.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/include/audio_util.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/ring_buffer.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/ring_buffer.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/auto_corr_to_refl_coef.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/auto_correlation.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse.c', +# 'webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse_arm.S', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft.c', + 
'<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft_tables.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/copy_set_operations.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation.c', +# '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation_neon.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/division_operations.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/dot_product_with_scale.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast.c', +# '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast_neon.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/energy.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12.c', +# 'webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12_armv7.S', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ma_fast_q12.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/get_hanning_window.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/get_scaling_square.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/ilbc_specific_functions.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/include/real_fft.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/include/signal_processing_library.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl.h', +# '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_armv7.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_mips.h', + 
'<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/levinson_durbin.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/lpc_to_refl_coef.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations.c', +# '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations_neon.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/randomization_functions.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/real_fft.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/refl_coef_to_lpc.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/resample.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/resample_48khz.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/resample_fractional.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/spl_init.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/spl_inl.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor.c', + #'webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor_arm.S', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/splitting_filter_impl.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/signal_processing/vector_scaling_operations.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.cc', + 
'<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/wav_file.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/wav_file.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/wav_header.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/common_audio/wav_header.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_common.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.h', +# '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_neon.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_optimized_methods.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_sse2.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_c.cc', +# '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_neon.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_defines.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.c', + 
'<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/gain_control.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/defines.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core_c.c', +# '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core_neon.c', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_defines.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/ns/windows_private.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.h', + 
'<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_internal.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.h', +# '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_neon.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_sse2.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_common.h', +# '<(tgvoip_src_loc)/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/system_wrappers/include/asm_defines.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/system_wrappers/include/compile_assert_c.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/system_wrappers/include/cpu_features_wrapper.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/system_wrappers/include/metrics.h', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/system_wrappers/source/cpu_features.cc', + '<(tgvoip_src_loc)/webrtc_dsp/webrtc/typedefs.h', + + ], + 'libraries': [], + 'configurations': { + 'Debug': {}, + 'Release': {}, + }, + 'conditions': [ + [ + '"<(OS)" != "win"', { + 'sources/': [['exclude', '<(tgvoip_src_loc)/os/windows/']], + }, { + 'sources/': [['exclude', '<(tgvoip_src_loc)/os/posix/']], + }, + ], + [ + '"<(OS)" != "mac"', { 
+ 'sources/': [['exclude', '<(tgvoip_src_loc)/os/darwin/']], + }, + ], + [ + '"<(OS)" != "linux"', { + 'sources/': [['exclude', '<(tgvoip_src_loc)/os/linux/']], + }, + ], + [ + '"<(OS)" == "mac"', { + 'xcode_settings': { + 'CLANG_CXX_LANGUAGE_STANDARD': 'c++1z', + }, + 'defines': [ + 'WEBRTC_POSIX', + 'WEBRTC_MAC', + 'TARGET_OS_OSX', + ], + 'conditions': [ + [ '"<(official_build_target)" == "mac32"', { + 'xcode_settings': { + 'MACOSX_DEPLOYMENT_TARGET': '10.6', + 'OTHER_CPLUSPLUSFLAGS': [ '-nostdinc++' ], + }, + 'include_dirs': [ + '/usr/local/macold/include/c++/v1', + '<(DEPTH)/../../../Libraries/macold/openssl/include', + ], + }, { + 'xcode_settings': { + 'MACOSX_DEPLOYMENT_TARGET': '10.8', + 'CLANG_CXX_LIBRARY': 'libc++', + }, + 'include_dirs': [ + '<(DEPTH)/../../../Libraries/openssl/include', + ], + }] + ] + }, + ], + [ + '"<(OS)" == "win"', { + 'msbuild_toolset': 'v141', + 'defines': [ + 'NOMINMAX', + '_USING_V110_SDK71_', + 'TGVOIP_WINXP_COMPAT' + ], + 'libraries': [ + 'winmm', + 'ws2_32', + 'kernel32', + 'user32', + ], + 'msvs_cygwin_shell': 0, + 'msvs_settings': { + 'VCCLCompilerTool': { + 'ProgramDataBaseFileName': '$(OutDir)\\$(ProjectName).pdb', + 'DebugInformationFormat': '3', # Program Database (/Zi) + 'AdditionalOptions': [ + '/MP', # Enable multi process build. + '/EHsc', # Catch C++ exceptions only, extern C functions never throw a C++ exception. 
+ '/wd4068', # Disable "warning C4068: unknown pragma" + ], + 'TreatWChar_tAsBuiltInType': 'false', + }, + }, + 'msvs_external_builder_build_cmd': [ + 'ninja.exe', + '-C', + '$(OutDir)', + '-k0', + '$(ProjectName)', + ], + 'configurations': { + 'Debug': { + 'defines': [ + '_DEBUG', + ], + 'include_dirs': [ + '<(DEPTH)/../../../Libraries/openssl/Debug/include', + ], + 'msvs_settings': { + 'VCCLCompilerTool': { + 'Optimization': '0', # Disabled (/Od) + 'RuntimeLibrary': '1', # Multi-threaded Debug (/MTd) + 'RuntimeTypeInfo': 'true', + }, + 'VCLibrarianTool': { + 'AdditionalOptions': [ + '/NODEFAULTLIB:LIBCMT' + ] + } + }, + }, + 'Release': { + 'defines': [ + 'NDEBUG', + ], + 'include_dirs': [ + '<(DEPTH)/../../../Libraries/openssl/Release/include', + ], + 'msvs_settings': { + 'VCCLCompilerTool': { + 'Optimization': '2', # Maximize Speed (/O2) + 'InlineFunctionExpansion': '2', # Any suitable (/Ob2) + 'EnableIntrinsicFunctions': 'true', # Yes (/Oi) + 'FavorSizeOrSpeed': '1', # Favor fast code (/Ot) + 'RuntimeLibrary': '0', # Multi-threaded (/MT) + 'EnableEnhancedInstructionSet': '2', # Streaming SIMD Extensions 2 (/arch:SSE2) + 'WholeProgramOptimization': 'true', # /GL + }, + 'VCLibrarianTool': { + 'AdditionalOptions': [ + '/LTCG', + ] + }, + }, + }, + }, + }, + ], + [ + '"<(OS)" == "linux"', { + 'defines': [ + 'WEBRTC_POSIX', + ], + 'cflags_cc': [ + '-msse2', + ], + 'direct_dependent_settings': { + 'libraries': [ + + ], + }, + }, + ], + ], + }, + ], + } diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.pbxproj b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.pbxproj new file mode 100644 index 000000000..97a3fd307 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.pbxproj @@ -0,0 +1,1553 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 69015D941E9D848700AC9763 /* NetworkSocket.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 69015D921E9D848700AC9763 /* NetworkSocket.cpp */; }; + 69015D951E9D848700AC9763 /* NetworkSocket.h in Headers */ = {isa = PBXBuildFile; fileRef = 69015D931E9D848700AC9763 /* NetworkSocket.h */; }; + 6915307B1E6B5BAB004F643F /* logging.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 6915307A1E6B5BAB004F643F /* logging.cpp */; }; + 692AB8CB1E6759DD00706ACC /* AudioInput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8881E6759DD00706ACC /* AudioInput.cpp */; }; + 692AB8CC1E6759DD00706ACC /* AudioInput.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8891E6759DD00706ACC /* AudioInput.h */; }; + 692AB8CD1E6759DD00706ACC /* AudioOutput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB88A1E6759DD00706ACC /* AudioOutput.cpp */; }; + 692AB8CE1E6759DD00706ACC /* AudioOutput.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB88B1E6759DD00706ACC /* AudioOutput.h */; }; + 692AB8CF1E6759DD00706ACC /* BlockingQueue.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB88C1E6759DD00706ACC /* BlockingQueue.cpp */; }; + 692AB8D01E6759DD00706ACC /* BlockingQueue.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB88D1E6759DD00706ACC /* BlockingQueue.h */; }; + 692AB8D11E6759DD00706ACC /* BufferInputStream.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB88E1E6759DD00706ACC /* BufferInputStream.cpp */; }; + 692AB8D21E6759DD00706ACC /* BufferInputStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB88F1E6759DD00706ACC /* BufferInputStream.h */; }; + 692AB8D31E6759DD00706ACC /* BufferOutputStream.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8901E6759DD00706ACC /* BufferOutputStream.cpp */; }; + 692AB8D41E6759DD00706ACC /* BufferOutputStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8911E6759DD00706ACC /* 
BufferOutputStream.h */; }; + 692AB8D51E6759DD00706ACC /* BufferPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8921E6759DD00706ACC /* BufferPool.cpp */; }; + 692AB8D61E6759DD00706ACC /* BufferPool.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8931E6759DD00706ACC /* BufferPool.h */; }; + 692AB8D81E6759DD00706ACC /* CongestionControl.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8971E6759DD00706ACC /* CongestionControl.cpp */; }; + 692AB8D91E6759DD00706ACC /* CongestionControl.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8981E6759DD00706ACC /* CongestionControl.h */; }; + 692AB8DA1E6759DD00706ACC /* EchoCanceller.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8991E6759DD00706ACC /* EchoCanceller.cpp */; }; + 692AB8DB1E6759DD00706ACC /* EchoCanceller.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB89A1E6759DD00706ACC /* EchoCanceller.h */; }; + 692AB8E51E6759DD00706ACC /* Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 692AB8A71E6759DD00706ACC /* Info.plist */; }; + 692AB8E61E6759DD00706ACC /* JitterBuffer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8A81E6759DD00706ACC /* JitterBuffer.cpp */; }; + 692AB8E71E6759DD00706ACC /* JitterBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8A91E6759DD00706ACC /* JitterBuffer.h */; }; + 692AB8E81E6759DD00706ACC /* logging.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8AA1E6759DD00706ACC /* logging.h */; }; + 692AB8E91E6759DD00706ACC /* MediaStreamItf.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8AB1E6759DD00706ACC /* MediaStreamItf.cpp */; }; + 692AB8EA1E6759DD00706ACC /* MediaStreamItf.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8AC1E6759DD00706ACC /* MediaStreamItf.h */; }; + 692AB8EB1E6759DD00706ACC /* OpusDecoder.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8AD1E6759DD00706ACC /* OpusDecoder.cpp */; }; + 692AB8EC1E6759DD00706ACC /* OpusDecoder.h in Headers */ = {isa = PBXBuildFile; fileRef = 
692AB8AE1E6759DD00706ACC /* OpusDecoder.h */; }; + 692AB8ED1E6759DD00706ACC /* OpusEncoder.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8AF1E6759DD00706ACC /* OpusEncoder.cpp */; }; + 692AB8EE1E6759DD00706ACC /* OpusEncoder.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8B01E6759DD00706ACC /* OpusEncoder.h */; }; + 692AB8F91E6759DD00706ACC /* AudioInputAudioUnit.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8BE1E6759DD00706ACC /* AudioInputAudioUnit.cpp */; }; + 692AB8FA1E6759DD00706ACC /* AudioInputAudioUnit.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8BF1E6759DD00706ACC /* AudioInputAudioUnit.h */; }; + 692AB8FB1E6759DD00706ACC /* AudioOutputAudioUnit.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8C01E6759DD00706ACC /* AudioOutputAudioUnit.cpp */; }; + 692AB8FC1E6759DD00706ACC /* AudioOutputAudioUnit.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8C11E6759DD00706ACC /* AudioOutputAudioUnit.h */; }; + 692AB8FD1E6759DD00706ACC /* AudioUnitIO.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8C21E6759DD00706ACC /* AudioUnitIO.cpp */; }; + 692AB8FE1E6759DD00706ACC /* AudioUnitIO.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8C31E6759DD00706ACC /* AudioUnitIO.h */; }; + 692AB8FF1E6759DD00706ACC /* TGLogWrapper.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8C41E6759DD00706ACC /* TGLogWrapper.h */; }; + 692AB9001E6759DD00706ACC /* TGLogWrapper.m in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8C51E6759DD00706ACC /* TGLogWrapper.m */; }; + 692AB9011E6759DD00706ACC /* threading.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8C61E6759DD00706ACC /* threading.h */; }; + 692AB9021E6759DD00706ACC /* VoIPController.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8C71E6759DD00706ACC /* VoIPController.cpp */; }; + 692AB9031E6759DD00706ACC /* VoIPController.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8C81E6759DD00706ACC /* VoIPController.h */; }; + 692AB9041E6759DD00706ACC /* 
VoIPServerConfig.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8C91E6759DD00706ACC /* VoIPServerConfig.cpp */; }; + 692AB9051E6759DD00706ACC /* VoIPServerConfig.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8CA1E6759DD00706ACC /* VoIPServerConfig.h */; }; + 692AB91F1E675F7000706ACC /* AudioToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 692AB91C1E675F7000706ACC /* AudioToolbox.framework */; }; + 692AB9201E675F7000706ACC /* AudioUnit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 692AB91D1E675F7000706ACC /* AudioUnit.framework */; }; + 692AB9211E675F7000706ACC /* CoreAudio.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 692AB91E1E675F7000706ACC /* CoreAudio.framework */; }; + 69791A4D1EE8262400BB85FB /* NetworkSocketPosix.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 69791A4B1EE8262400BB85FB /* NetworkSocketPosix.cpp */; }; + 69791A4E1EE8262400BB85FB /* NetworkSocketPosix.h in Headers */ = {isa = PBXBuildFile; fileRef = 69791A4C1EE8262400BB85FB /* NetworkSocketPosix.h */; }; + 69791A571EE8272A00BB85FB /* Resampler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 69791A551EE8272A00BB85FB /* Resampler.cpp */; }; + 69791A581EE8272A00BB85FB /* Resampler.h in Headers */ = {isa = PBXBuildFile; fileRef = 69791A561EE8272A00BB85FB /* Resampler.h */; }; + 69960A041EF85C2900F9D091 /* DarwinSpecific.h in Headers */ = {isa = PBXBuildFile; fileRef = 69960A021EF85C2900F9D091 /* DarwinSpecific.h */; }; + 69960A051EF85C2900F9D091 /* DarwinSpecific.mm in Sources */ = {isa = PBXBuildFile; fileRef = 69960A031EF85C2900F9D091 /* DarwinSpecific.mm */; }; + 69A6DD941E95EC7700000E69 /* array_view.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD011E95EC7700000E69 /* array_view.h */; }; + 69A6DD951E95EC7700000E69 /* atomicops.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD021E95EC7700000E69 /* atomicops.h */; }; + 69A6DD961E95EC7700000E69 /* basictypes.h in Headers */ = {isa = PBXBuildFile; fileRef = 
69A6DD031E95EC7700000E69 /* basictypes.h */; }; + 69A6DD971E95EC7700000E69 /* checks.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD041E95EC7700000E69 /* checks.cc */; }; + 69A6DD981E95EC7700000E69 /* checks.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD051E95EC7700000E69 /* checks.h */; }; + 69A6DD991E95EC7700000E69 /* constructormagic.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD061E95EC7700000E69 /* constructormagic.h */; }; + 69A6DD9A1E95EC7700000E69 /* safe_compare.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD071E95EC7700000E69 /* safe_compare.h */; }; + 69A6DD9B1E95EC7700000E69 /* safe_conversions.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD081E95EC7700000E69 /* safe_conversions.h */; }; + 69A6DD9C1E95EC7700000E69 /* safe_conversions_impl.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD091E95EC7700000E69 /* safe_conversions_impl.h */; }; + 69A6DD9D1E95EC7700000E69 /* sanitizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD0A1E95EC7700000E69 /* sanitizer.h */; }; + 69A6DD9E1E95EC7700000E69 /* stringutils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD0B1E95EC7700000E69 /* stringutils.cc */; }; + 69A6DD9F1E95EC7700000E69 /* stringutils.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD0C1E95EC7700000E69 /* stringutils.h */; }; + 69A6DDA01E95EC7700000E69 /* type_traits.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD0D1E95EC7700000E69 /* type_traits.h */; }; + 69A6DDA11E95EC7700000E69 /* audio_util.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD0F1E95EC7700000E69 /* audio_util.cc */; }; + 69A6DDA21E95EC7700000E69 /* channel_buffer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD101E95EC7700000E69 /* channel_buffer.cc */; }; + 69A6DDA31E95EC7700000E69 /* channel_buffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD111E95EC7700000E69 /* channel_buffer.h */; }; + 69A6DDA41E95EC7700000E69 /* fft4g.c in Sources */ = {isa = PBXBuildFile; fileRef = 
69A6DD121E95EC7700000E69 /* fft4g.c */; }; + 69A6DDA51E95EC7700000E69 /* fft4g.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD131E95EC7700000E69 /* fft4g.h */; }; + 69A6DDA61E95EC7700000E69 /* audio_util.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD151E95EC7700000E69 /* audio_util.h */; }; + 69A6DDA71E95EC7700000E69 /* ring_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD161E95EC7700000E69 /* ring_buffer.c */; }; + 69A6DDA81E95EC7700000E69 /* ring_buffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD171E95EC7700000E69 /* ring_buffer.h */; }; + 69A6DDA91E95EC7700000E69 /* auto_corr_to_refl_coef.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD191E95EC7700000E69 /* auto_corr_to_refl_coef.c */; }; + 69A6DDAA1E95EC7700000E69 /* auto_correlation.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD1A1E95EC7700000E69 /* auto_correlation.c */; }; + 69A6DDAB1E95EC7700000E69 /* complex_bit_reverse.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD1B1E95EC7700000E69 /* complex_bit_reverse.c */; }; + 69A6DDAC1E95EC7700000E69 /* complex_bit_reverse_arm.S in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD1C1E95EC7700000E69 /* complex_bit_reverse_arm.S */; }; + 69A6DDAD1E95EC7700000E69 /* complex_fft.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD1D1E95EC7700000E69 /* complex_fft.c */; }; + 69A6DDAE1E95EC7700000E69 /* complex_fft_tables.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD1E1E95EC7700000E69 /* complex_fft_tables.h */; }; + 69A6DDAF1E95EC7700000E69 /* copy_set_operations.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD1F1E95EC7700000E69 /* copy_set_operations.c */; }; + 69A6DDB01E95EC7700000E69 /* cross_correlation.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD201E95EC7700000E69 /* cross_correlation.c */; }; + 69A6DDB11E95EC7700000E69 /* cross_correlation_neon.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD211E95EC7700000E69 /* cross_correlation_neon.c */; }; + 
69A6DDB21E95EC7700000E69 /* division_operations.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD221E95EC7700000E69 /* division_operations.c */; }; + 69A6DDB31E95EC7700000E69 /* dot_product_with_scale.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD231E95EC7700000E69 /* dot_product_with_scale.c */; }; + 69A6DDB41E95EC7700000E69 /* downsample_fast.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD241E95EC7700000E69 /* downsample_fast.c */; }; + 69A6DDB51E95EC7700000E69 /* downsample_fast_neon.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD251E95EC7700000E69 /* downsample_fast_neon.c */; }; + 69A6DDB61E95EC7700000E69 /* energy.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD261E95EC7700000E69 /* energy.c */; }; + 69A6DDB71E95EC7700000E69 /* filter_ar.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD271E95EC7700000E69 /* filter_ar.c */; }; + 69A6DDB81E95EC7700000E69 /* filter_ar_fast_q12.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD281E95EC7700000E69 /* filter_ar_fast_q12.c */; }; + 69A6DDB91E95EC7700000E69 /* filter_ar_fast_q12_armv7.S in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD291E95EC7700000E69 /* filter_ar_fast_q12_armv7.S */; }; + 69A6DDBA1E95EC7700000E69 /* filter_ma_fast_q12.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD2A1E95EC7700000E69 /* filter_ma_fast_q12.c */; }; + 69A6DDBB1E95EC7700000E69 /* get_hanning_window.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD2B1E95EC7700000E69 /* get_hanning_window.c */; }; + 69A6DDBC1E95EC7700000E69 /* get_scaling_square.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD2C1E95EC7700000E69 /* get_scaling_square.c */; }; + 69A6DDBD1E95EC7700000E69 /* ilbc_specific_functions.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD2D1E95EC7700000E69 /* ilbc_specific_functions.c */; }; + 69A6DDBE1E95EC7700000E69 /* real_fft.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD2F1E95EC7700000E69 /* real_fft.h */; }; + 69A6DDBF1E95EC7700000E69 /* 
signal_processing_library.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD301E95EC7700000E69 /* signal_processing_library.h */; }; + 69A6DDC01E95EC7700000E69 /* spl_inl.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD311E95EC7700000E69 /* spl_inl.h */; }; + 69A6DDC11E95EC7700000E69 /* spl_inl_armv7.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD321E95EC7700000E69 /* spl_inl_armv7.h */; }; + 69A6DDC21E95EC7700000E69 /* spl_inl_mips.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD331E95EC7700000E69 /* spl_inl_mips.h */; }; + 69A6DDC31E95EC7700000E69 /* levinson_durbin.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD341E95EC7700000E69 /* levinson_durbin.c */; }; + 69A6DDC41E95EC7700000E69 /* lpc_to_refl_coef.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD351E95EC7700000E69 /* lpc_to_refl_coef.c */; }; + 69A6DDC51E95EC7700000E69 /* min_max_operations.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD361E95EC7700000E69 /* min_max_operations.c */; }; + 69A6DDC61E95EC7700000E69 /* min_max_operations_neon.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD371E95EC7700000E69 /* min_max_operations_neon.c */; }; + 69A6DDC71E95EC7700000E69 /* randomization_functions.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD381E95EC7700000E69 /* randomization_functions.c */; }; + 69A6DDC81E95EC7700000E69 /* real_fft.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD391E95EC7700000E69 /* real_fft.c */; }; + 69A6DDCA1E95EC7700000E69 /* refl_coef_to_lpc.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD3B1E95EC7700000E69 /* refl_coef_to_lpc.c */; }; + 69A6DDCB1E95EC7700000E69 /* resample.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD3C1E95EC7700000E69 /* resample.c */; }; + 69A6DDCC1E95EC7700000E69 /* resample_48khz.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD3D1E95EC7700000E69 /* resample_48khz.c */; }; + 69A6DDCD1E95EC7700000E69 /* resample_by_2.c in Sources */ = {isa = PBXBuildFile; fileRef = 
69A6DD3E1E95EC7700000E69 /* resample_by_2.c */; }; + 69A6DDCE1E95EC7700000E69 /* resample_by_2_internal.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD3F1E95EC7700000E69 /* resample_by_2_internal.c */; }; + 69A6DDCF1E95EC7700000E69 /* resample_by_2_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD401E95EC7700000E69 /* resample_by_2_internal.h */; }; + 69A6DDD01E95EC7700000E69 /* resample_fractional.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD411E95EC7700000E69 /* resample_fractional.c */; }; + 69A6DDD11E95EC7700000E69 /* spl_init.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD421E95EC7700000E69 /* spl_init.c */; }; + 69A6DDD21E95EC7700000E69 /* spl_inl.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD431E95EC7700000E69 /* spl_inl.c */; }; + 69A6DDD31E95EC7700000E69 /* spl_sqrt.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD441E95EC7700000E69 /* spl_sqrt.c */; }; + 69A6DDD41E95EC7700000E69 /* spl_sqrt_floor.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD451E95EC7700000E69 /* spl_sqrt_floor.c */; }; + 69A6DDD51E95EC7700000E69 /* spl_sqrt_floor_arm.S in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD461E95EC7700000E69 /* spl_sqrt_floor_arm.S */; }; + 69A6DDD61E95EC7700000E69 /* splitting_filter_impl.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD471E95EC7700000E69 /* splitting_filter_impl.c */; }; + 69A6DDD71E95EC7700000E69 /* sqrt_of_one_minus_x_squared.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD481E95EC7700000E69 /* sqrt_of_one_minus_x_squared.c */; }; + 69A6DDD81E95EC7700000E69 /* vector_scaling_operations.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD491E95EC7700000E69 /* vector_scaling_operations.c */; }; + 69A6DDD91E95EC7700000E69 /* sparse_fir_filter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD4A1E95EC7700000E69 /* sparse_fir_filter.cc */; }; + 69A6DDDA1E95EC7700000E69 /* sparse_fir_filter.h in Headers */ = {isa = PBXBuildFile; fileRef = 
69A6DD4B1E95EC7700000E69 /* sparse_fir_filter.h */; }; + 69A6DDDB1E95EC7700000E69 /* aec_common.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD4F1E95EC7700000E69 /* aec_common.h */; }; + 69A6DDDC1E95EC7700000E69 /* aec_core.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD501E95EC7700000E69 /* aec_core.cc */; }; + 69A6DDDD1E95EC7700000E69 /* aec_core.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD511E95EC7700000E69 /* aec_core.h */; }; + 69A6DDDE1E95EC7700000E69 /* aec_core_neon.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD521E95EC7700000E69 /* aec_core_neon.cc */; }; + 69A6DDDF1E95EC7700000E69 /* aec_core_optimized_methods.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD531E95EC7700000E69 /* aec_core_optimized_methods.h */; }; + 69A6DDE01E95EC7700000E69 /* aec_core_sse2.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD541E95EC7700000E69 /* aec_core_sse2.cc */; }; + 69A6DDE11E95EC7700000E69 /* aec_resampler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD551E95EC7700000E69 /* aec_resampler.cc */; }; + 69A6DDE21E95EC7700000E69 /* aec_resampler.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD561E95EC7700000E69 /* aec_resampler.h */; }; + 69A6DDE31E95EC7700000E69 /* echo_cancellation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD571E95EC7700000E69 /* echo_cancellation.cc */; }; + 69A6DDE41E95EC7700000E69 /* echo_cancellation.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD581E95EC7700000E69 /* echo_cancellation.h */; }; + 69A6DDE51E95EC7700000E69 /* aecm_core.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD5A1E95EC7700000E69 /* aecm_core.cc */; }; + 69A6DDE61E95EC7700000E69 /* aecm_core.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD5B1E95EC7700000E69 /* aecm_core.h */; }; + 69A6DDE71E95EC7700000E69 /* aecm_core_c.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD5C1E95EC7700000E69 /* aecm_core_c.cc */; }; + 69A6DDE81E95EC7700000E69 /* aecm_core_neon.cc in Sources */ 
= {isa = PBXBuildFile; fileRef = 69A6DD5D1E95EC7700000E69 /* aecm_core_neon.cc */; }; + 69A6DDE91E95EC7700000E69 /* aecm_defines.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD5E1E95EC7700000E69 /* aecm_defines.h */; }; + 69A6DDEA1E95EC7700000E69 /* echo_control_mobile.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD5F1E95EC7700000E69 /* echo_control_mobile.cc */; }; + 69A6DDEB1E95EC7700000E69 /* echo_control_mobile.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD601E95EC7700000E69 /* echo_control_mobile.h */; }; + 69A6DDEC1E95EC7700000E69 /* analog_agc.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD631E95EC7700000E69 /* analog_agc.c */; }; + 69A6DDED1E95EC7700000E69 /* analog_agc.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD641E95EC7700000E69 /* analog_agc.h */; }; + 69A6DDEE1E95EC7700000E69 /* digital_agc.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD651E95EC7700000E69 /* digital_agc.c */; }; + 69A6DDEF1E95EC7700000E69 /* digital_agc.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD661E95EC7700000E69 /* digital_agc.h */; }; + 69A6DDF01E95EC7700000E69 /* gain_control.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD671E95EC7700000E69 /* gain_control.h */; }; + 69A6DDF11E95EC7700000E69 /* apm_data_dumper.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD691E95EC7700000E69 /* apm_data_dumper.cc */; }; + 69A6DDF21E95EC7700000E69 /* apm_data_dumper.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD6A1E95EC7700000E69 /* apm_data_dumper.h */; }; + 69A6DDF31E95EC7700000E69 /* defines.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD6C1E95EC7700000E69 /* defines.h */; }; + 69A6DDF41E95EC7700000E69 /* noise_suppression.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD6D1E95EC7700000E69 /* noise_suppression.c */; }; + 69A6DDF51E95EC7700000E69 /* noise_suppression.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD6E1E95EC7700000E69 /* noise_suppression.h */; }; + 
69A6DDF61E95EC7700000E69 /* noise_suppression_x.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD6F1E95EC7700000E69 /* noise_suppression_x.c */; }; + 69A6DDF71E95EC7700000E69 /* noise_suppression_x.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD701E95EC7700000E69 /* noise_suppression_x.h */; }; + 69A6DDF81E95EC7700000E69 /* ns_core.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD711E95EC7700000E69 /* ns_core.c */; }; + 69A6DDF91E95EC7700000E69 /* ns_core.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD721E95EC7700000E69 /* ns_core.h */; }; + 69A6DDFA1E95EC7700000E69 /* nsx_core.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD731E95EC7700000E69 /* nsx_core.c */; }; + 69A6DDFB1E95EC7700000E69 /* nsx_core.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD741E95EC7700000E69 /* nsx_core.h */; }; + 69A6DDFC1E95EC7700000E69 /* nsx_core_c.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD751E95EC7700000E69 /* nsx_core_c.c */; }; + 69A6DDFD1E95EC7700000E69 /* nsx_core_neon.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD761E95EC7700000E69 /* nsx_core_neon.c */; }; + 69A6DDFE1E95EC7700000E69 /* nsx_defines.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD771E95EC7700000E69 /* nsx_defines.h */; }; + 69A6DDFF1E95EC7700000E69 /* windows_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD781E95EC7700000E69 /* windows_private.h */; }; + 69A6DE001E95EC7800000E69 /* splitting_filter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD791E95EC7700000E69 /* splitting_filter.cc */; }; + 69A6DE011E95EC7800000E69 /* splitting_filter.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD7A1E95EC7700000E69 /* splitting_filter.h */; }; + 69A6DE021E95EC7800000E69 /* three_band_filter_bank.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD7B1E95EC7700000E69 /* three_band_filter_bank.cc */; }; + 69A6DE031E95EC7800000E69 /* three_band_filter_bank.h in Headers */ = {isa = PBXBuildFile; fileRef = 
69A6DD7C1E95EC7700000E69 /* three_band_filter_bank.h */; }; + 69A6DE041E95EC7800000E69 /* block_mean_calculator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD7E1E95EC7700000E69 /* block_mean_calculator.cc */; }; + 69A6DE051E95EC7800000E69 /* block_mean_calculator.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD7F1E95EC7700000E69 /* block_mean_calculator.h */; }; + 69A6DE061E95EC7800000E69 /* delay_estimator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD801E95EC7700000E69 /* delay_estimator.cc */; }; + 69A6DE071E95EC7800000E69 /* delay_estimator.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD811E95EC7700000E69 /* delay_estimator.h */; }; + 69A6DE081E95EC7800000E69 /* delay_estimator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD821E95EC7700000E69 /* delay_estimator_internal.h */; }; + 69A6DE091E95EC7800000E69 /* delay_estimator_wrapper.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD831E95EC7700000E69 /* delay_estimator_wrapper.cc */; }; + 69A6DE0A1E95EC7800000E69 /* delay_estimator_wrapper.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD841E95EC7700000E69 /* delay_estimator_wrapper.h */; }; + 69A6DE0B1E95EC7800000E69 /* ooura_fft.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD851E95EC7700000E69 /* ooura_fft.cc */; }; + 69A6DE0C1E95EC7800000E69 /* ooura_fft.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD861E95EC7700000E69 /* ooura_fft.h */; }; + 69A6DE0D1E95EC7800000E69 /* ooura_fft_neon.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD871E95EC7700000E69 /* ooura_fft_neon.cc */; }; + 69A6DE0E1E95EC7800000E69 /* ooura_fft_sse2.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD881E95EC7700000E69 /* ooura_fft_sse2.cc */; }; + 69A6DE0F1E95EC7800000E69 /* ooura_fft_tables_common.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD891E95EC7700000E69 /* ooura_fft_tables_common.h */; }; + 69A6DE101E95EC7800000E69 /* ooura_fft_tables_neon_sse2.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 69A6DD8A1E95EC7700000E69 /* ooura_fft_tables_neon_sse2.h */; }; + 69A6DE111E95EC7800000E69 /* asm_defines.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD8D1E95EC7700000E69 /* asm_defines.h */; }; + 69A6DE121E95EC7800000E69 /* compile_assert_c.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD8E1E95EC7700000E69 /* compile_assert_c.h */; }; + 69A6DE131E95EC7800000E69 /* cpu_features_wrapper.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD8F1E95EC7700000E69 /* cpu_features_wrapper.h */; }; + 69A6DE141E95EC7800000E69 /* metrics.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD901E95EC7700000E69 /* metrics.h */; }; + 69A6DE151E95EC7800000E69 /* cpu_features.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DD921E95EC7700000E69 /* cpu_features.cc */; }; + 69A6DE161E95EC7800000E69 /* typedefs.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DD931E95EC7700000E69 /* typedefs.h */; }; + 69A6DE1B1E95ECF000000E69 /* wav_file.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE171E95ECF000000E69 /* wav_file.cc */; }; + 69A6DE1C1E95ECF000000E69 /* wav_file.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE181E95ECF000000E69 /* wav_file.h */; }; + 69A6DE1D1E95ECF000000E69 /* wav_header.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE191E95ECF000000E69 /* wav_header.cc */; }; + 69A6DE1E1E95ECF000000E69 /* wav_header.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE1A1E95ECF000000E69 /* wav_header.h */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 692AB9101E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = D08805AC156E8F3600311537; + remoteInfo = Telegraph; + }; + 692AB9121E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + 
proxyType = 2; + remoteGlobalIDString = D02601D71A55CA2300716290; + remoteInfo = Share; + }; + 692AB9141E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 68744C0D1BB1A9F700FE6542; + remoteInfo = watchkitapp; + }; + 692AB9161E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 68744C191BB1A9F700FE6542; + remoteInfo = "watchkitapp Extension"; + }; + 692AB9181E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = D020FADD1D99466A00F279AA; + remoteInfo = SiriIntents; + }; + 692AB91A1E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = D020FB0A1D99637100F279AA; + remoteInfo = LegacyDatabase; + }; + 69960A0D1EF85C2900F9D091 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 099120C01EEAA63400F1366E; + remoteInfo = Widget; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 69015D921E9D848700AC9763 /* NetworkSocket.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = NetworkSocket.cpp; sourceTree = ""; }; + 69015D931E9D848700AC9763 /* NetworkSocket.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NetworkSocket.h; sourceTree = ""; }; + 6915307A1E6B5BAB004F643F /* logging.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = 
logging.cpp; sourceTree = ""; }; + 692AB8881E6759DD00706ACC /* AudioInput.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioInput.cpp; sourceTree = ""; }; + 692AB8891E6759DD00706ACC /* AudioInput.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioInput.h; sourceTree = ""; }; + 692AB88A1E6759DD00706ACC /* AudioOutput.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioOutput.cpp; sourceTree = ""; }; + 692AB88B1E6759DD00706ACC /* AudioOutput.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioOutput.h; sourceTree = ""; }; + 692AB88C1E6759DD00706ACC /* BlockingQueue.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BlockingQueue.cpp; sourceTree = ""; }; + 692AB88D1E6759DD00706ACC /* BlockingQueue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BlockingQueue.h; sourceTree = ""; }; + 692AB88E1E6759DD00706ACC /* BufferInputStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BufferInputStream.cpp; sourceTree = ""; }; + 692AB88F1E6759DD00706ACC /* BufferInputStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BufferInputStream.h; sourceTree = ""; }; + 692AB8901E6759DD00706ACC /* BufferOutputStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BufferOutputStream.cpp; sourceTree = ""; }; + 692AB8911E6759DD00706ACC /* BufferOutputStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BufferOutputStream.h; sourceTree = ""; }; + 692AB8921E6759DD00706ACC /* BufferPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BufferPool.cpp; sourceTree = 
""; }; + 692AB8931E6759DD00706ACC /* BufferPool.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BufferPool.h; sourceTree = ""; }; + 692AB8971E6759DD00706ACC /* CongestionControl.cpp */ = {isa = PBXFileReference; explicitFileType = sourcecode.cpp.objcpp; fileEncoding = 4; path = CongestionControl.cpp; sourceTree = ""; }; + 692AB8981E6759DD00706ACC /* CongestionControl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CongestionControl.h; sourceTree = ""; }; + 692AB8991E6759DD00706ACC /* EchoCanceller.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = EchoCanceller.cpp; sourceTree = ""; }; + 692AB89A1E6759DD00706ACC /* EchoCanceller.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = EchoCanceller.h; sourceTree = ""; }; + 692AB8A71E6759DD00706ACC /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 692AB8A81E6759DD00706ACC /* JitterBuffer.cpp */ = {isa = PBXFileReference; explicitFileType = sourcecode.cpp.objcpp; fileEncoding = 4; path = JitterBuffer.cpp; sourceTree = ""; }; + 692AB8A91E6759DD00706ACC /* JitterBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JitterBuffer.h; sourceTree = ""; }; + 692AB8AA1E6759DD00706ACC /* logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = logging.h; sourceTree = ""; }; + 692AB8AB1E6759DD00706ACC /* MediaStreamItf.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MediaStreamItf.cpp; sourceTree = ""; }; + 692AB8AC1E6759DD00706ACC /* MediaStreamItf.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MediaStreamItf.h; sourceTree = ""; }; + 692AB8AD1E6759DD00706ACC /* OpusDecoder.cpp */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OpusDecoder.cpp; sourceTree = ""; }; + 692AB8AE1E6759DD00706ACC /* OpusDecoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpusDecoder.h; sourceTree = ""; }; + 692AB8AF1E6759DD00706ACC /* OpusEncoder.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OpusEncoder.cpp; sourceTree = ""; }; + 692AB8B01E6759DD00706ACC /* OpusEncoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpusEncoder.h; sourceTree = ""; }; + 692AB8BE1E6759DD00706ACC /* AudioInputAudioUnit.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioInputAudioUnit.cpp; sourceTree = ""; }; + 692AB8BF1E6759DD00706ACC /* AudioInputAudioUnit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioInputAudioUnit.h; sourceTree = ""; }; + 692AB8C01E6759DD00706ACC /* AudioOutputAudioUnit.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioOutputAudioUnit.cpp; sourceTree = ""; }; + 692AB8C11E6759DD00706ACC /* AudioOutputAudioUnit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioOutputAudioUnit.h; sourceTree = ""; }; + 692AB8C21E6759DD00706ACC /* AudioUnitIO.cpp */ = {isa = PBXFileReference; explicitFileType = sourcecode.cpp.objcpp; fileEncoding = 4; path = AudioUnitIO.cpp; sourceTree = ""; }; + 692AB8C31E6759DD00706ACC /* AudioUnitIO.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioUnitIO.h; sourceTree = ""; }; + 692AB8C41E6759DD00706ACC /* TGLogWrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TGLogWrapper.h; sourceTree = ""; }; + 692AB8C51E6759DD00706ACC /* TGLogWrapper.m */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = TGLogWrapper.m; sourceTree = ""; }; + 692AB8C61E6759DD00706ACC /* threading.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = threading.h; sourceTree = ""; }; + 692AB8C71E6759DD00706ACC /* VoIPController.cpp */ = {isa = PBXFileReference; explicitFileType = sourcecode.cpp.objcpp; fileEncoding = 4; path = VoIPController.cpp; sourceTree = ""; }; + 692AB8C81E6759DD00706ACC /* VoIPController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VoIPController.h; sourceTree = ""; }; + 692AB8C91E6759DD00706ACC /* VoIPServerConfig.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = VoIPServerConfig.cpp; sourceTree = ""; }; + 692AB8CA1E6759DD00706ACC /* VoIPServerConfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VoIPServerConfig.h; sourceTree = ""; }; + 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = Telegraph.xcodeproj; path = ../../Telegraph.xcodeproj; sourceTree = ""; }; + 692AB91C1E675F7000706ACC /* AudioToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioToolbox.framework; path = System/Library/Frameworks/AudioToolbox.framework; sourceTree = SDKROOT; }; + 692AB91D1E675F7000706ACC /* AudioUnit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioUnit.framework; path = System/Library/Frameworks/AudioUnit.framework; sourceTree = SDKROOT; }; + 692AB91E1E675F7000706ACC /* CoreAudio.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreAudio.framework; path = System/Library/Frameworks/CoreAudio.framework; sourceTree = SDKROOT; }; + 69791A4B1EE8262400BB85FB /* NetworkSocketPosix.cpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = NetworkSocketPosix.cpp; path = os/posix/NetworkSocketPosix.cpp; sourceTree = SOURCE_ROOT; }; + 69791A4C1EE8262400BB85FB /* NetworkSocketPosix.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = NetworkSocketPosix.h; path = os/posix/NetworkSocketPosix.h; sourceTree = SOURCE_ROOT; }; + 69791A551EE8272A00BB85FB /* Resampler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Resampler.cpp; sourceTree = ""; }; + 69791A561EE8272A00BB85FB /* Resampler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Resampler.h; sourceTree = ""; }; + 69960A021EF85C2900F9D091 /* DarwinSpecific.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DarwinSpecific.h; sourceTree = ""; }; + 69960A031EF85C2900F9D091 /* DarwinSpecific.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = DarwinSpecific.mm; sourceTree = ""; }; + 69A6DD011E95EC7700000E69 /* array_view.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = array_view.h; sourceTree = ""; }; + 69A6DD021E95EC7700000E69 /* atomicops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomicops.h; sourceTree = ""; }; + 69A6DD031E95EC7700000E69 /* basictypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = basictypes.h; sourceTree = ""; }; + 69A6DD041E95EC7700000E69 /* checks.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = checks.cc; sourceTree = ""; }; + 69A6DD051E95EC7700000E69 /* checks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = checks.h; sourceTree = ""; }; + 69A6DD061E95EC7700000E69 /* constructormagic.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = constructormagic.h; sourceTree = ""; }; + 69A6DD071E95EC7700000E69 /* safe_compare.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = safe_compare.h; sourceTree = ""; }; + 69A6DD081E95EC7700000E69 /* safe_conversions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = safe_conversions.h; sourceTree = ""; }; + 69A6DD091E95EC7700000E69 /* safe_conversions_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = safe_conversions_impl.h; sourceTree = ""; }; + 69A6DD0A1E95EC7700000E69 /* sanitizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sanitizer.h; sourceTree = ""; }; + 69A6DD0B1E95EC7700000E69 /* stringutils.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = stringutils.cc; sourceTree = ""; }; + 69A6DD0C1E95EC7700000E69 /* stringutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stringutils.h; sourceTree = ""; }; + 69A6DD0D1E95EC7700000E69 /* type_traits.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = type_traits.h; sourceTree = ""; }; + 69A6DD0F1E95EC7700000E69 /* audio_util.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = audio_util.cc; sourceTree = ""; }; + 69A6DD101E95EC7700000E69 /* channel_buffer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = channel_buffer.cc; sourceTree = ""; }; + 69A6DD111E95EC7700000E69 /* channel_buffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_buffer.h; sourceTree = ""; }; + 69A6DD121E95EC7700000E69 /* fft4g.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fft4g.c; 
sourceTree = ""; }; + 69A6DD131E95EC7700000E69 /* fft4g.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fft4g.h; sourceTree = ""; }; + 69A6DD151E95EC7700000E69 /* audio_util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audio_util.h; sourceTree = ""; }; + 69A6DD161E95EC7700000E69 /* ring_buffer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ring_buffer.c; sourceTree = ""; }; + 69A6DD171E95EC7700000E69 /* ring_buffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ring_buffer.h; sourceTree = ""; }; + 69A6DD191E95EC7700000E69 /* auto_corr_to_refl_coef.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = auto_corr_to_refl_coef.c; sourceTree = ""; }; + 69A6DD1A1E95EC7700000E69 /* auto_correlation.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = auto_correlation.c; sourceTree = ""; }; + 69A6DD1B1E95EC7700000E69 /* complex_bit_reverse.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = complex_bit_reverse.c; sourceTree = ""; }; + 69A6DD1C1E95EC7700000E69 /* complex_bit_reverse_arm.S */ = {isa = PBXFileReference; explicitFileType = sourcecode.asm.llvm; fileEncoding = 4; path = complex_bit_reverse_arm.S; sourceTree = ""; }; + 69A6DD1D1E95EC7700000E69 /* complex_fft.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = complex_fft.c; sourceTree = ""; }; + 69A6DD1E1E95EC7700000E69 /* complex_fft_tables.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = complex_fft_tables.h; sourceTree = ""; }; + 69A6DD1F1E95EC7700000E69 /* copy_set_operations.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = copy_set_operations.c; sourceTree = ""; }; + 
69A6DD201E95EC7700000E69 /* cross_correlation.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = cross_correlation.c; sourceTree = ""; }; + 69A6DD211E95EC7700000E69 /* cross_correlation_neon.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = cross_correlation_neon.c; sourceTree = ""; }; + 69A6DD221E95EC7700000E69 /* division_operations.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = division_operations.c; sourceTree = ""; }; + 69A6DD231E95EC7700000E69 /* dot_product_with_scale.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dot_product_with_scale.c; sourceTree = ""; }; + 69A6DD241E95EC7700000E69 /* downsample_fast.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = downsample_fast.c; sourceTree = ""; }; + 69A6DD251E95EC7700000E69 /* downsample_fast_neon.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = downsample_fast_neon.c; sourceTree = ""; }; + 69A6DD261E95EC7700000E69 /* energy.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = energy.c; sourceTree = ""; }; + 69A6DD271E95EC7700000E69 /* filter_ar.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = filter_ar.c; sourceTree = ""; }; + 69A6DD281E95EC7700000E69 /* filter_ar_fast_q12.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = filter_ar_fast_q12.c; sourceTree = ""; }; + 69A6DD291E95EC7700000E69 /* filter_ar_fast_q12_armv7.S */ = {isa = PBXFileReference; explicitFileType = sourcecode.asm.llvm; fileEncoding = 4; path = filter_ar_fast_q12_armv7.S; sourceTree = ""; }; + 69A6DD2A1E95EC7700000E69 /* filter_ma_fast_q12.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = filter_ma_fast_q12.c; 
sourceTree = ""; }; + 69A6DD2B1E95EC7700000E69 /* get_hanning_window.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = get_hanning_window.c; sourceTree = ""; }; + 69A6DD2C1E95EC7700000E69 /* get_scaling_square.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = get_scaling_square.c; sourceTree = ""; }; + 69A6DD2D1E95EC7700000E69 /* ilbc_specific_functions.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ilbc_specific_functions.c; sourceTree = ""; }; + 69A6DD2F1E95EC7700000E69 /* real_fft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = real_fft.h; sourceTree = ""; }; + 69A6DD301E95EC7700000E69 /* signal_processing_library.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = signal_processing_library.h; sourceTree = ""; }; + 69A6DD311E95EC7700000E69 /* spl_inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spl_inl.h; sourceTree = ""; }; + 69A6DD321E95EC7700000E69 /* spl_inl_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spl_inl_armv7.h; sourceTree = ""; }; + 69A6DD331E95EC7700000E69 /* spl_inl_mips.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spl_inl_mips.h; sourceTree = ""; }; + 69A6DD341E95EC7700000E69 /* levinson_durbin.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = levinson_durbin.c; sourceTree = ""; }; + 69A6DD351E95EC7700000E69 /* lpc_to_refl_coef.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = lpc_to_refl_coef.c; sourceTree = ""; }; + 69A6DD361E95EC7700000E69 /* min_max_operations.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = min_max_operations.c; sourceTree = ""; }; + 
69A6DD371E95EC7700000E69 /* min_max_operations_neon.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = min_max_operations_neon.c; sourceTree = ""; }; + 69A6DD381E95EC7700000E69 /* randomization_functions.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = randomization_functions.c; sourceTree = ""; }; + 69A6DD391E95EC7700000E69 /* real_fft.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = real_fft.c; sourceTree = ""; }; + 69A6DD3B1E95EC7700000E69 /* refl_coef_to_lpc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = refl_coef_to_lpc.c; sourceTree = ""; }; + 69A6DD3C1E95EC7700000E69 /* resample.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample.c; sourceTree = ""; }; + 69A6DD3D1E95EC7700000E69 /* resample_48khz.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample_48khz.c; sourceTree = ""; }; + 69A6DD3E1E95EC7700000E69 /* resample_by_2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample_by_2.c; sourceTree = ""; }; + 69A6DD3F1E95EC7700000E69 /* resample_by_2_internal.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample_by_2_internal.c; sourceTree = ""; }; + 69A6DD401E95EC7700000E69 /* resample_by_2_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resample_by_2_internal.h; sourceTree = ""; }; + 69A6DD411E95EC7700000E69 /* resample_fractional.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample_fractional.c; sourceTree = ""; }; + 69A6DD421E95EC7700000E69 /* spl_init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = spl_init.c; sourceTree = ""; }; + 
69A6DD431E95EC7700000E69 /* spl_inl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = spl_inl.c; sourceTree = ""; }; + 69A6DD441E95EC7700000E69 /* spl_sqrt.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = spl_sqrt.c; sourceTree = ""; }; + 69A6DD451E95EC7700000E69 /* spl_sqrt_floor.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = spl_sqrt_floor.c; sourceTree = ""; }; + 69A6DD461E95EC7700000E69 /* spl_sqrt_floor_arm.S */ = {isa = PBXFileReference; explicitFileType = sourcecode.asm.llvm; fileEncoding = 4; path = spl_sqrt_floor_arm.S; sourceTree = ""; }; + 69A6DD471E95EC7700000E69 /* splitting_filter_impl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = splitting_filter_impl.c; sourceTree = ""; }; + 69A6DD481E95EC7700000E69 /* sqrt_of_one_minus_x_squared.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = sqrt_of_one_minus_x_squared.c; sourceTree = ""; }; + 69A6DD491E95EC7700000E69 /* vector_scaling_operations.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = vector_scaling_operations.c; sourceTree = ""; }; + 69A6DD4A1E95EC7700000E69 /* sparse_fir_filter.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = sparse_fir_filter.cc; sourceTree = ""; }; + 69A6DD4B1E95EC7700000E69 /* sparse_fir_filter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_fir_filter.h; sourceTree = ""; }; + 69A6DD4F1E95EC7700000E69 /* aec_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aec_common.h; sourceTree = ""; }; + 69A6DD501E95EC7700000E69 /* aec_core.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aec_core.cc; sourceTree = ""; }; + 
69A6DD511E95EC7700000E69 /* aec_core.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aec_core.h; sourceTree = ""; }; + 69A6DD521E95EC7700000E69 /* aec_core_neon.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aec_core_neon.cc; sourceTree = ""; }; + 69A6DD531E95EC7700000E69 /* aec_core_optimized_methods.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aec_core_optimized_methods.h; sourceTree = ""; }; + 69A6DD541E95EC7700000E69 /* aec_core_sse2.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aec_core_sse2.cc; sourceTree = ""; }; + 69A6DD551E95EC7700000E69 /* aec_resampler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aec_resampler.cc; sourceTree = ""; }; + 69A6DD561E95EC7700000E69 /* aec_resampler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aec_resampler.h; sourceTree = ""; }; + 69A6DD571E95EC7700000E69 /* echo_cancellation.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = echo_cancellation.cc; sourceTree = ""; }; + 69A6DD581E95EC7700000E69 /* echo_cancellation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = echo_cancellation.h; sourceTree = ""; }; + 69A6DD5A1E95EC7700000E69 /* aecm_core.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aecm_core.cc; sourceTree = ""; }; + 69A6DD5B1E95EC7700000E69 /* aecm_core.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aecm_core.h; sourceTree = ""; }; + 69A6DD5C1E95EC7700000E69 /* aecm_core_c.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aecm_core_c.cc; sourceTree = ""; }; + 69A6DD5D1E95EC7700000E69 /* 
aecm_core_neon.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aecm_core_neon.cc; sourceTree = ""; }; + 69A6DD5E1E95EC7700000E69 /* aecm_defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aecm_defines.h; sourceTree = ""; }; + 69A6DD5F1E95EC7700000E69 /* echo_control_mobile.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = echo_control_mobile.cc; sourceTree = ""; }; + 69A6DD601E95EC7700000E69 /* echo_control_mobile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = echo_control_mobile.h; sourceTree = ""; }; + 69A6DD631E95EC7700000E69 /* analog_agc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = analog_agc.c; sourceTree = ""; }; + 69A6DD641E95EC7700000E69 /* analog_agc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = analog_agc.h; sourceTree = ""; }; + 69A6DD651E95EC7700000E69 /* digital_agc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = digital_agc.c; sourceTree = ""; }; + 69A6DD661E95EC7700000E69 /* digital_agc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = digital_agc.h; sourceTree = ""; }; + 69A6DD671E95EC7700000E69 /* gain_control.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gain_control.h; sourceTree = ""; }; + 69A6DD691E95EC7700000E69 /* apm_data_dumper.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = apm_data_dumper.cc; sourceTree = ""; }; + 69A6DD6A1E95EC7700000E69 /* apm_data_dumper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apm_data_dumper.h; sourceTree = ""; }; + 69A6DD6C1E95EC7700000E69 /* defines.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = defines.h; sourceTree = ""; }; + 69A6DD6D1E95EC7700000E69 /* noise_suppression.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = noise_suppression.c; sourceTree = ""; }; + 69A6DD6E1E95EC7700000E69 /* noise_suppression.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = noise_suppression.h; sourceTree = ""; }; + 69A6DD6F1E95EC7700000E69 /* noise_suppression_x.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = noise_suppression_x.c; sourceTree = ""; }; + 69A6DD701E95EC7700000E69 /* noise_suppression_x.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = noise_suppression_x.h; sourceTree = ""; }; + 69A6DD711E95EC7700000E69 /* ns_core.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ns_core.c; sourceTree = ""; }; + 69A6DD721E95EC7700000E69 /* ns_core.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ns_core.h; sourceTree = ""; }; + 69A6DD731E95EC7700000E69 /* nsx_core.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = nsx_core.c; sourceTree = ""; }; + 69A6DD741E95EC7700000E69 /* nsx_core.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nsx_core.h; sourceTree = ""; }; + 69A6DD751E95EC7700000E69 /* nsx_core_c.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = nsx_core_c.c; sourceTree = ""; }; + 69A6DD761E95EC7700000E69 /* nsx_core_neon.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = nsx_core_neon.c; sourceTree = ""; }; + 69A6DD771E95EC7700000E69 /* nsx_defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nsx_defines.h; sourceTree = ""; }; + 
69A6DD781E95EC7700000E69 /* windows_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = windows_private.h; sourceTree = ""; }; + 69A6DD791E95EC7700000E69 /* splitting_filter.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = splitting_filter.cc; sourceTree = ""; }; + 69A6DD7A1E95EC7700000E69 /* splitting_filter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = splitting_filter.h; sourceTree = ""; }; + 69A6DD7B1E95EC7700000E69 /* three_band_filter_bank.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = three_band_filter_bank.cc; sourceTree = ""; }; + 69A6DD7C1E95EC7700000E69 /* three_band_filter_bank.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = three_band_filter_bank.h; sourceTree = ""; }; + 69A6DD7E1E95EC7700000E69 /* block_mean_calculator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = block_mean_calculator.cc; sourceTree = ""; }; + 69A6DD7F1E95EC7700000E69 /* block_mean_calculator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block_mean_calculator.h; sourceTree = ""; }; + 69A6DD801E95EC7700000E69 /* delay_estimator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = delay_estimator.cc; sourceTree = ""; }; + 69A6DD811E95EC7700000E69 /* delay_estimator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = delay_estimator.h; sourceTree = ""; }; + 69A6DD821E95EC7700000E69 /* delay_estimator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = delay_estimator_internal.h; sourceTree = ""; }; + 69A6DD831E95EC7700000E69 /* delay_estimator_wrapper.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType 
= sourcecode.cpp.cpp; path = delay_estimator_wrapper.cc; sourceTree = ""; }; + 69A6DD841E95EC7700000E69 /* delay_estimator_wrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = delay_estimator_wrapper.h; sourceTree = ""; }; + 69A6DD851E95EC7700000E69 /* ooura_fft.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ooura_fft.cc; sourceTree = ""; }; + 69A6DD861E95EC7700000E69 /* ooura_fft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ooura_fft.h; sourceTree = ""; }; + 69A6DD871E95EC7700000E69 /* ooura_fft_neon.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ooura_fft_neon.cc; sourceTree = ""; }; + 69A6DD881E95EC7700000E69 /* ooura_fft_sse2.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ooura_fft_sse2.cc; sourceTree = ""; }; + 69A6DD891E95EC7700000E69 /* ooura_fft_tables_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ooura_fft_tables_common.h; sourceTree = ""; }; + 69A6DD8A1E95EC7700000E69 /* ooura_fft_tables_neon_sse2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ooura_fft_tables_neon_sse2.h; sourceTree = ""; }; + 69A6DD8D1E95EC7700000E69 /* asm_defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asm_defines.h; sourceTree = ""; }; + 69A6DD8E1E95EC7700000E69 /* compile_assert_c.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = compile_assert_c.h; sourceTree = ""; }; + 69A6DD8F1E95EC7700000E69 /* cpu_features_wrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpu_features_wrapper.h; sourceTree = ""; }; + 69A6DD901E95EC7700000E69 /* metrics.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = metrics.h; sourceTree = ""; }; + 69A6DD921E95EC7700000E69 /* cpu_features.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = cpu_features.cc; sourceTree = ""; }; + 69A6DD931E95EC7700000E69 /* typedefs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = typedefs.h; sourceTree = ""; }; + 69A6DE171E95ECF000000E69 /* wav_file.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = wav_file.cc; sourceTree = ""; }; + 69A6DE181E95ECF000000E69 /* wav_file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = wav_file.h; sourceTree = ""; }; + 69A6DE191E95ECF000000E69 /* wav_header.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = wav_header.cc; sourceTree = ""; }; + 69A6DE1A1E95ECF000000E69 /* wav_header.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = wav_header.h; sourceTree = ""; }; + 69F842361E67540700C110F7 /* libtgvoip.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = libtgvoip.framework; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 69F842321E67540700C110F7 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 692AB91F1E675F7000706ACC /* AudioToolbox.framework in Frameworks */, + 692AB9201E675F7000706ACC /* AudioUnit.framework in Frameworks */, + 692AB9211E675F7000706ACC /* CoreAudio.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 69015D871E9D846F00AC9763 /* posix */ = { + isa = PBXGroup; + children = ( + 69791A4B1EE8262400BB85FB /* NetworkSocketPosix.cpp */, + 69791A4C1EE8262400BB85FB /* 
NetworkSocketPosix.h */, + ); + name = posix; + path = ../../../../TDesktop/TBuild/tdesktop/third_party/libtgvoip/os/posix; + sourceTree = ""; + }; + 692AB8861E6759BF00706ACC /* libtgvoip */ = { + isa = PBXGroup; + children = ( + 692AB8871E6759DD00706ACC /* audio */, + 692AB88C1E6759DD00706ACC /* BlockingQueue.cpp */, + 692AB88D1E6759DD00706ACC /* BlockingQueue.h */, + 692AB88E1E6759DD00706ACC /* BufferInputStream.cpp */, + 692AB88F1E6759DD00706ACC /* BufferInputStream.h */, + 692AB8901E6759DD00706ACC /* BufferOutputStream.cpp */, + 692AB8911E6759DD00706ACC /* BufferOutputStream.h */, + 692AB8921E6759DD00706ACC /* BufferPool.cpp */, + 692AB8931E6759DD00706ACC /* BufferPool.h */, + 692AB8971E6759DD00706ACC /* CongestionControl.cpp */, + 692AB8981E6759DD00706ACC /* CongestionControl.h */, + 692AB8991E6759DD00706ACC /* EchoCanceller.cpp */, + 692AB89A1E6759DD00706ACC /* EchoCanceller.h */, + 692AB8A71E6759DD00706ACC /* Info.plist */, + 692AB8A81E6759DD00706ACC /* JitterBuffer.cpp */, + 692AB8A91E6759DD00706ACC /* JitterBuffer.h */, + 6915307A1E6B5BAB004F643F /* logging.cpp */, + 692AB8AA1E6759DD00706ACC /* logging.h */, + 692AB8AB1E6759DD00706ACC /* MediaStreamItf.cpp */, + 692AB8AC1E6759DD00706ACC /* MediaStreamItf.h */, + 69015D921E9D848700AC9763 /* NetworkSocket.cpp */, + 69015D931E9D848700AC9763 /* NetworkSocket.h */, + 692AB8AD1E6759DD00706ACC /* OpusDecoder.cpp */, + 692AB8AE1E6759DD00706ACC /* OpusDecoder.h */, + 692AB8AF1E6759DD00706ACC /* OpusEncoder.cpp */, + 692AB8B01E6759DD00706ACC /* OpusEncoder.h */, + 692AB8B11E6759DD00706ACC /* os */, + 692AB8C61E6759DD00706ACC /* threading.h */, + 692AB8C71E6759DD00706ACC /* VoIPController.cpp */, + 692AB8C81E6759DD00706ACC /* VoIPController.h */, + 692AB8C91E6759DD00706ACC /* VoIPServerConfig.cpp */, + 692AB8CA1E6759DD00706ACC /* VoIPServerConfig.h */, + 69A6DCFE1E95EC7700000E69 /* webrtc_dsp */, + ); + name = libtgvoip; + sourceTree = ""; + }; + 692AB8871E6759DD00706ACC /* audio */ = { + isa = PBXGroup; + children = 
( + 692AB8881E6759DD00706ACC /* AudioInput.cpp */, + 692AB8891E6759DD00706ACC /* AudioInput.h */, + 692AB88A1E6759DD00706ACC /* AudioOutput.cpp */, + 692AB88B1E6759DD00706ACC /* AudioOutput.h */, + 69791A551EE8272A00BB85FB /* Resampler.cpp */, + 69791A561EE8272A00BB85FB /* Resampler.h */, + ); + path = audio; + sourceTree = ""; + }; + 692AB8B11E6759DD00706ACC /* os */ = { + isa = PBXGroup; + children = ( + 69015D871E9D846F00AC9763 /* posix */, + 692AB8BD1E6759DD00706ACC /* darwin */, + ); + path = os; + sourceTree = ""; + }; + 692AB8BD1E6759DD00706ACC /* darwin */ = { + isa = PBXGroup; + children = ( + 692AB8BE1E6759DD00706ACC /* AudioInputAudioUnit.cpp */, + 692AB8BF1E6759DD00706ACC /* AudioInputAudioUnit.h */, + 692AB8C01E6759DD00706ACC /* AudioOutputAudioUnit.cpp */, + 692AB8C11E6759DD00706ACC /* AudioOutputAudioUnit.h */, + 692AB8C21E6759DD00706ACC /* AudioUnitIO.cpp */, + 692AB8C31E6759DD00706ACC /* AudioUnitIO.h */, + 692AB8C41E6759DD00706ACC /* TGLogWrapper.h */, + 692AB8C51E6759DD00706ACC /* TGLogWrapper.m */, + 69960A021EF85C2900F9D091 /* DarwinSpecific.h */, + 69960A031EF85C2900F9D091 /* DarwinSpecific.mm */, + ); + path = darwin; + sourceTree = ""; + }; + 692AB9061E675E8700706ACC /* Frameworks */ = { + isa = PBXGroup; + children = ( + 692AB91C1E675F7000706ACC /* AudioToolbox.framework */, + 692AB91D1E675F7000706ACC /* AudioUnit.framework */, + 692AB91E1E675F7000706ACC /* CoreAudio.framework */, + 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */, + ); + name = Frameworks; + sourceTree = ""; + }; + 692AB9081E675E8800706ACC /* Products */ = { + isa = PBXGroup; + children = ( + 692AB9111E675E8800706ACC /* Telegram.app */, + 692AB9131E675E8800706ACC /* Share.appex */, + 692AB9151E675E8800706ACC /* watchkitapp.app */, + 692AB9171E675E8800706ACC /* watchkitapp Extension.appex */, + 692AB9191E675E8800706ACC /* SiriIntents.appex */, + 692AB91B1E675E8800706ACC /* LegacyDatabase.framework */, + 69960A0E1EF85C2900F9D091 /* Widget.appex */, + ); + name = Products; 
+ sourceTree = ""; + }; + 69A6DCFE1E95EC7700000E69 /* webrtc_dsp */ = { + isa = PBXGroup; + children = ( + 69A6DCFF1E95EC7700000E69 /* webrtc */, + ); + path = webrtc_dsp; + sourceTree = ""; + }; + 69A6DCFF1E95EC7700000E69 /* webrtc */ = { + isa = PBXGroup; + children = ( + 69A6DD001E95EC7700000E69 /* base */, + 69A6DD0E1E95EC7700000E69 /* common_audio */, + 69A6DD4C1E95EC7700000E69 /* modules */, + 69A6DD8B1E95EC7700000E69 /* system_wrappers */, + 69A6DD931E95EC7700000E69 /* typedefs.h */, + ); + path = webrtc; + sourceTree = ""; + }; + 69A6DD001E95EC7700000E69 /* base */ = { + isa = PBXGroup; + children = ( + 69A6DD011E95EC7700000E69 /* array_view.h */, + 69A6DD021E95EC7700000E69 /* atomicops.h */, + 69A6DD031E95EC7700000E69 /* basictypes.h */, + 69A6DD041E95EC7700000E69 /* checks.cc */, + 69A6DD051E95EC7700000E69 /* checks.h */, + 69A6DD061E95EC7700000E69 /* constructormagic.h */, + 69A6DD071E95EC7700000E69 /* safe_compare.h */, + 69A6DD081E95EC7700000E69 /* safe_conversions.h */, + 69A6DD091E95EC7700000E69 /* safe_conversions_impl.h */, + 69A6DD0A1E95EC7700000E69 /* sanitizer.h */, + 69A6DD0B1E95EC7700000E69 /* stringutils.cc */, + 69A6DD0C1E95EC7700000E69 /* stringutils.h */, + 69A6DD0D1E95EC7700000E69 /* type_traits.h */, + ); + path = base; + sourceTree = ""; + }; + 69A6DD0E1E95EC7700000E69 /* common_audio */ = { + isa = PBXGroup; + children = ( + 69A6DD0F1E95EC7700000E69 /* audio_util.cc */, + 69A6DD101E95EC7700000E69 /* channel_buffer.cc */, + 69A6DD111E95EC7700000E69 /* channel_buffer.h */, + 69A6DD121E95EC7700000E69 /* fft4g.c */, + 69A6DD131E95EC7700000E69 /* fft4g.h */, + 69A6DD141E95EC7700000E69 /* include */, + 69A6DD161E95EC7700000E69 /* ring_buffer.c */, + 69A6DD171E95EC7700000E69 /* ring_buffer.h */, + 69A6DD181E95EC7700000E69 /* signal_processing */, + 69A6DD4A1E95EC7700000E69 /* sparse_fir_filter.cc */, + 69A6DD4B1E95EC7700000E69 /* sparse_fir_filter.h */, + 69A6DE171E95ECF000000E69 /* wav_file.cc */, + 69A6DE181E95ECF000000E69 /* wav_file.h */, 
+ 69A6DE191E95ECF000000E69 /* wav_header.cc */, + 69A6DE1A1E95ECF000000E69 /* wav_header.h */, + ); + path = common_audio; + sourceTree = ""; + }; + 69A6DD141E95EC7700000E69 /* include */ = { + isa = PBXGroup; + children = ( + 69A6DD151E95EC7700000E69 /* audio_util.h */, + ); + path = include; + sourceTree = ""; + }; + 69A6DD181E95EC7700000E69 /* signal_processing */ = { + isa = PBXGroup; + children = ( + 69A6DD191E95EC7700000E69 /* auto_corr_to_refl_coef.c */, + 69A6DD1A1E95EC7700000E69 /* auto_correlation.c */, + 69A6DD1B1E95EC7700000E69 /* complex_bit_reverse.c */, + 69A6DD1C1E95EC7700000E69 /* complex_bit_reverse_arm.S */, + 69A6DD1D1E95EC7700000E69 /* complex_fft.c */, + 69A6DD1E1E95EC7700000E69 /* complex_fft_tables.h */, + 69A6DD1F1E95EC7700000E69 /* copy_set_operations.c */, + 69A6DD201E95EC7700000E69 /* cross_correlation.c */, + 69A6DD211E95EC7700000E69 /* cross_correlation_neon.c */, + 69A6DD221E95EC7700000E69 /* division_operations.c */, + 69A6DD231E95EC7700000E69 /* dot_product_with_scale.c */, + 69A6DD241E95EC7700000E69 /* downsample_fast.c */, + 69A6DD251E95EC7700000E69 /* downsample_fast_neon.c */, + 69A6DD261E95EC7700000E69 /* energy.c */, + 69A6DD271E95EC7700000E69 /* filter_ar.c */, + 69A6DD281E95EC7700000E69 /* filter_ar_fast_q12.c */, + 69A6DD291E95EC7700000E69 /* filter_ar_fast_q12_armv7.S */, + 69A6DD2A1E95EC7700000E69 /* filter_ma_fast_q12.c */, + 69A6DD2B1E95EC7700000E69 /* get_hanning_window.c */, + 69A6DD2C1E95EC7700000E69 /* get_scaling_square.c */, + 69A6DD2D1E95EC7700000E69 /* ilbc_specific_functions.c */, + 69A6DD2E1E95EC7700000E69 /* include */, + 69A6DD341E95EC7700000E69 /* levinson_durbin.c */, + 69A6DD351E95EC7700000E69 /* lpc_to_refl_coef.c */, + 69A6DD361E95EC7700000E69 /* min_max_operations.c */, + 69A6DD371E95EC7700000E69 /* min_max_operations_neon.c */, + 69A6DD381E95EC7700000E69 /* randomization_functions.c */, + 69A6DD391E95EC7700000E69 /* real_fft.c */, + 69A6DD3B1E95EC7700000E69 /* refl_coef_to_lpc.c */, + 
69A6DD3C1E95EC7700000E69 /* resample.c */, + 69A6DD3D1E95EC7700000E69 /* resample_48khz.c */, + 69A6DD3E1E95EC7700000E69 /* resample_by_2.c */, + 69A6DD3F1E95EC7700000E69 /* resample_by_2_internal.c */, + 69A6DD401E95EC7700000E69 /* resample_by_2_internal.h */, + 69A6DD411E95EC7700000E69 /* resample_fractional.c */, + 69A6DD421E95EC7700000E69 /* spl_init.c */, + 69A6DD431E95EC7700000E69 /* spl_inl.c */, + 69A6DD441E95EC7700000E69 /* spl_sqrt.c */, + 69A6DD451E95EC7700000E69 /* spl_sqrt_floor.c */, + 69A6DD461E95EC7700000E69 /* spl_sqrt_floor_arm.S */, + 69A6DD471E95EC7700000E69 /* splitting_filter_impl.c */, + 69A6DD481E95EC7700000E69 /* sqrt_of_one_minus_x_squared.c */, + 69A6DD491E95EC7700000E69 /* vector_scaling_operations.c */, + ); + path = signal_processing; + sourceTree = ""; + }; + 69A6DD2E1E95EC7700000E69 /* include */ = { + isa = PBXGroup; + children = ( + 69A6DD2F1E95EC7700000E69 /* real_fft.h */, + 69A6DD301E95EC7700000E69 /* signal_processing_library.h */, + 69A6DD311E95EC7700000E69 /* spl_inl.h */, + 69A6DD321E95EC7700000E69 /* spl_inl_armv7.h */, + 69A6DD331E95EC7700000E69 /* spl_inl_mips.h */, + ); + path = include; + sourceTree = ""; + }; + 69A6DD4C1E95EC7700000E69 /* modules */ = { + isa = PBXGroup; + children = ( + 69A6DD4D1E95EC7700000E69 /* audio_processing */, + ); + path = modules; + sourceTree = ""; + }; + 69A6DD4D1E95EC7700000E69 /* audio_processing */ = { + isa = PBXGroup; + children = ( + 69A6DD4E1E95EC7700000E69 /* aec */, + 69A6DD591E95EC7700000E69 /* aecm */, + 69A6DD611E95EC7700000E69 /* agc */, + 69A6DD681E95EC7700000E69 /* logging */, + 69A6DD6B1E95EC7700000E69 /* ns */, + 69A6DD791E95EC7700000E69 /* splitting_filter.cc */, + 69A6DD7A1E95EC7700000E69 /* splitting_filter.h */, + 69A6DD7B1E95EC7700000E69 /* three_band_filter_bank.cc */, + 69A6DD7C1E95EC7700000E69 /* three_band_filter_bank.h */, + 69A6DD7D1E95EC7700000E69 /* utility */, + ); + path = audio_processing; + sourceTree = ""; + }; + 69A6DD4E1E95EC7700000E69 /* aec */ = { + 
isa = PBXGroup; + children = ( + 69A6DD4F1E95EC7700000E69 /* aec_common.h */, + 69A6DD501E95EC7700000E69 /* aec_core.cc */, + 69A6DD511E95EC7700000E69 /* aec_core.h */, + 69A6DD521E95EC7700000E69 /* aec_core_neon.cc */, + 69A6DD531E95EC7700000E69 /* aec_core_optimized_methods.h */, + 69A6DD541E95EC7700000E69 /* aec_core_sse2.cc */, + 69A6DD551E95EC7700000E69 /* aec_resampler.cc */, + 69A6DD561E95EC7700000E69 /* aec_resampler.h */, + 69A6DD571E95EC7700000E69 /* echo_cancellation.cc */, + 69A6DD581E95EC7700000E69 /* echo_cancellation.h */, + ); + path = aec; + sourceTree = ""; + }; + 69A6DD591E95EC7700000E69 /* aecm */ = { + isa = PBXGroup; + children = ( + 69A6DD5A1E95EC7700000E69 /* aecm_core.cc */, + 69A6DD5B1E95EC7700000E69 /* aecm_core.h */, + 69A6DD5C1E95EC7700000E69 /* aecm_core_c.cc */, + 69A6DD5D1E95EC7700000E69 /* aecm_core_neon.cc */, + 69A6DD5E1E95EC7700000E69 /* aecm_defines.h */, + 69A6DD5F1E95EC7700000E69 /* echo_control_mobile.cc */, + 69A6DD601E95EC7700000E69 /* echo_control_mobile.h */, + ); + path = aecm; + sourceTree = ""; + }; + 69A6DD611E95EC7700000E69 /* agc */ = { + isa = PBXGroup; + children = ( + 69A6DD621E95EC7700000E69 /* legacy */, + ); + path = agc; + sourceTree = ""; + }; + 69A6DD621E95EC7700000E69 /* legacy */ = { + isa = PBXGroup; + children = ( + 69A6DD631E95EC7700000E69 /* analog_agc.c */, + 69A6DD641E95EC7700000E69 /* analog_agc.h */, + 69A6DD651E95EC7700000E69 /* digital_agc.c */, + 69A6DD661E95EC7700000E69 /* digital_agc.h */, + 69A6DD671E95EC7700000E69 /* gain_control.h */, + ); + path = legacy; + sourceTree = ""; + }; + 69A6DD681E95EC7700000E69 /* logging */ = { + isa = PBXGroup; + children = ( + 69A6DD691E95EC7700000E69 /* apm_data_dumper.cc */, + 69A6DD6A1E95EC7700000E69 /* apm_data_dumper.h */, + ); + path = logging; + sourceTree = ""; + }; + 69A6DD6B1E95EC7700000E69 /* ns */ = { + isa = PBXGroup; + children = ( + 69A6DD6C1E95EC7700000E69 /* defines.h */, + 69A6DD6D1E95EC7700000E69 /* noise_suppression.c */, + 
69A6DD6E1E95EC7700000E69 /* noise_suppression.h */, + 69A6DD6F1E95EC7700000E69 /* noise_suppression_x.c */, + 69A6DD701E95EC7700000E69 /* noise_suppression_x.h */, + 69A6DD711E95EC7700000E69 /* ns_core.c */, + 69A6DD721E95EC7700000E69 /* ns_core.h */, + 69A6DD731E95EC7700000E69 /* nsx_core.c */, + 69A6DD741E95EC7700000E69 /* nsx_core.h */, + 69A6DD751E95EC7700000E69 /* nsx_core_c.c */, + 69A6DD761E95EC7700000E69 /* nsx_core_neon.c */, + 69A6DD771E95EC7700000E69 /* nsx_defines.h */, + 69A6DD781E95EC7700000E69 /* windows_private.h */, + ); + path = ns; + sourceTree = ""; + }; + 69A6DD7D1E95EC7700000E69 /* utility */ = { + isa = PBXGroup; + children = ( + 69A6DD7E1E95EC7700000E69 /* block_mean_calculator.cc */, + 69A6DD7F1E95EC7700000E69 /* block_mean_calculator.h */, + 69A6DD801E95EC7700000E69 /* delay_estimator.cc */, + 69A6DD811E95EC7700000E69 /* delay_estimator.h */, + 69A6DD821E95EC7700000E69 /* delay_estimator_internal.h */, + 69A6DD831E95EC7700000E69 /* delay_estimator_wrapper.cc */, + 69A6DD841E95EC7700000E69 /* delay_estimator_wrapper.h */, + 69A6DD851E95EC7700000E69 /* ooura_fft.cc */, + 69A6DD861E95EC7700000E69 /* ooura_fft.h */, + 69A6DD871E95EC7700000E69 /* ooura_fft_neon.cc */, + 69A6DD881E95EC7700000E69 /* ooura_fft_sse2.cc */, + 69A6DD891E95EC7700000E69 /* ooura_fft_tables_common.h */, + 69A6DD8A1E95EC7700000E69 /* ooura_fft_tables_neon_sse2.h */, + ); + path = utility; + sourceTree = ""; + }; + 69A6DD8B1E95EC7700000E69 /* system_wrappers */ = { + isa = PBXGroup; + children = ( + 69A6DD8C1E95EC7700000E69 /* include */, + 69A6DD911E95EC7700000E69 /* source */, + ); + path = system_wrappers; + sourceTree = ""; + }; + 69A6DD8C1E95EC7700000E69 /* include */ = { + isa = PBXGroup; + children = ( + 69A6DD8D1E95EC7700000E69 /* asm_defines.h */, + 69A6DD8E1E95EC7700000E69 /* compile_assert_c.h */, + 69A6DD8F1E95EC7700000E69 /* cpu_features_wrapper.h */, + 69A6DD901E95EC7700000E69 /* metrics.h */, + ); + path = include; + sourceTree = ""; + }; + 
69A6DD911E95EC7700000E69 /* source */ = { + isa = PBXGroup; + children = ( + 69A6DD921E95EC7700000E69 /* cpu_features.cc */, + ); + path = source; + sourceTree = ""; + }; + 69F8422C1E67540700C110F7 = { + isa = PBXGroup; + children = ( + 692AB8861E6759BF00706ACC /* libtgvoip */, + 69F842371E67540700C110F7 /* Products */, + 692AB9061E675E8700706ACC /* Frameworks */, + ); + sourceTree = ""; + }; + 69F842371E67540700C110F7 /* Products */ = { + isa = PBXGroup; + children = ( + 69F842361E67540700C110F7 /* libtgvoip.framework */, + ); + name = Products; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + 69F842331E67540700C110F7 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 692AB9011E6759DD00706ACC /* threading.h in Headers */, + 692AB8EA1E6759DD00706ACC /* MediaStreamItf.h in Headers */, + 692AB8EE1E6759DD00706ACC /* OpusEncoder.h in Headers */, + 69A6DDEB1E95EC7700000E69 /* echo_control_mobile.h in Headers */, + 692AB8CE1E6759DD00706ACC /* AudioOutput.h in Headers */, + 692AB8D91E6759DD00706ACC /* CongestionControl.h in Headers */, + 69A6DDFE1E95EC7700000E69 /* nsx_defines.h in Headers */, + 692AB8CC1E6759DD00706ACC /* AudioInput.h in Headers */, + 69A6DDA51E95EC7700000E69 /* fft4g.h in Headers */, + 69A6DDC01E95EC7700000E69 /* spl_inl.h in Headers */, + 69A6DE0F1E95EC7800000E69 /* ooura_fft_tables_common.h in Headers */, + 69A6DDDA1E95EC7700000E69 /* sparse_fir_filter.h in Headers */, + 69A6DDE61E95EC7700000E69 /* aecm_core.h in Headers */, + 692AB8EC1E6759DD00706ACC /* OpusDecoder.h in Headers */, + 69A6DE1E1E95ECF000000E69 /* wav_header.h in Headers */, + 69A6DD981E95EC7700000E69 /* checks.h in Headers */, + 692AB8E81E6759DD00706ACC /* logging.h in Headers */, + 69A6DDED1E95EC7700000E69 /* analog_agc.h in Headers */, + 69A6DE081E95EC7800000E69 /* delay_estimator_internal.h in Headers */, + 69A6DDE91E95EC7700000E69 /* aecm_defines.h in Headers */, + 69A6DD9C1E95EC7700000E69 
/* safe_conversions_impl.h in Headers */, + 69A6DDAE1E95EC7700000E69 /* complex_fft_tables.h in Headers */, + 692AB8FF1E6759DD00706ACC /* TGLogWrapper.h in Headers */, + 692AB8D41E6759DD00706ACC /* BufferOutputStream.h in Headers */, + 69791A4E1EE8262400BB85FB /* NetworkSocketPosix.h in Headers */, + 692AB9051E6759DD00706ACC /* VoIPServerConfig.h in Headers */, + 69A6DDF31E95EC7700000E69 /* defines.h in Headers */, + 69A6DD9D1E95EC7700000E69 /* sanitizer.h in Headers */, + 692AB9031E6759DD00706ACC /* VoIPController.h in Headers */, + 69A6DDC11E95EC7700000E69 /* spl_inl_armv7.h in Headers */, + 69A6DE031E95EC7800000E69 /* three_band_filter_bank.h in Headers */, + 69A6DDFB1E95EC7700000E69 /* nsx_core.h in Headers */, + 69A6DDE41E95EC7700000E69 /* echo_cancellation.h in Headers */, + 69A6DDF71E95EC7700000E69 /* noise_suppression_x.h in Headers */, + 69A6DD9B1E95EC7700000E69 /* safe_conversions.h in Headers */, + 69A6DE071E95EC7800000E69 /* delay_estimator.h in Headers */, + 69A6DDFF1E95EC7700000E69 /* windows_private.h in Headers */, + 69A6DD961E95EC7700000E69 /* basictypes.h in Headers */, + 69A6DE161E95EC7800000E69 /* typedefs.h in Headers */, + 69A6DDE21E95EC7700000E69 /* aec_resampler.h in Headers */, + 69A6DD9F1E95EC7700000E69 /* stringutils.h in Headers */, + 69A6DDDB1E95EC7700000E69 /* aec_common.h in Headers */, + 69A6DDDD1E95EC7700000E69 /* aec_core.h in Headers */, + 69A6DE051E95EC7800000E69 /* block_mean_calculator.h in Headers */, + 69A6DD951E95EC7700000E69 /* atomicops.h in Headers */, + 69A6DD991E95EC7700000E69 /* constructormagic.h in Headers */, + 69A6DDA01E95EC7700000E69 /* type_traits.h in Headers */, + 69A6DDBE1E95EC7700000E69 /* real_fft.h in Headers */, + 692AB8FC1E6759DD00706ACC /* AudioOutputAudioUnit.h in Headers */, + 69A6DD941E95EC7700000E69 /* array_view.h in Headers */, + 692AB8D01E6759DD00706ACC /* BlockingQueue.h in Headers */, + 69A6DDF91E95EC7700000E69 /* ns_core.h in Headers */, + 69A6DDA31E95EC7700000E69 /* channel_buffer.h in Headers 
*/, + 69A6DE0A1E95EC7800000E69 /* delay_estimator_wrapper.h in Headers */, + 69A6DE141E95EC7800000E69 /* metrics.h in Headers */, + 692AB8FE1E6759DD00706ACC /* AudioUnitIO.h in Headers */, + 69015D951E9D848700AC9763 /* NetworkSocket.h in Headers */, + 69A6DE131E95EC7800000E69 /* cpu_features_wrapper.h in Headers */, + 69A6DDDF1E95EC7700000E69 /* aec_core_optimized_methods.h in Headers */, + 69A6DE101E95EC7800000E69 /* ooura_fft_tables_neon_sse2.h in Headers */, + 69A6DDBF1E95EC7700000E69 /* signal_processing_library.h in Headers */, + 69A6DDCF1E95EC7700000E69 /* resample_by_2_internal.h in Headers */, + 69A6DDA81E95EC7700000E69 /* ring_buffer.h in Headers */, + 69A6DE111E95EC7800000E69 /* asm_defines.h in Headers */, + 69A6DE011E95EC7800000E69 /* splitting_filter.h in Headers */, + 69A6DE0C1E95EC7800000E69 /* ooura_fft.h in Headers */, + 69A6DDA61E95EC7700000E69 /* audio_util.h in Headers */, + 692AB8FA1E6759DD00706ACC /* AudioInputAudioUnit.h in Headers */, + 69A6DD9A1E95EC7700000E69 /* safe_compare.h in Headers */, + 69A6DDEF1E95EC7700000E69 /* digital_agc.h in Headers */, + 69A6DDF21E95EC7700000E69 /* apm_data_dumper.h in Headers */, + 69960A041EF85C2900F9D091 /* DarwinSpecific.h in Headers */, + 69A6DDC21E95EC7700000E69 /* spl_inl_mips.h in Headers */, + 69791A581EE8272A00BB85FB /* Resampler.h in Headers */, + 692AB8DB1E6759DD00706ACC /* EchoCanceller.h in Headers */, + 69A6DE1C1E95ECF000000E69 /* wav_file.h in Headers */, + 692AB8D61E6759DD00706ACC /* BufferPool.h in Headers */, + 69A6DDF51E95EC7700000E69 /* noise_suppression.h in Headers */, + 692AB8E71E6759DD00706ACC /* JitterBuffer.h in Headers */, + 69A6DE121E95EC7800000E69 /* compile_assert_c.h in Headers */, + 692AB8D21E6759DD00706ACC /* BufferInputStream.h in Headers */, + 69A6DDF01E95EC7700000E69 /* gain_control.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + 69F842351E67540700C110F7 /* libtgvoip */ = 
{ + isa = PBXNativeTarget; + buildConfigurationList = 69F8423E1E67540700C110F7 /* Build configuration list for PBXNativeTarget "libtgvoip" */; + buildPhases = ( + 69F842311E67540700C110F7 /* Sources */, + 69F842321E67540700C110F7 /* Frameworks */, + 69F842331E67540700C110F7 /* Headers */, + 69F842341E67540700C110F7 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = libtgvoip; + productName = libtgvoip; + productReference = 69F842361E67540700C110F7 /* libtgvoip.framework */; + productType = "com.apple.product-type.framework"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 69F8422D1E67540700C110F7 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0820; + ORGANIZATIONNAME = Grishka; + TargetAttributes = { + 69F842351E67540700C110F7 = { + CreatedOnToolsVersion = 8.2.1; + ProvisioningStyle = Automatic; + }; + }; + }; + buildConfigurationList = 69F842301E67540700C110F7 /* Build configuration list for PBXProject "libtgvoip" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + ); + mainGroup = 69F8422C1E67540700C110F7; + productRefGroup = 69F842371E67540700C110F7 /* Products */; + projectDirPath = ""; + projectReferences = ( + { + ProductGroup = 692AB9081E675E8800706ACC /* Products */; + ProjectRef = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + }, + ); + projectRoot = ""; + targets = ( + 69F842351E67540700C110F7 /* libtgvoip */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXReferenceProxy section */ + 692AB9111E675E8800706ACC /* Telegram.app */ = { + isa = PBXReferenceProxy; + fileType = wrapper.application; + path = Telegram.app; + remoteRef = 692AB9101E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB9131E675E8800706ACC /* Share.appex */ = { + isa = PBXReferenceProxy; + fileType = "wrapper.app-extension"; + path = Share.appex; + remoteRef = 
692AB9121E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB9151E675E8800706ACC /* watchkitapp.app */ = { + isa = PBXReferenceProxy; + fileType = wrapper.application; + path = watchkitapp.app; + remoteRef = 692AB9141E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB9171E675E8800706ACC /* watchkitapp Extension.appex */ = { + isa = PBXReferenceProxy; + fileType = "wrapper.app-extension"; + path = "watchkitapp Extension.appex"; + remoteRef = 692AB9161E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB9191E675E8800706ACC /* SiriIntents.appex */ = { + isa = PBXReferenceProxy; + fileType = "wrapper.app-extension"; + path = SiriIntents.appex; + remoteRef = 692AB9181E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB91B1E675E8800706ACC /* LegacyDatabase.framework */ = { + isa = PBXReferenceProxy; + fileType = wrapper.framework; + path = LegacyDatabase.framework; + remoteRef = 692AB91A1E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 69960A0E1EF85C2900F9D091 /* Widget.appex */ = { + isa = PBXReferenceProxy; + fileType = "wrapper.app-extension"; + path = Widget.appex; + remoteRef = 69960A0D1EF85C2900F9D091 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; +/* End PBXReferenceProxy section */ + +/* Begin PBXResourcesBuildPhase section */ + 69F842341E67540700C110F7 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 692AB8E51E6759DD00706ACC /* Info.plist in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 69F842311E67540700C110F7 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 6915307B1E6B5BAB004F643F /* logging.cpp in Sources */, + 69A6DDD31E95EC7700000E69 /* 
spl_sqrt.c in Sources */, + 69A6DDC51E95EC7700000E69 /* min_max_operations.c in Sources */, + 692AB9041E6759DD00706ACC /* VoIPServerConfig.cpp in Sources */, + 69A6DD971E95EC7700000E69 /* checks.cc in Sources */, + 69A6DDB11E95EC7700000E69 /* cross_correlation_neon.c in Sources */, + 69A6DDF11E95EC7700000E69 /* apm_data_dumper.cc in Sources */, + 69A6DDFA1E95EC7700000E69 /* nsx_core.c in Sources */, + 69A6DDAD1E95EC7700000E69 /* complex_fft.c in Sources */, + 69A6DDA41E95EC7700000E69 /* fft4g.c in Sources */, + 692AB9021E6759DD00706ACC /* VoIPController.cpp in Sources */, + 69A6DDB61E95EC7700000E69 /* energy.c in Sources */, + 69960A051EF85C2900F9D091 /* DarwinSpecific.mm in Sources */, + 69A6DDA11E95EC7700000E69 /* audio_util.cc in Sources */, + 69A6DDE31E95EC7700000E69 /* echo_cancellation.cc in Sources */, + 69A6DDD71E95EC7700000E69 /* sqrt_of_one_minus_x_squared.c in Sources */, + 69791A4D1EE8262400BB85FB /* NetworkSocketPosix.cpp in Sources */, + 692AB8D81E6759DD00706ACC /* CongestionControl.cpp in Sources */, + 69A6DDCD1E95EC7700000E69 /* resample_by_2.c in Sources */, + 69A6DDE51E95EC7700000E69 /* aecm_core.cc in Sources */, + 69A6DDBA1E95EC7700000E69 /* filter_ma_fast_q12.c in Sources */, + 69A6DDE81E95EC7700000E69 /* aecm_core_neon.cc in Sources */, + 69A6DDBB1E95EC7700000E69 /* get_hanning_window.c in Sources */, + 69A6DDB51E95EC7700000E69 /* downsample_fast_neon.c in Sources */, + 69015D941E9D848700AC9763 /* NetworkSocket.cpp in Sources */, + 69A6DDC41E95EC7700000E69 /* lpc_to_refl_coef.c in Sources */, + 69A6DDEC1E95EC7700000E69 /* analog_agc.c in Sources */, + 69A6DDA71E95EC7700000E69 /* ring_buffer.c in Sources */, + 692AB8FB1E6759DD00706ACC /* AudioOutputAudioUnit.cpp in Sources */, + 692AB8EB1E6759DD00706ACC /* OpusDecoder.cpp in Sources */, + 692AB8E61E6759DD00706ACC /* JitterBuffer.cpp in Sources */, + 69A6DE1D1E95ECF000000E69 /* wav_header.cc in Sources */, + 69A6DDBC1E95EC7700000E69 /* get_scaling_square.c in Sources */, + 
69A6DDDC1E95EC7700000E69 /* aec_core.cc in Sources */, + 692AB9001E6759DD00706ACC /* TGLogWrapper.m in Sources */, + 69A6DDC71E95EC7700000E69 /* randomization_functions.c in Sources */, + 69A6DDD51E95EC7700000E69 /* spl_sqrt_floor_arm.S in Sources */, + 69A6DDAF1E95EC7700000E69 /* copy_set_operations.c in Sources */, + 692AB8F91E6759DD00706ACC /* AudioInputAudioUnit.cpp in Sources */, + 69A6DDFC1E95EC7700000E69 /* nsx_core_c.c in Sources */, + 69A6DDE71E95EC7700000E69 /* aecm_core_c.cc in Sources */, + 69A6DDC61E95EC7700000E69 /* min_max_operations_neon.c in Sources */, + 69A6DDB01E95EC7700000E69 /* cross_correlation.c in Sources */, + 692AB8D11E6759DD00706ACC /* BufferInputStream.cpp in Sources */, + 69791A571EE8272A00BB85FB /* Resampler.cpp in Sources */, + 69A6DE0B1E95EC7800000E69 /* ooura_fft.cc in Sources */, + 69A6DDB21E95EC7700000E69 /* division_operations.c in Sources */, + 69A6DDCA1E95EC7700000E69 /* refl_coef_to_lpc.c in Sources */, + 69A6DDD21E95EC7700000E69 /* spl_inl.c in Sources */, + 69A6DDA21E95EC7700000E69 /* channel_buffer.cc in Sources */, + 69A6DDA91E95EC7700000E69 /* auto_corr_to_refl_coef.c in Sources */, + 69A6DDD01E95EC7700000E69 /* resample_fractional.c in Sources */, + 69A6DDCB1E95EC7700000E69 /* resample.c in Sources */, + 69A6DDD61E95EC7700000E69 /* splitting_filter_impl.c in Sources */, + 69A6DDEE1E95EC7700000E69 /* digital_agc.c in Sources */, + 69A6DDDE1E95EC7700000E69 /* aec_core_neon.cc in Sources */, + 69A6DDF81E95EC7700000E69 /* ns_core.c in Sources */, + 692AB8E91E6759DD00706ACC /* MediaStreamItf.cpp in Sources */, + 69A6DDE11E95EC7700000E69 /* aec_resampler.cc in Sources */, + 692AB8DA1E6759DD00706ACC /* EchoCanceller.cpp in Sources */, + 69A6DDB71E95EC7700000E69 /* filter_ar.c in Sources */, + 69A6DE041E95EC7800000E69 /* block_mean_calculator.cc in Sources */, + 69A6DD9E1E95EC7700000E69 /* stringutils.cc in Sources */, + 692AB8D31E6759DD00706ACC /* BufferOutputStream.cpp in Sources */, + 69A6DE001E95EC7800000E69 /* 
splitting_filter.cc in Sources */, + 69A6DDC81E95EC7700000E69 /* real_fft.c in Sources */, + 69A6DDD11E95EC7700000E69 /* spl_init.c in Sources */, + 69A6DE061E95EC7800000E69 /* delay_estimator.cc in Sources */, + 69A6DDD41E95EC7700000E69 /* spl_sqrt_floor.c in Sources */, + 69A6DDB41E95EC7700000E69 /* downsample_fast.c in Sources */, + 69A6DDEA1E95EC7700000E69 /* echo_control_mobile.cc in Sources */, + 69A6DDF41E95EC7700000E69 /* noise_suppression.c in Sources */, + 692AB8CF1E6759DD00706ACC /* BlockingQueue.cpp in Sources */, + 69A6DDC31E95EC7700000E69 /* levinson_durbin.c in Sources */, + 69A6DDF61E95EC7700000E69 /* noise_suppression_x.c in Sources */, + 69A6DE1B1E95ECF000000E69 /* wav_file.cc in Sources */, + 69A6DDCE1E95EC7700000E69 /* resample_by_2_internal.c in Sources */, + 692AB8D51E6759DD00706ACC /* BufferPool.cpp in Sources */, + 692AB8CB1E6759DD00706ACC /* AudioInput.cpp in Sources */, + 69A6DDCC1E95EC7700000E69 /* resample_48khz.c in Sources */, + 69A6DDAC1E95EC7700000E69 /* complex_bit_reverse_arm.S in Sources */, + 69A6DDD81E95EC7700000E69 /* vector_scaling_operations.c in Sources */, + 69A6DE0D1E95EC7800000E69 /* ooura_fft_neon.cc in Sources */, + 692AB8FD1E6759DD00706ACC /* AudioUnitIO.cpp in Sources */, + 69A6DDB31E95EC7700000E69 /* dot_product_with_scale.c in Sources */, + 69A6DDB91E95EC7700000E69 /* filter_ar_fast_q12_armv7.S in Sources */, + 69A6DDAA1E95EC7700000E69 /* auto_correlation.c in Sources */, + 69A6DDFD1E95EC7700000E69 /* nsx_core_neon.c in Sources */, + 69A6DDE01E95EC7700000E69 /* aec_core_sse2.cc in Sources */, + 69A6DDBD1E95EC7700000E69 /* ilbc_specific_functions.c in Sources */, + 69A6DE0E1E95EC7800000E69 /* ooura_fft_sse2.cc in Sources */, + 692AB8CD1E6759DD00706ACC /* AudioOutput.cpp in Sources */, + 69A6DE151E95EC7800000E69 /* cpu_features.cc in Sources */, + 69A6DE021E95EC7800000E69 /* three_band_filter_bank.cc in Sources */, + 69A6DDB81E95EC7700000E69 /* filter_ar_fast_q12.c in Sources */, + 69A6DE091E95EC7800000E69 /* 
delay_estimator_wrapper.cc in Sources */, + 692AB8ED1E6759DD00706ACC /* OpusEncoder.cpp in Sources */, + 69A6DDAB1E95EC7700000E69 /* complex_bit_reverse.c in Sources */, + 69A6DDD91E95EC7700000E69 /* sparse_fir_filter.cc in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + 69F8423C1E67540700C110F7 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + CURRENT_PROJECT_VERSION = 1; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VERSIONING_SYSTEM = "apple-generic"; + VERSION_INFO_PREFIX = ""; + 
}; + name = Debug; + }; + 69F8423D1E67540700C110F7 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + CURRENT_PROJECT_VERSION = 1; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VALIDATE_PRODUCT = YES; + VERSIONING_SYSTEM = "apple-generic"; + VERSION_INFO_PREFIX = ""; + }; + name = Release; + }; + 69F8423F1E67540700C110F7 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CODE_SIGN_IDENTITY = ""; + DEFINES_MODULE = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + DYLIB_INSTALL_NAME_BASE = "@rpath"; + HEADER_SEARCH_PATHS = ( + "$(PROJECT_DIR)/../../Telegraph/thirdparty/opus/include/opus", + "$(inherited)", + 
"$(PROJECT_DIR)/../../Telegraph", + webrtc_dsp, + ); + INFOPLIST_FILE = "$(SRCROOT)/Info.plist"; + INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 6.0; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = "$(inherited)"; + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + OTHER_CFLAGS = ( + "-DTGVOIP_USE_CUSTOM_CRYPTO", + "-DWEBRTC_APM_DEBUG_DUMP=0", + "-DWEBRTC_POSIX", + "-DTGVOIP_HAVE_TGLOG", + ); + PRODUCT_BUNDLE_IDENTIFIER = me.grishka.libtgvoip; + PRODUCT_NAME = "$(TARGET_NAME)"; + SKIP_INSTALL = YES; + }; + name = Debug; + }; + 69F842401E67540700C110F7 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CODE_SIGN_IDENTITY = ""; + DEFINES_MODULE = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + DYLIB_INSTALL_NAME_BASE = "@rpath"; + HEADER_SEARCH_PATHS = ( + "$(PROJECT_DIR)/../../Telegraph/thirdparty/opus/include/opus", + "$(inherited)", + "$(PROJECT_DIR)/../../Telegraph", + webrtc_dsp, + ); + INFOPLIST_FILE = "$(SRCROOT)/Info.plist"; + INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 6.0; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = "$(inherited)"; + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + OTHER_CFLAGS = ( + "-DTGVOIP_USE_CUSTOM_CRYPTO", + "-DWEBRTC_APM_DEBUG_DUMP=0", + "-DWEBRTC_POSIX", + "-DTGVOIP_HAVE_TGLOG", + ); + PRODUCT_BUNDLE_IDENTIFIER = me.grishka.libtgvoip; + PRODUCT_NAME = "$(TARGET_NAME)"; + SKIP_INSTALL = YES; + }; + name = Release; + }; + D04D01C31E678C0D0086DDC0 /* Debug AppStore */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + 
CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + CURRENT_PROJECT_VERSION = 1; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VERSIONING_SYSTEM = "apple-generic"; + VERSION_INFO_PREFIX = ""; + }; + name = "Debug AppStore"; + }; + D04D01C41E678C0D0086DDC0 /* Debug AppStore */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CODE_SIGN_IDENTITY = ""; + DEFINES_MODULE = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + DYLIB_INSTALL_NAME_BASE = "@rpath"; + HEADER_SEARCH_PATHS = ( + "$(PROJECT_DIR)/../../Telegraph/thirdparty/opus/include/opus", + "$(inherited)", + "$(PROJECT_DIR)/../../Telegraph", + webrtc_dsp, + ); + INFOPLIST_FILE = "$(SRCROOT)/Info.plist"; + INSTALL_PATH = 
"$(LOCAL_LIBRARY_DIR)/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 6.0; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = "$(inherited)"; + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + OTHER_CFLAGS = ( + "-DTGVOIP_USE_CUSTOM_CRYPTO", + "-DWEBRTC_APM_DEBUG_DUMP=0", + "-DWEBRTC_POSIX", + "-DTGVOIP_HAVE_TGLOG", + ); + PRODUCT_BUNDLE_IDENTIFIER = me.grishka.libtgvoip; + PRODUCT_NAME = "$(TARGET_NAME)"; + SKIP_INSTALL = YES; + }; + name = "Debug AppStore"; + }; + D04D01CB1E678C230086DDC0 /* Hockeyapp */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + CURRENT_PROJECT_VERSION = 1; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VALIDATE_PRODUCT = YES; + 
VERSIONING_SYSTEM = "apple-generic"; + VERSION_INFO_PREFIX = ""; + }; + name = Hockeyapp; + }; + D04D01CC1E678C230086DDC0 /* Hockeyapp */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CODE_SIGN_IDENTITY = ""; + DEFINES_MODULE = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + DYLIB_INSTALL_NAME_BASE = "@rpath"; + HEADER_SEARCH_PATHS = ( + "$(PROJECT_DIR)/../../Telegraph/thirdparty/opus/include/opus", + "$(inherited)", + "$(PROJECT_DIR)/../../Telegraph", + webrtc_dsp, + ); + INFOPLIST_FILE = "$(SRCROOT)/Info.plist"; + INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 6.0; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = "$(inherited)"; + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.6; + OTHER_CFLAGS = ( + "-DTGVOIP_USE_CUSTOM_CRYPTO", + "-DWEBRTC_APM_DEBUG_DUMP=0", + "-DWEBRTC_POSIX", + "-DTGVOIP_HAVE_TGLOG", + ); + PRODUCT_BUNDLE_IDENTIFIER = me.grishka.libtgvoip; + PRODUCT_NAME = "$(TARGET_NAME)"; + SKIP_INSTALL = YES; + }; + name = Hockeyapp; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 69F842301E67540700C110F7 /* Build configuration list for PBXProject "libtgvoip" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 69F8423C1E67540700C110F7 /* Debug */, + D04D01C31E678C0D0086DDC0 /* Debug AppStore */, + 69F8423D1E67540700C110F7 /* Release */, + D04D01CB1E678C230086DDC0 /* Hockeyapp */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 69F8423E1E67540700C110F7 /* Build configuration list for PBXNativeTarget "libtgvoip" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 69F8423F1E67540700C110F7 /* Debug */, + D04D01C41E678C0D0086DDC0 /* Debug AppStore */, + 69F842401E67540700C110F7 /* Release */, + D04D01CC1E678C230086DDC0 /* Hockeyapp */, + ); 
+ defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 69F8422D1E67540700C110F7 /* Project object */; +} diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 000000000..55fe87423 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.xcworkspace/xcuserdata/grishka.xcuserdatad/UserInterfaceState.xcuserstate b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/project.xcworkspace/xcuserdata/grishka.xcuserdatad/UserInterfaceState.xcuserstate new file mode 100644 index 0000000000000000000000000000000000000000..24e6306be3fcdbb3396ef98deffdcd8fe89cd297 GIT binary patch literal 9235 zcmcIpd0Z3M_P_T|5+(@=lZ~vn6a=-h3JTV$kwB#?A^}uf(hx?7211j7QcHVWYt`3U ztJdxU?yYs{zNy`x-K*AGpWU^KU0>T*yZ3h|!vtdO`}L3C4?d8|+%xB#dzSAx=T2Qq zz#op}(Bg8vXQkRazw2qDOee z<~`ikC;_p^fQ-n5GSFZ&1Pw(OqhTl$Wua^|92KF-s2oi}6=*7|L~c}ts!C%OyWjW(lB)P;7SooEl*iylFbqQ}tV z=qdCZdLF%uUPs5#DRdg0L0_V;&=2Sw)?h78!W`CNJvLw?HsNIKz$rKtr{OG|jdO4= zF2I-J3AhYjjwj+tcrq@>Q}7I2i)Z3l*n_XY^|%4g$E~;xUyYaGrT99$8sCW5;*Izg zd^^4yZ^qm3PP`l6haba_<0tTw_yB$pzl2}Lui#hlG5jVzj!)qa@Mrie{uY0S&*9$) zAu6IKiNr`uB$@OhgGm<2Cxv7*8AqI?gj`O_NhPTvb4defBz`iF%qIbIC21vXYx8ytWJ^6|J#xRVA(J@BG#Ml`JlfvXOdCYJopDAEQFe8~lridBET*8cI zF0D>$ZVd#EA|+BGHA+NDNI$LIoi(3t57*Mqp3taC^98)&a2L`bEf8S35QlUigYA&& z;nMP5BMVAOojIAV(jr%8eyJ-j(^*{N${aqvq+~?SNLOL0)9K-iWv-HG!O;A0i`U1y zg1*)!zBwWZPDcGu`gUYS7Gy;>WJeB^0&-A50D zM|WdzLAkfh-{7SyGwBWLnMvL{KH%Xp1rtay6z8z&h(F+uv^zt**G(ZWQsd_ri0opl zgthZr7RSQHhy4xB7vLZ$B=Yz3;n`FLQE7paL(1B+=DESFD9lQhnt^7b^iEU@j!rZS zQiL7rTs2iO#mXXlQ&{xO70B0xyr>TPK^mk>40zOt^j&B!YJmPQpbPoYJQxTULF#PE zrf)=X^t-Ut$=-%$f26gZuSgxYYyuK~L{DFdLbJ-27)Ad^!=-{;0hCG> 
zws@QC`TD3=szQ7ZcvSK}nz<5CmHGn_KICt1C}|dqP%j2jOsO~?Q?j50V7x9+L>qOIz(mTamsKZGqvPXdC1U@GFqkcmu6` z*ct9ix(nSa1mQhU(1~`#2*{Wb@Yh8e+JgQTiQ7K(fZ&k*=zbUpg~G*tdNC3*Dtc*I ziTDY0ASU`CTmq@m!qey(l)ia$)SQbJQQ*lDFetB}XxMb7}Lw( z74)jeK%#sD9gXGBF&GCKVy?V}eBI~-dK;YtClo_TZ@#>P-j569vFJmJN8^P+ZW$L1 zWQq01=(9M=@#u3fgG-=X4RMrTqi^FVzoR@$1)d_T!~j8n0g3yMn4t8X=qL0u`UU-p zenY>bKQO`=CO{cn4ijM#Oonop0u?*aAS|Qs=t3^cVkK5#HI2%t6d*11_Zqkf*1>Ip zNjU+1Tzp+?LjxZYAuO4CT!f&>G(kKxq?+r!p?U$C1Y&WC#&(U^;ECVet#wV5y0*`Q zoPj>!;^%r>1Cgi%k^O+4TNT3N;RsEFO1>q;hv|h^KoMFvb&^6cNUIG*5xh?TVl%c< zFJcQ+c48a2h0P2SrT3uyUkT{3Chms^2n_l|RVN+@)sRs&+3RnXRtMoBNZ*aqaRwd? zH82gPcjKYdn!{iQwcyo)pN7PV6H3$Y zB~S-G2tW({ZHKEMV-|%5-sO#WtJ+)mnW9cnZE5{?M%!}%e&QWM{UHJoT@gU6JlX(R zCFr`qnq68eSxEgWOaZ+`Wu~-NT{+3a*?qyLEN@FofX`ak6v%7|1zY$~q&-uZDx^(U z;3~lrQ*kABLp|^?w;NaE8axdepb`9nC;HJy^HfYKb60svTxC^LDn0I!NhQTqWmC#M z6H8`Lu>TW5O_{rF{G^gfN+pu~nMcX=3TL^yhll8r*(h%_Xld~GB2i%>#iYV~sTsmv z?CZpJFc0R7kMPL18PcPr3^!swEtfr&&do=<>799~u)5q$zr2Y9xCyCoGY;Ywd?gOy zFmllhl?*m1=!+)EoL*yAcq5HbiY6Mly|;x%kCvQ;Q|3y~_O3xIG{Zt@0*i;sj+MYT zC%S?HyR29z6sw5hh8Cc_ZoCk;H1!&{&)i@i+0|h_|T)+F$`759gjsb7fR^c6N1`LREG{$RBQ;@6DD> z8qTig+p@)q=b0-^1q*2Fm>sKveXe?2IC1Nn@U66?p}m#FOLA=&zD?Nig_gu8@4$C@ zxWUwDaSrRXV0`9w;kyKOf1BRJWyB&vXo^I9kd(}-5VBIdcNBM0Gj`z(cniK+nCR-f zVSXn44DkUPq-{JPSOjgN(%bP48sXH&U3k0D)X`B8?-H=>9_YAWG8GlZ$~xYIX%n#p z@5TG@etbV%3jm8@$rk(|eh5E|AAzN?5>~-#$e2EfpBwGVXH@zd8lzXWVkL-{G0sG- z7ON+*dL)6&LHw*B<`8}gKaHP(Ww0Dpz;#<_Tj#>nD2*0R3xyl2P-W~j-jH8_Njf|6 z3xtvwuJkStdn6BMi(l;ldbf`P8-<~`#pvU~5G{^jU&!Ag3@UVXi6e<}7qD8=^ELd2 zP_$l$>pPK)>O}R?(U~wD4D@t_!e%)vk-vrCmRcQr64vyzI`|zr7T|aBd+-mqG1lhb z5Am74EeW;xC)D0I2+hl)xaI|aj=zed+=suRepwr(Z0XzP;P3H&;+ik~BmOz2(Jyeb zq|xt!Mt=|l>*1D|MuZ`XzH&$cVPOMogwz?-nPMcSj`Ls9S0d36F76o}(ZeRVRVWie zFDtbcLNk&&Zq}jbv&V>;Scny=i4AWc4wB;G^r2udBF2xHo+~pTM^{B6yufj091CmE$vmA-~i*QAypf74GO$Nyrc~RIDV>1$T)}ZTpESf&i?|hB9O*yoRre7r=x1T0ae2ms_#~ylOzQM zGLZrTaZpQ%X9#2psi1H~9TqD?anp3XXS&E#0qFiNc@&n2n^bwY)W0F2i&P8T{|;$S 
z-jQi!4xJgvbTWg~l9^-{nGHK&C+vcI;9l6hjd;iv#Ea61kJQ6`un+cAXn7Yt5YpKq zPKIJ2N!UxllVU@X^i@2=p?7RO&9q1z?c~Cx!H`I998Y6>eoN5b90^Cyg=m9XKTA9z za{C&2;T+2-eywb6rYIEgH%B`R(IZVlN|uly?Hl*dlDxHemtt~ASjL$E6aWs;9uOrt}5F;z$p-!?29)XO$*10iiorgVK z)&E%o5o;>48YLChky~iNA~%!uWCPgmuN_8G+ zs50}O>!%AtdwyYIM~yhWOFP&~whKnw22XX89q>%N5qC$8`1Jozh)EjkB@YTV+(-74 z`^f|FEIbF#!wXx;L*!xd2zeC#39rHHa6+)*;J7#t(=96;4XH`AE6NHBr-E(KQ^8>u zAgu`TZT?_uxXK%9;Cp(gV&Tl29`sy*T(YR-F{wYKE12 zINDUvsS=LD5!efFMMEvoY5znLeU^NQ(!0p#R$Ivr%xhz6?>P4yS@ z=@B;h5Bb5vrNp`Bd}nl%bL2;%F#LD23%B!Gq2G0eaj=_wIQPaNkU+_+kXBjOWKba&r)iZuF`sn!i7Za<*JDIW2 zvoppF?{zX(c%L2w)3PYc`Qk*@XME~WrXQ1m^gEdT%m8K}a}kruq%nh-bodZHg41vY zK88==Q}}ELGnk%14Pq{4hB28;7Hy-?(mGcJpTigMC45D%Q)fh?8R2$Kto4UW0{#Z^ zWkj-g+eNd+$Crht`RQE23)y1qyC9_n^`m$@+9X9NnA79i*iDg1skg}=Xr~7*eHjSN zazHEz=Sif^oiD^kC>Mjjr7_q{5AT~=>1Y!S<>qAN(RVxbZ4o;EQSCC%TOUai6gf*X^KPG+##tS$?11U3QY*TMz920?qtI9~fTWY*l| zb6SREN?D>zE8}EVS(+?URv;TCE0$HryfUARmo>=zviY(mSx|PRY>8}{Y=vy4Y?W+{ z>;~Cd**e*J*>2e(*~_wDH_5M(cgU}mFOe^kuaMs+@0Rb7?~>mu zzfb=$u~qS?;*{bu#Se-f6+bI}Rs5cS5=cT)f-b?3 zU`jA2SQG3CDGB)r(-W>s*p~2M!V3w<65dL9JKQ#cU3n$L6ym*paN09nVf-E7>Y`8asoX$u_cnb{-pGm$GZwo7oNQCiXV=4t6KI zi@k^4%|6CH$sS~%W}juBXJ2EFvnSY->?!tL_H*_d_FMLQ_9yli_BZwqC95v`Ky`*|Ybwu@^>SNWXsiS>WF%& zdb#>K_4Vr2>VK$jRNtiDtnN~8QEyZ4Q14RTtG-XYSN){=u==$6$3$adUgDU$D6MxWXG|3vP#;!@xq-!!XLo|7sv6@oN zB#l=S(Ja+$)a=(hpm|91h~_cP6Pm9y=QO`-k(Ov>T7}l2?WaxCrfUalhiVJ7W3;oi zb=t++8?$K~&o!V~gR_%7}KJ5YRi`rw_gP~t! 
z=|@h^X*erq=Tf--++Z$?8_td9#&S-sn5*R)xE3zNMYuL@A-9-Y$}Q)v3VtI+v%ZMt^dBHcAQ&@Itz)NRsj z)$P#j((Tsm(e2YcraP#6O81QJIo%Q6Te`P(|I)p$`%rgU_p$D4-LHDAXY_KtN}s6L z>N&ktpRUi)57rOWkJOLSkJgXXJN3o-N&2aJx4v3GO<%9~>*wp6^dWsj-=<%vU!q^F z-=x1&f49C@n;!+;4c$aKP}K;RVBC!%K!&3?~hz4DTA=H+*UM+VF3~cSd9+MwwAz zFjERO4vl7~?qOc%#cW*;rw$G*%gB8?Q62F+OBGXgp&)YvN2PrlF=`rYuvA zDbJK|8eu9nm6$Fwm6;}*CYz?1rkVn#YfQJB_L~lxj+@>yoi=@J`qXsRbk6jX=@-*) z$?oJ?$)03yvM;$Mc|me}@}lI9a9*|i}hCPHtSC7J=XiI zd#(Gek6RB}pSC`0ecpQ1dcu0rddm8N^&{&U>nAp~&1f5FOScWS4Yg(4a&5zH1-3CZ zw{4cqWAoa4wutQ-8`zfEmf2R=ZnWKD>$G*-w%T^t?zP=#+iQE+cEEPjcFOjvow29d zi|nQL%j^^Em)ob=Ywfe_9=q4>v$xn6*xT)k>>c)H_7(P(_Eq*Z_6_z;_S@`t*tgoZ z*>~6v+F!Q6W`Dze%zoT{!v3-S8~bk#t%Gyu9Y#m8!{V?x9FBgDY)7tRxTC-^(oy8N z#Br%(ti$P;=%{f79V;BSJN7#M>G;5LE~PMKY)WZLMM_;tLrPP$b3=rPpUimiQ~IA0 F{2#0)S{?uZ literal 0 HcmV?d00001 diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/libtgvoip.xcscheme b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/libtgvoip.xcscheme new file mode 100644 index 000000000..a24e940e3 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/libtgvoip.xcscheme @@ -0,0 +1,80 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/xcschememanagement.plist b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/xcschememanagement.plist new file mode 100644 index 000000000..253fba52b --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/xcschememanagement.plist @@ -0,0 +1,22 @@ + + + + + SchemeUserState + + libtgvoip.xcscheme + + orderHint + 0 + + + SuppressBuildableAutocreation + + 69F842351E67540700C110F7 + + primary + + + + + diff --git 
a/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist new file mode 100644 index 000000000..3572d3d9c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist @@ -0,0 +1,14 @@ + + + + + SuppressBuildableAutocreation + + 69F842351E67540700C110F7 + + primary + + + + + diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.pbxproj b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.pbxproj new file mode 100644 index 000000000..d5195f180 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.pbxproj @@ -0,0 +1,1567 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + 690725BE1EBBD5DE005D860B /* NetworkSocketPosix.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 690725BC1EBBD5DE005D860B /* NetworkSocketPosix.cpp */; }; + 690725BF1EBBD5DE005D860B /* NetworkSocketPosix.h in Headers */ = {isa = PBXBuildFile; fileRef = 690725BD1EBBD5DE005D860B /* NetworkSocketPosix.h */; }; + 690725C21EBBD5F2005D860B /* NetworkSocket.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 690725C01EBBD5F2005D860B /* NetworkSocket.cpp */; }; + 690725C31EBBD5F2005D860B /* NetworkSocket.h in Headers */ = {isa = PBXBuildFile; fileRef = 690725C11EBBD5F2005D860B /* NetworkSocket.h */; }; + 6915307B1E6B5BAB004F643F /* logging.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 6915307A1E6B5BAB004F643F /* logging.cpp */; }; + 692AB8CB1E6759DD00706ACC /* AudioInput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8881E6759DD00706ACC /* AudioInput.cpp */; }; + 692AB8CC1E6759DD00706ACC /* AudioInput.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8891E6759DD00706ACC /* AudioInput.h */; }; + 
692AB8CD1E6759DD00706ACC /* AudioOutput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB88A1E6759DD00706ACC /* AudioOutput.cpp */; }; + 692AB8CE1E6759DD00706ACC /* AudioOutput.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB88B1E6759DD00706ACC /* AudioOutput.h */; }; + 692AB8CF1E6759DD00706ACC /* BlockingQueue.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB88C1E6759DD00706ACC /* BlockingQueue.cpp */; }; + 692AB8D01E6759DD00706ACC /* BlockingQueue.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB88D1E6759DD00706ACC /* BlockingQueue.h */; }; + 692AB8D11E6759DD00706ACC /* BufferInputStream.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB88E1E6759DD00706ACC /* BufferInputStream.cpp */; }; + 692AB8D21E6759DD00706ACC /* BufferInputStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB88F1E6759DD00706ACC /* BufferInputStream.h */; }; + 692AB8D31E6759DD00706ACC /* BufferOutputStream.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8901E6759DD00706ACC /* BufferOutputStream.cpp */; }; + 692AB8D41E6759DD00706ACC /* BufferOutputStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8911E6759DD00706ACC /* BufferOutputStream.h */; }; + 692AB8D51E6759DD00706ACC /* BufferPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8921E6759DD00706ACC /* BufferPool.cpp */; }; + 692AB8D61E6759DD00706ACC /* BufferPool.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8931E6759DD00706ACC /* BufferPool.h */; }; + 692AB8D81E6759DD00706ACC /* CongestionControl.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8971E6759DD00706ACC /* CongestionControl.cpp */; }; + 692AB8D91E6759DD00706ACC /* CongestionControl.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8981E6759DD00706ACC /* CongestionControl.h */; }; + 692AB8DA1E6759DD00706ACC /* EchoCanceller.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8991E6759DD00706ACC /* EchoCanceller.cpp */; }; + 692AB8DB1E6759DD00706ACC /* EchoCanceller.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 692AB89A1E6759DD00706ACC /* EchoCanceller.h */; }; + 692AB8E51E6759DD00706ACC /* Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 692AB8A71E6759DD00706ACC /* Info.plist */; }; + 692AB8E61E6759DD00706ACC /* JitterBuffer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8A81E6759DD00706ACC /* JitterBuffer.cpp */; }; + 692AB8E71E6759DD00706ACC /* JitterBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8A91E6759DD00706ACC /* JitterBuffer.h */; }; + 692AB8E81E6759DD00706ACC /* logging.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8AA1E6759DD00706ACC /* logging.h */; }; + 692AB8E91E6759DD00706ACC /* MediaStreamItf.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8AB1E6759DD00706ACC /* MediaStreamItf.cpp */; }; + 692AB8EA1E6759DD00706ACC /* MediaStreamItf.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8AC1E6759DD00706ACC /* MediaStreamItf.h */; }; + 692AB8EB1E6759DD00706ACC /* OpusDecoder.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8AD1E6759DD00706ACC /* OpusDecoder.cpp */; }; + 692AB8EC1E6759DD00706ACC /* OpusDecoder.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8AE1E6759DD00706ACC /* OpusDecoder.h */; }; + 692AB8ED1E6759DD00706ACC /* OpusEncoder.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8AF1E6759DD00706ACC /* OpusEncoder.cpp */; }; + 692AB8EE1E6759DD00706ACC /* OpusEncoder.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8B01E6759DD00706ACC /* OpusEncoder.h */; }; + 692AB9011E6759DD00706ACC /* threading.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8C61E6759DD00706ACC /* threading.h */; }; + 692AB9021E6759DD00706ACC /* VoIPController.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 692AB8C71E6759DD00706ACC /* VoIPController.cpp */; }; + 692AB9031E6759DD00706ACC /* VoIPController.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8C81E6759DD00706ACC /* VoIPController.h */; }; + 692AB9041E6759DD00706ACC /* VoIPServerConfig.cpp in Sources */ = {isa 
= PBXBuildFile; fileRef = 692AB8C91E6759DD00706ACC /* VoIPServerConfig.cpp */; }; + 692AB9051E6759DD00706ACC /* VoIPServerConfig.h in Headers */ = {isa = PBXBuildFile; fileRef = 692AB8CA1E6759DD00706ACC /* VoIPServerConfig.h */; }; + 692AB91F1E675F7000706ACC /* AudioToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 692AB91C1E675F7000706ACC /* AudioToolbox.framework */; }; + 692AB9201E675F7000706ACC /* AudioUnit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 692AB91D1E675F7000706ACC /* AudioUnit.framework */; }; + 692AB9211E675F7000706ACC /* CoreAudio.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 692AB91E1E675F7000706ACC /* CoreAudio.framework */; }; + 695B20621EBD39FF00E31757 /* DarwinSpecific.h in Headers */ = {isa = PBXBuildFile; fileRef = 695B20601EBD39FF00E31757 /* DarwinSpecific.h */; }; + 698848421F4B39F700076DF0 /* AudioInputAudioUnit.h in Headers */ = {isa = PBXBuildFile; fileRef = 6988483C1F4B39F700076DF0 /* AudioInputAudioUnit.h */; }; + 698848441F4B39F700076DF0 /* AudioOutputAudioUnit.h in Headers */ = {isa = PBXBuildFile; fileRef = 6988483E1F4B39F700076DF0 /* AudioOutputAudioUnit.h */; }; + 698848461F4B39F700076DF0 /* AudioUnitIO.h in Headers */ = {isa = PBXBuildFile; fileRef = 698848401F4B39F700076DF0 /* AudioUnitIO.h */; }; + 69A6DEB91E96149300000E69 /* array_view.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE231E96149300000E69 /* array_view.h */; }; + 69A6DEBA1E96149300000E69 /* atomicops.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE241E96149300000E69 /* atomicops.h */; }; + 69A6DEBB1E96149300000E69 /* basictypes.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE251E96149300000E69 /* basictypes.h */; }; + 69A6DEBC1E96149300000E69 /* checks.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE261E96149300000E69 /* checks.cc */; }; + 69A6DEBD1E96149300000E69 /* checks.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE271E96149300000E69 /* checks.h */; }; + 
69A6DEBE1E96149300000E69 /* constructormagic.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE281E96149300000E69 /* constructormagic.h */; }; + 69A6DEBF1E96149300000E69 /* safe_compare.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE291E96149300000E69 /* safe_compare.h */; }; + 69A6DEC01E96149300000E69 /* safe_conversions.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE2A1E96149300000E69 /* safe_conversions.h */; }; + 69A6DEC11E96149300000E69 /* safe_conversions_impl.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE2B1E96149300000E69 /* safe_conversions_impl.h */; }; + 69A6DEC21E96149300000E69 /* sanitizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE2C1E96149300000E69 /* sanitizer.h */; }; + 69A6DEC31E96149300000E69 /* stringutils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE2D1E96149300000E69 /* stringutils.cc */; }; + 69A6DEC41E96149300000E69 /* stringutils.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE2E1E96149300000E69 /* stringutils.h */; }; + 69A6DEC51E96149300000E69 /* type_traits.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE2F1E96149300000E69 /* type_traits.h */; }; + 69A6DEC61E96149300000E69 /* audio_util.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE311E96149300000E69 /* audio_util.cc */; }; + 69A6DEC71E96149300000E69 /* channel_buffer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE321E96149300000E69 /* channel_buffer.cc */; }; + 69A6DEC81E96149300000E69 /* channel_buffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE331E96149300000E69 /* channel_buffer.h */; }; + 69A6DEC91E96149300000E69 /* fft4g.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE341E96149300000E69 /* fft4g.c */; }; + 69A6DECA1E96149300000E69 /* fft4g.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE351E96149300000E69 /* fft4g.h */; }; + 69A6DECB1E96149300000E69 /* audio_util.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE371E96149300000E69 /* audio_util.h */; }; + 
69A6DECC1E96149300000E69 /* ring_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE381E96149300000E69 /* ring_buffer.c */; }; + 69A6DECD1E96149300000E69 /* ring_buffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE391E96149300000E69 /* ring_buffer.h */; }; + 69A6DECE1E96149300000E69 /* auto_corr_to_refl_coef.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE3B1E96149300000E69 /* auto_corr_to_refl_coef.c */; }; + 69A6DECF1E96149300000E69 /* auto_correlation.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE3C1E96149300000E69 /* auto_correlation.c */; }; + 69A6DED01E96149300000E69 /* complex_bit_reverse.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE3D1E96149300000E69 /* complex_bit_reverse.c */; }; + 69A6DED21E96149300000E69 /* complex_fft.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE3F1E96149300000E69 /* complex_fft.c */; }; + 69A6DED31E96149300000E69 /* complex_fft_tables.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE401E96149300000E69 /* complex_fft_tables.h */; }; + 69A6DED41E96149300000E69 /* copy_set_operations.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE411E96149300000E69 /* copy_set_operations.c */; }; + 69A6DED51E96149300000E69 /* cross_correlation.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE421E96149300000E69 /* cross_correlation.c */; }; + 69A6DED61E96149300000E69 /* cross_correlation_neon.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE431E96149300000E69 /* cross_correlation_neon.c */; }; + 69A6DED71E96149300000E69 /* division_operations.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE441E96149300000E69 /* division_operations.c */; }; + 69A6DED81E96149300000E69 /* dot_product_with_scale.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE451E96149300000E69 /* dot_product_with_scale.c */; }; + 69A6DED91E96149300000E69 /* downsample_fast.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE461E96149300000E69 /* downsample_fast.c */; }; + 
69A6DEDA1E96149300000E69 /* downsample_fast_neon.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE471E96149300000E69 /* downsample_fast_neon.c */; }; + 69A6DEDB1E96149300000E69 /* energy.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE481E96149300000E69 /* energy.c */; }; + 69A6DEDC1E96149300000E69 /* filter_ar.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE491E96149300000E69 /* filter_ar.c */; }; + 69A6DEDD1E96149300000E69 /* filter_ar_fast_q12.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE4A1E96149300000E69 /* filter_ar_fast_q12.c */; }; + 69A6DEDF1E96149300000E69 /* filter_ma_fast_q12.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE4C1E96149300000E69 /* filter_ma_fast_q12.c */; }; + 69A6DEE01E96149300000E69 /* get_hanning_window.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE4D1E96149300000E69 /* get_hanning_window.c */; }; + 69A6DEE11E96149300000E69 /* get_scaling_square.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE4E1E96149300000E69 /* get_scaling_square.c */; }; + 69A6DEE21E96149300000E69 /* ilbc_specific_functions.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE4F1E96149300000E69 /* ilbc_specific_functions.c */; }; + 69A6DEE31E96149300000E69 /* real_fft.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE511E96149300000E69 /* real_fft.h */; }; + 69A6DEE41E96149300000E69 /* signal_processing_library.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE521E96149300000E69 /* signal_processing_library.h */; }; + 69A6DEE51E96149300000E69 /* spl_inl.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE531E96149300000E69 /* spl_inl.h */; }; + 69A6DEE61E96149300000E69 /* spl_inl_armv7.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE541E96149300000E69 /* spl_inl_armv7.h */; }; + 69A6DEE71E96149300000E69 /* spl_inl_mips.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE551E96149300000E69 /* spl_inl_mips.h */; }; + 69A6DEE81E96149300000E69 /* levinson_durbin.c in Sources */ = {isa = 
PBXBuildFile; fileRef = 69A6DE561E96149300000E69 /* levinson_durbin.c */; }; + 69A6DEE91E96149300000E69 /* lpc_to_refl_coef.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE571E96149300000E69 /* lpc_to_refl_coef.c */; }; + 69A6DEEA1E96149300000E69 /* min_max_operations.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE581E96149300000E69 /* min_max_operations.c */; }; + 69A6DEEB1E96149300000E69 /* min_max_operations_neon.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE591E96149300000E69 /* min_max_operations_neon.c */; }; + 69A6DEEC1E96149300000E69 /* randomization_functions.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE5A1E96149300000E69 /* randomization_functions.c */; }; + 69A6DEED1E96149300000E69 /* real_fft.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE5B1E96149300000E69 /* real_fft.c */; }; + 69A6DEEE1E96149300000E69 /* refl_coef_to_lpc.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE5C1E96149300000E69 /* refl_coef_to_lpc.c */; }; + 69A6DEEF1E96149300000E69 /* resample.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE5D1E96149300000E69 /* resample.c */; }; + 69A6DEF01E96149300000E69 /* resample_48khz.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE5E1E96149300000E69 /* resample_48khz.c */; }; + 69A6DEF11E96149300000E69 /* resample_by_2.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE5F1E96149300000E69 /* resample_by_2.c */; }; + 69A6DEF21E96149300000E69 /* resample_by_2_internal.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE601E96149300000E69 /* resample_by_2_internal.c */; }; + 69A6DEF31E96149300000E69 /* resample_by_2_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE611E96149300000E69 /* resample_by_2_internal.h */; }; + 69A6DEF41E96149300000E69 /* resample_fractional.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE621E96149300000E69 /* resample_fractional.c */; }; + 69A6DEF51E96149300000E69 /* spl_init.c in Sources */ = {isa = PBXBuildFile; fileRef = 
69A6DE631E96149300000E69 /* spl_init.c */; }; + 69A6DEF61E96149300000E69 /* spl_inl.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE641E96149300000E69 /* spl_inl.c */; }; + 69A6DEF71E96149300000E69 /* spl_sqrt.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE651E96149300000E69 /* spl_sqrt.c */; }; + 69A6DEF81E96149300000E69 /* spl_sqrt_floor.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE661E96149300000E69 /* spl_sqrt_floor.c */; }; + 69A6DEFA1E96149300000E69 /* splitting_filter_impl.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE681E96149300000E69 /* splitting_filter_impl.c */; }; + 69A6DEFB1E96149300000E69 /* sqrt_of_one_minus_x_squared.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE691E96149300000E69 /* sqrt_of_one_minus_x_squared.c */; }; + 69A6DEFC1E96149300000E69 /* vector_scaling_operations.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE6A1E96149300000E69 /* vector_scaling_operations.c */; }; + 69A6DEFD1E96149300000E69 /* sparse_fir_filter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE6B1E96149300000E69 /* sparse_fir_filter.cc */; }; + 69A6DEFE1E96149300000E69 /* sparse_fir_filter.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE6C1E96149300000E69 /* sparse_fir_filter.h */; }; + 69A6DEFF1E96149300000E69 /* wav_file.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE6D1E96149300000E69 /* wav_file.cc */; }; + 69A6DF001E96149300000E69 /* wav_file.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE6E1E96149300000E69 /* wav_file.h */; }; + 69A6DF011E96149300000E69 /* wav_header.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE6F1E96149300000E69 /* wav_header.cc */; }; + 69A6DF021E96149300000E69 /* wav_header.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE701E96149300000E69 /* wav_header.h */; }; + 69A6DF031E96149300000E69 /* aec_common.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE741E96149300000E69 /* aec_common.h */; }; + 69A6DF041E96149300000E69 /* aec_core.cc in 
Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE751E96149300000E69 /* aec_core.cc */; }; + 69A6DF051E96149300000E69 /* aec_core.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE761E96149300000E69 /* aec_core.h */; }; + 69A6DF061E96149300000E69 /* aec_core_neon.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE771E96149300000E69 /* aec_core_neon.cc */; }; + 69A6DF071E96149300000E69 /* aec_core_optimized_methods.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE781E96149300000E69 /* aec_core_optimized_methods.h */; }; + 69A6DF081E96149300000E69 /* aec_core_sse2.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE791E96149300000E69 /* aec_core_sse2.cc */; }; + 69A6DF091E96149300000E69 /* aec_resampler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE7A1E96149300000E69 /* aec_resampler.cc */; }; + 69A6DF0A1E96149300000E69 /* aec_resampler.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE7B1E96149300000E69 /* aec_resampler.h */; }; + 69A6DF0B1E96149300000E69 /* echo_cancellation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE7C1E96149300000E69 /* echo_cancellation.cc */; }; + 69A6DF0C1E96149300000E69 /* echo_cancellation.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE7D1E96149300000E69 /* echo_cancellation.h */; }; + 69A6DF0D1E96149300000E69 /* aecm_core.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE7F1E96149300000E69 /* aecm_core.cc */; }; + 69A6DF0E1E96149300000E69 /* aecm_core.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE801E96149300000E69 /* aecm_core.h */; }; + 69A6DF0F1E96149300000E69 /* aecm_core_c.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE811E96149300000E69 /* aecm_core_c.cc */; }; + 69A6DF101E96149300000E69 /* aecm_core_neon.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE821E96149300000E69 /* aecm_core_neon.cc */; }; + 69A6DF111E96149300000E69 /* aecm_defines.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE831E96149300000E69 /* aecm_defines.h */; }; + 
69A6DF121E96149300000E69 /* echo_control_mobile.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE841E96149300000E69 /* echo_control_mobile.cc */; }; + 69A6DF131E96149300000E69 /* echo_control_mobile.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE851E96149300000E69 /* echo_control_mobile.h */; }; + 69A6DF141E96149300000E69 /* analog_agc.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE881E96149300000E69 /* analog_agc.c */; }; + 69A6DF151E96149300000E69 /* analog_agc.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE891E96149300000E69 /* analog_agc.h */; }; + 69A6DF161E96149300000E69 /* digital_agc.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE8A1E96149300000E69 /* digital_agc.c */; }; + 69A6DF171E96149300000E69 /* digital_agc.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE8B1E96149300000E69 /* digital_agc.h */; }; + 69A6DF181E96149300000E69 /* gain_control.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE8C1E96149300000E69 /* gain_control.h */; }; + 69A6DF191E96149300000E69 /* apm_data_dumper.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE8E1E96149300000E69 /* apm_data_dumper.cc */; }; + 69A6DF1A1E96149300000E69 /* apm_data_dumper.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE8F1E96149300000E69 /* apm_data_dumper.h */; }; + 69A6DF1B1E96149300000E69 /* defines.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE911E96149300000E69 /* defines.h */; }; + 69A6DF1C1E96149300000E69 /* noise_suppression.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE921E96149300000E69 /* noise_suppression.c */; }; + 69A6DF1D1E96149300000E69 /* noise_suppression.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE931E96149300000E69 /* noise_suppression.h */; }; + 69A6DF1E1E96149300000E69 /* noise_suppression_x.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE941E96149300000E69 /* noise_suppression_x.c */; }; + 69A6DF1F1E96149300000E69 /* noise_suppression_x.h in Headers */ = {isa = PBXBuildFile; fileRef = 
69A6DE951E96149300000E69 /* noise_suppression_x.h */; }; + 69A6DF201E96149300000E69 /* ns_core.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE961E96149300000E69 /* ns_core.c */; }; + 69A6DF211E96149300000E69 /* ns_core.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE971E96149300000E69 /* ns_core.h */; }; + 69A6DF221E96149300000E69 /* nsx_core.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE981E96149300000E69 /* nsx_core.c */; }; + 69A6DF231E96149300000E69 /* nsx_core.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE991E96149300000E69 /* nsx_core.h */; }; + 69A6DF241E96149300000E69 /* nsx_core_c.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE9A1E96149300000E69 /* nsx_core_c.c */; }; + 69A6DF251E96149300000E69 /* nsx_core_neon.c in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE9B1E96149300000E69 /* nsx_core_neon.c */; }; + 69A6DF261E96149300000E69 /* nsx_defines.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE9C1E96149300000E69 /* nsx_defines.h */; }; + 69A6DF271E96149300000E69 /* windows_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE9D1E96149300000E69 /* windows_private.h */; }; + 69A6DF281E96149300000E69 /* splitting_filter.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DE9E1E96149300000E69 /* splitting_filter.cc */; }; + 69A6DF291E96149300000E69 /* splitting_filter.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DE9F1E96149300000E69 /* splitting_filter.h */; }; + 69A6DF2A1E96149300000E69 /* three_band_filter_bank.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DEA01E96149300000E69 /* three_band_filter_bank.cc */; }; + 69A6DF2B1E96149300000E69 /* three_band_filter_bank.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEA11E96149300000E69 /* three_band_filter_bank.h */; }; + 69A6DF2C1E96149300000E69 /* block_mean_calculator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DEA31E96149300000E69 /* block_mean_calculator.cc */; }; + 69A6DF2D1E96149300000E69 /* 
block_mean_calculator.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEA41E96149300000E69 /* block_mean_calculator.h */; }; + 69A6DF2E1E96149300000E69 /* delay_estimator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DEA51E96149300000E69 /* delay_estimator.cc */; }; + 69A6DF2F1E96149300000E69 /* delay_estimator.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEA61E96149300000E69 /* delay_estimator.h */; }; + 69A6DF301E96149300000E69 /* delay_estimator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEA71E96149300000E69 /* delay_estimator_internal.h */; }; + 69A6DF311E96149300000E69 /* delay_estimator_wrapper.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DEA81E96149300000E69 /* delay_estimator_wrapper.cc */; }; + 69A6DF321E96149300000E69 /* delay_estimator_wrapper.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEA91E96149300000E69 /* delay_estimator_wrapper.h */; }; + 69A6DF331E96149300000E69 /* ooura_fft.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DEAA1E96149300000E69 /* ooura_fft.cc */; }; + 69A6DF341E96149300000E69 /* ooura_fft.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEAB1E96149300000E69 /* ooura_fft.h */; }; + 69A6DF351E96149300000E69 /* ooura_fft_neon.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DEAC1E96149300000E69 /* ooura_fft_neon.cc */; }; + 69A6DF361E96149300000E69 /* ooura_fft_sse2.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DEAD1E96149300000E69 /* ooura_fft_sse2.cc */; }; + 69A6DF371E96149300000E69 /* ooura_fft_tables_common.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEAE1E96149300000E69 /* ooura_fft_tables_common.h */; }; + 69A6DF381E96149300000E69 /* ooura_fft_tables_neon_sse2.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEAF1E96149300000E69 /* ooura_fft_tables_neon_sse2.h */; }; + 69A6DF391E96149300000E69 /* asm_defines.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEB21E96149300000E69 /* asm_defines.h */; }; + 69A6DF3A1E96149300000E69 /* 
compile_assert_c.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEB31E96149300000E69 /* compile_assert_c.h */; }; + 69A6DF3B1E96149300000E69 /* cpu_features_wrapper.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEB41E96149300000E69 /* cpu_features_wrapper.h */; }; + 69A6DF3C1E96149300000E69 /* metrics.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEB51E96149300000E69 /* metrics.h */; }; + 69A6DF3D1E96149300000E69 /* cpu_features.cc in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DEB71E96149300000E69 /* cpu_features.cc */; }; + 69A6DF3E1E96149300000E69 /* typedefs.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DEB81E96149300000E69 /* typedefs.h */; }; + 69A6DF431E9614B700000E69 /* AudioInputAudioUnitOSX.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DF3F1E9614B700000E69 /* AudioInputAudioUnitOSX.cpp */; }; + 69A6DF441E9614B700000E69 /* AudioInputAudioUnitOSX.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DF401E9614B700000E69 /* AudioInputAudioUnitOSX.h */; }; + 69A6DF451E9614B700000E69 /* AudioOutputAudioUnitOSX.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 69A6DF411E9614B700000E69 /* AudioOutputAudioUnitOSX.cpp */; }; + 69A6DF461E9614B700000E69 /* AudioOutputAudioUnitOSX.h in Headers */ = {isa = PBXBuildFile; fileRef = 69A6DF421E9614B700000E69 /* AudioOutputAudioUnitOSX.h */; }; + 69AC14911F4B41CF00AC3173 /* Resampler.h in Headers */ = {isa = PBXBuildFile; fileRef = 69AC148F1F4B41CF00AC3173 /* Resampler.h */; }; + C2A87DD81F4B6A33002D3F73 /* Resampler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C2A87DD71F4B6A33002D3F73 /* Resampler.cpp */; }; + C2A87DDA1F4B6A57002D3F73 /* DarwinSpecific.mm in Sources */ = {isa = PBXBuildFile; fileRef = C2A87DD91F4B6A57002D3F73 /* DarwinSpecific.mm */; }; + C2A87DDF1F4B6A61002D3F73 /* AudioInputAudioUnit.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C2A87DDB1F4B6A61002D3F73 /* AudioInputAudioUnit.cpp */; }; + C2A87DE01F4B6A61002D3F73 /* AudioOutputAudioUnit.cpp in Sources 
*/ = {isa = PBXBuildFile; fileRef = C2A87DDD1F4B6A61002D3F73 /* AudioOutputAudioUnit.cpp */; }; + C2A87DE41F4B6AD3002D3F73 /* AudioUnitIO.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C2A87DE31F4B6AD3002D3F73 /* AudioUnitIO.cpp */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 692AB9101E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = D08805AC156E8F3600311537; + remoteInfo = Telegraph; + }; + 692AB9121E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = D02601D71A55CA2300716290; + remoteInfo = Share; + }; + 692AB9141E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 68744C0D1BB1A9F700FE6542; + remoteInfo = watchkitapp; + }; + 692AB9161E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 68744C191BB1A9F700FE6542; + remoteInfo = "watchkitapp Extension"; + }; + 692AB9181E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = D020FADD1D99466A00F279AA; + remoteInfo = SiriIntents; + }; + 692AB91A1E675E8800706ACC /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = D020FB0A1D99637100F279AA; + remoteInfo = LegacyDatabase; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 
690725BC1EBBD5DE005D860B /* NetworkSocketPosix.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = NetworkSocketPosix.cpp; sourceTree = ""; }; + 690725BD1EBBD5DE005D860B /* NetworkSocketPosix.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NetworkSocketPosix.h; sourceTree = ""; }; + 690725C01EBBD5F2005D860B /* NetworkSocket.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = NetworkSocket.cpp; sourceTree = ""; }; + 690725C11EBBD5F2005D860B /* NetworkSocket.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NetworkSocket.h; sourceTree = ""; }; + 6915307A1E6B5BAB004F643F /* logging.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = logging.cpp; sourceTree = ""; }; + 692AB8881E6759DD00706ACC /* AudioInput.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioInput.cpp; sourceTree = ""; }; + 692AB8891E6759DD00706ACC /* AudioInput.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioInput.h; sourceTree = ""; }; + 692AB88A1E6759DD00706ACC /* AudioOutput.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioOutput.cpp; sourceTree = ""; }; + 692AB88B1E6759DD00706ACC /* AudioOutput.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioOutput.h; sourceTree = ""; }; + 692AB88C1E6759DD00706ACC /* BlockingQueue.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BlockingQueue.cpp; sourceTree = ""; }; + 692AB88D1E6759DD00706ACC /* BlockingQueue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BlockingQueue.h; sourceTree = ""; }; + 692AB88E1E6759DD00706ACC /* BufferInputStream.cpp 
*/ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BufferInputStream.cpp; sourceTree = ""; }; + 692AB88F1E6759DD00706ACC /* BufferInputStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BufferInputStream.h; sourceTree = ""; }; + 692AB8901E6759DD00706ACC /* BufferOutputStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BufferOutputStream.cpp; sourceTree = ""; }; + 692AB8911E6759DD00706ACC /* BufferOutputStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BufferOutputStream.h; sourceTree = ""; }; + 692AB8921E6759DD00706ACC /* BufferPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BufferPool.cpp; sourceTree = ""; }; + 692AB8931E6759DD00706ACC /* BufferPool.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BufferPool.h; sourceTree = ""; }; + 692AB8971E6759DD00706ACC /* CongestionControl.cpp */ = {isa = PBXFileReference; explicitFileType = sourcecode.cpp.objcpp; fileEncoding = 4; path = CongestionControl.cpp; sourceTree = ""; }; + 692AB8981E6759DD00706ACC /* CongestionControl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CongestionControl.h; sourceTree = ""; }; + 692AB8991E6759DD00706ACC /* EchoCanceller.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = EchoCanceller.cpp; sourceTree = ""; }; + 692AB89A1E6759DD00706ACC /* EchoCanceller.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = EchoCanceller.h; sourceTree = ""; }; + 692AB8A71E6759DD00706ACC /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 692AB8A81E6759DD00706ACC /* JitterBuffer.cpp */ = {isa = 
PBXFileReference; explicitFileType = sourcecode.cpp.objcpp; fileEncoding = 4; path = JitterBuffer.cpp; sourceTree = ""; }; + 692AB8A91E6759DD00706ACC /* JitterBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JitterBuffer.h; sourceTree = ""; }; + 692AB8AA1E6759DD00706ACC /* logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = logging.h; sourceTree = ""; }; + 692AB8AB1E6759DD00706ACC /* MediaStreamItf.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MediaStreamItf.cpp; sourceTree = ""; }; + 692AB8AC1E6759DD00706ACC /* MediaStreamItf.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MediaStreamItf.h; sourceTree = ""; }; + 692AB8AD1E6759DD00706ACC /* OpusDecoder.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OpusDecoder.cpp; sourceTree = ""; }; + 692AB8AE1E6759DD00706ACC /* OpusDecoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpusDecoder.h; sourceTree = ""; }; + 692AB8AF1E6759DD00706ACC /* OpusEncoder.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OpusEncoder.cpp; sourceTree = ""; }; + 692AB8B01E6759DD00706ACC /* OpusEncoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpusEncoder.h; sourceTree = ""; }; + 692AB8C61E6759DD00706ACC /* threading.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = threading.h; sourceTree = ""; }; + 692AB8C71E6759DD00706ACC /* VoIPController.cpp */ = {isa = PBXFileReference; explicitFileType = sourcecode.cpp.objcpp; fileEncoding = 4; path = VoIPController.cpp; sourceTree = ""; }; + 692AB8C81E6759DD00706ACC /* VoIPController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; 
path = VoIPController.h; sourceTree = ""; }; + 692AB8C91E6759DD00706ACC /* VoIPServerConfig.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = VoIPServerConfig.cpp; sourceTree = ""; }; + 692AB8CA1E6759DD00706ACC /* VoIPServerConfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VoIPServerConfig.h; sourceTree = ""; }; + 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = Telegraph.xcodeproj; path = ../../Telegraph.xcodeproj; sourceTree = ""; }; + 692AB91C1E675F7000706ACC /* AudioToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioToolbox.framework; path = System/Library/Frameworks/AudioToolbox.framework; sourceTree = SDKROOT; }; + 692AB91D1E675F7000706ACC /* AudioUnit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioUnit.framework; path = System/Library/Frameworks/AudioUnit.framework; sourceTree = SDKROOT; }; + 692AB91E1E675F7000706ACC /* CoreAudio.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreAudio.framework; path = System/Library/Frameworks/CoreAudio.framework; sourceTree = SDKROOT; }; + 695B20601EBD39FF00E31757 /* DarwinSpecific.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DarwinSpecific.h; sourceTree = ""; }; + 695B20611EBD39FF00E31757 /* DarwinSpecific.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = DarwinSpecific.mm; path = ../../../../../libtgvoip/os/darwin/DarwinSpecific.mm; sourceTree = ""; }; + 6988483B1F4B39F700076DF0 /* AudioInputAudioUnit.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = AudioInputAudioUnit.cpp; path = ../../../../../libtgvoip/os/darwin/AudioInputAudioUnit.cpp; sourceTree = ""; }; + 
6988483C1F4B39F700076DF0 /* AudioInputAudioUnit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AudioInputAudioUnit.h; path = ../../../../../libtgvoip/os/darwin/AudioInputAudioUnit.h; sourceTree = ""; }; + 6988483D1F4B39F700076DF0 /* AudioOutputAudioUnit.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = AudioOutputAudioUnit.cpp; path = ../../../../../libtgvoip/os/darwin/AudioOutputAudioUnit.cpp; sourceTree = ""; }; + 6988483E1F4B39F700076DF0 /* AudioOutputAudioUnit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AudioOutputAudioUnit.h; path = ../../../../../libtgvoip/os/darwin/AudioOutputAudioUnit.h; sourceTree = ""; }; + 6988483F1F4B39F700076DF0 /* AudioUnitIO.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = AudioUnitIO.cpp; path = ../../../../../libtgvoip/os/darwin/AudioUnitIO.cpp; sourceTree = ""; }; + 698848401F4B39F700076DF0 /* AudioUnitIO.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AudioUnitIO.h; path = ../../../../../libtgvoip/os/darwin/AudioUnitIO.h; sourceTree = ""; }; + 69A6DE231E96149300000E69 /* array_view.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = array_view.h; sourceTree = ""; }; + 69A6DE241E96149300000E69 /* atomicops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomicops.h; sourceTree = ""; }; + 69A6DE251E96149300000E69 /* basictypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = basictypes.h; sourceTree = ""; }; + 69A6DE261E96149300000E69 /* checks.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = checks.cc; sourceTree = ""; }; + 69A6DE271E96149300000E69 /* checks.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = checks.h; sourceTree = ""; }; + 69A6DE281E96149300000E69 /* constructormagic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = constructormagic.h; sourceTree = ""; }; + 69A6DE291E96149300000E69 /* safe_compare.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = safe_compare.h; sourceTree = ""; }; + 69A6DE2A1E96149300000E69 /* safe_conversions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = safe_conversions.h; sourceTree = ""; }; + 69A6DE2B1E96149300000E69 /* safe_conversions_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = safe_conversions_impl.h; sourceTree = ""; }; + 69A6DE2C1E96149300000E69 /* sanitizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sanitizer.h; sourceTree = ""; }; + 69A6DE2D1E96149300000E69 /* stringutils.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = stringutils.cc; sourceTree = ""; }; + 69A6DE2E1E96149300000E69 /* stringutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stringutils.h; sourceTree = ""; }; + 69A6DE2F1E96149300000E69 /* type_traits.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = type_traits.h; sourceTree = ""; }; + 69A6DE311E96149300000E69 /* audio_util.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = audio_util.cc; sourceTree = ""; }; + 69A6DE321E96149300000E69 /* channel_buffer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = channel_buffer.cc; sourceTree = ""; }; + 69A6DE331E96149300000E69 /* channel_buffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_buffer.h; sourceTree 
= ""; }; + 69A6DE341E96149300000E69 /* fft4g.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fft4g.c; sourceTree = ""; }; + 69A6DE351E96149300000E69 /* fft4g.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fft4g.h; sourceTree = ""; }; + 69A6DE371E96149300000E69 /* audio_util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audio_util.h; sourceTree = ""; }; + 69A6DE381E96149300000E69 /* ring_buffer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ring_buffer.c; sourceTree = ""; }; + 69A6DE391E96149300000E69 /* ring_buffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ring_buffer.h; sourceTree = ""; }; + 69A6DE3B1E96149300000E69 /* auto_corr_to_refl_coef.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = auto_corr_to_refl_coef.c; sourceTree = ""; }; + 69A6DE3C1E96149300000E69 /* auto_correlation.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = auto_correlation.c; sourceTree = ""; }; + 69A6DE3D1E96149300000E69 /* complex_bit_reverse.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = complex_bit_reverse.c; sourceTree = ""; }; + 69A6DE3F1E96149300000E69 /* complex_fft.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = complex_fft.c; sourceTree = ""; }; + 69A6DE401E96149300000E69 /* complex_fft_tables.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = complex_fft_tables.h; sourceTree = ""; }; + 69A6DE411E96149300000E69 /* copy_set_operations.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = copy_set_operations.c; sourceTree = ""; }; + 69A6DE421E96149300000E69 /* cross_correlation.c */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = cross_correlation.c; sourceTree = ""; }; + 69A6DE431E96149300000E69 /* cross_correlation_neon.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = cross_correlation_neon.c; sourceTree = ""; }; + 69A6DE441E96149300000E69 /* division_operations.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = division_operations.c; sourceTree = ""; }; + 69A6DE451E96149300000E69 /* dot_product_with_scale.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dot_product_with_scale.c; sourceTree = ""; }; + 69A6DE461E96149300000E69 /* downsample_fast.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = downsample_fast.c; sourceTree = ""; }; + 69A6DE471E96149300000E69 /* downsample_fast_neon.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = downsample_fast_neon.c; sourceTree = ""; }; + 69A6DE481E96149300000E69 /* energy.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = energy.c; sourceTree = ""; }; + 69A6DE491E96149300000E69 /* filter_ar.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = filter_ar.c; sourceTree = ""; }; + 69A6DE4A1E96149300000E69 /* filter_ar_fast_q12.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = filter_ar_fast_q12.c; sourceTree = ""; }; + 69A6DE4C1E96149300000E69 /* filter_ma_fast_q12.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = filter_ma_fast_q12.c; sourceTree = ""; }; + 69A6DE4D1E96149300000E69 /* get_hanning_window.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = get_hanning_window.c; sourceTree = ""; }; + 69A6DE4E1E96149300000E69 /* get_scaling_square.c */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = get_scaling_square.c; sourceTree = ""; }; + 69A6DE4F1E96149300000E69 /* ilbc_specific_functions.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ilbc_specific_functions.c; sourceTree = ""; }; + 69A6DE511E96149300000E69 /* real_fft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = real_fft.h; sourceTree = ""; }; + 69A6DE521E96149300000E69 /* signal_processing_library.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = signal_processing_library.h; sourceTree = ""; }; + 69A6DE531E96149300000E69 /* spl_inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spl_inl.h; sourceTree = ""; }; + 69A6DE541E96149300000E69 /* spl_inl_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spl_inl_armv7.h; sourceTree = ""; }; + 69A6DE551E96149300000E69 /* spl_inl_mips.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spl_inl_mips.h; sourceTree = ""; }; + 69A6DE561E96149300000E69 /* levinson_durbin.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = levinson_durbin.c; sourceTree = ""; }; + 69A6DE571E96149300000E69 /* lpc_to_refl_coef.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = lpc_to_refl_coef.c; sourceTree = ""; }; + 69A6DE581E96149300000E69 /* min_max_operations.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = min_max_operations.c; sourceTree = ""; }; + 69A6DE591E96149300000E69 /* min_max_operations_neon.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = min_max_operations_neon.c; sourceTree = ""; }; + 69A6DE5A1E96149300000E69 /* randomization_functions.c */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = randomization_functions.c; sourceTree = ""; }; + 69A6DE5B1E96149300000E69 /* real_fft.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = real_fft.c; sourceTree = ""; }; + 69A6DE5C1E96149300000E69 /* refl_coef_to_lpc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = refl_coef_to_lpc.c; sourceTree = ""; }; + 69A6DE5D1E96149300000E69 /* resample.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample.c; sourceTree = ""; }; + 69A6DE5E1E96149300000E69 /* resample_48khz.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample_48khz.c; sourceTree = ""; }; + 69A6DE5F1E96149300000E69 /* resample_by_2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample_by_2.c; sourceTree = ""; }; + 69A6DE601E96149300000E69 /* resample_by_2_internal.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample_by_2_internal.c; sourceTree = ""; }; + 69A6DE611E96149300000E69 /* resample_by_2_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resample_by_2_internal.h; sourceTree = ""; }; + 69A6DE621E96149300000E69 /* resample_fractional.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resample_fractional.c; sourceTree = ""; }; + 69A6DE631E96149300000E69 /* spl_init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = spl_init.c; sourceTree = ""; }; + 69A6DE641E96149300000E69 /* spl_inl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = spl_inl.c; sourceTree = ""; }; + 69A6DE651E96149300000E69 /* spl_sqrt.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.c; path = spl_sqrt.c; sourceTree = ""; }; + 69A6DE661E96149300000E69 /* spl_sqrt_floor.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = spl_sqrt_floor.c; sourceTree = ""; }; + 69A6DE681E96149300000E69 /* splitting_filter_impl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = splitting_filter_impl.c; sourceTree = ""; }; + 69A6DE691E96149300000E69 /* sqrt_of_one_minus_x_squared.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = sqrt_of_one_minus_x_squared.c; sourceTree = ""; }; + 69A6DE6A1E96149300000E69 /* vector_scaling_operations.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = vector_scaling_operations.c; sourceTree = ""; }; + 69A6DE6B1E96149300000E69 /* sparse_fir_filter.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = sparse_fir_filter.cc; sourceTree = ""; }; + 69A6DE6C1E96149300000E69 /* sparse_fir_filter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_fir_filter.h; sourceTree = ""; }; + 69A6DE6D1E96149300000E69 /* wav_file.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = wav_file.cc; sourceTree = ""; }; + 69A6DE6E1E96149300000E69 /* wav_file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = wav_file.h; sourceTree = ""; }; + 69A6DE6F1E96149300000E69 /* wav_header.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = wav_header.cc; sourceTree = ""; }; + 69A6DE701E96149300000E69 /* wav_header.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = wav_header.h; sourceTree = ""; }; + 69A6DE741E96149300000E69 /* aec_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path 
= aec_common.h; sourceTree = ""; }; + 69A6DE751E96149300000E69 /* aec_core.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aec_core.cc; sourceTree = ""; }; + 69A6DE761E96149300000E69 /* aec_core.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aec_core.h; sourceTree = ""; }; + 69A6DE771E96149300000E69 /* aec_core_neon.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aec_core_neon.cc; sourceTree = ""; }; + 69A6DE781E96149300000E69 /* aec_core_optimized_methods.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aec_core_optimized_methods.h; sourceTree = ""; }; + 69A6DE791E96149300000E69 /* aec_core_sse2.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aec_core_sse2.cc; sourceTree = ""; }; + 69A6DE7A1E96149300000E69 /* aec_resampler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aec_resampler.cc; sourceTree = ""; }; + 69A6DE7B1E96149300000E69 /* aec_resampler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aec_resampler.h; sourceTree = ""; }; + 69A6DE7C1E96149300000E69 /* echo_cancellation.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = echo_cancellation.cc; sourceTree = ""; }; + 69A6DE7D1E96149300000E69 /* echo_cancellation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = echo_cancellation.h; sourceTree = ""; }; + 69A6DE7F1E96149300000E69 /* aecm_core.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aecm_core.cc; sourceTree = ""; }; + 69A6DE801E96149300000E69 /* aecm_core.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aecm_core.h; sourceTree = ""; }; + 
69A6DE811E96149300000E69 /* aecm_core_c.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aecm_core_c.cc; sourceTree = ""; }; + 69A6DE821E96149300000E69 /* aecm_core_neon.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = aecm_core_neon.cc; sourceTree = ""; }; + 69A6DE831E96149300000E69 /* aecm_defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aecm_defines.h; sourceTree = ""; }; + 69A6DE841E96149300000E69 /* echo_control_mobile.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = echo_control_mobile.cc; sourceTree = ""; }; + 69A6DE851E96149300000E69 /* echo_control_mobile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = echo_control_mobile.h; sourceTree = ""; }; + 69A6DE881E96149300000E69 /* analog_agc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = analog_agc.c; sourceTree = ""; }; + 69A6DE891E96149300000E69 /* analog_agc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = analog_agc.h; sourceTree = ""; }; + 69A6DE8A1E96149300000E69 /* digital_agc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = digital_agc.c; sourceTree = ""; }; + 69A6DE8B1E96149300000E69 /* digital_agc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = digital_agc.h; sourceTree = ""; }; + 69A6DE8C1E96149300000E69 /* gain_control.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gain_control.h; sourceTree = ""; }; + 69A6DE8E1E96149300000E69 /* apm_data_dumper.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = apm_data_dumper.cc; sourceTree = ""; }; + 69A6DE8F1E96149300000E69 /* apm_data_dumper.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apm_data_dumper.h; sourceTree = ""; }; + 69A6DE911E96149300000E69 /* defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = defines.h; sourceTree = ""; }; + 69A6DE921E96149300000E69 /* noise_suppression.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = noise_suppression.c; sourceTree = ""; }; + 69A6DE931E96149300000E69 /* noise_suppression.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = noise_suppression.h; sourceTree = ""; }; + 69A6DE941E96149300000E69 /* noise_suppression_x.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = noise_suppression_x.c; sourceTree = ""; }; + 69A6DE951E96149300000E69 /* noise_suppression_x.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = noise_suppression_x.h; sourceTree = ""; }; + 69A6DE961E96149300000E69 /* ns_core.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ns_core.c; sourceTree = ""; }; + 69A6DE971E96149300000E69 /* ns_core.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ns_core.h; sourceTree = ""; }; + 69A6DE981E96149300000E69 /* nsx_core.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = nsx_core.c; sourceTree = ""; }; + 69A6DE991E96149300000E69 /* nsx_core.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nsx_core.h; sourceTree = ""; }; + 69A6DE9A1E96149300000E69 /* nsx_core_c.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = nsx_core_c.c; sourceTree = ""; }; + 69A6DE9B1E96149300000E69 /* nsx_core_neon.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = nsx_core_neon.c; 
sourceTree = ""; }; + 69A6DE9C1E96149300000E69 /* nsx_defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nsx_defines.h; sourceTree = ""; }; + 69A6DE9D1E96149300000E69 /* windows_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = windows_private.h; sourceTree = ""; }; + 69A6DE9E1E96149300000E69 /* splitting_filter.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = splitting_filter.cc; sourceTree = ""; }; + 69A6DE9F1E96149300000E69 /* splitting_filter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = splitting_filter.h; sourceTree = ""; }; + 69A6DEA01E96149300000E69 /* three_band_filter_bank.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = three_band_filter_bank.cc; sourceTree = ""; }; + 69A6DEA11E96149300000E69 /* three_band_filter_bank.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = three_band_filter_bank.h; sourceTree = ""; }; + 69A6DEA31E96149300000E69 /* block_mean_calculator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = block_mean_calculator.cc; sourceTree = ""; }; + 69A6DEA41E96149300000E69 /* block_mean_calculator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block_mean_calculator.h; sourceTree = ""; }; + 69A6DEA51E96149300000E69 /* delay_estimator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = delay_estimator.cc; sourceTree = ""; }; + 69A6DEA61E96149300000E69 /* delay_estimator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = delay_estimator.h; sourceTree = ""; }; + 69A6DEA71E96149300000E69 /* delay_estimator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = delay_estimator_internal.h; sourceTree = ""; }; + 69A6DEA81E96149300000E69 /* delay_estimator_wrapper.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = delay_estimator_wrapper.cc; sourceTree = ""; }; + 69A6DEA91E96149300000E69 /* delay_estimator_wrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = delay_estimator_wrapper.h; sourceTree = ""; }; + 69A6DEAA1E96149300000E69 /* ooura_fft.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ooura_fft.cc; sourceTree = ""; }; + 69A6DEAB1E96149300000E69 /* ooura_fft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ooura_fft.h; sourceTree = ""; }; + 69A6DEAC1E96149300000E69 /* ooura_fft_neon.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ooura_fft_neon.cc; sourceTree = ""; }; + 69A6DEAD1E96149300000E69 /* ooura_fft_sse2.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ooura_fft_sse2.cc; sourceTree = ""; }; + 69A6DEAE1E96149300000E69 /* ooura_fft_tables_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ooura_fft_tables_common.h; sourceTree = ""; }; + 69A6DEAF1E96149300000E69 /* ooura_fft_tables_neon_sse2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ooura_fft_tables_neon_sse2.h; sourceTree = ""; }; + 69A6DEB21E96149300000E69 /* asm_defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asm_defines.h; sourceTree = ""; }; + 69A6DEB31E96149300000E69 /* compile_assert_c.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = compile_assert_c.h; sourceTree = ""; }; + 69A6DEB41E96149300000E69 /* cpu_features_wrapper.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpu_features_wrapper.h; sourceTree = ""; }; + 69A6DEB51E96149300000E69 /* metrics.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = metrics.h; sourceTree = ""; }; + 69A6DEB71E96149300000E69 /* cpu_features.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = cpu_features.cc; sourceTree = ""; }; + 69A6DEB81E96149300000E69 /* typedefs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = typedefs.h; sourceTree = ""; }; + 69A6DF3F1E9614B700000E69 /* AudioInputAudioUnitOSX.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioInputAudioUnitOSX.cpp; sourceTree = ""; }; + 69A6DF401E9614B700000E69 /* AudioInputAudioUnitOSX.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioInputAudioUnitOSX.h; sourceTree = ""; }; + 69A6DF411E9614B700000E69 /* AudioOutputAudioUnitOSX.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioOutputAudioUnitOSX.cpp; sourceTree = ""; }; + 69A6DF421E9614B700000E69 /* AudioOutputAudioUnitOSX.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioOutputAudioUnitOSX.h; sourceTree = ""; }; + 69AC148E1F4B41CF00AC3173 /* Resampler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = Resampler.cpp; path = "../../../../Telegram-iOS/submodules/libtgvoip/audio/Resampler.cpp"; sourceTree = ""; }; + 69AC148F1F4B41CF00AC3173 /* Resampler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Resampler.h; path = "../../../../Telegram-iOS/submodules/libtgvoip/audio/Resampler.h"; sourceTree = ""; }; + 69F842361E67540700C110F7 /* libtgvoip.framework */ = {isa = PBXFileReference; explicitFileType = 
wrapper.framework; includeInIndex = 0; path = libtgvoip.framework; sourceTree = BUILT_PRODUCTS_DIR; }; + C2A87DD71F4B6A33002D3F73 /* Resampler.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = Resampler.cpp; path = audio/Resampler.cpp; sourceTree = ""; }; + C2A87DD91F4B6A57002D3F73 /* DarwinSpecific.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; name = DarwinSpecific.mm; path = os/darwin/DarwinSpecific.mm; sourceTree = ""; }; + C2A87DDB1F4B6A61002D3F73 /* AudioInputAudioUnit.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = AudioInputAudioUnit.cpp; path = os/darwin/AudioInputAudioUnit.cpp; sourceTree = ""; }; + C2A87DDC1F4B6A61002D3F73 /* AudioInputAudioUnitOSX.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = AudioInputAudioUnitOSX.cpp; path = os/darwin/AudioInputAudioUnitOSX.cpp; sourceTree = ""; }; + C2A87DDD1F4B6A61002D3F73 /* AudioOutputAudioUnit.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = AudioOutputAudioUnit.cpp; path = os/darwin/AudioOutputAudioUnit.cpp; sourceTree = ""; }; + C2A87DDE1F4B6A61002D3F73 /* AudioOutputAudioUnitOSX.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = AudioOutputAudioUnitOSX.cpp; path = os/darwin/AudioOutputAudioUnitOSX.cpp; sourceTree = ""; }; + C2A87DE11F4B6A89002D3F73 /* AudioInput.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = AudioInput.cpp; path = audio/AudioInput.cpp; sourceTree = ""; }; + C2A87DE21F4B6A89002D3F73 /* AudioOutput.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = AudioOutput.cpp; path = audio/AudioOutput.cpp; sourceTree = ""; }; + C2A87DE31F4B6AD3002D3F73 /* AudioUnitIO.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = AudioUnitIO.cpp; path = os/darwin/AudioUnitIO.cpp; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* 
Begin PBXFrameworksBuildPhase section */ + 69F842321E67540700C110F7 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 692AB91F1E675F7000706ACC /* AudioToolbox.framework in Frameworks */, + 692AB9201E675F7000706ACC /* AudioUnit.framework in Frameworks */, + 692AB9211E675F7000706ACC /* CoreAudio.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 690725BB1EBBD5DE005D860B /* posix */ = { + isa = PBXGroup; + children = ( + 690725BC1EBBD5DE005D860B /* NetworkSocketPosix.cpp */, + 690725BD1EBBD5DE005D860B /* NetworkSocketPosix.h */, + ); + path = posix; + sourceTree = ""; + }; + 692AB8861E6759BF00706ACC /* libtgvoip */ = { + isa = PBXGroup; + children = ( + 692AB8871E6759DD00706ACC /* audio */, + 692AB88C1E6759DD00706ACC /* BlockingQueue.cpp */, + 692AB88D1E6759DD00706ACC /* BlockingQueue.h */, + 692AB88E1E6759DD00706ACC /* BufferInputStream.cpp */, + 692AB88F1E6759DD00706ACC /* BufferInputStream.h */, + 692AB8901E6759DD00706ACC /* BufferOutputStream.cpp */, + 692AB8911E6759DD00706ACC /* BufferOutputStream.h */, + 692AB8921E6759DD00706ACC /* BufferPool.cpp */, + 692AB8931E6759DD00706ACC /* BufferPool.h */, + 692AB8971E6759DD00706ACC /* CongestionControl.cpp */, + 692AB8981E6759DD00706ACC /* CongestionControl.h */, + 692AB8991E6759DD00706ACC /* EchoCanceller.cpp */, + 692AB89A1E6759DD00706ACC /* EchoCanceller.h */, + 692AB8A71E6759DD00706ACC /* Info.plist */, + 692AB8A81E6759DD00706ACC /* JitterBuffer.cpp */, + 692AB8A91E6759DD00706ACC /* JitterBuffer.h */, + 6915307A1E6B5BAB004F643F /* logging.cpp */, + 692AB8AA1E6759DD00706ACC /* logging.h */, + 692AB8AB1E6759DD00706ACC /* MediaStreamItf.cpp */, + 692AB8AC1E6759DD00706ACC /* MediaStreamItf.h */, + 690725C01EBBD5F2005D860B /* NetworkSocket.cpp */, + 690725C11EBBD5F2005D860B /* NetworkSocket.h */, + 692AB8AD1E6759DD00706ACC /* OpusDecoder.cpp */, + 
692AB8AE1E6759DD00706ACC /* OpusDecoder.h */, + 692AB8AF1E6759DD00706ACC /* OpusEncoder.cpp */, + 692AB8B01E6759DD00706ACC /* OpusEncoder.h */, + 692AB8B11E6759DD00706ACC /* os */, + 692AB8C61E6759DD00706ACC /* threading.h */, + 692AB8C71E6759DD00706ACC /* VoIPController.cpp */, + 692AB8C81E6759DD00706ACC /* VoIPController.h */, + 692AB8C91E6759DD00706ACC /* VoIPServerConfig.cpp */, + 692AB8CA1E6759DD00706ACC /* VoIPServerConfig.h */, + 69A6DE201E96149300000E69 /* webrtc_dsp */, + ); + name = libtgvoip; + sourceTree = ""; + }; + 692AB8871E6759DD00706ACC /* audio */ = { + isa = PBXGroup; + children = ( + 692AB8881E6759DD00706ACC /* AudioInput.cpp */, + 692AB8891E6759DD00706ACC /* AudioInput.h */, + 692AB88A1E6759DD00706ACC /* AudioOutput.cpp */, + 692AB88B1E6759DD00706ACC /* AudioOutput.h */, + 69AC148E1F4B41CF00AC3173 /* Resampler.cpp */, + 69AC148F1F4B41CF00AC3173 /* Resampler.h */, + ); + path = audio; + sourceTree = ""; + }; + 692AB8B11E6759DD00706ACC /* os */ = { + isa = PBXGroup; + children = ( + 690725BB1EBBD5DE005D860B /* posix */, + 692AB8BD1E6759DD00706ACC /* darwin */, + ); + path = os; + sourceTree = ""; + }; + 692AB8BD1E6759DD00706ACC /* darwin */ = { + isa = PBXGroup; + children = ( + 6988483B1F4B39F700076DF0 /* AudioInputAudioUnit.cpp */, + 6988483C1F4B39F700076DF0 /* AudioInputAudioUnit.h */, + 6988483D1F4B39F700076DF0 /* AudioOutputAudioUnit.cpp */, + 6988483E1F4B39F700076DF0 /* AudioOutputAudioUnit.h */, + 6988483F1F4B39F700076DF0 /* AudioUnitIO.cpp */, + 698848401F4B39F700076DF0 /* AudioUnitIO.h */, + 69A6DF3F1E9614B700000E69 /* AudioInputAudioUnitOSX.cpp */, + 69A6DF401E9614B700000E69 /* AudioInputAudioUnitOSX.h */, + 69A6DF411E9614B700000E69 /* AudioOutputAudioUnitOSX.cpp */, + 69A6DF421E9614B700000E69 /* AudioOutputAudioUnitOSX.h */, + 695B20601EBD39FF00E31757 /* DarwinSpecific.h */, + 695B20611EBD39FF00E31757 /* DarwinSpecific.mm */, + ); + path = darwin; + sourceTree = ""; + }; + 692AB9061E675E8700706ACC /* Frameworks */ = { + isa = PBXGroup; 
+ children = ( + 692AB91C1E675F7000706ACC /* AudioToolbox.framework */, + 692AB91D1E675F7000706ACC /* AudioUnit.framework */, + 692AB91E1E675F7000706ACC /* CoreAudio.framework */, + 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */, + ); + name = Frameworks; + sourceTree = ""; + }; + 692AB9081E675E8800706ACC /* Products */ = { + isa = PBXGroup; + children = ( + 692AB9111E675E8800706ACC /* Telegram.app */, + 692AB9131E675E8800706ACC /* Share.appex */, + 692AB9151E675E8800706ACC /* watchkitapp.app */, + 692AB9171E675E8800706ACC /* watchkitapp Extension.appex */, + 692AB9191E675E8800706ACC /* SiriIntents.appex */, + 692AB91B1E675E8800706ACC /* LegacyDatabase.framework */, + ); + name = Products; + sourceTree = ""; + }; + 69A6DE201E96149300000E69 /* webrtc_dsp */ = { + isa = PBXGroup; + children = ( + 69A6DE211E96149300000E69 /* webrtc */, + ); + path = webrtc_dsp; + sourceTree = ""; + }; + 69A6DE211E96149300000E69 /* webrtc */ = { + isa = PBXGroup; + children = ( + 69A6DE221E96149300000E69 /* base */, + 69A6DE301E96149300000E69 /* common_audio */, + 69A6DE711E96149300000E69 /* modules */, + 69A6DEB01E96149300000E69 /* system_wrappers */, + 69A6DEB81E96149300000E69 /* typedefs.h */, + ); + path = webrtc; + sourceTree = ""; + }; + 69A6DE221E96149300000E69 /* base */ = { + isa = PBXGroup; + children = ( + 69A6DE231E96149300000E69 /* array_view.h */, + 69A6DE241E96149300000E69 /* atomicops.h */, + 69A6DE251E96149300000E69 /* basictypes.h */, + 69A6DE261E96149300000E69 /* checks.cc */, + 69A6DE271E96149300000E69 /* checks.h */, + 69A6DE281E96149300000E69 /* constructormagic.h */, + 69A6DE291E96149300000E69 /* safe_compare.h */, + 69A6DE2A1E96149300000E69 /* safe_conversions.h */, + 69A6DE2B1E96149300000E69 /* safe_conversions_impl.h */, + 69A6DE2C1E96149300000E69 /* sanitizer.h */, + 69A6DE2D1E96149300000E69 /* stringutils.cc */, + 69A6DE2E1E96149300000E69 /* stringutils.h */, + 69A6DE2F1E96149300000E69 /* type_traits.h */, + ); + path = base; + sourceTree = ""; + }; + 
69A6DE301E96149300000E69 /* common_audio */ = { + isa = PBXGroup; + children = ( + 69A6DE311E96149300000E69 /* audio_util.cc */, + 69A6DE321E96149300000E69 /* channel_buffer.cc */, + 69A6DE331E96149300000E69 /* channel_buffer.h */, + 69A6DE341E96149300000E69 /* fft4g.c */, + 69A6DE351E96149300000E69 /* fft4g.h */, + 69A6DE361E96149300000E69 /* include */, + 69A6DE381E96149300000E69 /* ring_buffer.c */, + 69A6DE391E96149300000E69 /* ring_buffer.h */, + 69A6DE3A1E96149300000E69 /* signal_processing */, + 69A6DE6B1E96149300000E69 /* sparse_fir_filter.cc */, + 69A6DE6C1E96149300000E69 /* sparse_fir_filter.h */, + 69A6DE6D1E96149300000E69 /* wav_file.cc */, + 69A6DE6E1E96149300000E69 /* wav_file.h */, + 69A6DE6F1E96149300000E69 /* wav_header.cc */, + 69A6DE701E96149300000E69 /* wav_header.h */, + ); + path = common_audio; + sourceTree = ""; + }; + 69A6DE361E96149300000E69 /* include */ = { + isa = PBXGroup; + children = ( + 69A6DE371E96149300000E69 /* audio_util.h */, + ); + path = include; + sourceTree = ""; + }; + 69A6DE3A1E96149300000E69 /* signal_processing */ = { + isa = PBXGroup; + children = ( + 69A6DE3B1E96149300000E69 /* auto_corr_to_refl_coef.c */, + 69A6DE3C1E96149300000E69 /* auto_correlation.c */, + 69A6DE3D1E96149300000E69 /* complex_bit_reverse.c */, + 69A6DE3F1E96149300000E69 /* complex_fft.c */, + 69A6DE401E96149300000E69 /* complex_fft_tables.h */, + 69A6DE411E96149300000E69 /* copy_set_operations.c */, + 69A6DE421E96149300000E69 /* cross_correlation.c */, + 69A6DE431E96149300000E69 /* cross_correlation_neon.c */, + 69A6DE441E96149300000E69 /* division_operations.c */, + 69A6DE451E96149300000E69 /* dot_product_with_scale.c */, + 69A6DE461E96149300000E69 /* downsample_fast.c */, + 69A6DE471E96149300000E69 /* downsample_fast_neon.c */, + 69A6DE481E96149300000E69 /* energy.c */, + 69A6DE491E96149300000E69 /* filter_ar.c */, + 69A6DE4A1E96149300000E69 /* filter_ar_fast_q12.c */, + 69A6DE4C1E96149300000E69 /* filter_ma_fast_q12.c */, + 
69A6DE4D1E96149300000E69 /* get_hanning_window.c */, + 69A6DE4E1E96149300000E69 /* get_scaling_square.c */, + 69A6DE4F1E96149300000E69 /* ilbc_specific_functions.c */, + 69A6DE501E96149300000E69 /* include */, + 69A6DE561E96149300000E69 /* levinson_durbin.c */, + 69A6DE571E96149300000E69 /* lpc_to_refl_coef.c */, + 69A6DE581E96149300000E69 /* min_max_operations.c */, + 69A6DE591E96149300000E69 /* min_max_operations_neon.c */, + 69A6DE5A1E96149300000E69 /* randomization_functions.c */, + 69A6DE5B1E96149300000E69 /* real_fft.c */, + 69A6DE5C1E96149300000E69 /* refl_coef_to_lpc.c */, + 69A6DE5D1E96149300000E69 /* resample.c */, + 69A6DE5E1E96149300000E69 /* resample_48khz.c */, + 69A6DE5F1E96149300000E69 /* resample_by_2.c */, + 69A6DE601E96149300000E69 /* resample_by_2_internal.c */, + 69A6DE611E96149300000E69 /* resample_by_2_internal.h */, + 69A6DE621E96149300000E69 /* resample_fractional.c */, + 69A6DE631E96149300000E69 /* spl_init.c */, + 69A6DE641E96149300000E69 /* spl_inl.c */, + 69A6DE651E96149300000E69 /* spl_sqrt.c */, + 69A6DE661E96149300000E69 /* spl_sqrt_floor.c */, + 69A6DE681E96149300000E69 /* splitting_filter_impl.c */, + 69A6DE691E96149300000E69 /* sqrt_of_one_minus_x_squared.c */, + 69A6DE6A1E96149300000E69 /* vector_scaling_operations.c */, + ); + path = signal_processing; + sourceTree = ""; + }; + 69A6DE501E96149300000E69 /* include */ = { + isa = PBXGroup; + children = ( + 69A6DE511E96149300000E69 /* real_fft.h */, + 69A6DE521E96149300000E69 /* signal_processing_library.h */, + 69A6DE531E96149300000E69 /* spl_inl.h */, + 69A6DE541E96149300000E69 /* spl_inl_armv7.h */, + 69A6DE551E96149300000E69 /* spl_inl_mips.h */, + ); + path = include; + sourceTree = ""; + }; + 69A6DE711E96149300000E69 /* modules */ = { + isa = PBXGroup; + children = ( + 69A6DE721E96149300000E69 /* audio_processing */, + ); + path = modules; + sourceTree = ""; + }; + 69A6DE721E96149300000E69 /* audio_processing */ = { + isa = PBXGroup; + children = ( + 69A6DE731E96149300000E69 
/* aec */, + 69A6DE7E1E96149300000E69 /* aecm */, + 69A6DE861E96149300000E69 /* agc */, + 69A6DE8D1E96149300000E69 /* logging */, + 69A6DE901E96149300000E69 /* ns */, + 69A6DE9E1E96149300000E69 /* splitting_filter.cc */, + 69A6DE9F1E96149300000E69 /* splitting_filter.h */, + 69A6DEA01E96149300000E69 /* three_band_filter_bank.cc */, + 69A6DEA11E96149300000E69 /* three_band_filter_bank.h */, + 69A6DEA21E96149300000E69 /* utility */, + ); + path = audio_processing; + sourceTree = ""; + }; + 69A6DE731E96149300000E69 /* aec */ = { + isa = PBXGroup; + children = ( + 69A6DE741E96149300000E69 /* aec_common.h */, + 69A6DE751E96149300000E69 /* aec_core.cc */, + 69A6DE761E96149300000E69 /* aec_core.h */, + 69A6DE771E96149300000E69 /* aec_core_neon.cc */, + 69A6DE781E96149300000E69 /* aec_core_optimized_methods.h */, + 69A6DE791E96149300000E69 /* aec_core_sse2.cc */, + 69A6DE7A1E96149300000E69 /* aec_resampler.cc */, + 69A6DE7B1E96149300000E69 /* aec_resampler.h */, + 69A6DE7C1E96149300000E69 /* echo_cancellation.cc */, + 69A6DE7D1E96149300000E69 /* echo_cancellation.h */, + ); + path = aec; + sourceTree = ""; + }; + 69A6DE7E1E96149300000E69 /* aecm */ = { + isa = PBXGroup; + children = ( + 69A6DE7F1E96149300000E69 /* aecm_core.cc */, + 69A6DE801E96149300000E69 /* aecm_core.h */, + 69A6DE811E96149300000E69 /* aecm_core_c.cc */, + 69A6DE821E96149300000E69 /* aecm_core_neon.cc */, + 69A6DE831E96149300000E69 /* aecm_defines.h */, + 69A6DE841E96149300000E69 /* echo_control_mobile.cc */, + 69A6DE851E96149300000E69 /* echo_control_mobile.h */, + ); + path = aecm; + sourceTree = ""; + }; + 69A6DE861E96149300000E69 /* agc */ = { + isa = PBXGroup; + children = ( + 69A6DE871E96149300000E69 /* legacy */, + ); + path = agc; + sourceTree = ""; + }; + 69A6DE871E96149300000E69 /* legacy */ = { + isa = PBXGroup; + children = ( + 69A6DE881E96149300000E69 /* analog_agc.c */, + 69A6DE891E96149300000E69 /* analog_agc.h */, + 69A6DE8A1E96149300000E69 /* digital_agc.c */, + 69A6DE8B1E96149300000E69 
/* digital_agc.h */, + 69A6DE8C1E96149300000E69 /* gain_control.h */, + ); + path = legacy; + sourceTree = ""; + }; + 69A6DE8D1E96149300000E69 /* logging */ = { + isa = PBXGroup; + children = ( + 69A6DE8E1E96149300000E69 /* apm_data_dumper.cc */, + 69A6DE8F1E96149300000E69 /* apm_data_dumper.h */, + ); + path = logging; + sourceTree = ""; + }; + 69A6DE901E96149300000E69 /* ns */ = { + isa = PBXGroup; + children = ( + 69A6DE911E96149300000E69 /* defines.h */, + 69A6DE921E96149300000E69 /* noise_suppression.c */, + 69A6DE931E96149300000E69 /* noise_suppression.h */, + 69A6DE941E96149300000E69 /* noise_suppression_x.c */, + 69A6DE951E96149300000E69 /* noise_suppression_x.h */, + 69A6DE961E96149300000E69 /* ns_core.c */, + 69A6DE971E96149300000E69 /* ns_core.h */, + 69A6DE981E96149300000E69 /* nsx_core.c */, + 69A6DE991E96149300000E69 /* nsx_core.h */, + 69A6DE9A1E96149300000E69 /* nsx_core_c.c */, + 69A6DE9B1E96149300000E69 /* nsx_core_neon.c */, + 69A6DE9C1E96149300000E69 /* nsx_defines.h */, + 69A6DE9D1E96149300000E69 /* windows_private.h */, + ); + path = ns; + sourceTree = ""; + }; + 69A6DEA21E96149300000E69 /* utility */ = { + isa = PBXGroup; + children = ( + 69A6DEA31E96149300000E69 /* block_mean_calculator.cc */, + 69A6DEA41E96149300000E69 /* block_mean_calculator.h */, + 69A6DEA51E96149300000E69 /* delay_estimator.cc */, + 69A6DEA61E96149300000E69 /* delay_estimator.h */, + 69A6DEA71E96149300000E69 /* delay_estimator_internal.h */, + 69A6DEA81E96149300000E69 /* delay_estimator_wrapper.cc */, + 69A6DEA91E96149300000E69 /* delay_estimator_wrapper.h */, + 69A6DEAA1E96149300000E69 /* ooura_fft.cc */, + 69A6DEAB1E96149300000E69 /* ooura_fft.h */, + 69A6DEAC1E96149300000E69 /* ooura_fft_neon.cc */, + 69A6DEAD1E96149300000E69 /* ooura_fft_sse2.cc */, + 69A6DEAE1E96149300000E69 /* ooura_fft_tables_common.h */, + 69A6DEAF1E96149300000E69 /* ooura_fft_tables_neon_sse2.h */, + ); + path = utility; + sourceTree = ""; + }; + 69A6DEB01E96149300000E69 /* system_wrappers */ = 
{ + isa = PBXGroup; + children = ( + 69A6DEB11E96149300000E69 /* include */, + 69A6DEB61E96149300000E69 /* source */, + ); + path = system_wrappers; + sourceTree = ""; + }; + 69A6DEB11E96149300000E69 /* include */ = { + isa = PBXGroup; + children = ( + 69A6DEB21E96149300000E69 /* asm_defines.h */, + 69A6DEB31E96149300000E69 /* compile_assert_c.h */, + 69A6DEB41E96149300000E69 /* cpu_features_wrapper.h */, + 69A6DEB51E96149300000E69 /* metrics.h */, + ); + path = include; + sourceTree = ""; + }; + 69A6DEB61E96149300000E69 /* source */ = { + isa = PBXGroup; + children = ( + 69A6DEB71E96149300000E69 /* cpu_features.cc */, + ); + path = source; + sourceTree = ""; + }; + 69F8422C1E67540700C110F7 = { + isa = PBXGroup; + children = ( + C2A87DE31F4B6AD3002D3F73 /* AudioUnitIO.cpp */, + C2A87DE11F4B6A89002D3F73 /* AudioInput.cpp */, + C2A87DE21F4B6A89002D3F73 /* AudioOutput.cpp */, + C2A87DDB1F4B6A61002D3F73 /* AudioInputAudioUnit.cpp */, + C2A87DDC1F4B6A61002D3F73 /* AudioInputAudioUnitOSX.cpp */, + C2A87DDD1F4B6A61002D3F73 /* AudioOutputAudioUnit.cpp */, + C2A87DDE1F4B6A61002D3F73 /* AudioOutputAudioUnitOSX.cpp */, + C2A87DD91F4B6A57002D3F73 /* DarwinSpecific.mm */, + C2A87DD71F4B6A33002D3F73 /* Resampler.cpp */, + 692AB8861E6759BF00706ACC /* libtgvoip */, + 69F842371E67540700C110F7 /* Products */, + 692AB9061E675E8700706ACC /* Frameworks */, + ); + sourceTree = ""; + }; + 69F842371E67540700C110F7 /* Products */ = { + isa = PBXGroup; + children = ( + 69F842361E67540700C110F7 /* libtgvoip.framework */, + ); + name = Products; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + 69F842331E67540700C110F7 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 69A6DF181E96149300000E69 /* gain_control.h in Headers */, + 69A6DF231E96149300000E69 /* nsx_core.h in Headers */, + 692AB9011E6759DD00706ACC /* threading.h in Headers */, + 698848461F4B39F700076DF0 /* AudioUnitIO.h in Headers */, + 
69A6DF2B1E96149300000E69 /* three_band_filter_bank.h in Headers */, + 692AB8EA1E6759DD00706ACC /* MediaStreamItf.h in Headers */, + 69A6DF3E1E96149300000E69 /* typedefs.h in Headers */, + 69A6DF001E96149300000E69 /* wav_file.h in Headers */, + 692AB8EE1E6759DD00706ACC /* OpusEncoder.h in Headers */, + 692AB8CE1E6759DD00706ACC /* AudioOutput.h in Headers */, + 69A6DF321E96149300000E69 /* delay_estimator_wrapper.h in Headers */, + 69A6DEBD1E96149300000E69 /* checks.h in Headers */, + 69A6DF1B1E96149300000E69 /* defines.h in Headers */, + 69A6DEF31E96149300000E69 /* resample_by_2_internal.h in Headers */, + 69A6DF341E96149300000E69 /* ooura_fft.h in Headers */, + 69A6DEBA1E96149300000E69 /* atomicops.h in Headers */, + 69A6DF0A1E96149300000E69 /* aec_resampler.h in Headers */, + 695B20621EBD39FF00E31757 /* DarwinSpecific.h in Headers */, + 69A6DEE51E96149300000E69 /* spl_inl.h in Headers */, + 69A6DF3B1E96149300000E69 /* cpu_features_wrapper.h in Headers */, + 69A6DF211E96149300000E69 /* ns_core.h in Headers */, + 69A6DF051E96149300000E69 /* aec_core.h in Headers */, + 69A6DF441E9614B700000E69 /* AudioInputAudioUnitOSX.h in Headers */, + 692AB8D91E6759DD00706ACC /* CongestionControl.h in Headers */, + 69A6DF031E96149300000E69 /* aec_common.h in Headers */, + 69A6DEBE1E96149300000E69 /* constructormagic.h in Headers */, + 69A6DED31E96149300000E69 /* complex_fft_tables.h in Headers */, + 69A6DF021E96149300000E69 /* wav_header.h in Headers */, + 69A6DECB1E96149300000E69 /* audio_util.h in Headers */, + 692AB8CC1E6759DD00706ACC /* AudioInput.h in Headers */, + 69A6DEC51E96149300000E69 /* type_traits.h in Headers */, + 692AB8EC1E6759DD00706ACC /* OpusDecoder.h in Headers */, + 692AB8E81E6759DD00706ACC /* logging.h in Headers */, + 69A6DF3A1E96149300000E69 /* compile_assert_c.h in Headers */, + 69A6DF071E96149300000E69 /* aec_core_optimized_methods.h in Headers */, + 69A6DEC81E96149300000E69 /* channel_buffer.h in Headers */, + 692AB8D41E6759DD00706ACC /* 
BufferOutputStream.h in Headers */, + 692AB9051E6759DD00706ACC /* VoIPServerConfig.h in Headers */, + 69A6DF461E9614B700000E69 /* AudioOutputAudioUnitOSX.h in Headers */, + 692AB9031E6759DD00706ACC /* VoIPController.h in Headers */, + 69A6DEB91E96149300000E69 /* array_view.h in Headers */, + 692AB8D01E6759DD00706ACC /* BlockingQueue.h in Headers */, + 69A6DECD1E96149300000E69 /* ring_buffer.h in Headers */, + 69A6DEE61E96149300000E69 /* spl_inl_armv7.h in Headers */, + 69A6DF171E96149300000E69 /* digital_agc.h in Headers */, + 69A6DF0C1E96149300000E69 /* echo_cancellation.h in Headers */, + 69A6DEFE1E96149300000E69 /* sparse_fir_filter.h in Headers */, + 69A6DEC21E96149300000E69 /* sanitizer.h in Headers */, + 69A6DF151E96149300000E69 /* analog_agc.h in Headers */, + 69A6DF381E96149300000E69 /* ooura_fft_tables_neon_sse2.h in Headers */, + 69A6DF271E96149300000E69 /* windows_private.h in Headers */, + 69A6DF291E96149300000E69 /* splitting_filter.h in Headers */, + 692AB8DB1E6759DD00706ACC /* EchoCanceller.h in Headers */, + 69A6DF391E96149300000E69 /* asm_defines.h in Headers */, + 690725C31EBBD5F2005D860B /* NetworkSocket.h in Headers */, + 69A6DF1D1E96149300000E69 /* noise_suppression.h in Headers */, + 69A6DEBF1E96149300000E69 /* safe_compare.h in Headers */, + 69A6DF111E96149300000E69 /* aecm_defines.h in Headers */, + 69A6DEE31E96149300000E69 /* real_fft.h in Headers */, + 69A6DECA1E96149300000E69 /* fft4g.h in Headers */, + 69A6DF2D1E96149300000E69 /* block_mean_calculator.h in Headers */, + 69A6DF3C1E96149300000E69 /* metrics.h in Headers */, + 692AB8D61E6759DD00706ACC /* BufferPool.h in Headers */, + 69A6DF0E1E96149300000E69 /* aecm_core.h in Headers */, + 698848421F4B39F700076DF0 /* AudioInputAudioUnit.h in Headers */, + 69A6DF2F1E96149300000E69 /* delay_estimator.h in Headers */, + 69A6DEC41E96149300000E69 /* stringutils.h in Headers */, + 69A6DF1F1E96149300000E69 /* noise_suppression_x.h in Headers */, + 692AB8E71E6759DD00706ACC /* JitterBuffer.h in 
Headers */, + 690725BF1EBBD5DE005D860B /* NetworkSocketPosix.h in Headers */, + 69A6DF131E96149300000E69 /* echo_control_mobile.h in Headers */, + 69A6DEE41E96149300000E69 /* signal_processing_library.h in Headers */, + 69A6DF1A1E96149300000E69 /* apm_data_dumper.h in Headers */, + 692AB8D21E6759DD00706ACC /* BufferInputStream.h in Headers */, + 69A6DF371E96149300000E69 /* ooura_fft_tables_common.h in Headers */, + 69A6DF301E96149300000E69 /* delay_estimator_internal.h in Headers */, + 69A6DEBB1E96149300000E69 /* basictypes.h in Headers */, + 69A6DEE71E96149300000E69 /* spl_inl_mips.h in Headers */, + 69AC14911F4B41CF00AC3173 /* Resampler.h in Headers */, + 69A6DF261E96149300000E69 /* nsx_defines.h in Headers */, + 698848441F4B39F700076DF0 /* AudioOutputAudioUnit.h in Headers */, + 69A6DEC11E96149300000E69 /* safe_conversions_impl.h in Headers */, + 69A6DEC01E96149300000E69 /* safe_conversions.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + 69F842351E67540700C110F7 /* libtgvoip */ = { + isa = PBXNativeTarget; + buildConfigurationList = 69F8423E1E67540700C110F7 /* Build configuration list for PBXNativeTarget "libtgvoip" */; + buildPhases = ( + 69F842311E67540700C110F7 /* Sources */, + 69F842321E67540700C110F7 /* Frameworks */, + 69F842331E67540700C110F7 /* Headers */, + 69F842341E67540700C110F7 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = libtgvoip; + productName = libtgvoip; + productReference = 69F842361E67540700C110F7 /* libtgvoip.framework */; + productType = "com.apple.product-type.framework"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 69F8422D1E67540700C110F7 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0820; + ORGANIZATIONNAME = Grishka; + TargetAttributes = { + 69F842351E67540700C110F7 = { + CreatedOnToolsVersion = 8.2.1; + ProvisioningStyle = Automatic; + 
}; + }; + }; + buildConfigurationList = 69F842301E67540700C110F7 /* Build configuration list for PBXProject "libtgvoip_osx" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + ); + mainGroup = 69F8422C1E67540700C110F7; + productRefGroup = 69F842371E67540700C110F7 /* Products */; + projectDirPath = ""; + projectReferences = ( + { + ProductGroup = 692AB9081E675E8800706ACC /* Products */; + ProjectRef = 692AB9071E675E8800706ACC /* Telegraph.xcodeproj */; + }, + ); + projectRoot = ""; + targets = ( + 69F842351E67540700C110F7 /* libtgvoip */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXReferenceProxy section */ + 692AB9111E675E8800706ACC /* Telegram.app */ = { + isa = PBXReferenceProxy; + fileType = wrapper.application; + path = Telegram.app; + remoteRef = 692AB9101E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB9131E675E8800706ACC /* Share.appex */ = { + isa = PBXReferenceProxy; + fileType = "wrapper.app-extension"; + path = Share.appex; + remoteRef = 692AB9121E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB9151E675E8800706ACC /* watchkitapp.app */ = { + isa = PBXReferenceProxy; + fileType = wrapper.application; + path = watchkitapp.app; + remoteRef = 692AB9141E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB9171E675E8800706ACC /* watchkitapp Extension.appex */ = { + isa = PBXReferenceProxy; + fileType = "wrapper.app-extension"; + path = "watchkitapp Extension.appex"; + remoteRef = 692AB9161E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB9191E675E8800706ACC /* SiriIntents.appex */ = { + isa = PBXReferenceProxy; + fileType = "wrapper.app-extension"; + path = SiriIntents.appex; + remoteRef = 692AB9181E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 692AB91B1E675E8800706ACC 
/* LegacyDatabase.framework */ = { + isa = PBXReferenceProxy; + fileType = wrapper.framework; + path = LegacyDatabase.framework; + remoteRef = 692AB91A1E675E8800706ACC /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; +/* End PBXReferenceProxy section */ + +/* Begin PBXResourcesBuildPhase section */ + 69F842341E67540700C110F7 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 692AB8E51E6759DD00706ACC /* Info.plist in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 69F842311E67540700C110F7 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 69A6DEFB1E96149300000E69 /* sqrt_of_one_minus_x_squared.c in Sources */, + 69A6DEDD1E96149300000E69 /* filter_ar_fast_q12.c in Sources */, + 69A6DEF41E96149300000E69 /* resample_fractional.c in Sources */, + 6915307B1E6B5BAB004F643F /* logging.cpp in Sources */, + 69A6DEE11E96149300000E69 /* get_scaling_square.c in Sources */, + C2A87DE41F4B6AD3002D3F73 /* AudioUnitIO.cpp in Sources */, + 690725C21EBBD5F2005D860B /* NetworkSocket.cpp in Sources */, + 69A6DEC71E96149300000E69 /* channel_buffer.cc in Sources */, + 69A6DF191E96149300000E69 /* apm_data_dumper.cc in Sources */, + 69A6DF221E96149300000E69 /* nsx_core.c in Sources */, + 69A6DEDC1E96149300000E69 /* filter_ar.c in Sources */, + 69A6DEEB1E96149300000E69 /* min_max_operations_neon.c in Sources */, + 69A6DEE81E96149300000E69 /* levinson_durbin.c in Sources */, + 69A6DEE21E96149300000E69 /* ilbc_specific_functions.c in Sources */, + 692AB9041E6759DD00706ACC /* VoIPServerConfig.cpp in Sources */, + 69A6DF0B1E96149300000E69 /* echo_cancellation.cc in Sources */, + 69A6DED61E96149300000E69 /* cross_correlation_neon.c in Sources */, + 69A6DEF71E96149300000E69 /* spl_sqrt.c in Sources */, + 69A6DEED1E96149300000E69 /* real_fft.c in Sources */, + 692AB9021E6759DD00706ACC 
/* VoIPController.cpp in Sources */, + 69A6DF0D1E96149300000E69 /* aecm_core.cc in Sources */, + 69A6DF101E96149300000E69 /* aecm_core_neon.cc in Sources */, + 69A6DED71E96149300000E69 /* division_operations.c in Sources */, + 69A6DEDB1E96149300000E69 /* energy.c in Sources */, + 69A6DEC61E96149300000E69 /* audio_util.cc in Sources */, + 69A6DF141E96149300000E69 /* analog_agc.c in Sources */, + 69A6DEF81E96149300000E69 /* spl_sqrt_floor.c in Sources */, + 69A6DEF61E96149300000E69 /* spl_inl.c in Sources */, + 69A6DEEF1E96149300000E69 /* resample.c in Sources */, + 69A6DEF21E96149300000E69 /* resample_by_2_internal.c in Sources */, + 69A6DF011E96149300000E69 /* wav_header.cc in Sources */, + 69A6DF041E96149300000E69 /* aec_core.cc in Sources */, + 692AB8D81E6759DD00706ACC /* CongestionControl.cpp in Sources */, + 69A6DEE91E96149300000E69 /* lpc_to_refl_coef.c in Sources */, + 69A6DEF51E96149300000E69 /* spl_init.c in Sources */, + 69A6DF241E96149300000E69 /* nsx_core_c.c in Sources */, + 69A6DF0F1E96149300000E69 /* aecm_core_c.cc in Sources */, + 69A6DECC1E96149300000E69 /* ring_buffer.c in Sources */, + 692AB8EB1E6759DD00706ACC /* OpusDecoder.cpp in Sources */, + 69A6DED81E96149300000E69 /* dot_product_with_scale.c in Sources */, + 69A6DF331E96149300000E69 /* ooura_fft.cc in Sources */, + 69A6DEF11E96149300000E69 /* resample_by_2.c in Sources */, + 69A6DEEC1E96149300000E69 /* randomization_functions.c in Sources */, + 69A6DEEE1E96149300000E69 /* refl_coef_to_lpc.c in Sources */, + C2A87DDF1F4B6A61002D3F73 /* AudioInputAudioUnit.cpp in Sources */, + 69A6DF431E9614B700000E69 /* AudioInputAudioUnitOSX.cpp in Sources */, + C2A87DE01F4B6A61002D3F73 /* AudioOutputAudioUnit.cpp in Sources */, + 69A6DF451E9614B700000E69 /* AudioOutputAudioUnitOSX.cpp in Sources */, + 69A6DEFC1E96149300000E69 /* vector_scaling_operations.c in Sources */, + 692AB8E61E6759DD00706ACC /* JitterBuffer.cpp in Sources */, + 692AB8CB1E6759DD00706ACC /* AudioInput.cpp in Sources */, + 
692AB8CD1E6759DD00706ACC /* AudioOutput.cpp in Sources */, + C2A87DDA1F4B6A57002D3F73 /* DarwinSpecific.mm in Sources */, + C2A87DD81F4B6A33002D3F73 /* Resampler.cpp in Sources */, + 69A6DEFA1E96149300000E69 /* splitting_filter_impl.c in Sources */, + 69A6DEE01E96149300000E69 /* get_hanning_window.c in Sources */, + 69A6DF161E96149300000E69 /* digital_agc.c in Sources */, + 69A6DF061E96149300000E69 /* aec_core_neon.cc in Sources */, + 69A6DF201E96149300000E69 /* ns_core.c in Sources */, + 69A6DF091E96149300000E69 /* aec_resampler.cc in Sources */, + 692AB8D11E6759DD00706ACC /* BufferInputStream.cpp in Sources */, + 692AB8E91E6759DD00706ACC /* MediaStreamItf.cpp in Sources */, + 69A6DF2C1E96149300000E69 /* block_mean_calculator.cc in Sources */, + 69A6DEBC1E96149300000E69 /* checks.cc in Sources */, + 692AB8DA1E6759DD00706ACC /* EchoCanceller.cpp in Sources */, + 69A6DF281E96149300000E69 /* splitting_filter.cc in Sources */, + 692AB8D31E6759DD00706ACC /* BufferOutputStream.cpp in Sources */, + 692AB8CF1E6759DD00706ACC /* BlockingQueue.cpp in Sources */, + 69A6DF2E1E96149300000E69 /* delay_estimator.cc in Sources */, + 69A6DEF01E96149300000E69 /* resample_48khz.c in Sources */, + 69A6DECF1E96149300000E69 /* auto_correlation.c in Sources */, + 69A6DF121E96149300000E69 /* echo_control_mobile.cc in Sources */, + 690725BE1EBBD5DE005D860B /* NetworkSocketPosix.cpp in Sources */, + 69A6DF1C1E96149300000E69 /* noise_suppression.c in Sources */, + 69A6DED41E96149300000E69 /* copy_set_operations.c in Sources */, + 69A6DEC31E96149300000E69 /* stringutils.cc in Sources */, + 69A6DF1E1E96149300000E69 /* noise_suppression_x.c in Sources */, + 692AB8D51E6759DD00706ACC /* BufferPool.cpp in Sources */, + 69A6DED01E96149300000E69 /* complex_bit_reverse.c in Sources */, + 69A6DEDF1E96149300000E69 /* filter_ma_fast_q12.c in Sources */, + 69A6DEFF1E96149300000E69 /* wav_file.cc in Sources */, + 69A6DF351E96149300000E69 /* ooura_fft_neon.cc in Sources */, + 69A6DECE1E96149300000E69 /* 
auto_corr_to_refl_coef.c in Sources */, + 69A6DEFD1E96149300000E69 /* sparse_fir_filter.cc in Sources */, + 69A6DED91E96149300000E69 /* downsample_fast.c in Sources */, + 69A6DF251E96149300000E69 /* nsx_core_neon.c in Sources */, + 69A6DF081E96149300000E69 /* aec_core_sse2.cc in Sources */, + 69A6DEEA1E96149300000E69 /* min_max_operations.c in Sources */, + 69A6DF361E96149300000E69 /* ooura_fft_sse2.cc in Sources */, + 69A6DED51E96149300000E69 /* cross_correlation.c in Sources */, + 69A6DF3D1E96149300000E69 /* cpu_features.cc in Sources */, + 69A6DF2A1E96149300000E69 /* three_band_filter_bank.cc in Sources */, + 69A6DED21E96149300000E69 /* complex_fft.c in Sources */, + 69A6DF311E96149300000E69 /* delay_estimator_wrapper.cc in Sources */, + 69A6DEDA1E96149300000E69 /* downsample_fast_neon.c in Sources */, + 69A6DEC91E96149300000E69 /* fft4g.c in Sources */, + 692AB8ED1E6759DD00706ACC /* OpusEncoder.cpp in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + 69F8423C1E67540700C110F7 /* Debug Hockeyapp */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + CURRENT_PROJECT_VERSION = 1; + DEBUG_INFORMATION_FORMAT = dwarf; + 
ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VERSIONING_SYSTEM = "apple-generic"; + VERSION_INFO_PREFIX = ""; + }; + name = "Debug Hockeyapp"; + }; + 69F8423D1E67540700C110F7 /* Release Hockeyapp */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + CURRENT_PROJECT_VERSION = 1; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + 
GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VALIDATE_PRODUCT = YES; + VERSIONING_SYSTEM = "apple-generic"; + VERSION_INFO_PREFIX = ""; + }; + name = "Release Hockeyapp"; + }; + 69F8423F1E67540700C110F7 /* Debug Hockeyapp */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CODE_SIGN_IDENTITY = ""; + DEFINES_MODULE = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + DYLIB_INSTALL_NAME_BASE = "@rpath"; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + "../../thrid-party/opus/include/opus", + "../../third-party/opus/include/opus", + webrtc_dsp, + "../Telegram-Mac/third-party/opus/include/opus", + "../Telegram-Mac/thrid-party/opus/include/opus", + ); + INFOPLIST_FILE = "$(SRCROOT)/Info.plist"; + INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 6.0; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = "$(inherited)"; + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.7; + OTHER_CFLAGS = ( + "-DTGVOIP_USE_CUSTOM_CRYPTO", + "-DWEBRTC_POSIX", + "-DWEBRTC_APM_DEBUG_DUMP=0", + "-DTGVOIP_USE_DESKTOP_DSP", + "-DWEBRTC_MAC", + ); + PRODUCT_BUNDLE_IDENTIFIER = me.grishka.libtgvoip; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx; + SKIP_INSTALL = YES; + }; + name = "Debug Hockeyapp"; + }; + 69F842401E67540700C110F7 /* Release Hockeyapp */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CODE_SIGN_IDENTITY = ""; + DEFINES_MODULE = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + DYLIB_INSTALL_NAME_BASE = "@rpath"; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + "../../thrid-party/opus/include/opus", + 
"../../third-party/opus/include/opus", + webrtc_dsp, + "../Telegram-Mac/third-party/opus/include/opus", + "../Telegram-Mac/thrid-party/opus/include/opus", + ); + INFOPLIST_FILE = "$(SRCROOT)/Info.plist"; + INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 6.0; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = "$(inherited)"; + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.7; + OTHER_CFLAGS = ( + "-DTGVOIP_USE_CUSTOM_CRYPTO", + "-DWEBRTC_POSIX", + "-DWEBRTC_APM_DEBUG_DUMP=0", + "-DTGVOIP_USE_DESKTOP_DSP", + "-DWEBRTC_MAC", + ); + PRODUCT_BUNDLE_IDENTIFIER = me.grishka.libtgvoip; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx; + SKIP_INSTALL = YES; + }; + name = "Release Hockeyapp"; + }; + D04D01C31E678C0D0086DDC0 /* Debug AppStore */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + CURRENT_PROJECT_VERSION = 1; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + 
GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VERSIONING_SYSTEM = "apple-generic"; + VERSION_INFO_PREFIX = ""; + }; + name = "Debug AppStore"; + }; + D04D01C41E678C0D0086DDC0 /* Debug AppStore */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CODE_SIGN_IDENTITY = ""; + DEFINES_MODULE = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + DYLIB_INSTALL_NAME_BASE = "@rpath"; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + "../../thrid-party/opus/include/opus", + "../../third-party/opus/include/opus", + webrtc_dsp, + "../Telegram-Mac/third-party/opus/include/opus", + "../Telegram-Mac/thrid-party/opus/include/opus", + ); + INFOPLIST_FILE = "$(SRCROOT)/Info.plist"; + INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 6.0; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = "$(inherited)"; + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.7; + OTHER_CFLAGS = ( + "-DTGVOIP_USE_CUSTOM_CRYPTO", + "-DWEBRTC_POSIX", + "-DWEBRTC_APM_DEBUG_DUMP=0", + "-DTGVOIP_USE_DESKTOP_DSP", + "-DWEBRTC_MAC", + ); + PRODUCT_BUNDLE_IDENTIFIER = me.grishka.libtgvoip; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx; + SKIP_INSTALL = YES; + }; + name = "Debug AppStore"; + }; + D04D01CB1E678C230086DDC0 /* Release AppStore */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + 
CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + CURRENT_PROJECT_VERSION = 1; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 10.2; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VALIDATE_PRODUCT = YES; + VERSIONING_SYSTEM = "apple-generic"; + VERSION_INFO_PREFIX = ""; + }; + name = "Release AppStore"; + }; + D04D01CC1E678C230086DDC0 /* Release AppStore */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "c++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CODE_SIGN_IDENTITY = ""; + DEFINES_MODULE = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + DYLIB_INSTALL_NAME_BASE = "@rpath"; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + "../../thrid-party/opus/include/opus", + "../../third-party/opus/include/opus", + webrtc_dsp, + "../Telegram-Mac/third-party/opus/include/opus", + "../Telegram-Mac/thrid-party/opus/include/opus", + ); + INFOPLIST_FILE = "$(SRCROOT)/Info.plist"; + INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; + IPHONEOS_DEPLOYMENT_TARGET = 6.0; + 
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = "$(inherited)"; + MACH_O_TYPE = staticlib; + MACOSX_DEPLOYMENT_TARGET = 10.7; + OTHER_CFLAGS = ( + "-DTGVOIP_USE_CUSTOM_CRYPTO", + "-DWEBRTC_POSIX", + "-DWEBRTC_APM_DEBUG_DUMP=0", + "-DTGVOIP_USE_DESKTOP_DSP", + "-DWEBRTC_MAC", + ); + PRODUCT_BUNDLE_IDENTIFIER = me.grishka.libtgvoip; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx; + SKIP_INSTALL = YES; + }; + name = "Release AppStore"; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 69F842301E67540700C110F7 /* Build configuration list for PBXProject "libtgvoip_osx" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 69F8423C1E67540700C110F7 /* Debug Hockeyapp */, + D04D01C31E678C0D0086DDC0 /* Debug AppStore */, + 69F8423D1E67540700C110F7 /* Release Hockeyapp */, + D04D01CB1E678C230086DDC0 /* Release AppStore */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = "Release Hockeyapp"; + }; + 69F8423E1E67540700C110F7 /* Build configuration list for PBXNativeTarget "libtgvoip" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 69F8423F1E67540700C110F7 /* Debug Hockeyapp */, + D04D01C41E678C0D0086DDC0 /* Debug AppStore */, + 69F842401E67540700C110F7 /* Release Hockeyapp */, + D04D01CC1E678C230086DDC0 /* Release AppStore */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = "Release Hockeyapp"; + }; +/* End XCConfigurationList section */ + }; + rootObject = 69F8422D1E67540700C110F7 /* Project object */; +} diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 000000000..55fe87423 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 
@@ + + + + + diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.xcworkspace/xcuserdata/grishka.xcuserdatad/UserInterfaceState.xcuserstate b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/project.xcworkspace/xcuserdata/grishka.xcuserdatad/UserInterfaceState.xcuserstate new file mode 100644 index 0000000000000000000000000000000000000000..24e6306be3fcdbb3396ef98deffdcd8fe89cd297 GIT binary patch literal 9235 zcmcIpd0Z3M_P_T|5+(@=lZ~vn6a=-h3JTV$kwB#?A^}uf(hx?7211j7QcHVWYt`3U ztJdxU?yYs{zNy`x-K*AGpWU^KU0>T*yZ3h|!vtdO`}L3C4?d8|+%xB#dzSAx=T2Qq zz#op}(Bg8vXQkRazw2qDOee z<~`ikC;_p^fQ-n5GSFZ&1Pw(OqhTl$Wua^|92KF-s2oi}6=*7|L~c}ts!C%OyWjW(lB)P;7SooEl*iylFbqQ}tV z=qdCZdLF%uUPs5#DRdg0L0_V;&=2Sw)?h78!W`CNJvLw?HsNIKz$rKtr{OG|jdO4= zF2I-J3AhYjjwj+tcrq@>Q}7I2i)Z3l*n_XY^|%4g$E~;xUyYaGrT99$8sCW5;*Izg zd^^4yZ^qm3PP`l6haba_<0tTw_yB$pzl2}Lui#hlG5jVzj!)qa@Mrie{uY0S&*9$) zAu6IKiNr`uB$@OhgGm<2Cxv7*8AqI?gj`O_NhPTvb4defBz`iF%qIbIC21vXYx8ytWJ^6|J#xRVA(J@BG#Ml`JlfvXOdCYJopDAEQFe8~lridBET*8cI zF0D>$ZVd#EA|+BGHA+NDNI$LIoi(3t57*Mqp3taC^98)&a2L`bEf8S35QlUigYA&& z;nMP5BMVAOojIAV(jr%8eyJ-j(^*{N${aqvq+~?SNLOL0)9K-iWv-HG!O;A0i`U1y zg1*)!zBwWZPDcGu`gUYS7Gy;>WJeB^0&-A50D zM|WdzLAkfh-{7SyGwBWLnMvL{KH%Xp1rtay6z8z&h(F+uv^zt**G(ZWQsd_ri0opl zgthZr7RSQHhy4xB7vLZ$B=Yz3;n`FLQE7paL(1B+=DESFD9lQhnt^7b^iEU@j!rZS zQiL7rTs2iO#mXXlQ&{xO70B0xyr>TPK^mk>40zOt^j&B!YJmPQpbPoYJQxTULF#PE zrf)=X^t-Ut$=-%$f26gZuSgxYYyuK~L{DFdLbJ-27)Ad^!=-{;0hCG> zws@QC`TD3=szQ7ZcvSK}nz<5CmHGn_KICt1C}|dqP%j2jOsO~?Q?j50V7x9+L>qOIz(mTamsKZGqvPXdC1U@GFqkcmu6` z*ct9ix(nSa1mQhU(1~`#2*{Wb@Yh8e+JgQTiQ7K(fZ&k*=zbUpg~G*tdNC3*Dtc*I ziTDY0ASU`CTmq@m!qey(l)ia$)SQbJQQ*lDFetB}XxMb7}Lw( z74)jeK%#sD9gXGBF&GCKVy?V}eBI~-dK;YtClo_TZ@#>P-j569vFJmJN8^P+ZW$L1 zWQq01=(9M=@#u3fgG-=X4RMrTqi^FVzoR@$1)d_T!~j8n0g3yMn4t8X=qL0u`UU-p zenY>bKQO`=CO{cn4ijM#Oonop0u?*aAS|Qs=t3^cVkK5#HI2%t6d*11_Zqkf*1>Ip zNjU+1Tzp+?LjxZYAuO4CT!f&>G(kKxq?+r!p?U$C1Y&WC#&(U^;ECVet#wV5y0*`Q zoPj>!;^%r>1Cgi%k^O+4TNT3N;RsEFO1>q;hv|h^KoMFvb&^6cNUIG*5xh?TVl%c< 
zFJcQ+c48a2h0P2SrT3uyUkT{3Chms^2n_l|RVN+@)sRs&+3RnXRtMoBNZ*aqaRwd? zH82gPcjKYdn!{iQwcyo)pN7PV6H3$Y zB~S-G2tW({ZHKEMV-|%5-sO#WtJ+)mnW9cnZE5{?M%!}%e&QWM{UHJoT@gU6JlX(R zCFr`qnq68eSxEgWOaZ+`Wu~-NT{+3a*?qyLEN@FofX`ak6v%7|1zY$~q&-uZDx^(U z;3~lrQ*kABLp|^?w;NaE8axdepb`9nC;HJy^HfYKb60svTxC^LDn0I!NhQTqWmC#M z6H8`Lu>TW5O_{rF{G^gfN+pu~nMcX=3TL^yhll8r*(h%_Xld~GB2i%>#iYV~sTsmv z?CZpJFc0R7kMPL18PcPr3^!swEtfr&&do=<>799~u)5q$zr2Y9xCyCoGY;Ywd?gOy zFmllhl?*m1=!+)EoL*yAcq5HbiY6Mly|;x%kCvQ;Q|3y~_O3xIG{Zt@0*i;sj+MYT zC%S?HyR29z6sw5hh8Cc_ZoCk;H1!&{&)i@i+0|h_|T)+F$`759gjsb7fR^c6N1`LREG{$RBQ;@6DD> z8qTig+p@)q=b0-^1q*2Fm>sKveXe?2IC1Nn@U66?p}m#FOLA=&zD?Nig_gu8@4$C@ zxWUwDaSrRXV0`9w;kyKOf1BRJWyB&vXo^I9kd(}-5VBIdcNBM0Gj`z(cniK+nCR-f zVSXn44DkUPq-{JPSOjgN(%bP48sXH&U3k0D)X`B8?-H=>9_YAWG8GlZ$~xYIX%n#p z@5TG@etbV%3jm8@$rk(|eh5E|AAzN?5>~-#$e2EfpBwGVXH@zd8lzXWVkL-{G0sG- z7ON+*dL)6&LHw*B<`8}gKaHP(Ww0Dpz;#<_Tj#>nD2*0R3xyl2P-W~j-jH8_Njf|6 z3xtvwuJkStdn6BMi(l;ldbf`P8-<~`#pvU~5G{^jU&!Ag3@UVXi6e<}7qD8=^ELd2 zP_$l$>pPK)>O}R?(U~wD4D@t_!e%)vk-vrCmRcQr64vyzI`|zr7T|aBd+-mqG1lhb z5Am74EeW;xC)D0I2+hl)xaI|aj=zed+=suRepwr(Z0XzP;P3H&;+ik~BmOz2(Jyeb zq|xt!Mt=|l>*1D|MuZ`XzH&$cVPOMogwz?-nPMcSj`Ls9S0d36F76o}(ZeRVRVWie zFDtbcLNk&&Zq}jbv&V>;Scny=i4AWc4wB;G^r2udBF2xHo+~pTM^{B6yufj091CmE$vmA-~i*QAypf74GO$Nyrc~RIDV>1$T)}ZTpESf&i?|hB9O*yoRre7r=x1T0ae2ms_#~ylOzQM zGLZrTaZpQ%X9#2psi1H~9TqD?anp3XXS&E#0qFiNc@&n2n^bwY)W0F2i&P8T{|;$S z-jQi!4xJgvbTWg~l9^-{nGHK&C+vcI;9l6hjd;iv#Ea61kJQ6`un+cAXn7Yt5YpKq zPKIJ2N!UxllVU@X^i@2=p?7RO&9q1z?c~Cx!H`I998Y6>eoN5b90^Cyg=m9XKTA9z za{C&2;T+2-eywb6rYIEgH%B`R(IZVlN|uly?Hl*dlDxHemtt~ASjL$E6aWs;9uOrt}5F;z$p-!?29)XO$*10iiorgVK z)&E%o5o;>48YLChky~iNA~%!uWCPgmuN_8G+ zs50}O>!%AtdwyYIM~yhWOFP&~whKnw22XX89q>%N5qC$8`1Jozh)EjkB@YTV+(-74 z`^f|FEIbF#!wXx;L*!xd2zeC#39rHHa6+)*;J7#t(=96;4XH`AE6NHBr-E(KQ^8>u zAgu`TZT?_uxXK%9;Cp(gV&Tl29`sy*T(YR-F{wYKE12 zINDUvsS=LD5!efFMMEvoY5znLeU^NQ(!0p#R$Ivr%xhz6?>P4yS@ z=@B;h5Bb5vrNp`Bd}nl%bL2;%F#LD23%B!Gq2G0eaj=_wIQPaNkU+_+kXBjOWKba&r)iZuF`sn!i7Za<*JDIW2 
zvoppF?{zX(c%L2w)3PYc`Qk*@XME~WrXQ1m^gEdT%m8K}a}kruq%nh-bodZHg41vY zK88==Q}}ELGnk%14Pq{4hB28;7Hy-?(mGcJpTigMC45D%Q)fh?8R2$Kto4UW0{#Z^ zWkj-g+eNd+$Crht`RQE23)y1qyC9_n^`m$@+9X9NnA79i*iDg1skg}=Xr~7*eHjSN zazHEz=Sif^oiD^kC>Mjjr7_q{5AT~=>1Y!S<>qAN(RVxbZ4o;EQSCC%TOUai6gf*X^KPG+##tS$?11U3QY*TMz920?qtI9~fTWY*l| zb6SREN?D>zE8}EVS(+?URv;TCE0$HryfUARmo>=zviY(mSx|PRY>8}{Y=vy4Y?W+{ z>;~Cd**e*J*>2e(*~_wDH_5M(cgU}mFOe^kuaMs+@0Rb7?~>mu zzfb=$u~qS?;*{bu#Se-f6+bI}Rs5cS5=cT)f-b?3 zU`jA2SQG3CDGB)r(-W>s*p~2M!V3w<65dL9JKQ#cU3n$L6ym*paN09nVf-E7>Y`8asoX$u_cnb{-pGm$GZwo7oNQCiXV=4t6KI zi@k^4%|6CH$sS~%W}juBXJ2EFvnSY->?!tL_H*_d_FMLQ_9yli_BZwqC95v`Ky`*|Ybwu@^>SNWXsiS>WF%& zdb#>K_4Vr2>VK$jRNtiDtnN~8QEyZ4Q14RTtG-XYSN){=u==$6$3$adUgDU$D6MxWXG|3vP#;!@xq-!!XLo|7sv6@oN zB#l=S(Ja+$)a=(hpm|91h~_cP6Pm9y=QO`-k(Ov>T7}l2?WaxCrfUalhiVJ7W3;oi zb=t++8?$K~&o!V~gR_%7}KJ5YRi`rw_gP~t! z=|@h^X*erq=Tf--++Z$?8_td9#&S-sn5*R)xE3zNMYuL@A-9-Y$}Q)v3VtI+v%ZMt^dBHcAQ&@Itz)NRsj z)$P#j((Tsm(e2YcraP#6O81QJIo%Q6Te`P(|I)p$`%rgU_p$D4-LHDAXY_KtN}s6L z>N&ktpRUi)57rOWkJOLSkJgXXJN3o-N&2aJx4v3GO<%9~>*wp6^dWsj-=<%vU!q^F z-=x1&f49C@n;!+;4c$aKP}K;RVBC!%K!&3?~hz4DTA=H+*UM+VF3~cSd9+MwwAz zFjERO4vl7~?qOc%#cW*;rw$G*%gB8?Q62F+OBGXgp&)YvN2PrlF=`rYuvA zDbJK|8eu9nm6$Fwm6;}*CYz?1rkVn#YfQJB_L~lxj+@>yoi=@J`qXsRbk6jX=@-*) z$?oJ?$)03yvM;$Mc|me}@}lI9a9*|i}hCPHtSC7J=XiI zd#(Gek6RB}pSC`0ecpQ1dcu0rddm8N^&{&U>nAp~&1f5FOScWS4Yg(4a&5zH1-3CZ zw{4cqWAoa4wutQ-8`zfEmf2R=ZnWKD>$G*-w%T^t?zP=#+iQE+cEEPjcFOjvow29d zi|nQL%j^^Em)ob=Ywfe_9=q4>v$xn6*xT)k>>c)H_7(P(_Eq*Z_6_z;_S@`t*tgoZ z*>~6v+F!Q6W`Dze%zoT{!v3-S8~bk#t%Gyu9Y#m8!{V?x9FBgDY)7tRxTC-^(oy8N z#Br%(ti$P;=%{f79V;BSJN7#M>G;5LE~PMKY)WZLMM_;tLrPP$b3=rPpUimiQ~IA0 F{2#0)S{?uZ literal 0 HcmV?d00001 diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/libtgvoip.xcscheme b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/libtgvoip.xcscheme new file mode 100644 index 000000000..a24e940e3 --- /dev/null +++ 
b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/libtgvoip.xcscheme @@ -0,0 +1,80 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/xcschememanagement.plist b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/xcschememanagement.plist new file mode 100644 index 000000000..253fba52b --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/grishka.xcuserdatad/xcschemes/xcschememanagement.plist @@ -0,0 +1,22 @@ + + + + + SchemeUserState + + libtgvoip.xcscheme + + orderHint + 0 + + + SuppressBuildableAutocreation + + 69F842351E67540700C110F7 + + primary + + + + + diff --git a/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist new file mode 100644 index 000000000..3572d3d9c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/libtgvoip_osx.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist @@ -0,0 +1,14 @@ + + + + + SuppressBuildableAutocreation + + 69F842351E67540700C110F7 + + primary + + + + + diff --git a/Telegram/ThirdParty/libtgvoip/logging.cpp b/Telegram/ThirdParty/libtgvoip/logging.cpp new file mode 100644 index 000000000..bc90dcb33 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/logging.cpp @@ -0,0 +1,99 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + + +#include +#include +#include + +#include "VoIPController.h" + +#ifdef __ANDROID__ +#include +#elif defined(__linux__) +#include +#endif + +#ifdef __APPLE__ +#include +#include "os/darwin/DarwinSpecific.h" +#endif + +FILE* tgvoipLogFile=NULL; + +void tgvoip_log_file_printf(char level, const char* msg, ...){ + if(tgvoipLogFile){ + va_list argptr; + va_start(argptr, msg); + time_t t = time(0); + struct tm *now = localtime(&t); + fprintf(tgvoipLogFile, "%02d-%02d %02d:%02d:%02d %c: ", now->tm_mon + 1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec, level); + vfprintf(tgvoipLogFile, msg, argptr); + fprintf(tgvoipLogFile, "\n"); + fflush(tgvoipLogFile); + } +} + +void tgvoip_log_file_write_header(){ + if(tgvoipLogFile){ + time_t t = time(0); + struct tm *now = localtime(&t); +#if defined(_WIN32) + #if WINAPI_PARTITION_DESKTOP + char systemVersion[64]; + OSVERSIONINFOA vInfo; + vInfo.dwOSVersionInfoSize=sizeof(vInfo); + GetVersionExA(&vInfo); + snprintf(systemVersion, sizeof(systemVersion), "Windows %d.%d.%d %s", vInfo.dwMajorVersion, vInfo.dwMinorVersion, vInfo.dwBuildNumber, vInfo.szCSDVersion); +#else + char* systemVersion="Windows RT"; +#endif +#elif defined(__linux__) +#ifdef __ANDROID__ + char systemVersion[128]; + char sysRel[PROP_VALUE_MAX]; + char deviceVendor[PROP_VALUE_MAX]; + char deviceModel[PROP_VALUE_MAX]; + __system_property_get("ro.build.version.release", sysRel); + __system_property_get("ro.product.manufacturer", deviceVendor); + __system_property_get("ro.product.model", deviceModel); + snprintf(systemVersion, sizeof(systemVersion), "Android %s (%s %s)", sysRel, deviceVendor, deviceModel); +#else + struct utsname sysname; + uname(&sysname); + char systemVersion[128]; + snprintf(systemVersion, sizeof(systemVersion), "%s %s (%s)", sysname.sysname, sysname.release, sysname.version); +#endif +#elif defined(__APPLE__) + char osxVer[128]; + tgvoip::DarwinSpecific::GetSystemName(osxVer, sizeof(osxVer)); + char systemVersion[128]; +#if 
TARGET_OS_OSX + snprintf(systemVersion, sizeof(systemVersion), "OS X %s", osxVer); +#elif TARGET_OS_IPHONE + snprintf(systemVersion, sizeof(systemVersion), "iOS %s", osxVer); +#else + snprintf(systemVersion, sizeof(systemVersion), "Unknown Darwin %s", osxVer); +#endif +#else + const char* systemVersion="Unknown OS"; +#endif + +#if defined(__aarch64__) + const char* cpuArch="ARM64"; +#elif defined(__arm__) || defined(_M_ARM) + const char* cpuArch="ARM"; +#elif defined(_M_X64) || defined(__x86_64__) + const char* cpuArch="x86_64"; +#elif defined(_M_IX86) || defined(__i386__) + const char* cpuArch="x86"; +#else + const char* cpuArch="Unknown CPU"; +#endif + + fprintf(tgvoipLogFile, "---------------\nlibtgvoip v" LIBTGVOIP_VERSION " on %s %s\nLog started on %d/%02d/%d at %d:%02d:%02d\n---------------\n", systemVersion, cpuArch, now->tm_mday, now->tm_mon+1, now->tm_year+1900, now->tm_hour, now->tm_min, now->tm_sec); + } +} diff --git a/Telegram/ThirdParty/libtgvoip/logging.h b/Telegram/ThirdParty/libtgvoip/logging.h new file mode 100644 index 000000000..cbe811bb5 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/logging.h @@ -0,0 +1,97 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef __LOGGING_H +#define __LOGGING_H +#define LSTR_INT(x) LSTR_DO_INT(x) +#define LSTR_DO_INT(x) #x + +#ifdef __APPLE__ +#include +#endif + +void tgvoip_log_file_printf(char level, const char* msg, ...); +void tgvoip_log_file_write_header(); + +#if defined(__ANDROID__) + +#include + +//#define _LOG_WRAP(...) __BASE_FILE__":"LSTR_INT(__LINE__)": "__VA_ARGS__ +#define _LOG_WRAP(...) __VA_ARGS__ +#define TAG "tg-voip-native" +#define LOGV(...) {__android_log_print(ANDROID_LOG_VERBOSE, TAG, _LOG_WRAP(__VA_ARGS__)); tgvoip_log_file_printf('V', __VA_ARGS__);} +#define LOGD(...) 
{__android_log_print(ANDROID_LOG_DEBUG, TAG, _LOG_WRAP(__VA_ARGS__)); tgvoip_log_file_printf('D', __VA_ARGS__);} +#define LOGI(...) {__android_log_print(ANDROID_LOG_INFO, TAG, _LOG_WRAP(__VA_ARGS__)); tgvoip_log_file_printf('I', __VA_ARGS__);} +#define LOGW(...) {__android_log_print(ANDROID_LOG_WARN, TAG, _LOG_WRAP(__VA_ARGS__)); tgvoip_log_file_printf('W', __VA_ARGS__);} +#define LOGE(...) {__android_log_print(ANDROID_LOG_ERROR, TAG, _LOG_WRAP(__VA_ARGS__)); tgvoip_log_file_printf('E', __VA_ARGS__);} + +#elif defined(__APPLE__) && TARGET_OS_IPHONE && defined(TGVOIP_HAVE_TGLOG) + +#include "os/darwin/TGLogWrapper.h" + +#define LOGV(msg, ...) {__tgvoip_call_tglog("V/tgvoip: " msg, ##__VA_ARGS__); tgvoip_log_file_printf('V', msg, ##__VA_ARGS__);} +#define LOGD(msg, ...) {__tgvoip_call_tglog("D/tgvoip: " msg, ##__VA_ARGS__); tgvoip_log_file_printf('D', msg, ##__VA_ARGS__);} +#define LOGI(msg, ...) {__tgvoip_call_tglog("I/tgvoip: " msg, ##__VA_ARGS__); tgvoip_log_file_printf('I', msg, ##__VA_ARGS__);} +#define LOGW(msg, ...) {__tgvoip_call_tglog("W/tgvoip: " msg, ##__VA_ARGS__); tgvoip_log_file_printf('W', msg, ##__VA_ARGS__);} +#define LOGE(msg, ...) {__tgvoip_call_tglog("E/tgvoip: " msg, ##__VA_ARGS__); tgvoip_log_file_printf('E', msg, ##__VA_ARGS__);} + +#elif defined(_WIN32) && defined(_DEBUG) + +#include +#include + +#if !defined(snprintf) && defined(_WIN32) && defined(__cplusplus_winrt) +#define snprintf _snprintf +#endif + +#define _TGVOIP_W32_LOG_PRINT(verb, msg, ...){ char __log_buf[1024]; snprintf(__log_buf, 1024, "%c/tgvoip: " msg "\n", verb, ##__VA_ARGS__); OutputDebugStringA(__log_buf); tgvoip_log_file_printf((char)verb, msg, __VA_ARGS__);} + +#define LOGV(msg, ...) _TGVOIP_W32_LOG_PRINT('V', msg, ##__VA_ARGS__) +#define LOGD(msg, ...) _TGVOIP_W32_LOG_PRINT('D', msg, ##__VA_ARGS__) +#define LOGI(msg, ...) _TGVOIP_W32_LOG_PRINT('I', msg, ##__VA_ARGS__) +#define LOGW(msg, ...) _TGVOIP_W32_LOG_PRINT('W', msg, ##__VA_ARGS__) +#define LOGE(msg, ...) 
_TGVOIP_W32_LOG_PRINT('E', msg, ##__VA_ARGS__) + +#else + +#include + +#define _TGVOIP_LOG_PRINT(verb, msg, ...) {printf("%c/tgvoip: " msg "\n", verb, ##__VA_ARGS__); tgvoip_log_file_printf(verb, msg, ##__VA_ARGS__);} + +#define LOGV(msg, ...) _TGVOIP_LOG_PRINT('V', msg, ##__VA_ARGS__) +#define LOGD(msg, ...) _TGVOIP_LOG_PRINT('D', msg, ##__VA_ARGS__) +#define LOGI(msg, ...) _TGVOIP_LOG_PRINT('I', msg, ##__VA_ARGS__) +#define LOGW(msg, ...) _TGVOIP_LOG_PRINT('W', msg, ##__VA_ARGS__) +#define LOGE(msg, ...) _TGVOIP_LOG_PRINT('E', msg, ##__VA_ARGS__) + +#endif + + +#ifdef TGVOIP_LOG_VERBOSITY +#if TGVOIP_LOG_VERBOSITY<5 +#undef LOGV +#define LOGV(msg, ...) +#endif +#if TGVOIP_LOG_VERBOSITY<4 +#undef LOGD +#define LOGD(msg, ...) +#endif +#if TGVOIP_LOG_VERBOSITY<3 +#undef LOGI +#define LOGI(msg, ...) +#endif +#if TGVOIP_LOG_VERBOSITY<2 +#undef LOGW +#define LOGW(msg, ...) +#endif +#if TGVOIP_LOG_VERBOSITY<1 +#undef LOGE +#define LOGE(msg, ...) +#endif +#endif + +#endif //__LOGGING_H diff --git a/Telegram/ThirdParty/libtgvoip/os/android/AudioInputAndroid.cpp b/Telegram/ThirdParty/libtgvoip/os/android/AudioInputAndroid.cpp new file mode 100644 index 000000000..b9499512e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/AudioInputAndroid.cpp @@ -0,0 +1,123 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "AudioInputAndroid.h" +#include +#include "../../logging.h" + +extern JavaVM* sharedJVM; + +using namespace tgvoip; +using namespace tgvoip::audio; + +jmethodID AudioInputAndroid::initMethod=NULL; +jmethodID AudioInputAndroid::releaseMethod=NULL; +jmethodID AudioInputAndroid::startMethod=NULL; +jmethodID AudioInputAndroid::stopMethod=NULL; +jclass AudioInputAndroid::jniClass=NULL; + +AudioInputAndroid::AudioInputAndroid(){ + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + jmethodID ctor=env->GetMethodID(jniClass, "", "(J)V"); + jobject obj=env->NewObject(jniClass, ctor, (jlong)(intptr_t)this); + javaObject=env->NewGlobalRef(obj); + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } + running=false; + init_mutex(mutex); +} + +AudioInputAndroid::~AudioInputAndroid(){ + { + MutexGuard guard(mutex); + JNIEnv *env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void **) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + env->CallVoidMethod(javaObject, releaseMethod); + env->DeleteGlobalRef(javaObject); + javaObject=NULL; + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } + } + free_mutex(mutex); +} + +void AudioInputAndroid::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + MutexGuard guard(mutex); + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + env->CallVoidMethod(javaObject, initMethod, sampleRate, bitsPerSample, channels, 960*2); + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } +} + +void AudioInputAndroid::Start(){ + MutexGuard guard(mutex); + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + 
sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + failed=!env->CallBooleanMethod(javaObject, startMethod); + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } + running=true; +} + +void AudioInputAndroid::Stop(){ + MutexGuard guard(mutex); + running=false; + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + env->CallVoidMethod(javaObject, stopMethod); + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } +} + +void AudioInputAndroid::HandleCallback(JNIEnv* env, jobject buffer){ + if(!running) + return; + unsigned char* buf=(unsigned char*) env->GetDirectBufferAddress(buffer); + size_t len=(size_t) env->GetDirectBufferCapacity(buffer); + InvokeCallback(buf, len); +} \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/android/AudioInputAndroid.h b/Telegram/ThirdParty/libtgvoip/os/android/AudioInputAndroid.h new file mode 100644 index 000000000..6a042d6fd --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/AudioInputAndroid.h @@ -0,0 +1,38 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOINPUTANDROID_H +#define LIBTGVOIP_AUDIOINPUTANDROID_H + +#include +#include "../../audio/AudioInput.h" +#include "../../threading.h" + +namespace tgvoip{ namespace audio{ +class AudioInputAndroid : public AudioInput{ + +public: + AudioInputAndroid(); + virtual ~AudioInputAndroid(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + void HandleCallback(JNIEnv* env, jobject buffer); + static jmethodID initMethod; + static jmethodID releaseMethod; + static jmethodID startMethod; + static jmethodID stopMethod; + static jclass jniClass; + +private: + jobject javaObject; + bool running; + tgvoip_mutex_t mutex; + +}; +}} + +#endif //LIBTGVOIP_AUDIOINPUTANDROID_H diff --git a/Telegram/ThirdParty/libtgvoip/os/android/AudioInputOpenSLES.cpp b/Telegram/ThirdParty/libtgvoip/os/android/AudioInputOpenSLES.cpp new file mode 100644 index 000000000..bfba06472 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/AudioInputOpenSLES.cpp @@ -0,0 +1,137 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include +#include +#include + +#include "AudioInputOpenSLES.h" +#include "../../logging.h" +#include "OpenSLEngineWrapper.h" + +#define CHECK_SL_ERROR(res, msg) if(res!=SL_RESULT_SUCCESS){ LOGE(msg); return; } +#define BUFFER_SIZE 960 // 20 ms + +using namespace tgvoip; +using namespace tgvoip::audio; + +int AudioInputOpenSLES::nativeBufferSize; + +AudioInputOpenSLES::AudioInputOpenSLES(){ + slEngine=OpenSLEngineWrapper::CreateEngine(); + + LOGI("Native buffer size is %u samples", nativeBufferSize); + if(nativeBufferSizeBUFFER_SIZE && nativeBufferSize%BUFFER_SIZE!=0){ + LOGE("native buffer size is not multiple of 20ms!!"); + nativeBufferSize+=nativeBufferSize%BUFFER_SIZE; + } + if(nativeBufferSize==BUFFER_SIZE) + nativeBufferSize*=2; + LOGI("Adjusted native buffer size is %u", nativeBufferSize); + + buffer=(int16_t*)calloc(BUFFER_SIZE, sizeof(int16_t)); + nativeBuffer=(int16_t*)calloc((size_t) nativeBufferSize, sizeof(int16_t)); + slRecorderObj=NULL; +} + +AudioInputOpenSLES::~AudioInputOpenSLES(){ + //Stop(); + (*slBufferQueue)->Clear(slBufferQueue); + (*slRecorderObj)->Destroy(slRecorderObj); + slRecorderObj=NULL; + slRecorder=NULL; + slBufferQueue=NULL; + slEngine=NULL; + OpenSLEngineWrapper::DestroyEngine(); + free(buffer); + buffer=NULL; + free(nativeBuffer); + nativeBuffer=NULL; +} + +void AudioInputOpenSLES::BufferCallback(SLAndroidSimpleBufferQueueItf bq, void *context){ + ((AudioInputOpenSLES*)context)->HandleSLCallback(); +} + +void AudioInputOpenSLES::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + assert(slRecorderObj==NULL); + SLDataLocator_IODevice loc_dev = {SL_DATALOCATOR_IODEVICE, + SL_IODEVICE_AUDIOINPUT, + SL_DEFAULTDEVICEID_AUDIOINPUT, NULL}; + SLDataSource audioSrc = {&loc_dev, NULL}; + SLDataLocator_AndroidSimpleBufferQueue loc_bq = + {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 1}; + SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, channels, sampleRate*1000, + SL_PCMSAMPLEFORMAT_FIXED_16, 
SL_PCMSAMPLEFORMAT_FIXED_16, + channels==2 ? (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT) : SL_SPEAKER_FRONT_CENTER, SL_BYTEORDER_LITTLEENDIAN}; + SLDataSink audioSnk = {&loc_bq, &format_pcm}; + + const SLInterfaceID id[2] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION}; + const SLboolean req[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE}; + SLresult result = (*slEngine)->CreateAudioRecorder(slEngine, &slRecorderObj, &audioSrc, &audioSnk, 2, id, req); + CHECK_SL_ERROR(result, "Error creating recorder"); + + SLAndroidConfigurationItf recorderConfig; + result = (*slRecorderObj)->GetInterface(slRecorderObj, SL_IID_ANDROIDCONFIGURATION, &recorderConfig); + SLint32 streamType = SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION; + result = (*recorderConfig)->SetConfiguration(recorderConfig, SL_ANDROID_KEY_RECORDING_PRESET, &streamType, sizeof(SLint32)); + + result=(*slRecorderObj)->Realize(slRecorderObj, SL_BOOLEAN_FALSE); + CHECK_SL_ERROR(result, "Error realizing recorder"); + + result=(*slRecorderObj)->GetInterface(slRecorderObj, SL_IID_RECORD, &slRecorder); + CHECK_SL_ERROR(result, "Error getting recorder interface"); + + result=(*slRecorderObj)->GetInterface(slRecorderObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &slBufferQueue); + CHECK_SL_ERROR(result, "Error getting buffer queue"); + + result=(*slBufferQueue)->RegisterCallback(slBufferQueue, AudioInputOpenSLES::BufferCallback, this); + CHECK_SL_ERROR(result, "Error setting buffer queue callback"); + + (*slBufferQueue)->Enqueue(slBufferQueue, nativeBuffer, nativeBufferSize*sizeof(int16_t)); +} + +void AudioInputOpenSLES::Start(){ + SLresult result=(*slRecorder)->SetRecordState(slRecorder, SL_RECORDSTATE_RECORDING); + CHECK_SL_ERROR(result, "Error starting record"); +} + +void AudioInputOpenSLES::Stop(){ + SLresult result=(*slRecorder)->SetRecordState(slRecorder, SL_RECORDSTATE_STOPPED); + CHECK_SL_ERROR(result, "Error stopping record"); +} + + +void AudioInputOpenSLES::HandleSLCallback(){ + //SLmillisecond pMsec 
= 0; + //(*slRecorder)->GetPosition(slRecorder, &pMsec); + //LOGI("Callback! pos=%lu", pMsec); + //InvokeCallback((unsigned char*)buffer, BUFFER_SIZE*sizeof(int16_t)); + //fwrite(nativeBuffer, 1, nativeBufferSize*2, test); + + if(nativeBufferSize==BUFFER_SIZE){ + //LOGV("nativeBufferSize==BUFFER_SIZE"); + InvokeCallback((unsigned char *) nativeBuffer, BUFFER_SIZE*sizeof(int16_t)); + }else if(nativeBufferSize=BUFFER_SIZE){ + InvokeCallback((unsigned char *) buffer, BUFFER_SIZE*sizeof(int16_t)); + positionInBuffer=0; + } + memcpy(((unsigned char*)buffer)+positionInBuffer*2, nativeBuffer, (size_t)nativeBufferSize*2); + positionInBuffer+=nativeBufferSize; + }else if(nativeBufferSize>BUFFER_SIZE){ + //LOGV("nativeBufferSize>BUFFER_SIZE"); + for(unsigned int offset=0;offsetEnqueue(slBufferQueue, nativeBuffer, nativeBufferSize*sizeof(int16_t)); +} + diff --git a/Telegram/ThirdParty/libtgvoip/os/android/AudioInputOpenSLES.h b/Telegram/ThirdParty/libtgvoip/os/android/AudioInputOpenSLES.h new file mode 100644 index 000000000..655be250f --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/AudioInputOpenSLES.h @@ -0,0 +1,40 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOINPUTOPENSLES_H +#define LIBTGVOIP_AUDIOINPUTOPENSLES_H + +#include +#include + +#include "../../audio/AudioInput.h" + +namespace tgvoip{ namespace audio{ +class AudioInputOpenSLES : public AudioInput{ + +public: + AudioInputOpenSLES(); + virtual ~AudioInputOpenSLES(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + + static int nativeBufferSize; + +private: + static void BufferCallback(SLAndroidSimpleBufferQueueItf bq, void *context); + void HandleSLCallback(); + SLEngineItf slEngine; + SLObjectItf slRecorderObj; + SLRecordItf slRecorder; + SLAndroidSimpleBufferQueueItf slBufferQueue; + int16_t* buffer; + int16_t* nativeBuffer; + size_t positionInBuffer; +}; +}} + +#endif //LIBTGVOIP_AUDIOINPUTOPENSLES_H diff --git a/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputAndroid.cpp b/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputAndroid.cpp new file mode 100644 index 000000000..f5510a989 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputAndroid.cpp @@ -0,0 +1,125 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "AudioOutputAndroid.h" +#include +#include "../../logging.h" + +extern JavaVM* sharedJVM; + +using namespace tgvoip; +using namespace tgvoip::audio; + +jmethodID AudioOutputAndroid::initMethod=NULL; +jmethodID AudioOutputAndroid::releaseMethod=NULL; +jmethodID AudioOutputAndroid::startMethod=NULL; +jmethodID AudioOutputAndroid::stopMethod=NULL; +jclass AudioOutputAndroid::jniClass=NULL; + +AudioOutputAndroid::AudioOutputAndroid(){ + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + jmethodID ctor=env->GetMethodID(jniClass, "", "(J)V"); + jobject obj=env->NewObject(jniClass, ctor, (jlong)(intptr_t)this); + javaObject=env->NewGlobalRef(obj); + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } + running=false; +} + +AudioOutputAndroid::~AudioOutputAndroid(){ + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + env->CallVoidMethod(javaObject, releaseMethod); + env->DeleteGlobalRef(javaObject); + javaObject=NULL; + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } +} + +void AudioOutputAndroid::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + env->CallVoidMethod(javaObject, initMethod, sampleRate, bitsPerSample, channels, 960*2); + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } +} + +void AudioOutputAndroid::Start(){ + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + env->CallVoidMethod(javaObject, startMethod); + + if(didAttach){ + 
sharedJVM->DetachCurrentThread(); + } + running=true; +} + +void AudioOutputAndroid::Stop(){ + running=false; + JNIEnv* env=NULL; + bool didAttach=false; + sharedJVM->GetEnv((void**) &env, JNI_VERSION_1_6); + if(!env){ + sharedJVM->AttachCurrentThread(&env, NULL); + didAttach=true; + } + + env->CallVoidMethod(javaObject, stopMethod); + + if(didAttach){ + sharedJVM->DetachCurrentThread(); + } +} + +void AudioOutputAndroid::HandleCallback(JNIEnv* env, jbyteArray buffer){ + if(!running) + return; + unsigned char* buf=(unsigned char*) env->GetByteArrayElements(buffer, NULL); + size_t len=(size_t) env->GetArrayLength(buffer); + InvokeCallback(buf, len); + env->ReleaseByteArrayElements(buffer, (jbyte *) buf, 0); +} + + +bool AudioOutputAndroid::IsPlaying(){ + return false; +} + +float AudioOutputAndroid::GetLevel(){ + return 0; +} diff --git a/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputAndroid.h b/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputAndroid.h new file mode 100644 index 000000000..6950f8ca0 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputAndroid.h @@ -0,0 +1,39 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOOUTPUTANDROID_H +#define LIBTGVOIP_AUDIOOUTPUTANDROID_H + +#include +#include "../../audio/AudioOutput.h" + +namespace tgvoip{ namespace audio{ +class AudioOutputAndroid : public AudioOutput{ + +public: + + AudioOutputAndroid(); + virtual ~AudioOutputAndroid(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual bool IsPlaying() override; + virtual float GetLevel() override; + void HandleCallback(JNIEnv* env, jbyteArray buffer); + static jmethodID initMethod; + static jmethodID releaseMethod; + static jmethodID startMethod; + static jmethodID stopMethod; + static jclass jniClass; + +private: + jobject javaObject; + bool running; + +}; +}} + +#endif //LIBTGVOIP_AUDIOOUTPUTANDROID_H diff --git a/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputOpenSLES.cpp b/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputOpenSLES.cpp new file mode 100644 index 000000000..b290ffb96 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputOpenSLES.cpp @@ -0,0 +1,171 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include +#include +#include +#include "AudioOutputOpenSLES.h" +#include "../../logging.h" +#include "../../VoIPController.h" +#include "OpenSLEngineWrapper.h" +#include "AudioInputAndroid.h" + +#define CHECK_SL_ERROR(res, msg) if(res!=SL_RESULT_SUCCESS){ LOGE(msg); return; } +#define BUFFER_SIZE 960 // 20 ms + +using namespace tgvoip; +using namespace tgvoip::audio; + +int AudioOutputOpenSLES::nativeBufferSize; + +AudioOutputOpenSLES::AudioOutputOpenSLES(){ + SLresult result; + slEngine=OpenSLEngineWrapper::CreateEngine(); + + const SLInterfaceID pOutputMixIDs[] = {}; + const SLboolean pOutputMixRequired[] = {}; + result = (*slEngine)->CreateOutputMix(slEngine, &slOutputMixObj, 0, pOutputMixIDs, pOutputMixRequired); + CHECK_SL_ERROR(result, "Error creating output mix"); + + result = (*slOutputMixObj)->Realize(slOutputMixObj, SL_BOOLEAN_FALSE); + CHECK_SL_ERROR(result, "Error realizing output mix"); + + LOGI("Native buffer size is %u samples", nativeBufferSize); + /*if(nativeBufferSizeBUFFER_SIZE && nativeBufferSize%BUFFER_SIZE!=0){ + LOGE("native buffer size is not multiple of 20ms!!"); + nativeBufferSize+=nativeBufferSize%BUFFER_SIZE; + } + LOGI("Adjusted native buffer size is %u", nativeBufferSize);*/ + + buffer=(int16_t*)calloc(BUFFER_SIZE, sizeof(int16_t)); + nativeBuffer=(int16_t*)calloc((size_t) nativeBufferSize, sizeof(int16_t)); + slPlayerObj=NULL; + remainingDataSize=0; +} + +AudioOutputOpenSLES::~AudioOutputOpenSLES(){ + if(!stopped) + Stop(); + (*slBufferQueue)->Clear(slBufferQueue); + LOGV("destroy slPlayerObj"); + (*slPlayerObj)->Destroy(slPlayerObj); + LOGV("destroy slOutputMixObj"); + (*slOutputMixObj)->Destroy(slOutputMixObj); + OpenSLEngineWrapper::DestroyEngine(); + free(buffer); + free(nativeBuffer); +} + + +void AudioOutputOpenSLES::SetNativeBufferSize(int size){ + AudioOutputOpenSLES::nativeBufferSize=size; +} + +void AudioOutputOpenSLES::BufferCallback(SLAndroidSimpleBufferQueueItf bq, void *context){ + 
((AudioOutputOpenSLES*)context)->HandleSLCallback(); +} + +void AudioOutputOpenSLES::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + assert(slPlayerObj==NULL); + SLDataLocator_AndroidSimpleBufferQueue locatorBufferQueue = + {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 1}; + SLDataFormat_PCM formatPCM = {SL_DATAFORMAT_PCM, channels, sampleRate*1000, + SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16, + channels==2 ? (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT) : SL_SPEAKER_FRONT_CENTER, SL_BYTEORDER_LITTLEENDIAN}; + SLDataSource audioSrc = {&locatorBufferQueue, &formatPCM}; + SLDataLocator_OutputMix locatorOutMix = {SL_DATALOCATOR_OUTPUTMIX, slOutputMixObj}; + SLDataSink audioSnk = {&locatorOutMix, NULL}; + + const SLInterfaceID id[2] = {SL_IID_BUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION}; + const SLboolean req[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE}; + SLresult result = (*slEngine)->CreateAudioPlayer(slEngine, &slPlayerObj, &audioSrc, &audioSnk, 2, id, req); + CHECK_SL_ERROR(result, "Error creating player"); + + + SLAndroidConfigurationItf playerConfig; + result = (*slPlayerObj)->GetInterface(slPlayerObj, SL_IID_ANDROIDCONFIGURATION, &playerConfig); + SLint32 streamType = SL_ANDROID_STREAM_VOICE; + result = (*playerConfig)->SetConfiguration(playerConfig, SL_ANDROID_KEY_STREAM_TYPE, &streamType, sizeof(SLint32)); + + + result=(*slPlayerObj)->Realize(slPlayerObj, SL_BOOLEAN_FALSE); + CHECK_SL_ERROR(result, "Error realizing player"); + + result=(*slPlayerObj)->GetInterface(slPlayerObj, SL_IID_PLAY, &slPlayer); + CHECK_SL_ERROR(result, "Error getting player interface"); + + result=(*slPlayerObj)->GetInterface(slPlayerObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &slBufferQueue); + CHECK_SL_ERROR(result, "Error getting buffer queue"); + + result=(*slBufferQueue)->RegisterCallback(slBufferQueue, AudioOutputOpenSLES::BufferCallback, this); + CHECK_SL_ERROR(result, "Error setting buffer queue callback"); + + 
(*slBufferQueue)->Enqueue(slBufferQueue, nativeBuffer, nativeBufferSize*sizeof(int16_t)); +} + +bool AudioOutputOpenSLES::IsPhone(){ + return false; +} + +void AudioOutputOpenSLES::EnableLoudspeaker(bool enabled){ + +} + +void AudioOutputOpenSLES::Start(){ + stopped=false; + SLresult result=(*slPlayer)->SetPlayState(slPlayer, SL_PLAYSTATE_PLAYING); + CHECK_SL_ERROR(result, "Error starting player"); +} + +void AudioOutputOpenSLES::Stop(){ + stopped=true; + LOGV("Stopping OpenSL output"); + SLresult result=(*slPlayer)->SetPlayState(slPlayer, SL_PLAYSTATE_PAUSED); + CHECK_SL_ERROR(result, "Error starting player"); +} + +void AudioOutputOpenSLES::HandleSLCallback(){ + /*if(stopped){ + //LOGV("left HandleSLCallback early"); + return; + }*/ + //LOGV("before InvokeCallback"); + if(!stopped){ + while(remainingDataSize0) + memmove(remainingData, remainingData+nativeBufferSize*2, remainingDataSize); + //InvokeCallback((unsigned char *) nativeBuffer, nativeBufferSize*sizeof(int16_t)); + }else{ + memset(nativeBuffer, 0, nativeBufferSize*2); + } + + (*slBufferQueue)->Enqueue(slBufferQueue, nativeBuffer, nativeBufferSize*sizeof(int16_t)); + //LOGV("left HandleSLCallback"); +} + + +bool AudioOutputOpenSLES::IsPlaying(){ + if(slPlayer){ + uint32_t state; + (*slPlayer)->GetPlayState(slPlayer, &state); + return state==SL_PLAYSTATE_PLAYING; + } + return false; +} + + +float AudioOutputOpenSLES::GetLevel(){ + return 0; // we don't use this anyway +} \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputOpenSLES.h b/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputOpenSLES.h new file mode 100644 index 000000000..67da81237 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/AudioOutputOpenSLES.h @@ -0,0 +1,47 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOOUTPUTOPENSLES_H +#define LIBTGVOIP_AUDIOOUTPUTOPENSLES_H + +#include +#include + +#include "../../audio/AudioOutput.h" + +namespace tgvoip{ namespace audio{ +class AudioOutputOpenSLES : public AudioOutput{ +public: + AudioOutputOpenSLES(); + virtual ~AudioOutputOpenSLES(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual bool IsPhone(); + virtual void EnableLoudspeaker(bool enabled); + virtual void Start(); + virtual void Stop(); + virtual bool IsPlaying(); + virtual float GetLevel(); + + static void SetNativeBufferSize(int size); + static int nativeBufferSize; + +private: + static void BufferCallback(SLAndroidSimpleBufferQueueItf bq, void *context); + void HandleSLCallback(); + SLEngineItf slEngine; + SLObjectItf slPlayerObj; + SLObjectItf slOutputMixObj; + SLPlayItf slPlayer; + SLAndroidSimpleBufferQueueItf slBufferQueue; + int16_t* buffer; + int16_t* nativeBuffer; + bool stopped; + unsigned char remainingData[10240]; + size_t remainingDataSize; +}; +}} + +#endif //LIBTGVOIP_AUDIOOUTPUTANDROID_H diff --git a/Telegram/ThirdParty/libtgvoip/os/android/OpenSLEngineWrapper.cpp b/Telegram/ThirdParty/libtgvoip/os/android/OpenSLEngineWrapper.cpp new file mode 100644 index 000000000..086970726 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/OpenSLEngineWrapper.cpp @@ -0,0 +1,48 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include +#include "OpenSLEngineWrapper.h" +#include "../../logging.h" + +#define CHECK_SL_ERROR(res, msg) if(res!=SL_RESULT_SUCCESS){ LOGE(msg); return NULL; } + +using namespace tgvoip; +using namespace tgvoip::audio; + + +SLObjectItf OpenSLEngineWrapper::sharedEngineObj=NULL; +SLEngineItf OpenSLEngineWrapper::sharedEngine=NULL; +int OpenSLEngineWrapper::count=0; + +void OpenSLEngineWrapper::DestroyEngine(){ + count--; + LOGI("release: engine instance count %d", count); + if(count==0){ + (*sharedEngineObj)->Destroy(sharedEngineObj); + sharedEngineObj=NULL; + sharedEngine=NULL; + } + LOGI("after release"); +} + +SLEngineItf OpenSLEngineWrapper::CreateEngine(){ + count++; + if(sharedEngine) + return sharedEngine; + const SLInterfaceID pIDs[1] = {SL_IID_ENGINE}; + const SLboolean pIDsRequired[1] = {SL_BOOLEAN_TRUE}; + SLresult result = slCreateEngine(&sharedEngineObj, 0, NULL, 1, pIDs, pIDsRequired); + CHECK_SL_ERROR(result, "Error creating engine"); + + result=(*sharedEngineObj)->Realize(sharedEngineObj, SL_BOOLEAN_FALSE); + CHECK_SL_ERROR(result, "Error realizing engine"); + + result = (*sharedEngineObj)->GetInterface(sharedEngineObj, SL_IID_ENGINE, &sharedEngine); + CHECK_SL_ERROR(result, "Error getting engine interface"); + return sharedEngine; +} + diff --git a/Telegram/ThirdParty/libtgvoip/os/android/OpenSLEngineWrapper.h b/Telegram/ThirdParty/libtgvoip/os/android/OpenSLEngineWrapper.h new file mode 100644 index 000000000..ff0958602 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/android/OpenSLEngineWrapper.h @@ -0,0 +1,26 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_OPENSLENGINEWRAPPER_H +#define LIBTGVOIP_OPENSLENGINEWRAPPER_H + +#include +#include + +namespace tgvoip{ namespace audio{ +class OpenSLEngineWrapper{ +public: + static SLEngineItf CreateEngine(); + static void DestroyEngine(); + +private: + static SLObjectItf sharedEngineObj; + static SLEngineItf sharedEngine; + static int count; +}; +}} + +#endif //LIBTGVOIP_OPENSLENGINEWRAPPER_H diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnit.cpp b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnit.cpp new file mode 100644 index 000000000..decd08605 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnit.cpp @@ -0,0 +1,82 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#include +#include +#include +#include "AudioUnitIO.h" +#include "AudioInputAudioUnit.h" +#include "../../logging.h" + +#define BUFFER_SIZE 960 + +using namespace tgvoip; +using namespace tgvoip::audio; + +AudioInputAudioUnit::AudioInputAudioUnit(std::string deviceID){ + remainingDataSize=0; + isRecording=false; + this->io=AudioUnitIO::Get(); +#if TARGET_OS_OSX + io->SetCurrentDevice(true, deviceID); +#endif + io->AttachInput(this); + failed=io->IsFailed(); +} + +AudioInputAudioUnit::~AudioInputAudioUnit(){ + io->DetachInput(); + AudioUnitIO::Release(); +} + +void AudioInputAudioUnit::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + io->Configure(sampleRate, bitsPerSample, channels); +} + +void AudioInputAudioUnit::Start(){ + isRecording=true; + io->EnableInput(true); + failed=io->IsFailed(); +} + +void AudioInputAudioUnit::Stop(){ + isRecording=false; + io->EnableInput(false); +} + +void AudioInputAudioUnit::HandleBufferCallback(AudioBufferList *ioData){ + int i; + int j; + for(i=0;imNumberBuffers;i++){ + AudioBuffer 
buf=ioData->mBuffers[i]; +#if TARGET_OS_OSX + assert(remainingDataSize+buf.mDataByteSize/2<10240); + float* src=reinterpret_cast(buf.mData); + int16_t* dst=reinterpret_cast(remainingData+remainingDataSize); + for(j=0;j=BUFFER_SIZE*2){ + InvokeCallback((unsigned char*)remainingData, BUFFER_SIZE*2); + remainingDataSize-=BUFFER_SIZE*2; + if(remainingDataSize>0){ + memmove(remainingData, remainingData+(BUFFER_SIZE*2), remainingDataSize); + } + } + } +} + +#if TARGET_OS_OSX +void AudioInputAudioUnit::SetCurrentDevice(std::string deviceID){ + io->SetCurrentDevice(true, deviceID); +} +#endif diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnit.h b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnit.h new file mode 100644 index 000000000..e66a80b24 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnit.h @@ -0,0 +1,37 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOINPUTAUDIOUNIT_H +#define LIBTGVOIP_AUDIOINPUTAUDIOUNIT_H + +#include +#include "../../audio/AudioInput.h" + +namespace tgvoip{ namespace audio{ +class AudioUnitIO; + +class AudioInputAudioUnit : public AudioInput{ + +public: + AudioInputAudioUnit(std::string deviceID); + virtual ~AudioInputAudioUnit(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + void HandleBufferCallback(AudioBufferList* ioData); +#if TARGET_OS_OSX + virtual void SetCurrentDevice(std::string deviceID); +#endif + +private: + unsigned char remainingData[10240]; + size_t remainingDataSize; + bool isRecording; + AudioUnitIO* io; +}; +}} + +#endif //LIBTGVOIP_AUDIOINPUTAUDIOUNIT_H diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnitOSX.cpp b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnitOSX.cpp new file mode 100644 index 000000000..149dc9056 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnitOSX.cpp @@ -0,0 +1,309 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include +#include +#include "AudioInputAudioUnitOSX.h" +#include "../../logging.h" +#include "../../audio/Resampler.h" +#include "../../VoIPController.h" + +#define BUFFER_SIZE 960 +#define CHECK_AU_ERROR(res, msg) if(res!=noErr){ LOGE("input: " msg": OSStatus=%d", (int)res); failed=true; return; } + +#define kOutputBus 0 +#define kInputBus 1 + +using namespace tgvoip; +using namespace tgvoip::audio; + +AudioInputAudioUnitLegacy::AudioInputAudioUnitLegacy(std::string deviceID) : AudioInput(deviceID){ + remainingDataSize=0; + isRecording=false; + + OSStatus status; + AudioComponentDescription inputDesc={ + .componentType = kAudioUnitType_Output, .componentSubType = /*kAudioUnitSubType_HALOutput*/kAudioUnitSubType_VoiceProcessingIO, .componentFlags = 0, .componentFlagsMask = 0, + .componentManufacturer = kAudioUnitManufacturer_Apple + }; + AudioComponent component=AudioComponentFindNext(NULL, &inputDesc); + status=AudioComponentInstanceNew(component, &unit); + CHECK_AU_ERROR(status, "Error creating AudioUnit"); + + UInt32 flag=0; + status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag)); + CHECK_AU_ERROR(status, "Error enabling AudioUnit output"); + flag=1; + status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag)); + CHECK_AU_ERROR(status, "Error enabling AudioUnit input"); + + SetCurrentDevice(deviceID); + + CFRunLoopRef theRunLoop = NULL; + AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyRunLoop, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; + status = AudioObjectSetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop); + + propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + propertyAddress.mScope = kAudioObjectPropertyScopeGlobal; + propertyAddress.mElement = kAudioObjectPropertyElementMaster; + 
AudioObjectAddPropertyListener(kAudioObjectSystemObject, &propertyAddress, AudioInputAudioUnitLegacy::DefaultDeviceChangedCallback, this); + + AURenderCallbackStruct callbackStruct; + callbackStruct.inputProc = AudioInputAudioUnitLegacy::BufferCallback; + callbackStruct.inputProcRefCon=this; + status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct)); + CHECK_AU_ERROR(status, "Error setting input buffer callback"); + status=AudioUnitInitialize(unit); + CHECK_AU_ERROR(status, "Error initializing unit"); + + inBufferList.mBuffers[0].mData=malloc(10240); + inBufferList.mBuffers[0].mDataByteSize=10240; + inBufferList.mNumberBuffers=1; +} + +AudioInputAudioUnitLegacy::~AudioInputAudioUnitLegacy(){ + AudioObjectPropertyAddress propertyAddress; + propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + propertyAddress.mScope = kAudioObjectPropertyScopeGlobal; + propertyAddress.mElement = kAudioObjectPropertyElementMaster; + AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &propertyAddress, AudioInputAudioUnitLegacy::DefaultDeviceChangedCallback, this); + + AudioUnitUninitialize(unit); + AudioComponentInstanceDispose(unit); + free(inBufferList.mBuffers[0].mData); +} + +void AudioInputAudioUnitLegacy::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ +} + +void AudioInputAudioUnitLegacy::Start(){ + isRecording=true; + OSStatus status=AudioOutputUnitStart(unit); + CHECK_AU_ERROR(status, "Error starting AudioUnit"); +} + +void AudioInputAudioUnitLegacy::Stop(){ + isRecording=false; + OSStatus status=AudioOutputUnitStart(unit); + CHECK_AU_ERROR(status, "Error stopping AudioUnit"); +} + +OSStatus AudioInputAudioUnitLegacy::BufferCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData){ + AudioInputAudioUnitLegacy* 
input=(AudioInputAudioUnitLegacy*) inRefCon; + input->inBufferList.mBuffers[0].mDataByteSize=10240; + OSStatus res=AudioUnitRender(input->unit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &input->inBufferList); + input->HandleBufferCallback(&input->inBufferList); + return noErr; +} + +void AudioInputAudioUnitLegacy::HandleBufferCallback(AudioBufferList *ioData){ + int i; + for(i=0;imNumberBuffers;i++){ + AudioBuffer buf=ioData->mBuffers[i]; + size_t len=buf.mDataByteSize; + if(hardwareSampleRate!=48000){ + len=tgvoip::audio::Resampler::Convert((int16_t*)buf.mData, (int16_t*)(remainingData+remainingDataSize), buf.mDataByteSize/2, (10240-(buf.mDataByteSize+remainingDataSize))/2, 48000, hardwareSampleRate)*2; + }else{ + assert(remainingDataSize+buf.mDataByteSize<10240); + memcpy(remainingData+remainingDataSize, buf.mData, buf.mDataByteSize); + } + remainingDataSize+=len; + while(remainingDataSize>=BUFFER_SIZE*2){ + InvokeCallback((unsigned char*)remainingData, BUFFER_SIZE*2); + remainingDataSize-=BUFFER_SIZE*2; + if(remainingDataSize>0){ + memmove(remainingData, remainingData+(BUFFER_SIZE*2), remainingDataSize); + } + } + } +} + + +void AudioInputAudioUnitLegacy::EnumerateDevices(std::vector& devs){ + AudioObjectPropertyAddress propertyAddress = { + kAudioHardwarePropertyDevices, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster + }; + + UInt32 dataSize = 0; + OSStatus status = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize); + if(kAudioHardwareNoError != status) { + LOGE("AudioObjectGetPropertyDataSize (kAudioHardwarePropertyDevices) failed: %i", status); + return; + } + + UInt32 deviceCount = (UInt32)(dataSize / sizeof(AudioDeviceID)); + + + AudioDeviceID *audioDevices = (AudioDeviceID*)(malloc(dataSize)); + + status = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize, audioDevices); + if(kAudioHardwareNoError != status) { + 
LOGE("AudioObjectGetPropertyData (kAudioHardwarePropertyDevices) failed: %i", status); + free(audioDevices); + audioDevices = NULL; + return; + } + + + // Iterate through all the devices and determine which are input-capable + propertyAddress.mScope = kAudioDevicePropertyScopeInput; + for(UInt32 i = 0; i < deviceCount; ++i) { + // Query device UID + CFStringRef deviceUID = NULL; + dataSize = sizeof(deviceUID); + propertyAddress.mSelector = kAudioDevicePropertyDeviceUID; + status = AudioObjectGetPropertyData(audioDevices[i], &propertyAddress, 0, NULL, &dataSize, &deviceUID); + if(kAudioHardwareNoError != status) { + LOGE("AudioObjectGetPropertyData (kAudioDevicePropertyDeviceUID) failed: %i", status); + continue; + } + + // Query device name + CFStringRef deviceName = NULL; + dataSize = sizeof(deviceName); + propertyAddress.mSelector = kAudioDevicePropertyDeviceNameCFString; + status = AudioObjectGetPropertyData(audioDevices[i], &propertyAddress, 0, NULL, &dataSize, &deviceName); + if(kAudioHardwareNoError != status) { + LOGE("AudioObjectGetPropertyData (kAudioDevicePropertyDeviceNameCFString) failed: %i", status); + continue; + } + + // Determine if the device is an input device (it is an input device if it has input channels) + dataSize = 0; + propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration; + status = AudioObjectGetPropertyDataSize(audioDevices[i], &propertyAddress, 0, NULL, &dataSize); + if(kAudioHardwareNoError != status) { + LOGE("AudioObjectGetPropertyDataSize (kAudioDevicePropertyStreamConfiguration) failed: %i", status); + continue; + } + + AudioBufferList *bufferList = (AudioBufferList*)(malloc(dataSize)); + + status = AudioObjectGetPropertyData(audioDevices[i], &propertyAddress, 0, NULL, &dataSize, bufferList); + if(kAudioHardwareNoError != status || 0 == bufferList->mNumberBuffers) { + if(kAudioHardwareNoError != status) + LOGE("AudioObjectGetPropertyData (kAudioDevicePropertyStreamConfiguration) failed: %i", status); + 
free(bufferList); + bufferList = NULL; + continue; + } + + free(bufferList); + bufferList = NULL; + + AudioInputDevice dev; + char buf[1024]; + CFStringGetCString(deviceName, buf, 1024, kCFStringEncodingUTF8); + dev.displayName=std::string(buf); + CFStringGetCString(deviceUID, buf, 1024, kCFStringEncodingUTF8); + dev.id=std::string(buf); + devs.push_back(dev); + } + + free(audioDevices); + audioDevices = NULL; +} + +void AudioInputAudioUnitLegacy::SetCurrentDevice(std::string deviceID){ + UInt32 size=sizeof(AudioDeviceID); + AudioDeviceID inputDevice=NULL; + OSStatus status; + + if(deviceID=="default"){ + AudioObjectPropertyAddress propertyAddress; + propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + propertyAddress.mScope = kAudioObjectPropertyScopeGlobal; + propertyAddress.mElement = kAudioObjectPropertyElementMaster; + UInt32 propsize = sizeof(AudioDeviceID); + status = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propsize, &inputDevice); + CHECK_AU_ERROR(status, "Error getting default input device"); + }else{ + AudioObjectPropertyAddress propertyAddress = { + kAudioHardwarePropertyDevices, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster + }; + UInt32 dataSize = 0; + status = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize); + CHECK_AU_ERROR(status, "Error getting devices size"); + UInt32 deviceCount = (UInt32)(dataSize / sizeof(AudioDeviceID)); + AudioDeviceID audioDevices[deviceCount]; + status = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize, audioDevices); + CHECK_AU_ERROR(status, "Error getting device list"); + for(UInt32 i = 0; i < deviceCount; ++i) { + // Query device UID + CFStringRef deviceUID = NULL; + dataSize = sizeof(deviceUID); + propertyAddress.mSelector = kAudioDevicePropertyDeviceUID; + status = AudioObjectGetPropertyData(audioDevices[i], &propertyAddress, 0, NULL, &dataSize, 
&deviceUID); + CHECK_AU_ERROR(status, "Error getting device uid"); + char buf[1024]; + CFStringGetCString(deviceUID, buf, 1024, kCFStringEncodingUTF8); + if(deviceID==buf){ + LOGV("Found device for id %s", buf); + inputDevice=audioDevices[i]; + break; + } + } + if(!inputDevice){ + LOGW("Requested device not found, using default"); + SetCurrentDevice("default"); + return; + } + } + + status =AudioUnitSetProperty(unit, + kAudioOutputUnitProperty_CurrentDevice, + kAudioUnitScope_Global, + kInputBus, + &inputDevice, + size); + CHECK_AU_ERROR(status, "Error setting input device"); + + AudioStreamBasicDescription hardwareFormat; + size=sizeof(hardwareFormat); + status=AudioUnitGetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kInputBus, &hardwareFormat, &size); + CHECK_AU_ERROR(status, "Error getting hardware format"); + hardwareSampleRate=hardwareFormat.mSampleRate; + + AudioStreamBasicDescription desiredFormat={ + .mSampleRate=hardwareFormat.mSampleRate, .mFormatID=kAudioFormatLinearPCM, .mFormatFlags=kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian, + .mFramesPerPacket=1, .mChannelsPerFrame=1, .mBitsPerChannel=16, .mBytesPerPacket=2, .mBytesPerFrame=2 + }; + + status=AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &desiredFormat, sizeof(desiredFormat)); + CHECK_AU_ERROR(status, "Error setting format"); + + LOGD("Switched capture device, new sample rate %d", hardwareSampleRate); + + this->currentDevice=deviceID; + + AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyBufferFrameSize, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster + }; + size=4; + UInt32 bufferFrameSize; + status=AudioObjectGetPropertyData(inputDevice, &propertyAddress, 0, NULL, &size, &bufferFrameSize); + if(status==noErr){ + estimatedDelay=bufferFrameSize/48; + LOGD("CoreAudio buffer size for output device is %u frames (%u ms)", bufferFrameSize, 
estimatedDelay); + } +} + +OSStatus AudioInputAudioUnitLegacy::DefaultDeviceChangedCallback(AudioObjectID inObjectID, UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses, void *inClientData){ + LOGV("System default input device changed"); + AudioInputAudioUnitLegacy* self=(AudioInputAudioUnitLegacy*)inClientData; + if(self->currentDevice=="default"){ + self->SetCurrentDevice(self->currentDevice); + } + return noErr; +} diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnitOSX.h b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnitOSX.h new file mode 100644 index 000000000..cd0f3c631 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioInputAudioUnitOSX.h @@ -0,0 +1,40 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef LIBTGVOIP_AUDIOINPUTAUDIOUNIT_OSX_H +#define LIBTGVOIP_AUDIOINPUTAUDIOUNIT_OSX_H + +#include +#import +#import +#include "../../audio/AudioInput.h" + +namespace tgvoip{ namespace audio{ +class AudioInputAudioUnitLegacy : public AudioInput{ + +public: + AudioInputAudioUnitLegacy(std::string deviceID); + virtual ~AudioInputAudioUnitLegacy(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + void HandleBufferCallback(AudioBufferList* ioData); + static void EnumerateDevices(std::vector& devs); + virtual void SetCurrentDevice(std::string deviceID); + +private: + static OSStatus BufferCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData); + static OSStatus DefaultDeviceChangedCallback(AudioObjectID inObjectID, UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses, void *inClientData); + unsigned char 
remainingData[10240]; + size_t remainingDataSize; + bool isRecording; + AudioUnit unit; + AudioBufferList inBufferList; + int hardwareSampleRate; +}; +}} + +#endif //LIBTGVOIP_AUDIOINPUTAUDIOUNIT_OSX_H diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnit.cpp b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnit.cpp new file mode 100644 index 000000000..7114d4dbf --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnit.cpp @@ -0,0 +1,125 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#include +#include +#include +#include "AudioOutputAudioUnit.h" +#include "../../logging.h" +#include "AudioUnitIO.h" + +#define BUFFER_SIZE 960 +const int8_t permutation[33]={0,1,2,3,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9}; + +using namespace tgvoip; +using namespace tgvoip::audio; + +AudioOutputAudioUnit::AudioOutputAudioUnit(std::string deviceID){ + isPlaying=false; + remainingDataSize=0; + level=0.0; + this->io=AudioUnitIO::Get(); +#if TARGET_OS_OSX + io->SetCurrentDevice(false, deviceID); +#endif + io->AttachOutput(this); + failed=io->IsFailed(); +} + +AudioOutputAudioUnit::~AudioOutputAudioUnit(){ + io->DetachOutput(); + AudioUnitIO::Release(); +} + +void AudioOutputAudioUnit::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + io->Configure(sampleRate, bitsPerSample, channels); +} + +bool AudioOutputAudioUnit::IsPhone(){ + return false; +} + +void AudioOutputAudioUnit::EnableLoudspeaker(bool enabled){ + +} + +void AudioOutputAudioUnit::Start(){ + isPlaying=true; + io->EnableOutput(true); + failed=io->IsFailed(); +} + +void AudioOutputAudioUnit::Stop(){ + isPlaying=false; + io->EnableOutput(false); +} + +bool AudioOutputAudioUnit::IsPlaying(){ + return isPlaying; +} + +float AudioOutputAudioUnit::GetLevel(){ + return 
level / 9.0; +} + +void AudioOutputAudioUnit::HandleBufferCallback(AudioBufferList *ioData){ + int i; + unsigned int k; + int16_t absVal=0; + for(i=0;imNumberBuffers;i++){ + AudioBuffer buf=ioData->mBuffers[i]; + if(!isPlaying){ + memset(buf.mData, 0, buf.mDataByteSize); + return; + } + while(remainingDataSize(buf.mData); + int16_t* src=reinterpret_cast(remainingData); + for(k=0;kabsVal) + absVal=absolute; + } + + if (absVal>absMax) + absMax=absVal; + + count++; + if (count>=10) { + count=0; + + short position=absMax/1000; + if (position==0 && absMax>250) { + position=1; + } + level=permutation[position]; + absMax>>=2; + }*/ + } +} + +#if TARGET_OS_OSX +void AudioOutputAudioUnit::SetCurrentDevice(std::string deviceID){ + io->SetCurrentDevice(false, deviceID); +} +#endif diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnit.h b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnit.h new file mode 100644 index 000000000..18561b6a1 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnit.h @@ -0,0 +1,43 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOOUTPUTAUDIOUNIT_H +#define LIBTGVOIP_AUDIOOUTPUTAUDIOUNIT_H + +#include +#include "../../audio/AudioOutput.h" + +namespace tgvoip{ namespace audio{ +class AudioUnitIO; + +class AudioOutputAudioUnit : public AudioOutput{ +public: + AudioOutputAudioUnit(std::string deviceID); + virtual ~AudioOutputAudioUnit(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual bool IsPhone(); + virtual void EnableLoudspeaker(bool enabled); + virtual void Start(); + virtual void Stop(); + virtual bool IsPlaying(); + virtual float GetLevel(); + void HandleBufferCallback(AudioBufferList* ioData); +#if TARGET_OS_OSX + virtual void SetCurrentDevice(std::string deviceID); +#endif + +private: + bool isPlaying; + unsigned char remainingData[10240]; + size_t remainingDataSize; + AudioUnitIO* io; + float level; + int16_t absMax; + int count; +}; +}} + +#endif //LIBTGVOIP_AUDIOOUTPUTAUDIOUNIT_H diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnitOSX.cpp b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnitOSX.cpp new file mode 100644 index 000000000..49c9662d4 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnitOSX.cpp @@ -0,0 +1,368 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include +#include +#include +#include "AudioOutputAudioUnitOSX.h" +#include "../../logging.h" +#include "../../VoIPController.h" + +#define BUFFER_SIZE 960 +#define CHECK_AU_ERROR(res, msg) if(res!=noErr){ LOGE("output: " msg": OSStatus=%d", (int)res); return; } + +#define kOutputBus 0 +#define kInputBus 1 + +using namespace tgvoip; +using namespace tgvoip::audio; + +AudioOutputAudioUnitLegacy::AudioOutputAudioUnitLegacy(std::string deviceID){ + remainingDataSize=0; + isPlaying=false; + sysDevID=NULL; + + OSStatus status; + AudioComponentDescription inputDesc={ + .componentType = kAudioUnitType_Output, .componentSubType = kAudioUnitSubType_HALOutput, .componentFlags = 0, .componentFlagsMask = 0, + .componentManufacturer = kAudioUnitManufacturer_Apple + }; + AudioComponent component=AudioComponentFindNext(NULL, &inputDesc); + status=AudioComponentInstanceNew(component, &unit); + CHECK_AU_ERROR(status, "Error creating AudioUnit"); + + UInt32 flag=1; + status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag)); + CHECK_AU_ERROR(status, "Error enabling AudioUnit output"); + flag=0; + status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag)); + CHECK_AU_ERROR(status, "Error enabling AudioUnit input"); + + char model[128]; + memset(model, 0, sizeof(model)); + size_t msize=sizeof(model); + int mres=sysctlbyname("hw.model", model, &msize, NULL, 0); + if(mres==0){ + LOGV("Mac model: %s", model); + isMacBookPro=(strncmp("MacBookPro", model, 10)==0); + } + + SetCurrentDevice(deviceID); + + CFRunLoopRef theRunLoop = NULL; + AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyRunLoop, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; + status = AudioObjectSetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop); + + propertyAddress.mSelector = 
kAudioHardwarePropertyDefaultOutputDevice; + propertyAddress.mScope = kAudioObjectPropertyScopeGlobal; + propertyAddress.mElement = kAudioObjectPropertyElementMaster; + AudioObjectAddPropertyListener(kAudioObjectSystemObject, &propertyAddress, AudioOutputAudioUnitLegacy::DefaultDeviceChangedCallback, this); + + AudioStreamBasicDescription desiredFormat={ + .mSampleRate=/*hardwareFormat.mSampleRate*/48000, .mFormatID=kAudioFormatLinearPCM, .mFormatFlags=kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian, + .mFramesPerPacket=1, .mChannelsPerFrame=1, .mBitsPerChannel=16, .mBytesPerPacket=2, .mBytesPerFrame=2 + }; + + status=AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &desiredFormat, sizeof(desiredFormat)); + CHECK_AU_ERROR(status, "Error setting format"); + + AURenderCallbackStruct callbackStruct; + callbackStruct.inputProc = AudioOutputAudioUnitLegacy::BufferCallback; + callbackStruct.inputProcRefCon=this; + status = AudioUnitSetProperty(unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &callbackStruct, sizeof(callbackStruct)); + CHECK_AU_ERROR(status, "Error setting input buffer callback"); + status=AudioUnitInitialize(unit); + CHECK_AU_ERROR(status, "Error initializing unit"); +} + +AudioOutputAudioUnitLegacy::~AudioOutputAudioUnitLegacy(){ + AudioObjectPropertyAddress propertyAddress; + propertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + propertyAddress.mScope = kAudioObjectPropertyScopeGlobal; + propertyAddress.mElement = kAudioObjectPropertyElementMaster; + AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &propertyAddress, AudioOutputAudioUnitLegacy::DefaultDeviceChangedCallback, this); + + AudioObjectPropertyAddress dataSourceProp={ + kAudioDevicePropertyDataSource, + kAudioDevicePropertyScopeOutput, + kAudioObjectPropertyElementMaster + }; + if(isMacBookPro && sysDevID && AudioObjectHasProperty(sysDevID, 
&dataSourceProp)){ + AudioObjectRemovePropertyListener(sysDevID, &dataSourceProp, AudioOutputAudioUnitLegacy::DefaultDeviceChangedCallback, this); + } + + AudioUnitUninitialize(unit); + AudioComponentInstanceDispose(unit); +} + +void AudioOutputAudioUnitLegacy::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ +} + +void AudioOutputAudioUnitLegacy::Start(){ + isPlaying=true; + OSStatus status=AudioOutputUnitStart(unit); + CHECK_AU_ERROR(status, "Error starting AudioUnit"); +} + +void AudioOutputAudioUnitLegacy::Stop(){ + isPlaying=false; + OSStatus status=AudioOutputUnitStart(unit); + CHECK_AU_ERROR(status, "Error stopping AudioUnit"); +} + +OSStatus AudioOutputAudioUnitLegacy::BufferCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData){ + AudioOutputAudioUnitLegacy* input=(AudioOutputAudioUnitLegacy*) inRefCon; + input->HandleBufferCallback(ioData); + return noErr; +} + +bool AudioOutputAudioUnitLegacy::IsPlaying(){ + return isPlaying; +} + +void AudioOutputAudioUnitLegacy::HandleBufferCallback(AudioBufferList *ioData){ + int i; + unsigned int k; + int16_t absVal=0; + for(i=0;imNumberBuffers;i++){ + AudioBuffer buf=ioData->mBuffers[i]; + if(!isPlaying){ + memset(buf.mData, 0, buf.mDataByteSize); + return; + } + while(remainingDataSize& devs){ + AudioObjectPropertyAddress propertyAddress = { + kAudioHardwarePropertyDevices, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster + }; + + UInt32 dataSize = 0; + OSStatus status = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize); + if(kAudioHardwareNoError != status) { + LOGE("AudioObjectGetPropertyDataSize (kAudioHardwarePropertyDevices) failed: %i", status); + return; + } + + UInt32 deviceCount = (UInt32)(dataSize / sizeof(AudioDeviceID)); + + + AudioDeviceID *audioDevices = (AudioDeviceID*)(malloc(dataSize)); + + 
status = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize, audioDevices); + if(kAudioHardwareNoError != status) { + LOGE("AudioObjectGetPropertyData (kAudioHardwarePropertyDevices) failed: %i", status); + free(audioDevices); + audioDevices = NULL; + return; + } + + + // Iterate through all the devices and determine which are input-capable + propertyAddress.mScope = kAudioDevicePropertyScopeOutput; + for(UInt32 i = 0; i < deviceCount; ++i) { + // Query device UID + CFStringRef deviceUID = NULL; + dataSize = sizeof(deviceUID); + propertyAddress.mSelector = kAudioDevicePropertyDeviceUID; + status = AudioObjectGetPropertyData(audioDevices[i], &propertyAddress, 0, NULL, &dataSize, &deviceUID); + if(kAudioHardwareNoError != status) { + LOGE("AudioObjectGetPropertyData (kAudioDevicePropertyDeviceUID) failed: %i", status); + continue; + } + + // Query device name + CFStringRef deviceName = NULL; + dataSize = sizeof(deviceName); + propertyAddress.mSelector = kAudioDevicePropertyDeviceNameCFString; + status = AudioObjectGetPropertyData(audioDevices[i], &propertyAddress, 0, NULL, &dataSize, &deviceName); + if(kAudioHardwareNoError != status) { + LOGE("AudioObjectGetPropertyData (kAudioDevicePropertyDeviceNameCFString) failed: %i", status); + continue; + } + + // Determine if the device is an input device (it is an input device if it has input channels) + dataSize = 0; + propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration; + status = AudioObjectGetPropertyDataSize(audioDevices[i], &propertyAddress, 0, NULL, &dataSize); + if(kAudioHardwareNoError != status) { + LOGE("AudioObjectGetPropertyDataSize (kAudioDevicePropertyStreamConfiguration) failed: %i", status); + continue; + } + + AudioBufferList *bufferList = (AudioBufferList*)(malloc(dataSize)); + + status = AudioObjectGetPropertyData(audioDevices[i], &propertyAddress, 0, NULL, &dataSize, bufferList); + if(kAudioHardwareNoError != status || 0 == 
bufferList->mNumberBuffers) { + if(kAudioHardwareNoError != status) + LOGE("AudioObjectGetPropertyData (kAudioDevicePropertyStreamConfiguration) failed: %i", status); + free(bufferList); + bufferList = NULL; + continue; + } + + free(bufferList); + bufferList = NULL; + + AudioOutputDevice dev; + char buf[1024]; + CFStringGetCString(deviceName, buf, 1024, kCFStringEncodingUTF8); + dev.displayName=std::string(buf); + CFStringGetCString(deviceUID, buf, 1024, kCFStringEncodingUTF8); + dev.id=std::string(buf); + devs.push_back(dev); + } + + free(audioDevices); + audioDevices = NULL; +} + +void AudioOutputAudioUnitLegacy::SetCurrentDevice(std::string deviceID){ + UInt32 size=sizeof(AudioDeviceID); + AudioDeviceID outputDevice=NULL; + OSStatus status; + AudioObjectPropertyAddress dataSourceProp={ + kAudioDevicePropertyDataSource, + kAudioDevicePropertyScopeOutput, + kAudioObjectPropertyElementMaster + }; + + if(isMacBookPro && sysDevID && AudioObjectHasProperty(sysDevID, &dataSourceProp)){ + AudioObjectRemovePropertyListener(sysDevID, &dataSourceProp, AudioOutputAudioUnitLegacy::DefaultDeviceChangedCallback, this); + } + + if(deviceID=="default"){ + AudioObjectPropertyAddress propertyAddress; + propertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + propertyAddress.mScope = kAudioObjectPropertyScopeGlobal; + propertyAddress.mElement = kAudioObjectPropertyElementMaster; + UInt32 propsize = sizeof(AudioDeviceID); + status = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propsize, &outputDevice); + CHECK_AU_ERROR(status, "Error getting default input device"); + }else{ + AudioObjectPropertyAddress propertyAddress = { + kAudioHardwarePropertyDevices, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster + }; + UInt32 dataSize = 0; + status = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize); + CHECK_AU_ERROR(status, "Error getting devices size"); + UInt32 deviceCount 
= (UInt32)(dataSize / sizeof(AudioDeviceID)); + AudioDeviceID audioDevices[deviceCount]; + status = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize, audioDevices); + CHECK_AU_ERROR(status, "Error getting device list"); + for(UInt32 i = 0; i < deviceCount; ++i) { + // Query device UID + CFStringRef deviceUID = NULL; + dataSize = sizeof(deviceUID); + propertyAddress.mSelector = kAudioDevicePropertyDeviceUID; + status = AudioObjectGetPropertyData(audioDevices[i], &propertyAddress, 0, NULL, &dataSize, &deviceUID); + CHECK_AU_ERROR(status, "Error getting device uid"); + char buf[1024]; + CFStringGetCString(deviceUID, buf, 1024, kCFStringEncodingUTF8); + if(deviceID==buf){ + LOGV("Found device for id %s", buf); + outputDevice=audioDevices[i]; + break; + } + } + if(!outputDevice){ + LOGW("Requested device not found, using default"); + SetCurrentDevice("default"); + return; + } + } + + status =AudioUnitSetProperty(unit, + kAudioOutputUnitProperty_CurrentDevice, + kAudioUnitScope_Global, + kOutputBus, + &outputDevice, + size); + CHECK_AU_ERROR(status, "Error setting output device"); + + AudioStreamBasicDescription hardwareFormat; + size=sizeof(hardwareFormat); + status=AudioUnitGetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kOutputBus, &hardwareFormat, &size); + CHECK_AU_ERROR(status, "Error getting hardware format"); + hardwareSampleRate=hardwareFormat.mSampleRate; + + AudioStreamBasicDescription desiredFormat={ + .mSampleRate=48000, .mFormatID=kAudioFormatLinearPCM, .mFormatFlags=kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian, + .mFramesPerPacket=1, .mChannelsPerFrame=1, .mBitsPerChannel=16, .mBytesPerPacket=2, .mBytesPerFrame=2 + }; + + status=AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &desiredFormat, sizeof(desiredFormat)); + CHECK_AU_ERROR(status, "Error setting format"); + + LOGD("Switched playback 
device, new sample rate %d", hardwareSampleRate); + + this->currentDevice=deviceID; + sysDevID=outputDevice; + + AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyBufferFrameSize, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster + }; + size=4; + UInt32 bufferFrameSize; + status=AudioObjectGetPropertyData(outputDevice, &propertyAddress, 0, NULL, &size, &bufferFrameSize); + if(status==noErr){ + estimatedDelay=bufferFrameSize/48; + LOGD("CoreAudio buffer size for output device is %u frames (%u ms)", bufferFrameSize, estimatedDelay); + } + + if(isMacBookPro){ + if(AudioObjectHasProperty(outputDevice, &dataSourceProp)){ + UInt32 dataSource; + size=4; + AudioObjectGetPropertyData(outputDevice, &dataSourceProp, 0, NULL, &size, &dataSource); + SetPanRight(dataSource=='ispk'); + AudioObjectAddPropertyListener(outputDevice, &dataSourceProp, AudioOutputAudioUnitLegacy::DefaultDeviceChangedCallback, this); + }else{ + SetPanRight(false); + } + } +} + +void AudioOutputAudioUnitLegacy::SetPanRight(bool panRight){ + LOGI("%sabling pan right on macbook pro", panRight ? "En" : "Dis"); + int32_t channelMap[]={panRight ? 
-1 : 0, 0}; + OSStatus status=AudioUnitSetProperty(unit, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Global, kOutputBus, channelMap, sizeof(channelMap)); + CHECK_AU_ERROR(status, "Error setting channel map"); +} + +OSStatus AudioOutputAudioUnitLegacy::DefaultDeviceChangedCallback(AudioObjectID inObjectID, UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses, void *inClientData){ + AudioOutputAudioUnitLegacy* self=(AudioOutputAudioUnitLegacy*)inClientData; + if(inAddresses[0].mSelector==kAudioHardwarePropertyDefaultOutputDevice){ + LOGV("System default input device changed"); + if(self->currentDevice=="default"){ + self->SetCurrentDevice(self->currentDevice); + } + }else if(inAddresses[0].mSelector==kAudioDevicePropertyDataSource){ + UInt32 dataSource; + UInt32 size=4; + AudioObjectGetPropertyData(inObjectID, inAddresses, 0, NULL, &size, &dataSource); + self->SetPanRight(dataSource=='ispk'); + } + return noErr; +} diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnitOSX.h b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnitOSX.h new file mode 100644 index 000000000..d73bb5412 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioOutputAudioUnitOSX.h @@ -0,0 +1,43 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOINPUTAUDIOUNIT_OSX_H +#define LIBTGVOIP_AUDIOINPUTAUDIOUNIT_OSX_H + +#include +#import +#import +#include "../../audio/AudioOutput.h" + +namespace tgvoip{ namespace audio{ +class AudioOutputAudioUnitLegacy : public AudioOutput{ + +public: + AudioOutputAudioUnitLegacy(std::string deviceID); + virtual ~AudioOutputAudioUnitLegacy(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual bool IsPlaying(); + void HandleBufferCallback(AudioBufferList* ioData); + static void EnumerateDevices(std::vector& devs); + virtual void SetCurrentDevice(std::string deviceID); + +private: + static OSStatus BufferCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData); + static OSStatus DefaultDeviceChangedCallback(AudioObjectID inObjectID, UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses, void *inClientData); + void SetPanRight(bool panRight); + unsigned char remainingData[10240]; + size_t remainingDataSize; + bool isPlaying; + AudioUnit unit; + int hardwareSampleRate; + bool isMacBookPro; + AudioDeviceID sysDevID; +}; +}} + +#endif //LIBTGVOIP_AUDIOINPUTAUDIOUNIT_OSX_H diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioUnitIO.cpp b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioUnitIO.cpp new file mode 100644 index 000000000..69a924f99 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioUnitIO.cpp @@ -0,0 +1,317 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// +#include +#include "AudioUnitIO.h" +#include "AudioInputAudioUnit.h" +#include "AudioOutputAudioUnit.h" +#include "../../logging.h" +#include "../../VoIPController.h" +#include "../../VoIPServerConfig.h" + +#define CHECK_AU_ERROR(res, msg) if(res!=noErr){ LOGE(msg": OSStatus=%d", (int)res); failed=true; return; } +#define BUFFER_SIZE 960 // 20 ms + +#define kOutputBus 0 +#define kInputBus 1 + +using namespace tgvoip; +using namespace tgvoip::audio; + +int AudioUnitIO::refCount=0; +AudioUnitIO* AudioUnitIO::sharedInstance=NULL; + +AudioUnitIO::AudioUnitIO(){ + input=NULL; + output=NULL; + inputEnabled=false; + outputEnabled=false; + failed=false; + started=false; + inBufferList.mBuffers[0].mData=malloc(10240); + inBufferList.mBuffers[0].mDataByteSize=10240; + inBufferList.mNumberBuffers=1; + + OSStatus status; + AudioComponentDescription desc; + AudioComponent inputComponent; + desc.componentType = kAudioUnitType_Output; + desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO; + desc.componentFlags = 0; + desc.componentFlagsMask = 0; + desc.componentManufacturer = kAudioUnitManufacturer_Apple; + inputComponent = AudioComponentFindNext(NULL, &desc); + status = AudioComponentInstanceNew(inputComponent, &unit); + + UInt32 flag=1; +#if TARGET_OS_IPHONE + status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag)); + CHECK_AU_ERROR(status, "Error enabling AudioUnit output"); + status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag)); + CHECK_AU_ERROR(status, "Error enabling AudioUnit input"); +#endif + +#if TARGET_OS_IPHONE + flag=ServerConfig::GetSharedInstance()->GetBoolean("use_ios_vpio_agc", true) ? 1 : 0; +#else + flag=ServerConfig::GetSharedInstance()->GetBoolean("use_osx_vpio_agc", true) ? 
1 : 0; +#endif + status=AudioUnitSetProperty(unit, kAUVoiceIOProperty_VoiceProcessingEnableAGC, kAudioUnitScope_Global, kInputBus, &flag, sizeof(flag)); + CHECK_AU_ERROR(status, "Error disabling AGC"); + + AudioStreamBasicDescription audioFormat; + audioFormat.mSampleRate = 48000; + audioFormat.mFormatID = kAudioFormatLinearPCM; +#if TARGET_OS_IPHONE + audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian; + audioFormat.mBitsPerChannel = 16; + audioFormat.mBytesPerPacket = 2; + audioFormat.mBytesPerFrame = 2; +#else // OS X + audioFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian; + audioFormat.mBitsPerChannel = 32; + audioFormat.mBytesPerPacket = 4; + audioFormat.mBytesPerFrame = 4; +#endif + audioFormat.mFramesPerPacket = 1; + audioFormat.mChannelsPerFrame = 1; + + status = AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, sizeof(audioFormat)); + CHECK_AU_ERROR(status, "Error setting output format"); + status = AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, sizeof(audioFormat)); + CHECK_AU_ERROR(status, "Error setting input format"); + + AURenderCallbackStruct callbackStruct; + + callbackStruct.inputProc = AudioUnitIO::BufferCallback; + callbackStruct.inputProcRefCon = this; + status = AudioUnitSetProperty(unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &callbackStruct, sizeof(callbackStruct)); + CHECK_AU_ERROR(status, "Error setting output buffer callback"); + status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct)); + CHECK_AU_ERROR(status, "Error setting input buffer callback"); + +#if TARGET_OS_OSX + CFRunLoopRef theRunLoop = NULL; + AudioObjectPropertyAddress propertyAddress = { 
kAudioHardwarePropertyRunLoop, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; + status = AudioObjectSetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop); + + propertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + propertyAddress.mScope = kAudioObjectPropertyScopeGlobal; + propertyAddress.mElement = kAudioObjectPropertyElementMaster; + AudioObjectAddPropertyListener(kAudioObjectSystemObject, &propertyAddress, AudioUnitIO::DefaultDeviceChangedCallback, this); + propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + AudioObjectAddPropertyListener(kAudioObjectSystemObject, &propertyAddress, AudioUnitIO::DefaultDeviceChangedCallback, this); +#endif +} + +AudioUnitIO::~AudioUnitIO(){ + AudioOutputUnitStop(unit); + AudioUnitUninitialize(unit); + AudioComponentInstanceDispose(unit); + free(inBufferList.mBuffers[0].mData); +#if TARGET_OS_OSX + AudioObjectPropertyAddress propertyAddress; + propertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + propertyAddress.mScope = kAudioObjectPropertyScopeGlobal; + propertyAddress.mElement = kAudioObjectPropertyElementMaster; + AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &propertyAddress, AudioUnitIO::DefaultDeviceChangedCallback, this); + propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &propertyAddress, AudioUnitIO::DefaultDeviceChangedCallback, this); +#endif +} + +AudioUnitIO* AudioUnitIO::Get(){ + if(refCount==0){ + sharedInstance=new AudioUnitIO(); + } + refCount++; + assert(refCount>0); + return sharedInstance; +} + +void AudioUnitIO::Release(){ + refCount--; + assert(refCount>=0); + if(refCount==0){ + delete sharedInstance; + sharedInstance=NULL; + } +} + +void AudioUnitIO::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + +} + +OSStatus AudioUnitIO::BufferCallback(void 
*inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData){ + ((AudioUnitIO*)inRefCon)->BufferCallback(ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData); + return noErr; +} + +void AudioUnitIO::BufferCallback(AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 bus, UInt32 numFrames, AudioBufferList *ioData){ + if(bus==kOutputBus){ + if(output && outputEnabled){ + output->HandleBufferCallback(ioData); + }else{ + memset(ioData->mBuffers[0].mData, 0, ioData->mBuffers[0].mDataByteSize); + } + }else if(bus==kInputBus){ + inBufferList.mBuffers[0].mDataByteSize=10240; + AudioUnitRender(unit, ioActionFlags, inTimeStamp, bus, numFrames, &inBufferList); + if(input && inputEnabled){ + input->HandleBufferCallback(&inBufferList); + } + } +} + +void AudioUnitIO::AttachInput(AudioInputAudioUnit *i){ + assert(input==NULL); + input=i; +} + +void AudioUnitIO::AttachOutput(AudioOutputAudioUnit *o){ + assert(output==NULL); + output=o; +} + +void AudioUnitIO::DetachInput(){ + assert(input!=NULL); + input=NULL; + inputEnabled=false; +} + +void AudioUnitIO::DetachOutput(){ + assert(output!=NULL); + output=NULL; + outputEnabled=false; +} + +void AudioUnitIO::EnableInput(bool enabled){ + inputEnabled=enabled; + StartIfNeeded(); +} + +void AudioUnitIO::EnableOutput(bool enabled){ + outputEnabled=enabled; + StartIfNeeded(); +} + +void AudioUnitIO::StartIfNeeded(){ + if(started) + return; + started=true; + OSStatus status = AudioUnitInitialize(unit); + CHECK_AU_ERROR(status, "Error initializing AudioUnit"); + status=AudioOutputUnitStart(unit); + CHECK_AU_ERROR(status, "Error starting AudioUnit"); +} + +bool AudioUnitIO::IsFailed(){ + return failed; +} + +#if TARGET_OS_OSX +OSStatus AudioUnitIO::DefaultDeviceChangedCallback(AudioObjectID inObjectID, UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses, void *inClientData){ + 
AudioUnitIO* self=(AudioUnitIO*)inClientData; + if(inAddresses[0].mSelector==kAudioHardwarePropertyDefaultOutputDevice){ + LOGV("System default output device changed"); + if(self->currentOutputDevice=="default"){ + self->SetCurrentDevice(false, self->currentOutputDevice); + } + }else if(inAddresses[0].mSelector==kAudioHardwarePropertyDefaultInputDevice){ + LOGV("System default input device changed"); + if(self->currentInputDevice=="default"){ + self->SetCurrentDevice(true, self->currentInputDevice); + } + } + return noErr; +} + +void AudioUnitIO::SetCurrentDevice(bool input, std::string deviceID){ + if(started){ + AudioOutputUnitStop(unit); + AudioUnitUninitialize(unit); + } + UInt32 size=sizeof(AudioDeviceID); + AudioDeviceID device=NULL; + OSStatus status; + + if(deviceID=="default"){ + AudioObjectPropertyAddress propertyAddress; + propertyAddress.mSelector = input ? kAudioHardwarePropertyDefaultInputDevice : kAudioHardwarePropertyDefaultOutputDevice; + propertyAddress.mScope = kAudioObjectPropertyScopeGlobal; + propertyAddress.mElement = kAudioObjectPropertyElementMaster; + UInt32 propsize = sizeof(AudioDeviceID); + status = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propsize, &device); + CHECK_AU_ERROR(status, "Error getting default device"); + }else{ + AudioObjectPropertyAddress propertyAddress = { + kAudioHardwarePropertyDevices, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster + }; + UInt32 dataSize = 0; + status = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize); + CHECK_AU_ERROR(status, "Error getting devices size"); + UInt32 deviceCount = (UInt32)(dataSize / sizeof(AudioDeviceID)); + AudioDeviceID audioDevices[deviceCount]; + status = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize, audioDevices); + CHECK_AU_ERROR(status, "Error getting device list"); + for(UInt32 i = 0; i < deviceCount; ++i) { + // Query 
device UID + CFStringRef deviceUID = NULL; + dataSize = sizeof(deviceUID); + propertyAddress.mSelector = kAudioDevicePropertyDeviceUID; + status = AudioObjectGetPropertyData(audioDevices[i], &propertyAddress, 0, NULL, &dataSize, &deviceUID); + CHECK_AU_ERROR(status, "Error getting device uid"); + char buf[1024]; + CFStringGetCString(deviceUID, buf, 1024, kCFStringEncodingUTF8); + if(deviceID==buf){ + LOGV("Found device for id %s", buf); + device=audioDevices[i]; + break; + } + } + if(!device){ + LOGW("Requested device not found, using default"); + SetCurrentDevice(input, "default"); + return; + } + } + + status=AudioUnitSetProperty(unit, + kAudioOutputUnitProperty_CurrentDevice, + kAudioUnitScope_Global, + input ? kInputBus : kOutputBus, + &device, + size); + CHECK_AU_ERROR(status, "Error setting input device"); + + if(input) + currentInputDevice=deviceID; + else + currentOutputDevice=deviceID; + + + /*AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyBufferFrameSize, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster + }; + size=4; + UInt32 bufferFrameSize; + status=AudioObjectGetPropertyData(device, &propertyAddress, 0, NULL, &size, &bufferFrameSize); + if(status==noErr){ + estimatedDelay=bufferFrameSize/48; + LOGD("CoreAudio buffer size for device is %u frames (%u ms)", bufferFrameSize, estimatedDelay); + }*/ + if(started){ + started=false; + StartIfNeeded(); + } +} + +#endif diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/AudioUnitIO.h b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioUnitIO.h new file mode 100644 index 000000000..42f08ff83 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/AudioUnitIO.h @@ -0,0 +1,59 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOUNITIO_H +#define LIBTGVOIP_AUDIOUNITIO_H + +#include +#include +#include "../../threading.h" +#include + +namespace tgvoip{ namespace audio{ +class AudioInputAudioUnit; +class AudioOutputAudioUnit; + +class AudioUnitIO{ +public: + AudioUnitIO(); + ~AudioUnitIO(); + void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + void AttachInput(AudioInputAudioUnit* i); + void AttachOutput(AudioOutputAudioUnit* o); + void DetachInput(); + void DetachOutput(); + void EnableInput(bool enabled); + void EnableOutput(bool enabled); + bool IsFailed(); + static AudioUnitIO* Get(); + static void Release(); +#if TARGET_OS_OSX + void SetCurrentDevice(bool input, std::string deviceID); +#endif + +private: + static OSStatus BufferCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData); + void BufferCallback(AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 bus, UInt32 numFrames, AudioBufferList* ioData); + void StartIfNeeded(); +#if TARGET_OS_OSX + static OSStatus DefaultDeviceChangedCallback(AudioObjectID inObjectID, UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses, void *inClientData); + std::string currentInputDevice; + std::string currentOutputDevice; +#endif + AudioComponentInstance unit; + AudioInputAudioUnit* input; + AudioOutputAudioUnit* output; + AudioBufferList inBufferList; + bool inputEnabled; + bool outputEnabled; + bool failed; + bool started; + static int refCount; + static AudioUnitIO* sharedInstance; +}; +}} + +#endif /* LIBTGVOIP_AUDIOUNITIO_H */ diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/DarwinSpecific.h b/Telegram/ThirdParty/libtgvoip/os/darwin/DarwinSpecific.h new file mode 100644 index 000000000..9bdb5c188 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/DarwinSpecific.h @@ -0,0 +1,19 @@ +// +// libtgvoip is free and 
unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef TGVOIP_DARWINSPECIFIC_H +#define TGVOIP_DARWINSPECIFIC_H + +#include + +namespace tgvoip { +class DarwinSpecific{ +public: + static void GetSystemName(char* buf, size_t len); +}; +} + +#endif //TGVOIP_DARWINSPECIFIC_H diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/DarwinSpecific.mm b/Telegram/ThirdParty/libtgvoip/os/darwin/DarwinSpecific.mm new file mode 100644 index 000000000..f2f04c027 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/DarwinSpecific.mm @@ -0,0 +1,17 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#include "DarwinSpecific.h" + +#import + +using namespace tgvoip; + +void DarwinSpecific::GetSystemName(char* buf, size_t len){ + NSString* v=[[NSProcessInfo processInfo] operatingSystemVersionString]; + strcpy(buf, [v UTF8String]); + //[v getCString:buf maxLength:sizeof(buf) encoding:NSUTF8StringEncoding]; +} diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/TGLogWrapper.h b/Telegram/ThirdParty/libtgvoip/os/darwin/TGLogWrapper.h new file mode 100644 index 000000000..8f111774f --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/TGLogWrapper.h @@ -0,0 +1,20 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef TGVOIP_TGLOGWRAPPER_H +#define TGVOIP_TGLOGWRAPPER_H + +#if defined __cplusplus +extern "C" { +#endif + +void __tgvoip_call_tglog(const char* format, ...); + +#if defined __cplusplus +}; +#endif + +#endif //TGVOIP_TGLOGWRAPPER_H diff --git a/Telegram/ThirdParty/libtgvoip/os/darwin/TGLogWrapper.m b/Telegram/ThirdParty/libtgvoip/os/darwin/TGLogWrapper.m new file mode 100644 index 000000000..be239b5d9 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/darwin/TGLogWrapper.m @@ -0,0 +1,10 @@ +#import + +extern void TGLogv(NSString *format, va_list args); + +void __tgvoip_call_tglog(const char* format, ...){ + va_list args; + va_start(args, format); + TGLogv([[NSString alloc]initWithUTF8String:format], args); + va_end(args); +} diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputALSA.cpp b/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputALSA.cpp new file mode 100644 index 000000000..177474d30 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputALSA.cpp @@ -0,0 +1,179 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include +#include +#include +#include +#include "AudioInputALSA.h" +#include "../../logging.h" +#include "../../VoIPController.h" + +using namespace tgvoip::audio; + +#define BUFFER_SIZE 960 +#define CHECK_ERROR(res, msg) if(res<0){LOGE(msg ": %s", _snd_strerror(res));} +#define CHECK_DL_ERROR(res, msg) if(!res){LOGE(msg ": %s", dlerror()); failed=true; return;} +#define LOAD_FUNCTION(lib, name, ref) {ref=(typeof(ref))dlsym(lib, name); CHECK_DL_ERROR(ref, "Error getting entry point for " name);} + +AudioInputALSA::AudioInputALSA(std::string devID){ + isRecording=false; + handle=NULL; + + lib=dlopen("libasound.so.2", RTLD_LAZY); + if(!lib) + lib=dlopen("libasound.so", RTLD_LAZY); + if(!lib){ + LOGE("Error loading libasound: %s", dlerror()); + failed=true; + return; + } + + LOAD_FUNCTION(lib, "snd_pcm_open", _snd_pcm_open); + LOAD_FUNCTION(lib, "snd_pcm_set_params", _snd_pcm_set_params); + LOAD_FUNCTION(lib, "snd_pcm_close", _snd_pcm_close); + LOAD_FUNCTION(lib, "snd_pcm_readi", _snd_pcm_readi); + LOAD_FUNCTION(lib, "snd_pcm_recover", _snd_pcm_recover); + LOAD_FUNCTION(lib, "snd_strerror", _snd_strerror); + + SetCurrentDevice(devID); +} + +AudioInputALSA::~AudioInputALSA(){ + if(handle) + _snd_pcm_close(handle); + if(lib) + dlclose(lib); +} + +void AudioInputALSA::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + +} + +void AudioInputALSA::Start(){ + if(failed || isRecording) + return; + + isRecording=true; + start_thread(thread, AudioInputALSA::StartThread, this); +} + +void AudioInputALSA::Stop(){ + if(!isRecording) + return; + + isRecording=false; + join_thread(thread); +} + +void* AudioInputALSA::StartThread(void* arg){ + ((AudioInputALSA*)arg)->RunThread(); +} + +void AudioInputALSA::RunThread(){ + unsigned char buffer[BUFFER_SIZE*2]; + snd_pcm_sframes_t frames; + while(isRecording){ + frames=_snd_pcm_readi(handle, buffer, BUFFER_SIZE); + if (frames < 0){ + frames = _snd_pcm_recover(handle, frames, 0); + } + if (frames < 0) { + 
LOGE("snd_pcm_readi failed: %s\n", _snd_strerror(frames)); + break; + } + InvokeCallback(buffer, sizeof(buffer)); + } +} + +void AudioInputALSA::SetCurrentDevice(std::string devID){ + bool wasRecording=isRecording; + isRecording=false; + if(handle){ + join_thread(thread); + _snd_pcm_close(handle); + } + currentDevice=devID; + + int res=_snd_pcm_open(&handle, devID.c_str(), SND_PCM_STREAM_CAPTURE, 0); + if(res<0) + res=_snd_pcm_open(&handle, "default", SND_PCM_STREAM_CAPTURE, 0); + CHECK_ERROR(res, "snd_pcm_open failed"); + + res=_snd_pcm_set_params(handle, SND_PCM_FORMAT_S16, SND_PCM_ACCESS_RW_INTERLEAVED, 1, 48000, 1, 100000); + CHECK_ERROR(res, "snd_pcm_set_params failed"); + + if(wasRecording){ + isRecording=true; + start_thread(thread, AudioInputALSA::StartThread, this); + } +} + +void AudioInputALSA::EnumerateDevices(std::vector& devs){ + int (*_snd_device_name_hint)(int card, const char* iface, void*** hints); + char* (*_snd_device_name_get_hint)(const void* hint, const char* id); + int (*_snd_device_name_free_hint)(void** hinst); + void* lib=dlopen("libasound.so.2", RTLD_LAZY); + if(!lib) + dlopen("libasound.so", RTLD_LAZY); + if(!lib) + return; + + _snd_device_name_hint=(typeof(_snd_device_name_hint))dlsym(lib, "snd_device_name_hint"); + _snd_device_name_get_hint=(typeof(_snd_device_name_get_hint))dlsym(lib, "snd_device_name_get_hint"); + _snd_device_name_free_hint=(typeof(_snd_device_name_free_hint))dlsym(lib, "snd_device_name_free_hint"); + + if(!_snd_device_name_hint || !_snd_device_name_get_hint || !_snd_device_name_free_hint){ + dlclose(lib); + return; + } + + char** hints; + int err=_snd_device_name_hint(-1, "pcm", (void***)&hints); + if(err!=0){ + dlclose(lib); + return; + } + + char** n=hints; + while(*n){ + char* name=_snd_device_name_get_hint(*n, "NAME"); + if(strncmp(name, "surround", 8)==0 || strcmp(name, "null")==0){ + free(name); + n++; + continue; + } + char* desc=_snd_device_name_get_hint(*n, "DESC"); + char* 
ioid=_snd_device_name_get_hint(*n, "IOID"); + if(!ioid || strcmp(ioid, "Input")==0){ + char* l1=strtok(desc, "\n"); + char* l2=strtok(NULL, "\n"); + char* tmp=strtok(l1, ","); + char* actualName=tmp; + while((tmp=strtok(NULL, ","))){ + actualName=tmp; + } + if(actualName[0]==' ') + actualName++; + AudioInputDevice dev; + dev.id=std::string(name); + if(l2){ + char buf[256]; + snprintf(buf, sizeof(buf), "%s (%s)", actualName, l2); + dev.displayName=std::string(buf); + }else{ + dev.displayName=std::string(actualName); + } + devs.push_back(dev); + } + free(name); + free(desc); + free(ioid); + n++; + } + + dlclose(lib); +} \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputALSA.h b/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputALSA.h new file mode 100644 index 000000000..8bcd4353b --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputALSA.h @@ -0,0 +1,48 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOINPUTALSA_H +#define LIBTGVOIP_AUDIOINPUTALSA_H + +#include "../../audio/AudioInput.h" +#include "../../threading.h" +#include + +namespace tgvoip{ +namespace audio{ + +class AudioInputALSA : public AudioInput{ + +public: + AudioInputALSA(std::string devID); + virtual ~AudioInputALSA(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual void SetCurrentDevice(std::string devID); + static void EnumerateDevices(std::vector& devs); + +private: + static void* StartThread(void* arg); + void RunThread(); + + int (*_snd_pcm_open)(snd_pcm_t** pcm, const char* name, snd_pcm_stream_t stream, int mode); + int (*_snd_pcm_set_params)(snd_pcm_t* pcm, snd_pcm_format_t format, snd_pcm_access_t access, unsigned int channels, unsigned int rate, int soft_resample, unsigned int latency); + int (*_snd_pcm_close)(snd_pcm_t* pcm); + snd_pcm_sframes_t (*_snd_pcm_readi)(snd_pcm_t *pcm, const void *buffer, snd_pcm_uframes_t size); + int (*_snd_pcm_recover)(snd_pcm_t* pcm, int err, int silent); + const char* (*_snd_strerror)(int errnum); + void* lib; + + snd_pcm_t* handle; + tgvoip_thread_t thread; + bool isRecording; +}; + +} +} + +#endif //LIBTGVOIP_AUDIOINPUTALSA_H diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputPulse.cpp b/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputPulse.cpp new file mode 100644 index 000000000..cdd31aa04 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputPulse.cpp @@ -0,0 +1,333 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + + +#include +#include +#include +#include "AudioInputPulse.h" +#include "../../logging.h" +#include "../../VoIPController.h" +#define TGVOIP_IN_AUDIO_IO +#include "PulseAudioLoader.h" +#undef TGVOIP_IN_AUDIO_IO + +#define BUFFER_SIZE 960 +#define CHECK_ERROR(res, msg) if(res!=0){LOGE(msg " failed: %s", pa_strerror(res)); failed=true; return;} + +using namespace tgvoip::audio; + +AudioInputPulse::AudioInputPulse(std::string devID){ + isRecording=false; + isConnected=false; + didStart=false; + + mainloop=NULL; + mainloopApi=NULL; + context=NULL; + stream=NULL; + remainingDataSize=0; + + if(!PulseAudioLoader::IncRef()){ + failed=true; + return; + } + + mainloop=pa_threaded_mainloop_new(); + if(!mainloop){ + LOGE("Error initializing PulseAudio (pa_threaded_mainloop_new)"); + failed=true; + return; + } + mainloopApi=pa_threaded_mainloop_get_api(mainloop); + char exePath[MAXPATHLEN]; + char exeName[MAXPATHLEN]; + ssize_t lres=readlink("/proc/self/exe", exePath, sizeof(exePath)); + if(lres==-1) + lres=readlink("/proc/curproc/file", exePath, sizeof(exePath)); + if(lres==-1) + lres=readlink("/proc/curproc/exe", exePath, sizeof(exePath)); + if(lres>0){ + strcpy(exeName, basename(exePath)); + }else{ + snprintf(exeName, sizeof(exeName), "Process %d", getpid()); + } + context=pa_context_new(mainloopApi, exeName); + if(!context){ + LOGE("Error initializing PulseAudio (pa_context_new)"); + failed=true; + return; + } + pa_context_set_state_callback(context, AudioInputPulse::ContextStateCallback, this); + pa_threaded_mainloop_lock(mainloop); + isLocked=true; + int err=pa_threaded_mainloop_start(mainloop); + CHECK_ERROR(err, "pa_threaded_mainloop_start"); + didStart=true; + + err=pa_context_connect(context, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL); + CHECK_ERROR(err, "pa_context_connect"); + + while(true){ + pa_threaded_mainloop_lock(mainloop); + pa_context_state_t contextState=pa_context_get_state(context); + pa_threaded_mainloop_unlock(mainloop); + 
if(!PA_CONTEXT_IS_GOOD(contextState)){ + LOGE("Error initializing PulseAudio (PA_CONTEXT_IS_GOOD)"); + failed=true; + return; + } + if(contextState==PA_CONTEXT_READY) + break; + pa_threaded_mainloop_wait(mainloop); + } + + pa_sample_spec sample_specifications{ + .format=PA_SAMPLE_S16LE, + .rate=48000, + .channels=1 + }; + + stream=pa_stream_new(context, "libtgvoip capture", &sample_specifications, NULL); + if(!stream){ + LOGE("Error initializing PulseAudio (pa_stream_new)"); + failed=true; + return; + } + pa_stream_set_state_callback(stream, AudioInputPulse::StreamStateCallback, this); + pa_stream_set_read_callback(stream, AudioInputPulse::StreamReadCallback, this); + pa_threaded_mainloop_unlock(mainloop); + isLocked=false; + + SetCurrentDevice(devID); +} + +AudioInputPulse::~AudioInputPulse(){ + if(mainloop && didStart){ + if(isLocked) + pa_threaded_mainloop_unlock(mainloop); + pa_threaded_mainloop_stop(mainloop); + } + if(stream){ + pa_stream_disconnect(stream); + pa_stream_unref(stream); + } + if(context){ + pa_context_disconnect(context); + pa_context_unref(context); + } + if(mainloop) + pa_threaded_mainloop_free(mainloop); + + PulseAudioLoader::DecRef(); +} + +bool AudioInputPulse::IsAvailable(){ + void* lib=dlopen("libpulse.so.0", RTLD_LAZY); + if(!lib) + lib=dlopen("libpulse.so", RTLD_LAZY); + if(lib){ + dlclose(lib); + return true; + } + return false; +} + +void AudioInputPulse::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + +} + +void AudioInputPulse::Start(){ + if(failed || isRecording) + return; + + isRecording=true; + pa_operation_unref(pa_stream_cork(stream, 0, AudioInputPulse::StreamSuccessCallback, mainloop)); +} + +void AudioInputPulse::Stop(){ + if(!isRecording) + return; + + isRecording=false; + pa_operation_unref(pa_stream_cork(stream, 1, AudioInputPulse::StreamSuccessCallback, mainloop)); +} + +bool AudioInputPulse::IsRecording(){ + return isRecording; +} + +void AudioInputPulse::SetCurrentDevice(std::string devID){ 
+ currentDevice=devID; + if(isRecording && isConnected){ + pa_stream_disconnect(stream); + isConnected=false; + } + + pa_buffer_attr bufferAttr={ + .maxlength=960*6, + .tlength=960*6, + .prebuf=0, + .minreq=960*2 + }; + int streamFlags=PA_STREAM_START_CORKED | PA_STREAM_INTERPOLATE_TIMING | + PA_STREAM_NOT_MONOTONIC | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_ADJUST_LATENCY; + + int err=pa_stream_connect_record(stream, devID=="default" ? NULL : devID.c_str(), &bufferAttr, (pa_stream_flags_t)streamFlags); + if(err!=0 && devID!="default"){ + SetCurrentDevice("default"); + return; + } + CHECK_ERROR(err, "pa_stream_connect_record"); + + while(true){ + pa_threaded_mainloop_lock(mainloop); + pa_stream_state_t streamState=pa_stream_get_state(stream); + pa_threaded_mainloop_unlock(mainloop); + if(!PA_STREAM_IS_GOOD(streamState)){ + LOGE("Error connecting to audio device '%s'", devID.c_str()); + failed=true; + return; + } + if(streamState==PA_STREAM_READY) + break; + pa_threaded_mainloop_wait(mainloop); + } + + isConnected=true; + + if(isRecording){ + pa_operation_unref(pa_stream_cork(stream, 0, AudioInputPulse::StreamSuccessCallback, mainloop)); + } +} + +bool AudioInputPulse::EnumerateDevices(std::vector& devs){ + if(!PulseAudioLoader::IncRef()) + return false; + + pa_mainloop* ml; + pa_mainloop_api* mlAPI; + pa_context* ctx; + pa_operation* op=NULL; + int state=0; + int paReady=0; + + ml=pa_mainloop_new(); + mlAPI=pa_mainloop_get_api(ml); + ctx=pa_context_new(mlAPI, "libtgvoip"); + + pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL); + pa_context_set_state_callback(ctx, AudioInputPulse::ContextStateCallbackEnum, &paReady); + + while(true){ + if(paReady==0){ + pa_mainloop_iterate(ml, 1, NULL); + continue; + } + if(paReady==2){ + pa_context_disconnect(ctx); + pa_context_unref(ctx); + pa_mainloop_free(ml); + PulseAudioLoader::DecRef(); + return false; + } + if(!op){ + op=pa_context_get_source_info_list(ctx, AudioInputPulse::DeviceEnumCallback, &devs); + continue; + } + 
if(pa_operation_get_state(op)==PA_OPERATION_DONE){ + pa_operation_unref(op); + pa_context_disconnect(ctx); + pa_context_unref(ctx); + pa_mainloop_free(ml); + PulseAudioLoader::DecRef(); + return true; + } + pa_mainloop_iterate(ml, 1, NULL); + } +} + +void AudioInputPulse::ContextStateCallback(pa_context* context, void* arg) { + AudioInputPulse* self=(AudioInputPulse*) arg; + pa_threaded_mainloop_signal(self->mainloop, 0); +} + +void AudioInputPulse::StreamStateCallback(pa_stream *s, void* arg) { + AudioInputPulse* self=(AudioInputPulse*) arg; + pa_threaded_mainloop_signal(self->mainloop, 0); +} + +void AudioInputPulse::StreamReadCallback(pa_stream *stream, size_t requestedBytes, void *userdata){ + ((AudioInputPulse*)userdata)->StreamReadCallback(stream, requestedBytes); +} + +void AudioInputPulse::StreamReadCallback(pa_stream *stream, size_t requestedBytes) { + int bytesRemaining = requestedBytes; + uint8_t *buffer = NULL; + while (bytesRemaining > 0) { + size_t bytesToFill = 102400; + size_t i; + + if (bytesToFill > bytesRemaining) bytesToFill = bytesRemaining; + + int err=pa_stream_peek(stream, (const void**) &buffer, &bytesToFill); + CHECK_ERROR(err, "pa_stream_peek"); + + if(isRecording){ + if(remainingDataSize+bytesToFill>sizeof(remainingData)){ + LOGE("Capture buffer is too big (%d)", (int)bytesToFill); + } + memcpy(remainingData+remainingDataSize, buffer, bytesToFill); + remainingDataSize+=bytesToFill; + while(remainingDataSize>=960*2){ + InvokeCallback(remainingData, 960*2); + memmove(remainingData, remainingData+960*2, remainingDataSize-960*2); + remainingDataSize-=960*2; + } + } + + err=pa_stream_drop(stream); + CHECK_ERROR(err, "pa_stream_drop"); + + bytesRemaining -= bytesToFill; + } +} + +void AudioInputPulse::StreamSuccessCallback(pa_stream *stream, int success, void *userdata) { + return; +} + +void AudioInputPulse::ContextStateCallbackEnum(pa_context* context, void* arg){ + pa_context_state_t state; + int* pa_ready=(int*)arg; + + 
state=pa_context_get_state(context); + switch(state){ + case PA_CONTEXT_UNCONNECTED: + case PA_CONTEXT_CONNECTING: + case PA_CONTEXT_AUTHORIZING: + case PA_CONTEXT_SETTING_NAME: + default: + break; + case PA_CONTEXT_FAILED: + case PA_CONTEXT_TERMINATED: + *pa_ready=2; + break; + case PA_CONTEXT_READY: + *pa_ready=1; + break; + } +} + +void AudioInputPulse::DeviceEnumCallback(pa_context* ctx, const pa_source_info* info, int eol, void* userdata){ + if(eol>0) + return; + std::vector* devs=(std::vector*)userdata; + AudioInputDevice dev; + dev.id=std::string(info->name); + dev.displayName=std::string(info->description); + devs->push_back(dev); +} diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputPulse.h b/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputPulse.h new file mode 100644 index 000000000..3dd55ce5e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/AudioInputPulse.h @@ -0,0 +1,58 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOINPUTPULSE_H +#define LIBTGVOIP_AUDIOINPUTPULSE_H + +#include "../../audio/AudioInput.h" +#include "../../threading.h" +#include + +#define DECLARE_DL_FUNCTION(name) typeof(name)* _import_##name + +namespace tgvoip{ +namespace audio{ + +class AudioInputPulse : public AudioInput{ +public: + AudioInputPulse(std::string devID); + virtual ~AudioInputPulse(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual bool IsRecording(); + virtual void SetCurrentDevice(std::string devID); + static bool EnumerateDevices(std::vector& devs); + static bool IsAvailable(); + +private: + static void ContextStateCallback(pa_context* context, void* arg); + static void ContextStateCallbackEnum(pa_context* context, void* arg); + static void StreamStateCallback(pa_stream* s, void* arg); + static void StreamSuccessCallback(pa_stream* stream, int success, void* userdata); + static void StreamReadCallback(pa_stream* stream, size_t requested_bytes, void* userdata); + static void DeviceEnumCallback(pa_context* ctx, const pa_source_info* info, int eol, void* userdata); + void StreamReadCallback(pa_stream* stream, size_t requestedBytes); + + pa_threaded_mainloop* mainloop; + pa_mainloop_api* mainloopApi; + pa_context* context; + pa_stream* stream; + + bool isRecording; + bool isConnected; + bool didStart; + bool isLocked; + unsigned char remainingData[960*8*2]; + size_t remainingDataSize; +}; + +} +} + +#undef DECLARE_DL_FUNCTION + +#endif //LIBTGVOIP_AUDIOINPUTPULSE_H diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputALSA.cpp b/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputALSA.cpp new file mode 100644 index 000000000..593edca4f --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputALSA.cpp @@ -0,0 +1,182 @@ +// +// libtgvoip is free and unencumbered public domain software. 
+// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + + +#include +#include +#include "AudioOutputALSA.h" +#include "../../logging.h" +#include "../../VoIPController.h" + +#define BUFFER_SIZE 960 +#define CHECK_ERROR(res, msg) if(res<0){LOGE(msg ": %s", _snd_strerror(res));} +#define CHECK_DL_ERROR(res, msg) if(!res){LOGE(msg ": %s", dlerror()); failed=true; return;} +#define LOAD_FUNCTION(lib, name, ref) {ref=(typeof(ref))dlsym(lib, name); CHECK_DL_ERROR(ref, "Error getting entry point for " name);} + +using namespace tgvoip::audio; + +AudioOutputALSA::AudioOutputALSA(std::string devID){ + isPlaying=false; + handle=NULL; + + lib=dlopen("libasound.so.2", RTLD_LAZY); + if(!lib) + lib=dlopen("libasound.so", RTLD_LAZY); + if(!lib){ + LOGE("Error loading libasound: %s", dlerror()); + failed=true; + return; + } + + LOAD_FUNCTION(lib, "snd_pcm_open", _snd_pcm_open); + LOAD_FUNCTION(lib, "snd_pcm_set_params", _snd_pcm_set_params); + LOAD_FUNCTION(lib, "snd_pcm_close", _snd_pcm_close); + LOAD_FUNCTION(lib, "snd_pcm_writei", _snd_pcm_writei); + LOAD_FUNCTION(lib, "snd_pcm_recover", _snd_pcm_recover); + LOAD_FUNCTION(lib, "snd_strerror", _snd_strerror); + + SetCurrentDevice(devID); +} + +AudioOutputALSA::~AudioOutputALSA(){ + if(handle) + _snd_pcm_close(handle); + if(lib) + dlclose(lib); +} + +void AudioOutputALSA::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + +} + +void AudioOutputALSA::Start(){ + if(failed || isPlaying) + return; + + isPlaying=true; + start_thread(thread, AudioOutputALSA::StartThread, this); +} + +void AudioOutputALSA::Stop(){ + if(!isPlaying) + return; + + isPlaying=false; + join_thread(thread); +} + +bool AudioOutputALSA::IsPlaying(){ + return isPlaying; +} + +void* AudioOutputALSA::StartThread(void* arg){ + ((AudioOutputALSA*)arg)->RunThread(); +} + +void AudioOutputALSA::RunThread(){ + unsigned char buffer[BUFFER_SIZE*2]; + 
snd_pcm_sframes_t frames; + while(isPlaying){ + InvokeCallback(buffer, sizeof(buffer)); + frames=_snd_pcm_writei(handle, buffer, BUFFER_SIZE); + if (frames < 0){ + frames = _snd_pcm_recover(handle, frames, 0); + } + if (frames < 0) { + LOGE("snd_pcm_writei failed: %s\n", _snd_strerror(frames)); + break; + } + } +} + +void AudioOutputALSA::SetCurrentDevice(std::string devID){ + bool wasPlaying=isPlaying; + isPlaying=false; + if(handle){ + join_thread(thread); + _snd_pcm_close(handle); + } + currentDevice=devID; + + int res=_snd_pcm_open(&handle, devID.c_str(), SND_PCM_STREAM_PLAYBACK, 0); + if(res<0) + res=_snd_pcm_open(&handle, "default", SND_PCM_STREAM_PLAYBACK, 0); + CHECK_ERROR(res, "snd_pcm_open failed"); + + res=_snd_pcm_set_params(handle, SND_PCM_FORMAT_S16, SND_PCM_ACCESS_RW_INTERLEAVED, 1, 48000, 1, 100000); + CHECK_ERROR(res, "snd_pcm_set_params failed"); + + if(wasPlaying){ + isPlaying=true; + start_thread(thread, AudioOutputALSA::StartThread, this); + } +} + +void AudioOutputALSA::EnumerateDevices(std::vector& devs){ + int (*_snd_device_name_hint)(int card, const char* iface, void*** hints); + char* (*_snd_device_name_get_hint)(const void* hint, const char* id); + int (*_snd_device_name_free_hint)(void** hinst); + void* lib=dlopen("libasound.so.2", RTLD_LAZY); + if(!lib) + dlopen("libasound.so", RTLD_LAZY); + if(!lib) + return; + + _snd_device_name_hint=(typeof(_snd_device_name_hint))dlsym(lib, "snd_device_name_hint"); + _snd_device_name_get_hint=(typeof(_snd_device_name_get_hint))dlsym(lib, "snd_device_name_get_hint"); + _snd_device_name_free_hint=(typeof(_snd_device_name_free_hint))dlsym(lib, "snd_device_name_free_hint"); + + if(!_snd_device_name_hint || !_snd_device_name_get_hint || !_snd_device_name_free_hint){ + dlclose(lib); + return; + } + + char** hints; + int err=_snd_device_name_hint(-1, "pcm", (void***)&hints); + if(err!=0){ + dlclose(lib); + return; + } + + char** n=hints; + while(*n){ + char* name=_snd_device_name_get_hint(*n, "NAME"); + 
if(strncmp(name, "surround", 8)==0 || strcmp(name, "null")==0){ + free(name); + n++; + continue; + } + char* desc=_snd_device_name_get_hint(*n, "DESC"); + char* ioid=_snd_device_name_get_hint(*n, "IOID"); + if(!ioid || strcmp(ioid, "Output")==0){ + char* l1=strtok(desc, "\n"); + char* l2=strtok(NULL, "\n"); + char* tmp=strtok(l1, ","); + char* actualName=tmp; + while((tmp=strtok(NULL, ","))){ + actualName=tmp; + } + if(actualName[0]==' ') + actualName++; + AudioOutputDevice dev; + dev.id=std::string(name); + if(l2){ + char buf[256]; + snprintf(buf, sizeof(buf), "%s (%s)", actualName, l2); + dev.displayName=std::string(buf); + }else{ + dev.displayName=std::string(actualName); + } + devs.push_back(dev); + } + free(name); + free(desc); + free(ioid); + n++; + } + + dlclose(lib); +} \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputALSA.h b/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputALSA.h new file mode 100644 index 000000000..e967d1a2e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputALSA.h @@ -0,0 +1,48 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOOUTPUTALSA_H +#define LIBTGVOIP_AUDIOOUTPUTALSA_H + +#include "../../audio/AudioOutput.h" +#include "../../threading.h" +#include + +namespace tgvoip{ +namespace audio{ + +class AudioOutputALSA : public AudioOutput{ +public: + AudioOutputALSA(std::string devID); + virtual ~AudioOutputALSA(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual bool IsPlaying(); + virtual void SetCurrentDevice(std::string devID); + static void EnumerateDevices(std::vector& devs); + +private: + static void* StartThread(void* arg); + void RunThread(); + + int (*_snd_pcm_open)(snd_pcm_t** pcm, const char* name, snd_pcm_stream_t stream, int mode); + int (*_snd_pcm_set_params)(snd_pcm_t* pcm, snd_pcm_format_t format, snd_pcm_access_t access, unsigned int channels, unsigned int rate, int soft_resample, unsigned int latency); + int (*_snd_pcm_close)(snd_pcm_t* pcm); + snd_pcm_sframes_t (*_snd_pcm_writei)(snd_pcm_t *pcm, const void *buffer, snd_pcm_uframes_t size); + int (*_snd_pcm_recover)(snd_pcm_t* pcm, int err, int silent); + const char* (*_snd_strerror)(int errnum); + void* lib; + + snd_pcm_t* handle; + tgvoip_thread_t thread; + bool isPlaying; +}; + +} +} + +#endif //LIBTGVOIP_AUDIOOUTPUTALSA_H diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputPulse.cpp b/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputPulse.cpp new file mode 100644 index 000000000..facab10b8 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputPulse.cpp @@ -0,0 +1,340 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + + +#include +#include +#include +#include "AudioOutputPulse.h" +#include "../../logging.h" +#include "../../VoIPController.h" +#define TGVOIP_IN_AUDIO_IO +#include "PulseAudioLoader.h" +#undef TGVOIP_IN_AUDIO_IO + +#define BUFFER_SIZE 960 +#define CHECK_ERROR(res, msg) if(res!=0){LOGE(msg " failed: %s", pa_strerror(res)); failed=true; return;} + +using namespace tgvoip; +using namespace tgvoip::audio; + +using tgvoip::PulseAudioLoader; + +AudioOutputPulse::AudioOutputPulse(std::string devID){ + isPlaying=false; + isConnected=false; + didStart=false; + isLocked=false; + + mainloop=NULL; + mainloopApi=NULL; + context=NULL; + stream=NULL; + remainingDataSize=0; + + if(!PulseAudioLoader::IncRef()){ + failed=true; + return; + } + + mainloop=pa_threaded_mainloop_new(); + if(!mainloop){ + LOGE("Error initializing PulseAudio (pa_threaded_mainloop_new)"); + failed=true; + return; + } + mainloopApi=pa_threaded_mainloop_get_api(mainloop); + char exePath[MAXPATHLEN]; + char exeName[MAXPATHLEN]; + ssize_t lres=readlink("/proc/self/exe", exePath, sizeof(exePath)); + if(lres==-1) + lres=readlink("/proc/curproc/file", exePath, sizeof(exePath)); + if(lres==-1) + lres=readlink("/proc/curproc/exe", exePath, sizeof(exePath)); + if(lres>0){ + strcpy(exeName, basename(exePath)); + }else{ + snprintf(exeName, sizeof(exeName), "Process %d", getpid()); + } + context=pa_context_new(mainloopApi, exeName); + if(!context){ + LOGE("Error initializing PulseAudio (pa_context_new)"); + failed=true; + return; + } + pa_context_set_state_callback(context, AudioOutputPulse::ContextStateCallback, this); + pa_threaded_mainloop_lock(mainloop); + isLocked=true; + int err=pa_threaded_mainloop_start(mainloop); + CHECK_ERROR(err, "pa_threaded_mainloop_start"); + didStart=true; + + err=pa_context_connect(context, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL); + CHECK_ERROR(err, "pa_context_connect"); + + while(true){ + pa_context_state_t contextState=pa_context_get_state(context); + 
if(!PA_CONTEXT_IS_GOOD(contextState)){ + LOGE("Error initializing PulseAudio (PA_CONTEXT_IS_GOOD)"); + failed=true; + return; + } + if(contextState==PA_CONTEXT_READY) + break; + pa_threaded_mainloop_wait(mainloop); + } + + pa_sample_spec sample_specifications{ + .format=PA_SAMPLE_S16LE, + .rate=48000, + .channels=1 + }; + + stream=pa_stream_new(context, "libtgvoip playback", &sample_specifications, NULL); + if(!stream){ + LOGE("Error initializing PulseAudio (pa_stream_new)"); + failed=true; + return; + } + pa_stream_set_state_callback(stream, AudioOutputPulse::StreamStateCallback, this); + pa_stream_set_write_callback(stream, AudioOutputPulse::StreamWriteCallback, this); + pa_threaded_mainloop_unlock(mainloop); + isLocked=false; + + SetCurrentDevice(devID); +} + +AudioOutputPulse::~AudioOutputPulse(){ + if(mainloop && didStart){ + if(isLocked) + pa_threaded_mainloop_unlock(mainloop); + pa_threaded_mainloop_stop(mainloop); + } + if(stream){ + pa_stream_disconnect(stream); + pa_stream_unref(stream); + } + if(context){ + pa_context_disconnect(context); + pa_context_unref(context); + } + if(mainloop) + pa_threaded_mainloop_free(mainloop); + + PulseAudioLoader::DecRef(); +} + +bool AudioOutputPulse::IsAvailable(){ + void* lib=dlopen("libpulse.so.0", RTLD_LAZY); + if(!lib) + lib=dlopen("libpulse.so", RTLD_LAZY); + if(lib){ + dlclose(lib); + return true; + } + return false; +} + +void AudioOutputPulse::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + +} + +void AudioOutputPulse::Start(){ + if(failed || isPlaying) + return; + + isPlaying=true; + pa_operation_unref(pa_stream_cork(stream, 0, AudioOutputPulse::StreamSuccessCallback, mainloop)); +} + +void AudioOutputPulse::Stop(){ + if(!isPlaying) + return; + + isPlaying=false; + pa_operation_unref(pa_stream_cork(stream, 1, AudioOutputPulse::StreamSuccessCallback, mainloop)); +} + +bool AudioOutputPulse::IsPlaying(){ + return isPlaying; +} + +void AudioOutputPulse::SetCurrentDevice(std::string 
devID){ + currentDevice=devID; + if(isPlaying && isConnected){ + pa_stream_disconnect(stream); + isConnected=false; + } + + pa_buffer_attr bufferAttr={ + .maxlength=960*6, + .tlength=960*6, + .prebuf=0, + .minreq=960*2 + }; + int streamFlags=PA_STREAM_START_CORKED | PA_STREAM_INTERPOLATE_TIMING | + PA_STREAM_NOT_MONOTONIC | PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_ADJUST_LATENCY; + + int err=pa_stream_connect_playback(stream, devID=="default" ? NULL : devID.c_str(), &bufferAttr, (pa_stream_flags_t)streamFlags, NULL, NULL); + if(err!=0 && devID!="default"){ + SetCurrentDevice("default"); + return; + } + CHECK_ERROR(err, "pa_stream_connect_playback"); + + while(true){ + pa_threaded_mainloop_lock(mainloop); + pa_stream_state_t streamState=pa_stream_get_state(stream); + pa_threaded_mainloop_unlock(mainloop); + if(!PA_STREAM_IS_GOOD(streamState)){ + LOGE("Error connecting to audio device '%s'", devID.c_str()); + failed=true; + return; + } + if(streamState==PA_STREAM_READY) + break; + pa_threaded_mainloop_wait(mainloop); + } + + isConnected=true; + + if(isPlaying){ + pa_operation_unref(pa_stream_cork(stream, 0, AudioOutputPulse::StreamSuccessCallback, mainloop)); + } +} + +bool AudioOutputPulse::EnumerateDevices(std::vector& devs){ + if(!PulseAudioLoader::IncRef()) + return false; + + pa_mainloop* ml; + pa_mainloop_api* mlAPI; + pa_context* ctx; + pa_operation* op=NULL; + int state=0; + int paReady=0; + + ml=pa_mainloop_new(); + mlAPI=pa_mainloop_get_api(ml); + ctx=pa_context_new(mlAPI, "libtgvoip"); + + pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL); + pa_context_set_state_callback(ctx, AudioOutputPulse::ContextStateCallbackEnum, &paReady); + + while(true){ + if(paReady==0){ + pa_mainloop_iterate(ml, 1, NULL); + continue; + } + if(paReady==2){ + pa_context_disconnect(ctx); + pa_context_unref(ctx); + pa_mainloop_free(ml); + PulseAudioLoader::DecRef(); + return false; + } + if(!op){ + op=pa_context_get_sink_info_list(ctx, AudioOutputPulse::DeviceEnumCallback, 
&devs); + continue; + } + if(pa_operation_get_state(op)==PA_OPERATION_DONE){ + pa_operation_unref(op); + pa_context_disconnect(ctx); + pa_context_unref(ctx); + pa_mainloop_free(ml); + PulseAudioLoader::DecRef(); + return true; + } + pa_mainloop_iterate(ml, 1, NULL); + } +} + +void AudioOutputPulse::ContextStateCallback(pa_context* context, void* arg) { + AudioOutputPulse* self=(AudioOutputPulse*) arg; + pa_threaded_mainloop_signal(self->mainloop, 0); +} + +void AudioOutputPulse::StreamStateCallback(pa_stream *s, void* arg) { + AudioOutputPulse* self=(AudioOutputPulse*) arg; + pa_threaded_mainloop_signal(self->mainloop, 0); +} + +void AudioOutputPulse::StreamWriteCallback(pa_stream *stream, size_t requestedBytes, void *userdata){ + ((AudioOutputPulse*)userdata)->StreamWriteCallback(stream, requestedBytes); +} + +void AudioOutputPulse::StreamWriteCallback(pa_stream *stream, size_t requestedBytes) { + int bytesRemaining = requestedBytes; + uint8_t *buffer = NULL; + while (bytesRemaining > 0) { + size_t bytesToFill = 102400; + size_t i; + + if (bytesToFill > bytesRemaining) bytesToFill = bytesRemaining; + + int err=pa_stream_begin_write(stream, (void**) &buffer, &bytesToFill); + CHECK_ERROR(err, "pa_stream_begin_write"); + + if(isPlaying){ + while(remainingDataSize=sizeof(remainingData)){ + LOGE("Can't provide %d bytes of audio data at a time", (int)bytesToFill); + failed=true; + pa_threaded_mainloop_unlock(mainloop); + return; + } + InvokeCallback(remainingData+remainingDataSize, 960*2); + remainingDataSize+=960*2; + } + memcpy(buffer, remainingData, bytesToFill); + memmove(remainingData, remainingData+bytesToFill, remainingDataSize-bytesToFill); + remainingDataSize-=bytesToFill; + }else{ + memset(buffer, 0, bytesToFill); + } + + err=pa_stream_write(stream, buffer, bytesToFill, NULL, 0LL, PA_SEEK_RELATIVE); + CHECK_ERROR(err, "pa_stream_write"); + + bytesRemaining -= bytesToFill; + } +} + +void AudioOutputPulse::StreamSuccessCallback(pa_stream *stream, int success, 
void *userdata) { + return; +} + +void AudioOutputPulse::ContextStateCallbackEnum(pa_context* context, void* arg){ + pa_context_state_t state; + int* pa_ready=(int*)arg; + + state=pa_context_get_state(context); + switch(state){ + case PA_CONTEXT_UNCONNECTED: + case PA_CONTEXT_CONNECTING: + case PA_CONTEXT_AUTHORIZING: + case PA_CONTEXT_SETTING_NAME: + default: + break; + case PA_CONTEXT_FAILED: + case PA_CONTEXT_TERMINATED: + *pa_ready=2; + break; + case PA_CONTEXT_READY: + *pa_ready=1; + break; + } +} + +void AudioOutputPulse::DeviceEnumCallback(pa_context* ctx, const pa_sink_info* info, int eol, void* userdata){ + if(eol>0) + return; + std::vector* devs=(std::vector*)userdata; + AudioOutputDevice dev; + dev.id=std::string(info->name); + dev.displayName=std::string(info->description); + devs->push_back(dev); +} diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputPulse.h b/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputPulse.h new file mode 100644 index 000000000..e2b7838d8 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/AudioOutputPulse.h @@ -0,0 +1,54 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOOUTPUTPULSE_H +#define LIBTGVOIP_AUDIOOUTPUTPULSE_H + +#include "../../audio/AudioOutput.h" +#include "../../threading.h" +#include + +namespace tgvoip{ +namespace audio{ + +class AudioOutputPulse : public AudioOutput{ +public: + AudioOutputPulse(std::string devID); + virtual ~AudioOutputPulse(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual bool IsPlaying(); + virtual void SetCurrentDevice(std::string devID); + static bool EnumerateDevices(std::vector& devs); + static bool IsAvailable(); + +private: + static void ContextStateCallback(pa_context* context, void* arg); + static void ContextStateCallbackEnum(pa_context* context, void* arg); + static void StreamStateCallback(pa_stream* s, void* arg); + static void StreamSuccessCallback(pa_stream* stream, int success, void* userdata); + static void StreamWriteCallback(pa_stream* stream, size_t requested_bytes, void* userdata); + static void DeviceEnumCallback(pa_context* ctx, const pa_sink_info* info, int eol, void* userdata); + void StreamWriteCallback(pa_stream* stream, size_t requestedBytes); + + pa_threaded_mainloop* mainloop; + pa_mainloop_api* mainloopApi; + pa_context* context; + pa_stream* stream; + + bool isPlaying; + bool isConnected; + bool didStart; + bool isLocked; + unsigned char remainingData[960*8*2]; + size_t remainingDataSize; +}; + +} +} + +#endif //LIBTGVOIP_AUDIOOUTPUTPULSE_H diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/PulseAudioLoader.cpp b/Telegram/ThirdParty/libtgvoip/os/linux/PulseAudioLoader.cpp new file mode 100644 index 000000000..d1008e967 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/PulseAudioLoader.cpp @@ -0,0 +1,120 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "PulseAudioLoader.h" +#include +#include "../../logging.h" + +#define DECLARE_DL_FUNCTION(name) typeof(name)* PulseAudioLoader::_import_##name=NULL +#define CHECK_DL_ERROR(res, msg) if(!res){LOGE(msg ": %s", dlerror()); dlclose(lib); return false;} +#define LOAD_DL_FUNCTION(name) {_import_##name=(typeof(_import_##name))dlsym(lib, #name); CHECK_DL_ERROR(_import_##name, "Error getting entry point for " #name);} + +using namespace tgvoip; + +int PulseAudioLoader::refCount=0; +void* PulseAudioLoader::lib=NULL; + +DECLARE_DL_FUNCTION(pa_threaded_mainloop_new); +DECLARE_DL_FUNCTION(pa_threaded_mainloop_get_api); +DECLARE_DL_FUNCTION(pa_context_new); +DECLARE_DL_FUNCTION(pa_context_set_state_callback); +DECLARE_DL_FUNCTION(pa_threaded_mainloop_lock); +DECLARE_DL_FUNCTION(pa_threaded_mainloop_unlock); +DECLARE_DL_FUNCTION(pa_threaded_mainloop_start); +DECLARE_DL_FUNCTION(pa_context_connect); +DECLARE_DL_FUNCTION(pa_context_get_state); +DECLARE_DL_FUNCTION(pa_threaded_mainloop_wait); +DECLARE_DL_FUNCTION(pa_stream_new); +DECLARE_DL_FUNCTION(pa_stream_set_state_callback); +DECLARE_DL_FUNCTION(pa_stream_set_write_callback); +DECLARE_DL_FUNCTION(pa_stream_connect_playback); +DECLARE_DL_FUNCTION(pa_operation_unref); +DECLARE_DL_FUNCTION(pa_stream_cork); +DECLARE_DL_FUNCTION(pa_threaded_mainloop_stop); +DECLARE_DL_FUNCTION(pa_stream_disconnect); +DECLARE_DL_FUNCTION(pa_stream_unref); +DECLARE_DL_FUNCTION(pa_context_disconnect); +DECLARE_DL_FUNCTION(pa_context_unref); +DECLARE_DL_FUNCTION(pa_threaded_mainloop_free); +DECLARE_DL_FUNCTION(pa_threaded_mainloop_signal); +DECLARE_DL_FUNCTION(pa_stream_begin_write); +DECLARE_DL_FUNCTION(pa_stream_write); +DECLARE_DL_FUNCTION(pa_stream_get_state); +DECLARE_DL_FUNCTION(pa_strerror); +DECLARE_DL_FUNCTION(pa_stream_set_read_callback); +DECLARE_DL_FUNCTION(pa_stream_connect_record); +DECLARE_DL_FUNCTION(pa_stream_peek); +DECLARE_DL_FUNCTION(pa_stream_drop); +DECLARE_DL_FUNCTION(pa_mainloop_new); 
+DECLARE_DL_FUNCTION(pa_mainloop_get_api); +DECLARE_DL_FUNCTION(pa_mainloop_iterate); +DECLARE_DL_FUNCTION(pa_mainloop_free); +DECLARE_DL_FUNCTION(pa_context_get_sink_info_list); +DECLARE_DL_FUNCTION(pa_context_get_source_info_list); +DECLARE_DL_FUNCTION(pa_operation_get_state); + +bool PulseAudioLoader::IncRef(){ + if(refCount==0){ + lib=dlopen("libpulse.so.0", RTLD_LAZY); + if(!lib) + lib=dlopen("libpulse.so", RTLD_LAZY); + if(!lib){ + LOGE("Error loading libpulse: %s", dlerror()); + return false; + } + } + + LOAD_DL_FUNCTION(pa_threaded_mainloop_new); + LOAD_DL_FUNCTION(pa_threaded_mainloop_get_api); + LOAD_DL_FUNCTION(pa_context_new); + LOAD_DL_FUNCTION(pa_context_set_state_callback); + LOAD_DL_FUNCTION(pa_threaded_mainloop_lock); + LOAD_DL_FUNCTION(pa_threaded_mainloop_unlock); + LOAD_DL_FUNCTION(pa_threaded_mainloop_start); + LOAD_DL_FUNCTION(pa_context_connect); + LOAD_DL_FUNCTION(pa_context_get_state); + LOAD_DL_FUNCTION(pa_threaded_mainloop_wait); + LOAD_DL_FUNCTION(pa_stream_new); + LOAD_DL_FUNCTION(pa_stream_set_state_callback); + LOAD_DL_FUNCTION(pa_stream_set_write_callback); + LOAD_DL_FUNCTION(pa_stream_connect_playback); + LOAD_DL_FUNCTION(pa_operation_unref); + LOAD_DL_FUNCTION(pa_stream_cork); + LOAD_DL_FUNCTION(pa_threaded_mainloop_stop); + LOAD_DL_FUNCTION(pa_stream_disconnect); + LOAD_DL_FUNCTION(pa_stream_unref); + LOAD_DL_FUNCTION(pa_context_disconnect); + LOAD_DL_FUNCTION(pa_context_unref); + LOAD_DL_FUNCTION(pa_threaded_mainloop_free); + LOAD_DL_FUNCTION(pa_threaded_mainloop_signal); + LOAD_DL_FUNCTION(pa_stream_begin_write); + LOAD_DL_FUNCTION(pa_stream_write); + LOAD_DL_FUNCTION(pa_stream_get_state); + LOAD_DL_FUNCTION(pa_strerror); + LOAD_DL_FUNCTION(pa_stream_set_read_callback); + LOAD_DL_FUNCTION(pa_stream_connect_record); + LOAD_DL_FUNCTION(pa_stream_peek); + LOAD_DL_FUNCTION(pa_stream_drop); + LOAD_DL_FUNCTION(pa_mainloop_new); + LOAD_DL_FUNCTION(pa_mainloop_get_api); + LOAD_DL_FUNCTION(pa_mainloop_iterate); + 
LOAD_DL_FUNCTION(pa_mainloop_free); + LOAD_DL_FUNCTION(pa_context_get_sink_info_list); + LOAD_DL_FUNCTION(pa_context_get_source_info_list); + LOAD_DL_FUNCTION(pa_operation_get_state); + + refCount++; + return true; +} + +void PulseAudioLoader::DecRef(){ + if(refCount>0) + refCount--; + if(refCount==0){ + dlclose(lib); + lib=NULL; + } +} \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/linux/PulseAudioLoader.h b/Telegram/ThirdParty/libtgvoip/os/linux/PulseAudioLoader.h new file mode 100644 index 000000000..aa34f092c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/linux/PulseAudioLoader.h @@ -0,0 +1,109 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef LIBTGVOIP_PULSEAUDIOLOADER_H +#define LIBTGVOIP_PULSEAUDIOLOADER_H + +#include + +#define DECLARE_DL_FUNCTION(name) static typeof(name)* _import_##name + +namespace tgvoip{ +class PulseAudioLoader{ +public: + static bool IncRef(); + static void DecRef(); + + DECLARE_DL_FUNCTION(pa_threaded_mainloop_new); + DECLARE_DL_FUNCTION(pa_threaded_mainloop_get_api); + DECLARE_DL_FUNCTION(pa_context_new); + DECLARE_DL_FUNCTION(pa_context_set_state_callback); + DECLARE_DL_FUNCTION(pa_threaded_mainloop_lock); + DECLARE_DL_FUNCTION(pa_threaded_mainloop_unlock); + DECLARE_DL_FUNCTION(pa_threaded_mainloop_start); + DECLARE_DL_FUNCTION(pa_context_connect); + DECLARE_DL_FUNCTION(pa_context_get_state); + DECLARE_DL_FUNCTION(pa_threaded_mainloop_wait); + DECLARE_DL_FUNCTION(pa_stream_new); + DECLARE_DL_FUNCTION(pa_stream_set_state_callback); + DECLARE_DL_FUNCTION(pa_stream_set_write_callback); + DECLARE_DL_FUNCTION(pa_stream_connect_playback); + DECLARE_DL_FUNCTION(pa_operation_unref); + DECLARE_DL_FUNCTION(pa_stream_cork); + DECLARE_DL_FUNCTION(pa_threaded_mainloop_stop); + DECLARE_DL_FUNCTION(pa_stream_disconnect); + 
DECLARE_DL_FUNCTION(pa_stream_unref); + DECLARE_DL_FUNCTION(pa_context_disconnect); + DECLARE_DL_FUNCTION(pa_context_unref); + DECLARE_DL_FUNCTION(pa_threaded_mainloop_free); + DECLARE_DL_FUNCTION(pa_threaded_mainloop_signal); + DECLARE_DL_FUNCTION(pa_stream_begin_write); + DECLARE_DL_FUNCTION(pa_stream_write); + DECLARE_DL_FUNCTION(pa_stream_get_state); + DECLARE_DL_FUNCTION(pa_strerror); + DECLARE_DL_FUNCTION(pa_stream_set_read_callback); + DECLARE_DL_FUNCTION(pa_stream_connect_record); + DECLARE_DL_FUNCTION(pa_stream_peek); + DECLARE_DL_FUNCTION(pa_stream_drop); + + DECLARE_DL_FUNCTION(pa_mainloop_new); + DECLARE_DL_FUNCTION(pa_mainloop_get_api); + DECLARE_DL_FUNCTION(pa_mainloop_iterate); + DECLARE_DL_FUNCTION(pa_mainloop_free); + DECLARE_DL_FUNCTION(pa_context_get_sink_info_list); + DECLARE_DL_FUNCTION(pa_context_get_source_info_list); + DECLARE_DL_FUNCTION(pa_operation_get_state); + +private: + static void* lib; + static int refCount; +}; +} + +#undef DECLARE_DL_FUNCTION + +#ifdef TGVOIP_IN_AUDIO_IO +#define pa_threaded_mainloop_new PulseAudioLoader::_import_pa_threaded_mainloop_new +#define pa_threaded_mainloop_get_api PulseAudioLoader::_import_pa_threaded_mainloop_get_api +#define pa_context_new PulseAudioLoader::_import_pa_context_new +#define pa_context_set_state_callback PulseAudioLoader::_import_pa_context_set_state_callback +#define pa_threaded_mainloop_lock PulseAudioLoader::_import_pa_threaded_mainloop_lock +#define pa_threaded_mainloop_unlock PulseAudioLoader::_import_pa_threaded_mainloop_unlock +#define pa_threaded_mainloop_start PulseAudioLoader::_import_pa_threaded_mainloop_start +#define pa_context_connect PulseAudioLoader::_import_pa_context_connect +#define pa_context_get_state PulseAudioLoader::_import_pa_context_get_state +#define pa_threaded_mainloop_wait PulseAudioLoader::_import_pa_threaded_mainloop_wait +#define pa_stream_new PulseAudioLoader::_import_pa_stream_new +#define pa_stream_set_state_callback 
PulseAudioLoader::_import_pa_stream_set_state_callback +#define pa_stream_set_write_callback PulseAudioLoader::_import_pa_stream_set_write_callback +#define pa_stream_connect_playback PulseAudioLoader::_import_pa_stream_connect_playback +#define pa_operation_unref PulseAudioLoader::_import_pa_operation_unref +#define pa_stream_cork PulseAudioLoader::_import_pa_stream_cork +#define pa_threaded_mainloop_stop PulseAudioLoader::_import_pa_threaded_mainloop_stop +#define pa_stream_disconnect PulseAudioLoader::_import_pa_stream_disconnect +#define pa_stream_unref PulseAudioLoader::_import_pa_stream_unref +#define pa_context_disconnect PulseAudioLoader::_import_pa_context_disconnect +#define pa_context_unref PulseAudioLoader::_import_pa_context_unref +#define pa_threaded_mainloop_free PulseAudioLoader::_import_pa_threaded_mainloop_free +#define pa_threaded_mainloop_signal PulseAudioLoader::_import_pa_threaded_mainloop_signal +#define pa_stream_begin_write PulseAudioLoader::_import_pa_stream_begin_write +#define pa_stream_write PulseAudioLoader::_import_pa_stream_write +#define pa_strerror PulseAudioLoader::_import_pa_strerror +#define pa_stream_get_state PulseAudioLoader::_import_pa_stream_get_state +#define pa_stream_set_read_callback PulseAudioLoader::_import_pa_stream_set_read_callback +#define pa_stream_connect_record PulseAudioLoader::_import_pa_stream_connect_record +#define pa_stream_peek PulseAudioLoader::_import_pa_stream_peek +#define pa_stream_drop PulseAudioLoader::_import_pa_stream_drop +#define pa_mainloop_new PulseAudioLoader::_import_pa_mainloop_new +#define pa_mainloop_get_api PulseAudioLoader::_import_pa_mainloop_get_api +#define pa_mainloop_iterate PulseAudioLoader::_import_pa_mainloop_iterate +#define pa_mainloop_free PulseAudioLoader::_import_pa_mainloop_free +#define pa_context_get_sink_info_list PulseAudioLoader::_import_pa_context_get_sink_info_list +#define pa_context_get_source_info_list PulseAudioLoader::_import_pa_context_get_source_info_list 
+#define pa_operation_get_state PulseAudioLoader::_import_pa_operation_get_state +#endif + +#endif // LIBTGVOIP_PULSEAUDIOLOADER_H \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/posix/NetworkSocketPosix.cpp b/Telegram/ThirdParty/libtgvoip/os/posix/NetworkSocketPosix.cpp new file mode 100644 index 000000000..a0ddd5f19 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/posix/NetworkSocketPosix.cpp @@ -0,0 +1,522 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#include "NetworkSocketPosix.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../logging.h" +#include "../../VoIPController.h" +#include "../../BufferInputStream.h" +#include "../../BufferOutputStream.h" + +using namespace tgvoip; + + +NetworkSocketPosix::NetworkSocketPosix(NetworkProtocol protocol) : NetworkSocket(protocol), lastRecvdV4(0), lastRecvdV6("::0"){ + needUpdateNat64Prefix=true; + nat64Present=false; + switchToV6at=0; + isV4Available=false; + fd=-1; + useTCP=false; + closing=false; + + tcpConnectedAddress=NULL; + tcpConnectedPort=0; +} + +NetworkSocketPosix::~NetworkSocketPosix(){ + if(tcpConnectedAddress) + delete tcpConnectedAddress; +} + +void NetworkSocketPosix::SetMaxPriority(){ +#ifdef __APPLE__ + int prio=NET_SERVICE_TYPE_VO; + int res=setsockopt(fd, SOL_SOCKET, SO_NET_SERVICE_TYPE, &prio, sizeof(prio)); + if(res<0){ + LOGE("error setting darwin-specific net priority: %d / %s", errno, strerror(errno)); + } +#else + int prio=5; + int res=setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)); + if(res<0){ + LOGE("error setting priority: %d / %s", errno, strerror(errno)); + } + prio=6 << 5; + res=setsockopt(fd, SOL_IP, IP_TOS, &prio, sizeof(prio)); + if(res<0){ + LOGE("error setting ip tos: %d / %s", errno, strerror(errno)); + } +#endif +} + +void 
NetworkSocketPosix::Send(NetworkPacket *packet){ + if(!packet || !packet->address){ + LOGW("tried to send null packet"); + return; + } + int res; + if(protocol==PROTO_UDP){ + sockaddr_in6 addr; + IPv4Address *v4addr=dynamic_cast(packet->address); + if(v4addr){ + if(needUpdateNat64Prefix && !isV4Available && VoIPController::GetCurrentTime()>switchToV6at && switchToV6at!=0){ + LOGV("Updating NAT64 prefix"); + nat64Present=false; + addrinfo *addr0; + int res=getaddrinfo("ipv4only.arpa", NULL, NULL, &addr0); + if(res!=0){ + LOGW("Error updating NAT64 prefix: %d / %s", res, gai_strerror(res)); + }else{ + addrinfo *addrPtr; + unsigned char *addr170=NULL; + unsigned char *addr171=NULL; + for(addrPtr=addr0; addrPtr; addrPtr=addrPtr->ai_next){ + if(addrPtr->ai_family==AF_INET6){ + sockaddr_in6 *translatedAddr=(sockaddr_in6 *) addrPtr->ai_addr; + uint32_t v4part=*((uint32_t *) &translatedAddr->sin6_addr.s6_addr[12]); + if(v4part==0xAA0000C0 && !addr170){ + addr170=translatedAddr->sin6_addr.s6_addr; + } + if(v4part==0xAB0000C0 && !addr171){ + addr171=translatedAddr->sin6_addr.s6_addr; + } + char buf[INET6_ADDRSTRLEN]; + LOGV("Got translated address: %s", inet_ntop(AF_INET6, &translatedAddr->sin6_addr, buf, sizeof(buf))); + } + } + if(addr170 && addr171 && memcmp(addr170, addr171, 12)==0){ + nat64Present=true; + memcpy(nat64Prefix, addr170, 12); + char buf[INET6_ADDRSTRLEN]; + LOGV("Found nat64 prefix from %s", inet_ntop(AF_INET6, addr170, buf, sizeof(buf))); + }else{ + LOGV("Didn't find nat64"); + } + freeaddrinfo(addr0); + } + needUpdateNat64Prefix=false; + } + memset(&addr, 0, sizeof(sockaddr_in6)); + addr.sin6_family=AF_INET6; + *((uint32_t *) &addr.sin6_addr.s6_addr[12])=v4addr->GetAddress(); + if(nat64Present) + memcpy(addr.sin6_addr.s6_addr, nat64Prefix, 12); + else + addr.sin6_addr.s6_addr[11]=addr.sin6_addr.s6_addr[10]=0xFF; + + }else{ + IPv6Address *v6addr=dynamic_cast(packet->address); + assert(v6addr!=NULL); + memcpy(addr.sin6_addr.s6_addr, v6addr->GetAddress(), 
16); + } + addr.sin6_port=htons(packet->port); + res=sendto(fd, packet->data, packet->length, 0, (const sockaddr *) &addr, sizeof(addr)); + }else{ + res=send(fd, packet->data, packet->length, 0); + } + if(res<0){ + LOGE("error sending: %d / %s", errno, strerror(errno)); + if(errno==ENETUNREACH && !isV4Available && VoIPController::GetCurrentTime()data, packet->length, 0, (sockaddr *) &srcAddr, (socklen_t *) &addrLen); + if(len>0) + packet->length=(size_t) len; + else{ + LOGE("error receiving %d / %s", errno, strerror(errno)); + packet->length=0; + return; + } + //LOGV("Received %d bytes from %s:%d at %.5lf", len, inet_ntoa(srcAddr.sin_addr), ntohs(srcAddr.sin_port), GetCurrentTime()); + if(!isV4Available && IN6_IS_ADDR_V4MAPPED(&srcAddr.sin6_addr)){ + isV4Available=true; + LOGI("Detected IPv4 connectivity, will not try IPv6"); + } + if(IN6_IS_ADDR_V4MAPPED(&srcAddr.sin6_addr) || (nat64Present && memcmp(nat64Prefix, srcAddr.sin6_addr.s6_addr, 12)==0)){ + in_addr v4addr=*((in_addr *) &srcAddr.sin6_addr.s6_addr[12]); + lastRecvdV4=IPv4Address(v4addr.s_addr); + packet->address=&lastRecvdV4; + }else{ + lastRecvdV6=IPv6Address(srcAddr.sin6_addr.s6_addr); + packet->address=&lastRecvdV6; + } + packet->protocol=PROTO_UDP; + packet->port=ntohs(srcAddr.sin6_port); + }else if(protocol==PROTO_TCP){ + int res=recv(fd, packet->data, packet->length, 0); + if(res<=0){ + LOGE("Error receiving from TCP socket: %d / %s", errno, strerror(errno)); + failed=true; + }else{ + packet->length=(size_t)res; + packet->address=tcpConnectedAddress; + packet->port=tcpConnectedPort; + packet->protocol=PROTO_TCP; + } + } +} + +void NetworkSocketPosix::Open(){ + if(protocol!=PROTO_UDP) + return; + fd=socket(PF_INET6, SOCK_DGRAM, IPPROTO_UDP); + if(fd<0){ + LOGE("error creating socket: %d / %s", errno, strerror(errno)); + failed=true; + return; + } + int flag=0; + int res=setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &flag, sizeof(flag)); + if(res<0){ + LOGE("error enabling dual stack socket: %d / %s", 
errno, strerror(errno)); + failed=true; + return; + } + + SetMaxPriority(); + + int tries=0; + sockaddr_in6 addr; + //addr.sin6_addr.s_addr=0; + memset(&addr, 0, sizeof(sockaddr_in6)); + //addr.sin6_len=sizeof(sa_family_t); + addr.sin6_family=AF_INET6; + for(tries=0;tries<10;tries++){ + addr.sin6_port=htons(GenerateLocalPort()); + res=::bind(fd, (sockaddr *) &addr, sizeof(sockaddr_in6)); + LOGV("trying bind to port %u", ntohs(addr.sin6_port)); + if(res<0){ + LOGE("error binding to port %u: %d / %s", ntohs(addr.sin6_port), errno, strerror(errno)); + }else{ + break; + } + } + if(tries==10){ + addr.sin6_port=0; + res=::bind(fd, (sockaddr *) &addr, sizeof(sockaddr_in6)); + if(res<0){ + LOGE("error binding to port %u: %d / %s", ntohs(addr.sin6_port), errno, strerror(errno)); + //SetState(STATE_FAILED); + failed=true; + return; + } + } + size_t addrLen=sizeof(sockaddr_in6); + getsockname(fd, (sockaddr*)&addr, (socklen_t*) &addrLen); + uint16_t localUdpPort=ntohs(addr.sin6_port); + LOGD("Bound to local UDP port %u", ntohs(addr.sin6_port)); + + needUpdateNat64Prefix=true; + isV4Available=false; + switchToV6at=VoIPController::GetCurrentTime()+ipv6Timeout; +} + +void NetworkSocketPosix::Close(){ + closing=true; + failed=true; + + if (fd>=0) { + shutdown(fd, SHUT_RDWR); + close(fd); + } +} + +void NetworkSocketPosix::Connect(NetworkAddress *address, uint16_t port){ + IPv4Address* v4addr=dynamic_cast(address); + IPv6Address* v6addr=dynamic_cast(address); + sockaddr_in v4; + sockaddr_in6 v6; + sockaddr* addr=NULL; + size_t addrLen=0; + if(v4addr){ + v4.sin_family=AF_INET; + v4.sin_addr.s_addr=v4addr->GetAddress(); + v4.sin_port=htons(port); + addr=reinterpret_cast(&v4); + addrLen=sizeof(v4); + }else if(v6addr){ + v6.sin6_family=AF_INET6; + memcpy(v6.sin6_addr.s6_addr, v6addr->GetAddress(), 16); + v6.sin6_flowinfo=0; + v6.sin6_scope_id=0; + v6.sin6_port=htons(port); + addr=reinterpret_cast(&v6); + addrLen=sizeof(v6); + }else{ + LOGE("Unknown address type in TCP connect"); + 
failed=true; + return; + } + fd=socket(addr->sa_family, SOCK_STREAM, IPPROTO_TCP); + if(fd<0){ + LOGE("Error creating TCP socket: %d / %s", errno, strerror(errno)); + failed=true; + return; + } + int opt=1; + setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt)); + timeval timeout; + timeout.tv_sec=5; + timeout.tv_usec=0; + setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(timeout)); + timeout.tv_sec=60; + setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof(timeout)); + int res=connect(fd, (const sockaddr*) addr, addrLen); + if(res!=0){ + LOGW("error connecting TCP socket to %s:%u: %d / %s; %d / %s", address->ToString().c_str(), port, res, strerror(res), errno, strerror(errno)); + close(fd); + failed=true; + return; + } + tcpConnectedAddress=v4addr ? (NetworkAddress*)new IPv4Address(*v4addr) : (NetworkAddress*)new IPv6Address(*v6addr); + tcpConnectedPort=port; + LOGI("successfully connected to %s:%d", tcpConnectedAddress->ToString().c_str(), tcpConnectedPort); +} + +void NetworkSocketPosix::OnActiveInterfaceChanged(){ + needUpdateNat64Prefix=true; + isV4Available=false; + switchToV6at=VoIPController::GetCurrentTime()+ipv6Timeout; +} + +std::string NetworkSocketPosix::GetLocalInterfaceInfo(IPv4Address *v4addr, IPv6Address *v6addr){ + struct ifconf ifc; + struct ifreq* ifr; + char buf[16384]; + int sd; + std::string name=""; + sd=socket(PF_INET, SOCK_DGRAM, 0); + if(sd>0){ + ifc.ifc_len=sizeof(buf); + ifc.ifc_ifcu.ifcu_buf=buf; + if(ioctl(sd, SIOCGIFCONF, &ifc)==0){ + ifr=ifc.ifc_req; + int len; + int i; + for(i=0;iifr_addr.sa_len; +#else + len=sizeof(*ifr); +#endif + if(ifr->ifr_addr.sa_family==AF_INET){ + if(ioctl(sd, SIOCGIFADDR, ifr)==0){ + struct sockaddr_in* addr=(struct sockaddr_in *)(&ifr->ifr_addr); + LOGI("Interface %s, address %s\n", ifr->ifr_name, inet_ntoa(addr->sin_addr)); + if(ioctl(sd, SIOCGIFFLAGS, ifr)==0){ + if(!(ifr->ifr_flags & IFF_LOOPBACK) && (ifr->ifr_flags & IFF_UP) && (ifr->ifr_flags & IFF_RUNNING)){ + //LOGV("flags = 
%08X", ifr->ifr_flags); + if((ntohl(addr->sin_addr.s_addr) & 0xFFFF0000)==0xA9FE0000){ + LOGV("skipping link-local"); + continue; + } + if(v4addr){ + *v4addr=IPv4Address(addr->sin_addr.s_addr); + } + name=ifr->ifr_name; + } + } + }else{ + LOGE("Error getting address for %s: %d\n", ifr->ifr_name, errno); + } + } + ifr=(struct ifreq*)((char*)ifr+len); + i+=len; + } + }else{ + LOGE("Error getting LAN address: %d", errno); + } + } + close(sd); + return name; +} + +uint16_t NetworkSocketPosix::GetLocalPort(){ + sockaddr_in6 addr; + size_t addrLen=sizeof(sockaddr_in6); + getsockname(fd, (sockaddr*)&addr, (socklen_t*) &addrLen); + return ntohs(addr.sin6_port); +} + +std::string NetworkSocketPosix::V4AddressToString(uint32_t address){ + char buf[INET_ADDRSTRLEN]; + in_addr addr; + addr.s_addr=address; + inet_ntop(AF_INET, &addr, buf, sizeof(buf)); + return std::string(buf); +} + +std::string NetworkSocketPosix::V6AddressToString(unsigned char *address){ + char buf[INET6_ADDRSTRLEN]; + in6_addr addr; + memcpy(addr.s6_addr, address, 16); + inet_ntop(AF_INET6, &addr, buf, sizeof(buf)); + return std::string(buf); +} + +uint32_t NetworkSocketPosix::StringToV4Address(std::string address){ + in_addr addr; + inet_pton(AF_INET, address.c_str(), &addr); + return addr.s_addr; +} + +void NetworkSocketPosix::StringToV6Address(std::string address, unsigned char *out){ + in6_addr addr; + inet_pton(AF_INET6, address.c_str(), &addr); + memcpy(out, addr.s6_addr, 16); +} + +IPv4Address *NetworkSocketPosix::ResolveDomainName(std::string name){ + addrinfo* addr0; + IPv4Address* ret=NULL; + int res=getaddrinfo(name.c_str(), NULL, NULL, &addr0); + if(res!=0){ + LOGW("Error updating NAT64 prefix: %d / %s", res, gai_strerror(res)); + }else{ + addrinfo* addrPtr; + for(addrPtr=addr0;addrPtr;addrPtr=addrPtr->ai_next){ + if(addrPtr->ai_family==AF_INET){ + sockaddr_in* addr=(sockaddr_in*)addrPtr->ai_addr; + ret=new IPv4Address(addr->sin_addr.s_addr); + break; + } + } + freeaddrinfo(addr0); + } + return 
ret; +} + +NetworkAddress *NetworkSocketPosix::GetConnectedAddress(){ + return tcpConnectedAddress; +} + +uint16_t NetworkSocketPosix::GetConnectedPort(){ + return tcpConnectedPort; +} + +void NetworkSocketPosix::SetTimeouts(int sendTimeout, int recvTimeout){ + timeval timeout; + timeout.tv_sec=sendTimeout; + timeout.tv_usec=0; + setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(timeout)); + timeout.tv_sec=recvTimeout; + setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof(timeout)); +} + +bool NetworkSocketPosix::Select(std::vector &readFds, std::vector &errorFds, SocketSelectCanceller* _canceller){ + fd_set readSet; + fd_set errorSet; + FD_ZERO(&readSet); + FD_ZERO(&errorSet); + SocketSelectCancellerPosix* canceller=dynamic_cast(_canceller); + if(canceller) + FD_SET(canceller->pipeRead, &readSet); + + int maxfd=canceller ? canceller->pipeRead : 0; + + for(std::vector::iterator itr=readFds.begin();itr!=readFds.end();++itr){ + int sfd=GetDescriptorFromSocket(*itr); + if(sfd==0){ + LOGW("can't select on one of sockets because it's not a NetworkSocketPosix instance"); + continue; + } + FD_SET(sfd, &readSet); + if(maxfd::iterator itr=errorFds.begin();itr!=errorFds.end();++itr){ + int sfd=GetDescriptorFromSocket(*itr); + if(sfd==0){ + LOGW("can't select on one of sockets because it's not a NetworkSocketPosix instance"); + continue; + } + anyFailed |= (*itr)->IsFailed(); + FD_SET(sfd, &errorSet); + if(maxfdpipeRead, &readSet) && !anyFailed){ + char c; + read(canceller->pipeRead, &c, 1); + return false; + }else if(anyFailed){ + FD_ZERO(&readSet); + FD_ZERO(&errorSet); + } + + std::vector::iterator itr=readFds.begin(); + while(itr!=readFds.end()){ + int sfd=GetDescriptorFromSocket(*itr); + if(sfd==0 || !FD_ISSET(sfd, &readSet)){ + itr=readFds.erase(itr); + }else{ + ++itr; + } + } + + itr=errorFds.begin(); + while(itr!=errorFds.end()){ + int sfd=GetDescriptorFromSocket(*itr); + if((sfd==0 || !FD_ISSET(sfd, &errorSet)) && !(*itr)->IsFailed()){ + 
itr=errorFds.erase(itr); + }else{ + ++itr; + } + } + //LOGV("select fds left: read=%d, error=%d", readFds.size(), errorFds.size()); + + return readFds.size()>0 || errorFds.size()>0; +} + +SocketSelectCancellerPosix::SocketSelectCancellerPosix(){ + int p[2]; + int pipeRes=pipe(p); + assert(pipeRes==0); + pipeRead=p[0]; + pipeWrite=p[1]; +} + +SocketSelectCancellerPosix::~SocketSelectCancellerPosix(){ + close(pipeRead); + close(pipeWrite); +} + +void SocketSelectCancellerPosix::CancelSelect(){ + char c=1; + write(pipeWrite, &c, 1); +} + +int NetworkSocketPosix::GetDescriptorFromSocket(NetworkSocket *socket){ + NetworkSocketPosix* sp=dynamic_cast(socket); + if(sp) + return sp->fd; + NetworkSocketWrapper* sw=dynamic_cast(socket); + if(sw) + return GetDescriptorFromSocket(sw->GetWrapped()); + return 0; +} diff --git a/Telegram/ThirdParty/libtgvoip/os/posix/NetworkSocketPosix.h b/Telegram/ThirdParty/libtgvoip/os/posix/NetworkSocketPosix.h new file mode 100644 index 000000000..86dbe3102 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/posix/NetworkSocketPosix.h @@ -0,0 +1,74 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_NETWORKSOCKETPOSIX_H +#define LIBTGVOIP_NETWORKSOCKETPOSIX_H + +#include "../../NetworkSocket.h" +#include +#include +#include + +namespace tgvoip { + +class SocketSelectCancellerPosix : public SocketSelectCanceller{ +friend class NetworkSocketPosix; +public: + SocketSelectCancellerPosix(); + virtual ~SocketSelectCancellerPosix(); + virtual void CancelSelect(); +private: + int pipeRead; + int pipeWrite; +}; + +class NetworkSocketPosix : public NetworkSocket{ +public: + NetworkSocketPosix(NetworkProtocol protocol); + virtual ~NetworkSocketPosix(); + virtual void Send(NetworkPacket* packet); + virtual void Receive(NetworkPacket* packet); + virtual void Open(); + virtual void Close(); + virtual void Connect(NetworkAddress* address, uint16_t port); + virtual std::string GetLocalInterfaceInfo(IPv4Address* v4addr, IPv6Address* v6addr); + virtual void OnActiveInterfaceChanged(); + virtual uint16_t GetLocalPort(); + + static std::string V4AddressToString(uint32_t address); + static std::string V6AddressToString(unsigned char address[16]); + static uint32_t StringToV4Address(std::string address); + static void StringToV6Address(std::string address, unsigned char* out); + static IPv4Address* ResolveDomainName(std::string name); + static bool Select(std::vector& readFds, std::vector& errorFds, SocketSelectCanceller* canceller); + + virtual NetworkAddress *GetConnectedAddress(); + + virtual uint16_t GetConnectedPort(); + + virtual void SetTimeouts(int sendTimeout, int recvTimeout); + +protected: + virtual void SetMaxPriority(); + +private: + static int GetDescriptorFromSocket(NetworkSocket* socket); + int fd; + bool needUpdateNat64Prefix; + bool nat64Present; + double switchToV6at; + bool isV4Available; + bool useTCP; + bool closing; + IPv4Address lastRecvdV4; + IPv6Address lastRecvdV6; + NetworkAddress* tcpConnectedAddress; + uint16_t tcpConnectedPort; +}; + +} + +#endif //LIBTGVOIP_NETWORKSOCKETPOSIX_H diff --git 
a/Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWASAPI.cpp b/Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWASAPI.cpp new file mode 100644 index 000000000..0de718bfc --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWASAPI.cpp @@ -0,0 +1,431 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + + +#include +#include "AudioInputWASAPI.h" +#include "../../logging.h" +#include "../../VoIPController.h" + +#define BUFFER_SIZE 960 +#define CHECK_RES(res, msg) {if(FAILED(res)){LOGE("%s failed: HRESULT=0x%08X", msg, res); failed=true; return;}} +#define SCHECK_RES(res, msg) {if(FAILED(res)){LOGE("%s failed: HRESULT=0x%08X", msg, res); return;}} + +template void SafeRelease(T **ppT) +{ + if(*ppT) + { + (*ppT)->Release(); + *ppT = NULL; + } +} + +using namespace tgvoip::audio; + +AudioInputWASAPI::AudioInputWASAPI(std::string deviceID){ + isRecording=false; + remainingDataLen=0; + refCount=1; + HRESULT res; + res=CoInitializeEx(NULL, COINIT_APARTMENTTHREADED); + CHECK_RES(res, "CoInitializeEx"); +#ifdef TGVOIP_WINXP_COMPAT + HANDLE (WINAPI *__CreateEventExA)(LPSECURITY_ATTRIBUTES lpEventAttributes, LPCSTR lpName, DWORD dwFlags, DWORD dwDesiredAccess); + __CreateEventExA=(HANDLE (WINAPI *)(LPSECURITY_ATTRIBUTES, LPCSTR, DWORD, DWORD))GetProcAddress(GetModuleHandleA("kernel32.dll"), "CreateEventExA"); +#undef CreateEventEx +#define CreateEventEx __CreateEventExA +#endif + shutdownEvent=CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE); + audioSamplesReadyEvent=CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE); + streamSwitchEvent=CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE); + ZeroMemory(&format, sizeof(format)); + format.wFormatTag=WAVE_FORMAT_PCM; + format.nChannels=1; + format.nSamplesPerSec=48000; + format.nBlockAlign=2; + 
format.nAvgBytesPerSec=format.nSamplesPerSec*format.nBlockAlign; + format.wBitsPerSample=16; + +#ifdef TGVOIP_WINDOWS_DESKTOP + res=CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&enumerator)); + CHECK_RES(res, "CoCreateInstance(MMDeviceEnumerator)"); + res=enumerator->RegisterEndpointNotificationCallback(this); + CHECK_RES(res, "enumerator->RegisterEndpointNotificationCallback"); + audioSessionControl=NULL; + device=NULL; +#endif + + audioClient=NULL; + captureClient=NULL; + thread=NULL; + started=false; + + SetCurrentDevice(deviceID); +} + +AudioInputWASAPI::~AudioInputWASAPI(){ + if(audioClient && started){ + audioClient->Stop(); + } + +#ifdef TGVOIP_WINDOWS_DESKTOP + if(audioSessionControl){ + audioSessionControl->UnregisterAudioSessionNotification(this); + } +#endif + + SetEvent(shutdownEvent); + if(thread){ + WaitForSingleObjectEx(thread, INFINITE, false); + CloseHandle(thread); + } +#ifdef TGVOIP_WINDOWS_DESKTOP + SafeRelease(&audioSessionControl); +#endif + SafeRelease(&captureClient); + SafeRelease(&audioClient); +#ifdef TGVOIP_WINDOWS_DESKTOP + SafeRelease(&device); +#endif + CloseHandle(shutdownEvent); + CloseHandle(audioSamplesReadyEvent); + CloseHandle(streamSwitchEvent); +#ifdef TGVOIP_WINDOWS_DESKTOP + if(enumerator) + enumerator->UnregisterEndpointNotificationCallback(this); + SafeRelease(&enumerator); +#endif +} + +void AudioInputWASAPI::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + +} + +void AudioInputWASAPI::Start(){ + isRecording=true; + if(!thread){ + thread=CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)AudioInputWASAPI::StartThread, this, 0, NULL); + } + + started=true; + if(audioClient){ + audioClient->Start(); + } +} + +void AudioInputWASAPI::Stop(){ + isRecording=false; +} + +bool AudioInputWASAPI::IsRecording(){ + return isRecording; +} + +void AudioInputWASAPI::EnumerateDevices(std::vector& devs){ +#ifdef TGVOIP_WINDOWS_DESKTOP + HRESULT res; + 
res=CoInitializeEx(NULL, COINIT_APARTMENTTHREADED); + SCHECK_RES(res, "CoInitializeEx"); + + IMMDeviceEnumerator *deviceEnumerator = NULL; + IMMDeviceCollection *deviceCollection = NULL; + + res=CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&deviceEnumerator)); + SCHECK_RES(res, "CoCreateInstance(MMDeviceEnumerator)"); + + res=deviceEnumerator->EnumAudioEndpoints(eCapture, DEVICE_STATE_ACTIVE, &deviceCollection); + SCHECK_RES(res, "EnumAudioEndpoints"); + + UINT devCount; + res=deviceCollection->GetCount(&devCount); + SCHECK_RES(res, "GetCount"); + + for(UINT i=0;iItem(i, &device); + SCHECK_RES(res, "GetDeviceItem"); + wchar_t* devID; + res=device->GetId(&devID); + SCHECK_RES(res, "get device id"); + + IPropertyStore* propStore; + res=device->OpenPropertyStore(STGM_READ, &propStore); + SafeRelease(&device); + SCHECK_RES(res, "OpenPropertyStore"); + + PROPVARIANT friendlyName; + PropVariantInit(&friendlyName); + res=propStore->GetValue(PKEY_Device_FriendlyName, &friendlyName); + SafeRelease(&propStore); + + AudioInputDevice dev; + + wchar_t actualFriendlyName[128]; + if(friendlyName.vt==VT_LPWSTR){ + wcsncpy(actualFriendlyName, friendlyName.pwszVal, sizeof(actualFriendlyName)/sizeof(wchar_t)); + }else{ + wcscpy(actualFriendlyName, L"Unknown"); + } + PropVariantClear(&friendlyName); + + char buf[256]; + WideCharToMultiByte(CP_UTF8, 0, devID, -1, buf, sizeof(buf), NULL, NULL); + dev.id=buf; + WideCharToMultiByte(CP_UTF8, 0, actualFriendlyName, -1, buf, sizeof(buf), NULL, NULL); + dev.displayName=buf; + devs.push_back(dev); + + CoTaskMemFree(devID); + } + + SafeRelease(&deviceCollection); + SafeRelease(&deviceEnumerator); +#endif +} + +void AudioInputWASAPI::SetCurrentDevice(std::string deviceID){ + if(thread){ + streamChangeToDevice=deviceID; + SetEvent(streamSwitchEvent); + }else{ + ActuallySetCurrentDevice(deviceID); + } +} + +void AudioInputWASAPI::ActuallySetCurrentDevice(std::string deviceID){ + currentDevice=deviceID; + 
HRESULT res; + + if(audioClient){ + res=audioClient->Stop(); + CHECK_RES(res, "audioClient->Stop"); + } + +#ifdef TGVOIP_WINDOWS_DESKTOP + if(audioSessionControl){ + res=audioSessionControl->UnregisterAudioSessionNotification(this); + CHECK_RES(res, "audioSessionControl->UnregisterAudioSessionNotification"); + } + + SafeRelease(&audioSessionControl); +#endif + SafeRelease(&captureClient); + SafeRelease(&audioClient); +#ifdef TGVOIP_WINDOWS_DESKTOP + SafeRelease(&device); + + IMMDeviceCollection *deviceCollection = NULL; + + if(deviceID=="default"){ + isDefaultDevice=true; + res=enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications, &device); + CHECK_RES(res, "GetDefaultAudioEndpoint"); + }else{ + isDefaultDevice=false; + res=enumerator->EnumAudioEndpoints(eCapture, DEVICE_STATE_ACTIVE, &deviceCollection); + CHECK_RES(res, "EnumAudioEndpoints"); + + UINT devCount; + res=deviceCollection->GetCount(&devCount); + CHECK_RES(res, "GetCount"); + + for(UINT i=0;iItem(i, &device); + CHECK_RES(res, "GetDeviceItem"); + wchar_t* _devID; + res=device->GetId(&_devID); + CHECK_RES(res, "get device id"); + + char devID[128]; + WideCharToMultiByte(CP_UTF8, 0, _devID, -1, devID, 128, NULL, NULL); + + CoTaskMemFree(_devID); + if(deviceID==devID){ + this->device=device; + //device->AddRef(); + break; + } + } + } + + if(deviceCollection) + SafeRelease(&deviceCollection); + + if(!device){ + LOGE("Didn't find capture device; failing"); + failed=true; + return; + } + + res=device->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, (void**)&audioClient); + CHECK_RES(res, "device->Activate"); +#else + Platform::String^ defaultDevID=Windows::Media::Devices::MediaDevice::GetDefaultAudioCaptureId(Windows::Media::Devices::AudioDeviceRole::Communications); + HRESULT res1, res2; + IAudioClient2* audioClient2=WindowsSandboxUtils::ActivateAudioDevice(defaultDevID->Data(), &res1, &res2); + CHECK_RES(res1, "activate1"); + CHECK_RES(res2, "activate2"); + + AudioClientProperties 
properties={}; + properties.cbSize=sizeof AudioClientProperties; + properties.eCategory=AudioCategory_Communications; + res = audioClient2->SetClientProperties(&properties); + CHECK_RES(res, "audioClient2->SetClientProperties"); + + audioClient=audioClient2; +#endif + + + // {2C693079-3F59-49FD-964F-61C005EAA5D3} + const GUID guid = { 0x2c693079, 0x3f59, 0x49fd, { 0x96, 0x4f, 0x61, 0xc0, 0x5, 0xea, 0xa5, 0xd3 } }; + res = audioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST | 0x80000000/*AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM*/, 60 * 10000, 0, &format, &guid); + CHECK_RES(res, "audioClient->Initialize"); + + uint32_t bufSize; + res = audioClient->GetBufferSize(&bufSize); + CHECK_RES(res, "audioClient->GetBufferSize"); + + LOGV("buffer size: %u", bufSize); + REFERENCE_TIME latency; + if(SUCCEEDED(audioClient->GetStreamLatency(&latency))){ + estimatedDelay=latency ? latency/10000 : 60; + LOGD("capture latency: %d", estimatedDelay); + }else{ + estimatedDelay=60; + } + + res = audioClient->SetEventHandle(audioSamplesReadyEvent); + CHECK_RES(res, "audioClient->SetEventHandle"); + + res = audioClient->GetService(IID_PPV_ARGS(&captureClient)); + CHECK_RES(res, "audioClient->GetService"); + +#ifdef TGVOIP_WINDOWS_DESKTOP + res=audioClient->GetService(IID_PPV_ARGS(&audioSessionControl)); + CHECK_RES(res, "audioClient->GetService(IAudioSessionControl)"); + + res=audioSessionControl->RegisterAudioSessionNotification(this); + CHECK_RES(res, "audioSessionControl->RegisterAudioSessionNotification"); +#endif + + if(isRecording) + audioClient->Start(); + + LOGV("set current output device done"); +} + +DWORD AudioInputWASAPI::StartThread(void* arg) { + ((AudioInputWASAPI*)arg)->RunThread(); + return 0; +} + +void AudioInputWASAPI::RunThread() { + SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST); + + HANDLE waitArray[]={shutdownEvent, streamSwitchEvent, audioSamplesReadyEvent}; + HRESULT 
res=CoInitializeEx(NULL, COINIT_APARTMENTTHREADED); + CHECK_RES(res, "CoInitializeEx in capture thread"); + + uint32_t bufferSize=0; + uint32_t framesWritten=0; + + bool running=true; + //double prevCallback=VoIPController::GetCurrentTime(); + + while(running){ + DWORD waitResult=WaitForMultipleObjectsEx(3, waitArray, false, INFINITE, false); + if(waitResult==WAIT_OBJECT_0){ // shutdownEvent + LOGV("capture thread shutting down"); + running=false; + }else if(waitResult==WAIT_OBJECT_0+1){ // streamSwitchEvent + LOGV("stream switch"); + ActuallySetCurrentDevice(streamChangeToDevice); + ResetEvent(streamSwitchEvent); + bufferSize=0; + LOGV("stream switch done"); + }else if(waitResult==WAIT_OBJECT_0+2){ // audioSamplesReadyEvent + if(!audioClient) + continue; + res=captureClient->GetNextPacketSize(&bufferSize); + CHECK_RES(res, "captureClient->GetNextPacketSize"); + BYTE* data; + uint32_t framesAvailable=bufferSize; + DWORD flags; + + res=captureClient->GetBuffer(&data, &framesAvailable, &flags, NULL, NULL); + CHECK_RES(res, "captureClient->GetBuffer"); + size_t dataLen=framesAvailable*2; + assert(remainingDataLen+dataLen960*2){ + if(isRecording) + InvokeCallback(remainingData, 960*2); + memmove(remainingData, remainingData+(960*2), remainingDataLen-960*2); + remainingDataLen-=960*2; + } + + res=captureClient->ReleaseBuffer(framesAvailable); + CHECK_RES(res, "captureClient->ReleaseBuffer"); + + framesWritten+=framesAvailable; + } + } +} + +#ifdef TGVOIP_WINDOWS_DESKTOP +HRESULT AudioInputWASAPI::OnSessionDisconnected(AudioSessionDisconnectReason reason) { + if(!isDefaultDevice){ + streamChangeToDevice="default"; + SetEvent(streamSwitchEvent); + } + return S_OK; +} + +HRESULT AudioInputWASAPI::OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR newDevID) { + if(flow==eCapture && role==eCommunications && isDefaultDevice){ + streamChangeToDevice="default"; + SetEvent(streamSwitchEvent); + } + return S_OK; +} + +ULONG AudioInputWASAPI::AddRef(){ + return 
InterlockedIncrement(&refCount); +} + +ULONG AudioInputWASAPI::Release(){ + return InterlockedDecrement(&refCount); +} + +HRESULT AudioInputWASAPI::QueryInterface(REFIID iid, void** obj){ + if(!obj){ + return E_POINTER; + } + *obj=NULL; + + if(iid==IID_IUnknown){ + *obj=static_cast(static_cast(this)); + AddRef(); + }else if(iid==__uuidof(IMMNotificationClient)){ + *obj=static_cast(this); + AddRef(); + }else if(iid==__uuidof(IAudioSessionEvents)){ + *obj=static_cast(this); + AddRef(); + }else{ + return E_NOINTERFACE; + } + + return S_OK; +} +#endif \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWASAPI.h b/Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWASAPI.h new file mode 100644 index 000000000..d19d10ad2 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWASAPI.h @@ -0,0 +1,106 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOINPUTWASAPI_H +#define LIBTGVOIP_AUDIOINPUTWASAPI_H + +#if WINAPI_FAMILY==WINAPI_FAMILY_PHONE_APP +#define TGVOIP_WINDOWS_PHONE +#endif +#if !defined(WINAPI_FAMILY) || WINAPI_FAMILY==WINAPI_FAMILY_DESKTOP_APP +#define TGVOIP_WINDOWS_DESKTOP +#endif + +#include +#include +#include +#pragma warning(push) +#pragma warning(disable : 4201) +#ifndef TGVOIP_WP_SILVERLIGHT +#include +#endif +#ifdef TGVOIP_WINDOWS_DESKTOP +#include +#include +#else +#include +#include "WindowsSandboxUtils.h" +#endif +#pragma warning(pop) +#include "../../audio/AudioInput.h" + +namespace tgvoip{ +namespace audio{ + +#ifdef TGVOIP_WINDOWS_DESKTOP +class AudioInputWASAPI : public AudioInput, IMMNotificationClient, IAudioSessionEvents{ +#else +class AudioInputWASAPI : public AudioInput{ +#endif + +public: + AudioInputWASAPI(std::string deviceID); + virtual ~AudioInputWASAPI(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual bool IsRecording(); + virtual void SetCurrentDevice(std::string deviceID); + static void EnumerateDevices(std::vector& devs); +#ifdef TGVOIP_WINDOWS_DESKTOP + STDMETHOD_(ULONG, AddRef)(); + STDMETHOD_(ULONG, Release)(); +#endif + +private: + void ActuallySetCurrentDevice(std::string deviceID); + static DWORD StartThread(void* arg); + void RunThread(); + WAVEFORMATEX format; + bool isRecording; + HANDLE shutdownEvent; + HANDLE audioSamplesReadyEvent; + HANDLE streamSwitchEvent; + HANDLE thread; + IAudioClient* audioClient; + IAudioCaptureClient* captureClient; +#ifdef TGVOIP_WINDOWS_DESKTOP + IMMDeviceEnumerator* enumerator; + IAudioSessionControl* audioSessionControl; + IMMDevice* device; +#endif + unsigned char remainingData[10240]; + size_t remainingDataLen; + bool isDefaultDevice; + ULONG refCount; + std::string streamChangeToDevice; + bool started; + +#ifdef TGVOIP_WINDOWS_DESKTOP + STDMETHOD(OnDisplayNameChanged) (LPCWSTR /*NewDisplayName*/, 
LPCGUID /*EventContext*/) { return S_OK; }; + STDMETHOD(OnIconPathChanged) (LPCWSTR /*NewIconPath*/, LPCGUID /*EventContext*/) { return S_OK; }; + STDMETHOD(OnSimpleVolumeChanged) (float /*NewSimpleVolume*/, BOOL /*NewMute*/, LPCGUID /*EventContext*/) { return S_OK; } + STDMETHOD(OnChannelVolumeChanged) (DWORD /*ChannelCount*/, float /*NewChannelVolumes*/[], DWORD /*ChangedChannel*/, LPCGUID /*EventContext*/) { return S_OK; }; + STDMETHOD(OnGroupingParamChanged) (LPCGUID /*NewGroupingParam*/, LPCGUID /*EventContext*/) { return S_OK; }; + STDMETHOD(OnStateChanged) (AudioSessionState /*NewState*/) { return S_OK; }; + STDMETHOD(OnSessionDisconnected) (AudioSessionDisconnectReason DisconnectReason); + STDMETHOD(OnDeviceStateChanged) (LPCWSTR /*DeviceId*/, DWORD /*NewState*/) { return S_OK; } + STDMETHOD(OnDeviceAdded) (LPCWSTR /*DeviceId*/) { return S_OK; }; + STDMETHOD(OnDeviceRemoved) (LPCWSTR /*DeviceId(*/) { return S_OK; }; + STDMETHOD(OnDefaultDeviceChanged) (EDataFlow Flow, ERole Role, LPCWSTR NewDefaultDeviceId); + STDMETHOD(OnPropertyValueChanged) (LPCWSTR /*DeviceId*/, const PROPERTYKEY /*Key*/) { return S_OK; }; + + // + // IUnknown + // + STDMETHOD(QueryInterface)(REFIID iid, void **pvObject); +#endif +}; + +} +} + +#endif //LIBTGVOIP_AUDIOINPUTWASAPI_H diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWave.cpp b/Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWave.cpp new file mode 100644 index 000000000..61a8f0b45 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/AudioInputWave.cpp @@ -0,0 +1,170 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include +#include +#include +#include "AudioInputWave.h" +#include "../../logging.h" +#include "../../VoIPController.h" + +using namespace tgvoip::audio; + +#define BUFFER_SIZE 960 +#define CHECK_ERROR(res, msg) if(res!=MMSYSERR_NOERROR){wchar_t _buf[1024]; waveInGetErrorTextW(res, _buf, 1024); LOGE(msg ": %ws (MMRESULT=0x%08X)", _buf, res); failed=true;} + +AudioInputWave::AudioInputWave(std::string deviceID){ + isRecording=false; + + for(int i=0;i<4;i++){ + ZeroMemory(&buffers[i], sizeof(WAVEHDR)); + buffers[i].dwBufferLength=960*2; + buffers[i].lpData=(char*)malloc(960*2); + } + + hWaveIn=NULL; + + SetCurrentDevice(deviceID); +} + +AudioInputWave::~AudioInputWave(){ + for(int i=0;i<4;i++){ + free(buffers[i].lpData); + } + waveInClose(hWaveIn); +} + +void AudioInputWave::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + +} + +void AudioInputWave::Start(){ + isRecording=true; + + MMRESULT res; + for(int i=0;i<4;i++){ + res=waveInPrepareHeader(hWaveIn, &buffers[i], sizeof(WAVEHDR)); + CHECK_ERROR(res, "waveInPrepareHeader failed"); + res=waveInAddBuffer(hWaveIn, &buffers[i], sizeof(WAVEHDR)); + CHECK_ERROR(res, "waveInAddBuffer failed"); + } + res=waveInStart(hWaveIn); + CHECK_ERROR(res, "waveInStart failed"); +} + +void AudioInputWave::Stop(){ + isRecording=false; + + MMRESULT res=waveInStop(hWaveIn); + CHECK_ERROR(res, "waveInStop failed"); + res=waveInReset(hWaveIn); + CHECK_ERROR(res, "waveInReset failed"); + for(int i=0;i<4;i++){ + res=waveInUnprepareHeader(hWaveIn, &buffers[i], sizeof(WAVEHDR)); + CHECK_ERROR(res, "waveInUnprepareHeader failed"); + } +} + +void AudioInputWave::WaveInProc(HWAVEIN hwi, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2){ + if(uMsg==WIM_DATA){ + ((AudioInputWave*)dwInstance)->OnData((WAVEHDR*)dwParam1); + } +} + +void AudioInputWave::OnData(WAVEHDR* hdr){ + if(!isRecording) + return; + + InvokeCallback((unsigned char*)hdr->lpData, hdr->dwBufferLength); + hdr->dwFlags&= 
~WHDR_DONE; + MMRESULT res=waveInAddBuffer(hWaveIn, hdr, sizeof(WAVEHDR)); + CHECK_ERROR(res, "waveInAddBuffer failed"); +} + +void AudioInputWave::EnumerateDevices(std::vector& devs){ + UINT num=waveInGetNumDevs(); + WAVEINCAPSW caps; + char nameBuf[512]; + for(UINT i=0;i +#include +#include +#include "../../audio/AudioInput.h" + +namespace tgvoip{ +namespace audio{ + +class AudioInputWave : public AudioInput{ + +public: + AudioInputWave(std::string deviceID); + virtual ~AudioInputWave(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual void SetCurrentDevice(std::string deviceID); + static void EnumerateDevices(std::vector& devs); + +private: + static void CALLBACK WaveInProc(HWAVEIN hwi, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2); + void OnData(WAVEHDR* hdr); + HWAVEIN hWaveIn; + WAVEFORMATEX format; + WAVEHDR buffers[4]; + bool isRecording; +}; + +} +} + +#endif //LIBTGVOIP_AUDIOINPUTWAVE_H diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWASAPI.cpp b/Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWASAPI.cpp new file mode 100644 index 000000000..953678595 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWASAPI.cpp @@ -0,0 +1,439 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + + +#include +#include "AudioOutputWASAPI.h" +#include "../../logging.h" +#include "../../VoIPController.h" + +#define BUFFER_SIZE 960 +#define CHECK_RES(res, msg) {if(FAILED(res)){LOGE("%s failed: HRESULT=0x%08X", msg, res); failed=true; return;}} +#define SCHECK_RES(res, msg) {if(FAILED(res)){LOGE("%s failed: HRESULT=0x%08X", msg, res); return;}} + +template void SafeRelease(T **ppT) +{ + if(*ppT) + { + (*ppT)->Release(); + *ppT = NULL; + } +} + +#ifdef TGVOIP_WINXP_COMPAT + +#endif + +using namespace tgvoip::audio; + +AudioOutputWASAPI::AudioOutputWASAPI(std::string deviceID){ + isPlaying=false; + remainingDataLen=0; + refCount=1; + HRESULT res; + res=CoInitializeEx(NULL, COINIT_APARTMENTTHREADED); + CHECK_RES(res, "CoInitializeEx"); +#ifdef TGVOIP_WINXP_COMPAT + HANDLE (WINAPI *__CreateEventExA)(LPSECURITY_ATTRIBUTES lpEventAttributes, LPCSTR lpName, DWORD dwFlags, DWORD dwDesiredAccess); + __CreateEventExA=(HANDLE (WINAPI *)(LPSECURITY_ATTRIBUTES, LPCSTR, DWORD, DWORD))GetProcAddress(GetModuleHandleA("kernel32.dll"), "CreateEventExA"); +#undef CreateEventEx +#define CreateEventEx __CreateEventExA +#endif + shutdownEvent=CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE); + audioSamplesReadyEvent=CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE); + streamSwitchEvent=CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE); + ZeroMemory(&format, sizeof(format)); + format.wFormatTag=WAVE_FORMAT_PCM; + format.nChannels=1; + format.nSamplesPerSec=48000; + format.nBlockAlign=2; + format.nAvgBytesPerSec=format.nSamplesPerSec*format.nBlockAlign; + format.wBitsPerSample=16; + +#ifdef TGVOIP_WINDOWS_DESKTOP + res=CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&enumerator)); + CHECK_RES(res, "CoCreateInstance(MMDeviceEnumerator)"); + res=enumerator->RegisterEndpointNotificationCallback(this); + CHECK_RES(res, "enumerator->RegisterEndpointNotificationCallback"); + audioSessionControl=NULL; + 
device=NULL; +#endif + + audioClient=NULL; + renderClient=NULL; + thread=NULL; + + SetCurrentDevice(deviceID); +} + +AudioOutputWASAPI::~AudioOutputWASAPI(){ + if(audioClient && isPlaying){ + audioClient->Stop(); + } + +#ifdef TGVOIP_WINDOWS_DESKTOP + if(audioSessionControl){ + audioSessionControl->UnregisterAudioSessionNotification(this); + } +#endif + + SetEvent(shutdownEvent); + if(thread){ + WaitForSingleObjectEx(thread, INFINITE, false); + CloseHandle(thread); + } + SafeRelease(&renderClient); + SafeRelease(&audioClient); +#ifdef TGVOIP_WINDOWS_DESKTOP + SafeRelease(&device); + SafeRelease(&audioSessionControl); +#endif + CloseHandle(shutdownEvent); + CloseHandle(audioSamplesReadyEvent); + CloseHandle(streamSwitchEvent); +#ifdef TGVOIP_WINDOWS_DESKTOP + if(enumerator) + enumerator->UnregisterEndpointNotificationCallback(this); + SafeRelease(&enumerator); +#endif +} + +void AudioOutputWASAPI::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + +} + +void AudioOutputWASAPI::Start(){ + isPlaying=true; + if(!thread){ + thread=CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)AudioOutputWASAPI::StartThread, this, 0, NULL); + } + + if(audioClient) + audioClient->Start(); +} + +void AudioOutputWASAPI::Stop(){ + isPlaying=false; + + if(audioClient) + audioClient->Stop(); +} + +bool AudioOutputWASAPI::IsPlaying(){ + return isPlaying; +} + +void AudioOutputWASAPI::EnumerateDevices(std::vector& devs){ +#ifdef TGVOIP_WINDOWS_DESKTOP + HRESULT res; + res=CoInitializeEx(NULL, COINIT_APARTMENTTHREADED); + SCHECK_RES(res, "CoInitializeEx"); + + IMMDeviceEnumerator *deviceEnumerator = NULL; + IMMDeviceCollection *deviceCollection = NULL; + + res=CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&deviceEnumerator)); + SCHECK_RES(res, "CoCreateInstance(MMDeviceEnumerator)"); + + res=deviceEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &deviceCollection); + SCHECK_RES(res, "EnumAudioEndpoints"); + + UINT 
devCount; + res=deviceCollection->GetCount(&devCount); + SCHECK_RES(res, "GetCount"); + + for(UINT i=0;iItem(i, &device); + SCHECK_RES(res, "GetDeviceItem"); + wchar_t* devID; + res=device->GetId(&devID); + SCHECK_RES(res, "get device id"); + + IPropertyStore* propStore; + res=device->OpenPropertyStore(STGM_READ, &propStore); + SafeRelease(&device); + SCHECK_RES(res, "OpenPropertyStore"); + + PROPVARIANT friendlyName; + PropVariantInit(&friendlyName); + res=propStore->GetValue(PKEY_Device_FriendlyName, &friendlyName); + SafeRelease(&propStore); + + AudioOutputDevice dev; + + wchar_t actualFriendlyName[128]; + if(friendlyName.vt==VT_LPWSTR){ + wcsncpy(actualFriendlyName, friendlyName.pwszVal, sizeof(actualFriendlyName)/sizeof(wchar_t)); + }else{ + wcscpy(actualFriendlyName, L"Unknown"); + } + PropVariantClear(&friendlyName); + + char buf[256]; + WideCharToMultiByte(CP_UTF8, 0, devID, -1, buf, sizeof(buf), NULL, NULL); + dev.id=buf; + WideCharToMultiByte(CP_UTF8, 0, actualFriendlyName, -1, buf, sizeof(buf), NULL, NULL); + dev.displayName=buf; + devs.push_back(dev); + + CoTaskMemFree(devID); + } + + SafeRelease(&deviceCollection); + SafeRelease(&deviceEnumerator); +#endif +} + +void AudioOutputWASAPI::SetCurrentDevice(std::string deviceID){ + if(thread){ + streamChangeToDevice=deviceID; + SetEvent(streamSwitchEvent); + }else{ + ActuallySetCurrentDevice(deviceID); + } +} + +void AudioOutputWASAPI::ActuallySetCurrentDevice(std::string deviceID){ + currentDevice=deviceID; + HRESULT res; + + if(audioClient && isPlaying){ + res=audioClient->Stop(); + CHECK_RES(res, "audioClient->Stop"); + } + +#ifdef TGVOIP_WINDOWS_DESKTOP + if(audioSessionControl){ + res=audioSessionControl->UnregisterAudioSessionNotification(this); + CHECK_RES(res, "audioSessionControl->UnregisterAudioSessionNotification"); + } + + SafeRelease(&audioSessionControl); +#endif + SafeRelease(&renderClient); + SafeRelease(&audioClient); +#ifdef TGVOIP_WINDOWS_DESKTOP + SafeRelease(&device); + + + 
IMMDeviceCollection *deviceCollection = NULL; + + if(deviceID=="default"){ + isDefaultDevice=true; + res=enumerator->GetDefaultAudioEndpoint(eRender, eCommunications, &device); + CHECK_RES(res, "GetDefaultAudioEndpoint"); + }else{ + isDefaultDevice=false; + res=enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &deviceCollection); + CHECK_RES(res, "EnumAudioEndpoints"); + + UINT devCount; + res=deviceCollection->GetCount(&devCount); + CHECK_RES(res, "GetCount"); + + for(UINT i=0;iItem(i, &device); + CHECK_RES(res, "GetDeviceItem"); + wchar_t* _devID; + res=device->GetId(&_devID); + CHECK_RES(res, "get device id"); + + char devID[128]; + WideCharToMultiByte(CP_UTF8, 0, _devID, -1, devID, 128, NULL, NULL); + + CoTaskMemFree(_devID); + if(deviceID==devID){ + this->device=device; + break; + } + } + } + + if(deviceCollection) + SafeRelease(&deviceCollection); + + if(!device){ + LOGE("Didn't find playback device; failing"); + failed=true; + return; + } + + res=device->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, (void**)&audioClient); + CHECK_RES(res, "device->Activate"); +#else + Platform::String^ defaultDevID=Windows::Media::Devices::MediaDevice::GetDefaultAudioRenderId(Windows::Media::Devices::AudioDeviceRole::Communications); + HRESULT res1, res2; + IAudioClient2* audioClient2=WindowsSandboxUtils::ActivateAudioDevice(defaultDevID->Data(), &res1, &res2); + CHECK_RES(res1, "activate1"); + CHECK_RES(res2, "activate2"); + + AudioClientProperties properties={}; + properties.cbSize=sizeof AudioClientProperties; + properties.eCategory=AudioCategory_Communications; + res = audioClient2->SetClientProperties(&properties); + CHECK_RES(res, "audioClient2->SetClientProperties"); + + audioClient = audioClient2; +#endif + + // {2C693079-3F59-49FD-964F-61C005EAA5D3} + const GUID guid = { 0x2c693079, 0x3f59, 0x49fd, { 0x96, 0x4f, 0x61, 0xc0, 0x5, 0xea, 0xa5, 0xd3 } }; + res = audioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK 
| AUDCLNT_STREAMFLAGS_NOPERSIST | 0x80000000/*AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM*/, 60 * 10000, 0, &format, &guid); + CHECK_RES(res, "audioClient->Initialize"); + + uint32_t bufSize; + res = audioClient->GetBufferSize(&bufSize); + CHECK_RES(res, "audioClient->GetBufferSize"); + + LOGV("buffer size: %u", bufSize); + REFERENCE_TIME latency; + if(SUCCEEDED(audioClient->GetStreamLatency(&latency))){ + estimatedDelay=latency ? latency/10000 : 60; + LOGD("playback latency: %d", estimatedDelay); + }else{ + estimatedDelay=60; + } + + res = audioClient->SetEventHandle(audioSamplesReadyEvent); + CHECK_RES(res, "audioClient->SetEventHandle"); + + res = audioClient->GetService(IID_PPV_ARGS(&renderClient)); + CHECK_RES(res, "audioClient->GetService"); + + BYTE* data; + res = renderClient->GetBuffer(bufSize, &data); + CHECK_RES(res, "renderClient->GetBuffer"); + + res = renderClient->ReleaseBuffer(bufSize, AUDCLNT_BUFFERFLAGS_SILENT); + CHECK_RES(res, "renderClient->ReleaseBuffer"); + +#ifdef TGVOIP_WINDOWS_DESKTOP + res=audioClient->GetService(IID_PPV_ARGS(&audioSessionControl)); + CHECK_RES(res, "audioClient->GetService(IAudioSessionControl)"); + + res=audioSessionControl->RegisterAudioSessionNotification(this); + CHECK_RES(res, "audioSessionControl->RegisterAudioSessionNotification"); +#endif + + if(isPlaying) + audioClient->Start(); + + LOGV("set current output device done"); +} + +DWORD AudioOutputWASAPI::StartThread(void* arg) { + ((AudioOutputWASAPI*)arg)->RunThread(); + return 0; +} + +void AudioOutputWASAPI::RunThread() { + SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST); + + HANDLE waitArray[]={shutdownEvent, streamSwitchEvent, audioSamplesReadyEvent}; + HRESULT res=CoInitializeEx(NULL, COINIT_APARTMENTTHREADED); + CHECK_RES(res, "CoInitializeEx in render thread"); + + uint32_t bufferSize; + res=audioClient->GetBufferSize(&bufferSize); + CHECK_RES(res, "audioClient->GetBufferSize"); + uint32_t framesWritten=0; + + bool running=true; + //double 
prevCallback=VoIPController::GetCurrentTime(); + + while(running){ + DWORD waitResult=WaitForMultipleObjectsEx(3, waitArray, false, INFINITE, false); + if(waitResult==WAIT_OBJECT_0){ // shutdownEvent + LOGV("render thread shutting down"); + running=false; + }else if(waitResult==WAIT_OBJECT_0+1){ // streamSwitchEvent + LOGV("stream switch"); + ActuallySetCurrentDevice(streamChangeToDevice); + ResetEvent(streamSwitchEvent); + LOGV("stream switch done"); + }else if(waitResult==WAIT_OBJECT_0+2){ // audioSamplesReadyEvent + if(!audioClient) + continue; + BYTE* data; + uint32_t padding; + uint32_t framesAvailable; + res=audioClient->GetCurrentPadding(&padding); + CHECK_RES(res, "audioClient->GetCurrentPadding"); + framesAvailable=bufferSize-padding; + res=renderClient->GetBuffer(framesAvailable, &data); + CHECK_RES(res, "renderClient->GetBuffer"); + + //double t=VoIPController::GetCurrentTime(); + //LOGV("framesAvail: %u, time: %f, isPlaying: %d", framesAvailable, t-prevCallback, isPlaying); + //prevCallback=t; + + size_t bytesAvailable=framesAvailable*2; + while(bytesAvailable>remainingDataLen){ + InvokeCallback(remainingData+remainingDataLen, 960*2); + remainingDataLen+=960*2; + } + memcpy(data, remainingData, bytesAvailable); + if(remainingDataLen>bytesAvailable){ + memmove(remainingData, remainingData+bytesAvailable, remainingDataLen-bytesAvailable); + } + remainingDataLen-=bytesAvailable; + + res=renderClient->ReleaseBuffer(framesAvailable, 0); + CHECK_RES(res, "renderClient->ReleaseBuffer"); + framesWritten+=framesAvailable; + } + } +} + +#ifdef TGVOIP_WINDOWS_DESKTOP +HRESULT AudioOutputWASAPI::OnSessionDisconnected(AudioSessionDisconnectReason reason) { + if(!isDefaultDevice){ + streamChangeToDevice="default"; + SetEvent(streamSwitchEvent); + } + return S_OK; +} + +HRESULT AudioOutputWASAPI::OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR newDevID) { + if(flow==eRender && role==eCommunications && isDefaultDevice){ + streamChangeToDevice="default"; + 
SetEvent(streamSwitchEvent); + } + return S_OK; +} + +ULONG AudioOutputWASAPI::AddRef(){ + return InterlockedIncrement(&refCount); +} + +ULONG AudioOutputWASAPI::Release(){ + return InterlockedDecrement(&refCount); +} + +HRESULT AudioOutputWASAPI::QueryInterface(REFIID iid, void** obj){ + if(!obj){ + return E_POINTER; + } + *obj=NULL; + + if(iid==IID_IUnknown){ + *obj=static_cast(static_cast(this)); + AddRef(); + }else if(iid==__uuidof(IMMNotificationClient)){ + *obj=static_cast(this); + AddRef(); + }else if(iid==__uuidof(IAudioSessionEvents)){ + *obj=static_cast(this); + AddRef(); + }else{ + return E_NOINTERFACE; + } + + return S_OK; +} +#endif \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWASAPI.h b/Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWASAPI.h new file mode 100644 index 000000000..6fd22c269 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWASAPI.h @@ -0,0 +1,104 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef LIBTGVOIP_AUDIOOUTPUTWASAPI_H +#define LIBTGVOIP_AUDIOOUTPUTWASAPI_H + +#if WINAPI_FAMILY==WINAPI_FAMILY_PHONE_APP +#define TGVOIP_WINDOWS_PHONE +#endif +#if !defined(WINAPI_FAMILY) || WINAPI_FAMILY==WINAPI_FAMILY_DESKTOP_APP +#define TGVOIP_WINDOWS_DESKTOP +#endif + +#include +#include +#include +#pragma warning(push) +#pragma warning(disable : 4201) +#ifndef TGVOIP_WP_SILVERLIGHT +#include +#endif +#ifdef TGVOIP_WINDOWS_DESKTOP +#include +#include +#else +#include +#include "WindowsSandboxUtils.h" +#endif +#pragma warning(pop) +#include "../../audio/AudioOutput.h" + +namespace tgvoip{ +namespace audio{ + +#ifdef TGVOIP_WINDOWS_DESKTOP +class AudioOutputWASAPI : public AudioOutput, IMMNotificationClient, IAudioSessionEvents{ +#else +class AudioOutputWASAPI : public AudioOutput{ +#endif +public: + AudioOutputWASAPI(std::string deviceID); + virtual ~AudioOutputWASAPI(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual bool IsPlaying(); + virtual void SetCurrentDevice(std::string deviceID); + static void EnumerateDevices(std::vector& devs); +#ifdef TGVOIP_WINDOWS_DESKTOP + STDMETHOD_(ULONG, AddRef)(); + STDMETHOD_(ULONG, Release)(); +#endif + +private: + void ActuallySetCurrentDevice(std::string deviceID); + static DWORD StartThread(void* arg); + void RunThread(); + WAVEFORMATEX format; + bool isPlaying; + HANDLE shutdownEvent; + HANDLE audioSamplesReadyEvent; + HANDLE streamSwitchEvent; + HANDLE thread; + IAudioClient* audioClient; + IAudioRenderClient* renderClient; +#ifdef TGVOIP_WINDOWS_DESKTOP + IMMDeviceEnumerator* enumerator; + IAudioSessionControl* audioSessionControl; + IMMDevice* device; +#endif + unsigned char remainingData[10240]; + size_t remainingDataLen; + bool isDefaultDevice; + ULONG refCount; + std::string streamChangeToDevice; + +#ifdef TGVOIP_WINDOWS_DESKTOP + STDMETHOD(OnDisplayNameChanged) (LPCWSTR /*NewDisplayName*/, LPCGUID 
/*EventContext*/) { return S_OK; }; + STDMETHOD(OnIconPathChanged) (LPCWSTR /*NewIconPath*/, LPCGUID /*EventContext*/) { return S_OK; }; + STDMETHOD(OnSimpleVolumeChanged) (float /*NewSimpleVolume*/, BOOL /*NewMute*/, LPCGUID /*EventContext*/) { return S_OK; } + STDMETHOD(OnChannelVolumeChanged) (DWORD /*ChannelCount*/, float /*NewChannelVolumes*/[], DWORD /*ChangedChannel*/, LPCGUID /*EventContext*/) { return S_OK; }; + STDMETHOD(OnGroupingParamChanged) (LPCGUID /*NewGroupingParam*/, LPCGUID /*EventContext*/) { return S_OK; }; + STDMETHOD(OnStateChanged) (AudioSessionState /*NewState*/) { return S_OK; }; + STDMETHOD(OnSessionDisconnected) (AudioSessionDisconnectReason DisconnectReason); + STDMETHOD(OnDeviceStateChanged) (LPCWSTR /*DeviceId*/, DWORD /*NewState*/) { return S_OK; } + STDMETHOD(OnDeviceAdded) (LPCWSTR /*DeviceId*/) { return S_OK; }; + STDMETHOD(OnDeviceRemoved) (LPCWSTR /*DeviceId(*/) { return S_OK; }; + STDMETHOD(OnDefaultDeviceChanged) (EDataFlow Flow, ERole Role, LPCWSTR NewDefaultDeviceId); + STDMETHOD(OnPropertyValueChanged) (LPCWSTR /*DeviceId*/, const PROPERTYKEY /*Key*/) { return S_OK; }; + + // + // IUnknown + // + STDMETHOD(QueryInterface)(REFIID iid, void **pvObject); +#endif +}; + +} +} + +#endif //LIBTGVOIP_AUDIOOUTPUTWASAPI_H diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWave.cpp b/Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWave.cpp new file mode 100644 index 000000000..752aad840 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/AudioOutputWave.cpp @@ -0,0 +1,165 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + + +#include +#include "AudioOutputWave.h" +#include "../../logging.h" +#include "../../VoIPController.h" + +#define BUFFER_SIZE 960 +#define CHECK_ERROR(res, msg) if(res!=MMSYSERR_NOERROR){wchar_t _buf[1024]; waveOutGetErrorTextW(res, _buf, 1024); LOGE(msg ": %ws (MMRESULT=0x%08X)", _buf, res); failed=true;} + +using namespace tgvoip::audio; + +AudioOutputWave::AudioOutputWave(std::string deviceID){ + isPlaying=false; + hWaveOut=NULL; + + for(int i=0;i<4;i++){ + ZeroMemory(&buffers[i], sizeof(WAVEHDR)); + buffers[i].dwBufferLength=960*2; + buffers[i].lpData=(char*)malloc(960*2); + } + + SetCurrentDevice(deviceID); +} + +AudioOutputWave::~AudioOutputWave(){ + for(int i=0;i<4;i++){ + free(buffers[i].lpData); + } + waveOutClose(hWaveOut); +} + +void AudioOutputWave::Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels){ + +} + +void AudioOutputWave::Start(){ + isPlaying=true; + + for(int i=0;i<4;i++){ + MMRESULT res=waveOutPrepareHeader(hWaveOut, &buffers[i], sizeof(WAVEHDR)); + CHECK_ERROR(res, "waveOutPrepareHeader failed"); + //InvokeCallback((unsigned char*)buffers[i].lpData, buffers[i].dwBufferLength); + ZeroMemory(buffers[i].lpData, buffers[i].dwBufferLength); + res=waveOutWrite(hWaveOut, &buffers[i], sizeof(WAVEHDR)); + CHECK_ERROR(res, "waveOutWrite failed"); + } +} + +void AudioOutputWave::Stop(){ + isPlaying=false; + + MMRESULT res=waveOutReset(hWaveOut); + CHECK_ERROR(res, "waveOutReset failed"); + for(int i=0;i<4;i++){ + res=waveOutUnprepareHeader(hWaveOut, &buffers[i], sizeof(WAVEHDR)); + CHECK_ERROR(res, "waveOutUnprepareHeader failed"); + } +} + +bool AudioOutputWave::IsPlaying(){ + return isPlaying; +} + +void AudioOutputWave::WaveOutProc(HWAVEOUT hwo, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2) { + if(uMsg==WOM_DONE){ + ((AudioOutputWave*)dwInstance)->OnBufferDone((WAVEHDR*)dwParam1); + } +} + +void AudioOutputWave::OnBufferDone(WAVEHDR* hdr){ + if(!isPlaying) + return; + + 
InvokeCallback((unsigned char*)hdr->lpData, hdr->dwBufferLength); + hdr->dwFlags&= ~WHDR_DONE; + MMRESULT res=waveOutWrite(hWaveOut, hdr, sizeof(WAVEHDR)); +} + +void AudioOutputWave::EnumerateDevices(std::vector& devs){ + UINT num=waveOutGetNumDevs(); + WAVEOUTCAPSW caps; + char nameBuf[512]; + for(UINT i=0;i +#include +#include +#include "../../audio/AudioOutput.h" + +namespace tgvoip{ +namespace audio{ + +class AudioOutputWave : public AudioOutput{ +public: + AudioOutputWave(std::string deviceID); + virtual ~AudioOutputWave(); + virtual void Configure(uint32_t sampleRate, uint32_t bitsPerSample, uint32_t channels); + virtual void Start(); + virtual void Stop(); + virtual bool IsPlaying(); + virtual void SetCurrentDevice(std::string deviceID); + static void EnumerateDevices(std::vector& devs); + +private: + HWAVEOUT hWaveOut; + WAVEFORMATEX format; + WAVEHDR buffers[4]; + static void CALLBACK WaveOutProc(HWAVEOUT hwo, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2); + void OnBufferDone(WAVEHDR* hdr); + bool isPlaying; +}; + +} +} + +#endif //LIBTGVOIP_AUDIOOUTPUTWAVE_H diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/CXWrapper.cpp b/Telegram/ThirdParty/libtgvoip/os/windows/CXWrapper.cpp new file mode 100755 index 000000000..818ec5370 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/CXWrapper.cpp @@ -0,0 +1,437 @@ +#include +#include +#include +#include +#include +#include +#include "CXWrapper.h" +#include +#include + +using namespace Windows::Storage::Streams; +using namespace Microsoft::WRL; +using namespace libtgvoip; +using namespace Platform; +using namespace tgvoip; +using namespace Windows::Security::Cryptography; +using namespace Windows::Security::Cryptography::Core; +using namespace Windows::Storage::Streams; +using namespace Windows::Data::Json; +using namespace Windows::Phone::Media::Devices; + +//CryptographicHash^ MicrosoftCryptoImpl::sha1Hash; +//CryptographicHash^ MicrosoftCryptoImpl::sha256Hash; 
+HashAlgorithmProvider^ MicrosoftCryptoImpl::sha1Provider; +HashAlgorithmProvider^ MicrosoftCryptoImpl::sha256Provider; +SymmetricKeyAlgorithmProvider^ MicrosoftCryptoImpl::aesKeyProvider; + +/*struct tgvoip_cx_data{ + VoIPControllerWrapper^ self; +};*/ + +VoIPControllerWrapper::VoIPControllerWrapper(){ + VoIPController::crypto.aes_ige_decrypt=MicrosoftCryptoImpl::AesIgeDecrypt; + VoIPController::crypto.aes_ige_encrypt=MicrosoftCryptoImpl::AesIgeEncrypt; + VoIPController::crypto.aes_ctr_encrypt = MicrosoftCryptoImpl::AesCtrEncrypt; + VoIPController::crypto.sha1=MicrosoftCryptoImpl::SHA1; + VoIPController::crypto.sha256=MicrosoftCryptoImpl::SHA256; + VoIPController::crypto.rand_bytes=MicrosoftCryptoImpl::RandBytes; + MicrosoftCryptoImpl::Init(); + controller=new VoIPController(); + controller->implData=(void*)this; + controller->SetStateCallback(VoIPControllerWrapper::OnStateChanged); + controller->SetSignalBarsCountCallback(VoIPControllerWrapper::OnSignalBarsChanged); + stateCallback=nullptr; +} + +VoIPControllerWrapper::~VoIPControllerWrapper(){ + delete controller; +} + +void VoIPControllerWrapper::Start(){ + controller->Start(); +} + +void VoIPControllerWrapper::Connect(){ + controller->Connect(); +} + +void VoIPControllerWrapper::SetPublicEndpoints(const Platform::Array^ endpoints, bool allowP2P){ + std::vector eps; + for (int i = 0; i < endpoints->Length; i++) + { + libtgvoip::Endpoint^ _ep = endpoints[i]; + tgvoip::Endpoint ep; + ep.id = _ep->id; + ep.type = EP_TYPE_UDP_RELAY; + char buf[128]; + if (_ep->ipv4){ + WideCharToMultiByte(CP_UTF8, 0, _ep->ipv4->Data(), -1, buf, sizeof(buf), NULL, NULL); + ep.address = IPv4Address(buf); + } + if (_ep->ipv6){ + WideCharToMultiByte(CP_UTF8, 0, _ep->ipv6->Data(), -1, buf, sizeof(buf), NULL, NULL); + ep.v6address = IPv6Address(buf); + } + ep.port = _ep->port; + if (_ep->peerTag->Length != 16) + throw ref new Platform::InvalidArgumentException("Peer tag must be exactly 16 bytes long"); + memcpy(ep.peerTag, 
_ep->peerTag->Data, 16); + eps.push_back(ep); + } + controller->SetRemoteEndpoints(eps, allowP2P); +} + +void VoIPControllerWrapper::SetNetworkType(NetworkType type){ + controller->SetNetworkType((int)type); +} + +void VoIPControllerWrapper::SetStateCallback(IStateCallback^ callback){ + stateCallback=callback; +} + +void VoIPControllerWrapper::SetMicMute(bool mute){ + controller->SetMicMute(mute); +} + +int64 VoIPControllerWrapper::GetPreferredRelayID(){ + return controller->GetPreferredRelayID(); +} + +void VoIPControllerWrapper::SetEncryptionKey(const Platform::Array^ key, bool isOutgoing){ + if(key->Length!=256) + throw ref new Platform::InvalidArgumentException("Encryption key must be exactly 256 bytes long"); + controller->SetEncryptionKey((char*)key->Data, isOutgoing); +} + +Platform::String^ VoIPControllerWrapper::GetDebugString(){ + char abuf[10240]; + controller->GetDebugString(abuf, sizeof(abuf)); + wchar_t wbuf[10240]; + MultiByteToWideChar(CP_UTF8, 0, abuf, -1, wbuf, sizeof(wbuf)); + return ref new Platform::String(wbuf); +} + +Platform::String^ VoIPControllerWrapper::GetDebugLog(){ + std::string log=controller->GetDebugLog(); + size_t len=sizeof(wchar_t)*(log.length()+1); + wchar_t* wlog=(wchar_t*)malloc(len); + MultiByteToWideChar(CP_UTF8, 0, log.c_str(), -1, wlog, len/sizeof(wchar_t)); + Platform::String^ res=ref new Platform::String(wlog); + free(wlog); + return res; +} + +Error VoIPControllerWrapper::GetLastError(){ + return (Error)controller->GetLastError(); +} + +Platform::String^ VoIPControllerWrapper::GetVersion(){ + const char* v=VoIPController::GetVersion(); + wchar_t buf[32]; + MultiByteToWideChar(CP_UTF8, 0, v, -1, buf, sizeof(buf)); + return ref new Platform::String(buf); +} + +void VoIPControllerWrapper::OnStateChanged(VoIPController* c, int state){ + reinterpret_cast(c->implData)->OnStateChangedInternal(state); +} + +void VoIPControllerWrapper::OnSignalBarsChanged(VoIPController* c, int count){ + 
reinterpret_cast(c->implData)->OnSignalBarsChangedInternal(count); +} + +void VoIPControllerWrapper::OnStateChangedInternal(int state){ + if(stateCallback) + stateCallback->OnCallStateChanged((CallState)state); +} + +void VoIPControllerWrapper::OnSignalBarsChangedInternal(int count){ + if(stateCallback) + stateCallback->OnSignalBarsChanged(count); +} + +void VoIPControllerWrapper::SetConfig(double initTimeout, double recvTimeout, DataSavingMode dataSavingMode, bool enableAEC, bool enableNS, bool enableAGC, Platform::String^ logFilePath, Platform::String^ statsDumpFilePath){ + voip_config_t config{0}; + config.init_timeout=initTimeout; + config.recv_timeout=recvTimeout; + config.data_saving=(int)dataSavingMode; + config.enableAEC=enableAEC; + config.enableAGC=enableAGC; + config.enableNS=enableNS; + if(logFilePath!=nullptr&&!logFilePath->IsEmpty()){ + WideCharToMultiByte(CP_UTF8, 0, logFilePath->Data(), -1, config.logFilePath, sizeof(config.logFilePath), NULL, NULL); + } + if(statsDumpFilePath!=nullptr&&!statsDumpFilePath->IsEmpty()){ + WideCharToMultiByte(CP_UTF8, 0, statsDumpFilePath->Data(), -1, config.statsDumpFilePath, sizeof(config.statsDumpFilePath), NULL, NULL); + } + controller->SetConfig(&config); +} + +void VoIPControllerWrapper::SetProxy(ProxyProtocol protocol, Platform::String^ address, uint16_t port, Platform::String^ username, Platform::String^ password){ + char _address[2000]; + char _username[256]; + char _password[256]; + + WideCharToMultiByte(CP_UTF8, 0, address->Data(), -1, _address, sizeof(_address), NULL, NULL); + WideCharToMultiByte(CP_UTF8, 0, username->Data(), -1, _username, sizeof(_username), NULL, NULL); + WideCharToMultiByte(CP_UTF8, 0, password->Data(), -1, _password, sizeof(_password), NULL, NULL); + + controller->SetProxy((int)protocol, _address, port, _username, _password); +} + +void VoIPControllerWrapper::UpdateServerConfig(Platform::String^ json){ + JsonObject^ jconfig=JsonValue::Parse(json)->GetObject(); + std::map config; + + for 
each (auto item in jconfig){ + char _key[128]; + char _value[256]; + WideCharToMultiByte(CP_UTF8, 0, item->Key->Data(), -1, _key, sizeof(_key), NULL, NULL); + if(item->Value->ValueType==Windows::Data::Json::JsonValueType::String) + WideCharToMultiByte(CP_UTF8, 0, item->Value->GetString()->Data(), -1, _value, sizeof(_value), NULL, NULL); + else + WideCharToMultiByte(CP_UTF8, 0, item->Value->ToString()->Data(), -1, _value, sizeof(_value), NULL, NULL); + std::string key(_key); + std::string value(_value); + + config[key]=value; + } + + ServerConfig::GetSharedInstance()->Update(config); +} + +void VoIPControllerWrapper::SwitchSpeaker(bool external){ + auto routingManager = AudioRoutingManager::GetDefault(); + if (external){ + routingManager->SetAudioEndpoint(AudioRoutingEndpoint::Speakerphone); + } + else{ + if ((routingManager->AvailableAudioEndpoints & AvailableAudioRoutingEndpoints::Bluetooth) == AvailableAudioRoutingEndpoints::Bluetooth){ + routingManager->SetAudioEndpoint(AudioRoutingEndpoint::Bluetooth); + } + else if ((routingManager->AvailableAudioEndpoints & AvailableAudioRoutingEndpoints::Earpiece) == AvailableAudioRoutingEndpoints::Earpiece){ + routingManager->SetAudioEndpoint(AudioRoutingEndpoint::Earpiece); + } + } +} + +void MicrosoftCryptoImpl::AesIgeEncrypt(uint8_t* in, uint8_t* out, size_t len, uint8_t* key, uint8_t* iv){ + IBuffer^ keybuf=IBufferFromPtr(key, 32); + CryptographicKey^ _key=aesKeyProvider->CreateSymmetricKey(keybuf); + uint8_t tmpOut[16]; + uint8_t* xPrev=iv+16; + uint8_t* yPrev=iv; + uint8_t x[16]; + uint8_t y[16]; + for(size_t offset=0;offsetCreateSymmetricKey(keybuf); + uint8_t tmpOut[16]; + uint8_t* xPrev=iv; + uint8_t* yPrev=iv+16; + uint8_t x[16]; + uint8_t y[16]; + for(size_t offset=0;offset> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); } + +typedef uint8_t u8; + +#define L_ENDIAN + +/* increment counter (128-bit int) by 2^64 */ +static void AES_ctr128_inc(unsigned char *counter) { + unsigned 
long c; + + /* Grab 3rd dword of counter and increment */ +#ifdef L_ENDIAN + c = GETU32(counter + 8); + c++; + PUTU32(counter + 8, c); +#else + c = GETU32(counter + 4); + c++; + PUTU32(counter + 4, c); +#endif + + /* if no overflow, we're done */ + if (c) + return; + + /* Grab top dword of counter and increment */ +#ifdef L_ENDIAN + c = GETU32(counter + 12); + c++; + PUTU32(counter + 12, c); +#else + c = GETU32(counter + 0); + c++; + PUTU32(counter + 0, c); +#endif + +} + +void MicrosoftCryptoImpl::AesCtrEncrypt(uint8_t* inout, size_t len, uint8_t* key, uint8_t* counter, uint8_t* ecount_buf, uint32_t* num){ + unsigned int n; + unsigned long l = len; + + //assert(in && out && key && counter && num); + //assert(*num < AES_BLOCK_SIZE); + + IBuffer^ keybuf = IBufferFromPtr(key, 32); + CryptographicKey^ _key = aesKeyProvider->CreateSymmetricKey(keybuf); + + n = *num; + + while (l--) { + if (n == 0) { + IBuffer^ inbuf = IBufferFromPtr(counter, 16); + IBuffer^ outbuf = CryptographicEngine::Encrypt(_key, inbuf, nullptr); + IBufferToPtr(outbuf, 16, ecount_buf); + //AES_encrypt(counter, ecount_buf, key); + AES_ctr128_inc(counter); + } + *inout = *(inout++) ^ ecount_buf[n]; + n = (n + 1) % 16; + } + + *num = n; +} + +void MicrosoftCryptoImpl::SHA1(uint8_t* msg, size_t len, uint8_t* out){ + //EnterCriticalSection(&hashMutex); + + IBuffer^ arr=IBufferFromPtr(msg, len); + CryptographicHash^ hash=sha1Provider->CreateHash(); + hash->Append(arr); + IBuffer^ res=hash->GetValueAndReset(); + IBufferToPtr(res, 20, out); + + //LeaveCriticalSection(&hashMutex); +} + +void MicrosoftCryptoImpl::SHA256(uint8_t* msg, size_t len, uint8_t* out){ + //EnterCriticalSection(&hashMutex); + + IBuffer^ arr=IBufferFromPtr(msg, len); + CryptographicHash^ hash=sha256Provider->CreateHash(); + hash->Append(arr); + IBuffer^ res=hash->GetValueAndReset(); + IBufferToPtr(res, 32, out); + //LeaveCriticalSection(&hashMutex); +} + +void MicrosoftCryptoImpl::RandBytes(uint8_t* buffer, size_t len){ + IBuffer^ 
res=CryptographicBuffer::GenerateRandom(len); + IBufferToPtr(res, len, buffer); +} + +void MicrosoftCryptoImpl::Init(){ + /*sha1Hash=HashAlgorithmProvider::OpenAlgorithm(HashAlgorithmNames::Sha1)->CreateHash(); + sha256Hash=HashAlgorithmProvider::OpenAlgorithm(HashAlgorithmNames::Sha256)->CreateHash();*/ + sha1Provider=HashAlgorithmProvider::OpenAlgorithm(HashAlgorithmNames::Sha1); + sha256Provider=HashAlgorithmProvider::OpenAlgorithm(HashAlgorithmNames::Sha256); + aesKeyProvider=SymmetricKeyAlgorithmProvider::OpenAlgorithm(SymmetricAlgorithmNames::AesEcb); +} + +void MicrosoftCryptoImpl::XorInt128(uint8_t* a, uint8_t* b, uint8_t* out){ + uint64_t* _a=reinterpret_cast(a); + uint64_t* _b=reinterpret_cast(b); + uint64_t* _out=reinterpret_cast(out); + _out[0]=_a[0]^_b[0]; + _out[1]=_a[1]^_b[1]; +} + +void MicrosoftCryptoImpl::IBufferToPtr(IBuffer^ buffer, size_t len, uint8_t* out) +{ + ComPtr bufferByteAccess; + reinterpret_cast(buffer)->QueryInterface(IID_PPV_ARGS(&bufferByteAccess)); + + byte* hashBuffer; + bufferByteAccess->Buffer(&hashBuffer); + CopyMemory(out, hashBuffer, len); +} + +IBuffer^ MicrosoftCryptoImpl::IBufferFromPtr(uint8_t* msg, size_t len) +{ + ComPtr nativeBuffer=Make((byte *)msg, len); + return reinterpret_cast(nativeBuffer.Get()); +} + +/*Platform::String^ VoIPControllerWrapper::TestAesIge(){ + MicrosoftCryptoImpl::Init(); + Platform::String^ res=""; + Platform::Array^ data=ref new Platform::Array(32); + Platform::Array^ out=ref new Platform::Array(32); + Platform::Array^ key=ref new Platform::Array(16); + Platform::Array^ iv=ref new Platform::Array(32); + + + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("0000000000000000000000000000000000000000000000000000000000000000"), &data); + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"), &iv); + 
CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("000102030405060708090a0b0c0d0e0f"), &key); + MicrosoftCryptoImpl::AesIgeEncrypt(data->Data, out->Data, 32, key->Data, iv->Data); + res+=CryptographicBuffer::EncodeToHexString(CryptographicBuffer::CreateFromByteArray(out)); + res+="\n"; + + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("1A8519A6557BE652E9DA8E43DA4EF4453CF456B4CA488AA383C79C98B34797CB"), &data); + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"), &iv); + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("000102030405060708090a0b0c0d0e0f"), &key); + MicrosoftCryptoImpl::AesIgeDecrypt(data->Data, out->Data, 32, key->Data, iv->Data); + res+=CryptographicBuffer::EncodeToHexString(CryptographicBuffer::CreateFromByteArray(out)); + res+="\n"; + + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("99706487A1CDE613BC6DE0B6F24B1C7AA448C8B9C3403E3467A8CAD89340F53B"), &data); + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("6D656E746174696F6E206F6620494745206D6F646520666F72204F70656E5353"), &iv); + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("5468697320697320616E20696D706C65"), &key); + MicrosoftCryptoImpl::AesIgeEncrypt(data->Data, out->Data, 32, key->Data, iv->Data); + res+=CryptographicBuffer::EncodeToHexString(CryptographicBuffer::CreateFromByteArray(out)); + res+="\n"; + + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("4C2E204C6574277320686F70652042656E20676F74206974207269676874210A"), &data); + CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("6D656E746174696F6E206F6620494745206D6F646520666F72204F70656E5353"), &iv); + 
CryptographicBuffer::CopyToByteArray(CryptographicBuffer::DecodeFromHexString("5468697320697320616E20696D706C65"), &key); + MicrosoftCryptoImpl::AesIgeDecrypt(data->Data, out->Data, 32, key->Data, iv->Data); + res+=CryptographicBuffer::EncodeToHexString(CryptographicBuffer::CreateFromByteArray(out)); + return res; +}*/ \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/CXWrapper.h b/Telegram/ThirdParty/libtgvoip/os/windows/CXWrapper.h new file mode 100755 index 000000000..cc3e0033b --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/CXWrapper.h @@ -0,0 +1,167 @@ +#pragma once + +#include +#include +#include +#include +#include +#include "../../VoIPController.h" +#include "../../VoIPServerConfig.h" + +namespace libtgvoip{ + public ref class Endpoint sealed{ + public: + property int64 id; + property uint16 port; + property Platform::String^ ipv4; + property Platform::String^ ipv6; + property Platform::Array^ peerTag; + }; + + public enum class CallState : int{ + WaitInit=1, + WaitInitAck, + Established, + Failed + }; + + public enum class Error : int{ + Unknown=0, + Incompatible, + Timeout, + AudioIO + }; + + public enum class NetworkType : int{ + Unknown=0, + GPRS, + EDGE, + UMTS, + HSPA, + LTE, + WiFi, + Ethernet, + OtherHighSpeed, + OtherLowSpeed, + Dialup, + OtherMobile + }; + + public enum class DataSavingMode{ + Never=0, + MobileOnly, + Always + }; + + public enum class ProxyProtocol{ + None=0, + SOCKS5 + }; + + public interface class IStateCallback{ + void OnCallStateChanged(CallState newState); + void OnSignalBarsChanged(int count); + }; + + public ref class VoIPControllerWrapper sealed{ + public: + VoIPControllerWrapper(); + virtual ~VoIPControllerWrapper(); + void Start(); + void Connect(); + void SetPublicEndpoints(const Platform::Array^ endpoints, bool allowP2P); + void SetNetworkType(NetworkType type); + void SetStateCallback(IStateCallback^ callback); + void SetMicMute(bool mute); + void SetEncryptionKey(const 
Platform::Array^ key, bool isOutgoing); + void SetConfig(double initTimeout, double recvTimeout, DataSavingMode dataSavingMode, bool enableAEC, bool enableNS, bool enableAGC, Platform::String^ logFilePath, Platform::String^ statsDumpFilePath); + void SetProxy(ProxyProtocol protocol, Platform::String^ address, uint16_t port, Platform::String^ username, Platform::String^ password); + Platform::String^ GetDebugString(); + Platform::String^ GetDebugLog(); + Error GetLastError(); + static Platform::String^ GetVersion(); + int64 GetPreferredRelayID(); + static void UpdateServerConfig(Platform::String^ json); + static void SwitchSpeaker(bool external); + //static Platform::String^ TestAesIge(); + private: + static void OnStateChanged(tgvoip::VoIPController* c, int state); + static void OnSignalBarsChanged(tgvoip::VoIPController* c, int count); + void OnStateChangedInternal(int state); + void OnSignalBarsChangedInternal(int count); + tgvoip::VoIPController* controller; + IStateCallback^ stateCallback; + }; + + ref class MicrosoftCryptoImpl{ + public: + static void AesIgeEncrypt(uint8_t* in, uint8_t* out, size_t len, uint8_t* key, uint8_t* iv); + static void AesIgeDecrypt(uint8_t* in, uint8_t* out, size_t len, uint8_t* key, uint8_t* iv); + static void AesCtrEncrypt(uint8_t* inout, size_t len, uint8_t* key, uint8_t* iv, uint8_t* ecount, uint32_t* num); + static void SHA1(uint8_t* msg, size_t len, uint8_t* out); + static void SHA256(uint8_t* msg, size_t len, uint8_t* out); + static void RandBytes(uint8_t* buffer, size_t len); + static void Init(); + private: + static inline void XorInt128(uint8_t* a, uint8_t* b, uint8_t* out); + static void IBufferToPtr(Windows::Storage::Streams::IBuffer^ buffer, size_t len, uint8_t* out); + static Windows::Storage::Streams::IBuffer^ IBufferFromPtr(uint8_t* msg, size_t len); + /*static Windows::Security::Cryptography::Core::CryptographicHash^ sha1Hash; + static Windows::Security::Cryptography::Core::CryptographicHash^ sha256Hash;*/ + static 
Windows::Security::Cryptography::Core::HashAlgorithmProvider^ sha1Provider; + static Windows::Security::Cryptography::Core::HashAlgorithmProvider^ sha256Provider; + static Windows::Security::Cryptography::Core::SymmetricKeyAlgorithmProvider^ aesKeyProvider; + }; + + class NativeBuffer : + public Microsoft::WRL::RuntimeClass, + ABI::Windows::Storage::Streams::IBuffer, + Windows::Storage::Streams::IBufferByteAccess> + { + public: + NativeBuffer(byte *buffer, UINT totalSize) + { + m_length=totalSize; + m_buffer=buffer; + } + + virtual ~NativeBuffer() + { + } + + STDMETHODIMP RuntimeClassInitialize(byte *buffer, UINT totalSize) + { + m_length=totalSize; + m_buffer=buffer; + return S_OK; + } + + STDMETHODIMP Buffer(byte **value) + { + *value=m_buffer; + return S_OK; + } + + STDMETHODIMP get_Capacity(UINT32 *value) + { + *value=m_length; + return S_OK; + } + + STDMETHODIMP get_Length(UINT32 *value) + { + *value=m_length; + return S_OK; + } + + STDMETHODIMP put_Length(UINT32 value) + { + m_length=value; + return S_OK; + } + + private: + UINT32 m_length; + byte *m_buffer; + }; +} \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/NetworkSocketWinsock.cpp b/Telegram/ThirdParty/libtgvoip/os/windows/NetworkSocketWinsock.cpp new file mode 100644 index 000000000..c575840cb --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/NetworkSocketWinsock.cpp @@ -0,0 +1,638 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include "NetworkSocketWinsock.h" +#include +#include +#if WINAPI_FAMILY==WINAPI_FAMILY_PHONE_APP + +#else +#include +#endif +#include +#include "../../logging.h" +#include "../../VoIPController.h" + +using namespace tgvoip; + +NetworkSocketWinsock::NetworkSocketWinsock(NetworkProtocol protocol) : NetworkSocket(protocol), lastRecvdV4(0), lastRecvdV6("::0"){ + needUpdateNat64Prefix=true; + nat64Present=false; + switchToV6at=0; + isV4Available=false; + closing=false; + fd=INVALID_SOCKET; + +#ifdef TGVOIP_WINXP_COMPAT + DWORD version=GetVersion(); + isAtLeastVista=LOBYTE(LOWORD(version))>=6; // Vista is 6.0, XP is 5.1 and 5.2 +#else + isAtLeastVista=true; +#endif + + WSADATA wsaData; + WSAStartup(MAKEWORD(2, 2), &wsaData); + LOGD("Initialized winsock, version %d.%d", wsaData.wHighVersion, wsaData.wVersion); + tcpConnectedAddress=NULL; +} + +NetworkSocketWinsock::~NetworkSocketWinsock(){ + if(tcpConnectedAddress) + delete tcpConnectedAddress; +} + +void NetworkSocketWinsock::SetMaxPriority(){ + +} + +void NetworkSocketWinsock::Send(NetworkPacket *packet){ + if(!packet || !packet->address){ + LOGW("tried to send null packet"); + return; + } + int res; + if(protocol==PROTO_UDP){ + IPv4Address *v4addr=dynamic_cast(packet->address); + if(isAtLeastVista){ + sockaddr_in6 addr; + if(v4addr){ + if(needUpdateNat64Prefix && !isV4Available && VoIPController::GetCurrentTime()>switchToV6at && switchToV6at!=0){ + LOGV("Updating NAT64 prefix"); + nat64Present=false; + addrinfo *addr0; + int res=getaddrinfo("ipv4only.arpa", NULL, NULL, &addr0); + if(res!=0){ + LOGW("Error updating NAT64 prefix: %d / %s", res, gai_strerror(res)); + }else{ + addrinfo *addrPtr; + unsigned char *addr170=NULL; + unsigned char *addr171=NULL; + for(addrPtr=addr0; addrPtr; addrPtr=addrPtr->ai_next){ + if(addrPtr->ai_family==AF_INET6){ + sockaddr_in6 *translatedAddr=(sockaddr_in6 *) addrPtr->ai_addr; + uint32_t v4part=*((uint32_t *) &translatedAddr->sin6_addr.s6_addr[12]); + if(v4part==0xAA0000C0 && 
!addr170){ + addr170=translatedAddr->sin6_addr.s6_addr; + } + if(v4part==0xAB0000C0 && !addr171){ + addr171=translatedAddr->sin6_addr.s6_addr; + } + char buf[INET6_ADDRSTRLEN]; + //LOGV("Got translated address: %s", inet_ntop(AF_INET6, &translatedAddr->sin6_addr, buf, sizeof(buf))); + } + } + if(addr170 && addr171 && memcmp(addr170, addr171, 12)==0){ + nat64Present=true; + memcpy(nat64Prefix, addr170, 12); + char buf[INET6_ADDRSTRLEN]; + //LOGV("Found nat64 prefix from %s", inet_ntop(AF_INET6, addr170, buf, sizeof(buf))); + }else{ + LOGV("Didn't find nat64"); + } + freeaddrinfo(addr0); + } + needUpdateNat64Prefix=false; + } + memset(&addr, 0, sizeof(sockaddr_in6)); + addr.sin6_family=AF_INET6; + *((uint32_t *) &addr.sin6_addr.s6_addr[12])=v4addr->GetAddress(); + if(nat64Present) + memcpy(addr.sin6_addr.s6_addr, nat64Prefix, 12); + else + addr.sin6_addr.s6_addr[11]=addr.sin6_addr.s6_addr[10]=0xFF; + + }else{ + IPv6Address *v6addr=dynamic_cast(packet->address); + assert(v6addr!=NULL); + memcpy(addr.sin6_addr.s6_addr, v6addr->GetAddress(), 16); + } + addr.sin6_port=htons(packet->port); + res=sendto(fd, (const char*)packet->data, packet->length, 0, (const sockaddr *) &addr, sizeof(addr)); + }else if(v4addr){ + sockaddr_in addr; + addr.sin_addr.s_addr=v4addr->GetAddress(); + addr.sin_port=htons(packet->port); + addr.sin_family=AF_INET; + res=sendto(fd, (const char*)packet->data, packet->length, 0, (const sockaddr*)&addr, sizeof(addr)); + } + }else{ + res=send(fd, (const char*)packet->data, packet->length, 0); + } + if(res==SOCKET_ERROR){ + LOGE("error sending: %d", WSAGetLastError()); + if(errno==ENETUNREACH && !isV4Available && VoIPController::GetCurrentTime()data, packet->length, 0, (sockaddr *) &srcAddr, (socklen_t *) &addrLen); + if(res!=SOCKET_ERROR) + packet->length=(size_t) res; + else{ + LOGE("error receiving %d", WSAGetLastError()); + packet->length=0; + return; + } + //LOGV("Received %d bytes from %s:%d at %.5lf", len, inet_ntoa(srcAddr.sin_addr), 
ntohs(srcAddr.sin_port), GetCurrentTime()); + if(!isV4Available && IN6_IS_ADDR_V4MAPPED(&srcAddr.sin6_addr)){ + isV4Available=true; + LOGI("Detected IPv4 connectivity, will not try IPv6"); + } + if(IN6_IS_ADDR_V4MAPPED(&srcAddr.sin6_addr) || (nat64Present && memcmp(nat64Prefix, srcAddr.sin6_addr.s6_addr, 12)==0)){ + in_addr v4addr=*((in_addr *) &srcAddr.sin6_addr.s6_addr[12]); + lastRecvdV4=IPv4Address(v4addr.s_addr); + packet->address=&lastRecvdV4; + }else{ + lastRecvdV6=IPv6Address(srcAddr.sin6_addr.s6_addr); + packet->address=&lastRecvdV6; + } + packet->port=ntohs(srcAddr.sin6_port); + }else{ + int addrLen=sizeof(sockaddr_in); + sockaddr_in srcAddr; + int res=recvfrom(fd, (char*)packet->data, packet->length, 0, (sockaddr *) &srcAddr, (socklen_t *) &addrLen); + if(res!=SOCKET_ERROR) + packet->length=(size_t) res; + else{ + LOGE("error receiving %d", WSAGetLastError()); + packet->length=0; + return; + } + lastRecvdV4=IPv4Address(srcAddr.sin_addr.s_addr); + packet->address=&lastRecvdV4; + packet->port=ntohs(srcAddr.sin_port); + } + packet->protocol=PROTO_UDP; + }else if(protocol==PROTO_TCP){ + int res=recv(fd, (char*)packet->data, packet->length, 0); + if(res==SOCKET_ERROR){ + LOGE("Error receiving from TCP socket: %d", WSAGetLastError()); + failed=true; + }else{ + packet->length=(size_t)res; + packet->address=tcpConnectedAddress; + packet->port=tcpConnectedPort; + packet->protocol=PROTO_TCP; + } + } +} + +void NetworkSocketWinsock::Open(){ + if(protocol==PROTO_UDP){ + fd=socket(isAtLeastVista ? 
AF_INET6 : AF_INET, SOCK_DGRAM, IPPROTO_UDP); + if(fd==INVALID_SOCKET){ + int error=WSAGetLastError(); + LOGE("error creating socket: %d", error); + failed=true; + return; + } + + int res; + if(isAtLeastVista){ + DWORD flag=0; + res=setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&flag, sizeof(flag)); + if(res==SOCKET_ERROR){ + LOGE("error enabling dual stack socket: %d", WSAGetLastError()); + failed=true; + return; + } + } + + SetMaxPriority(); + + int tries=0; + sockaddr* addr; + sockaddr_in addr4; + sockaddr_in6 addr6; + int addrLen; + if(isAtLeastVista){ + //addr.sin6_addr.s_addr=0; + memset(&addr6, 0, sizeof(sockaddr_in6)); + //addr.sin6_len=sizeof(sa_family_t); + addr6.sin6_family=AF_INET6; + addr=(sockaddr*)&addr6; + addrLen=sizeof(addr6); + }else{ + sockaddr_in addr4; + addr4.sin_addr.s_addr=0; + addr4.sin_family=AF_INET; + addr=(sockaddr*)&addr4; + addrLen=sizeof(addr4); + } + for(tries=0;tries<10;tries++){ + uint16_t port=htons(GenerateLocalPort()); + if(isAtLeastVista) + ((sockaddr_in6*)addr)->sin6_port=port; + else + ((sockaddr_in*)addr)->sin_port=port; + res=::bind(fd, addr, addrLen); + LOGV("trying bind to port %u", ntohs(port)); + if(res<0){ + LOGE("error binding to port %u: %d / %s", ntohs(port), errno, strerror(errno)); + }else{ + break; + } + } + if(tries==10){ + if(isAtLeastVista) + ((sockaddr_in6*)addr)->sin6_port=0; + else + ((sockaddr_in*)addr)->sin_port=0; + res=::bind(fd, addr, addrLen); + if(res<0){ + LOGE("error binding to port %u: %d / %s", 0, errno, strerror(errno)); + //SetState(STATE_FAILED); + return; + } + } + getsockname(fd, addr, (socklen_t*) &addrLen); + uint16_t localUdpPort; + if(isAtLeastVista) + localUdpPort=ntohs(((sockaddr_in6*)addr)->sin6_port); + else + localUdpPort=ntohs(((sockaddr_in*)addr)->sin_port); + LOGD("Bound to local UDP port %u", localUdpPort); + + needUpdateNat64Prefix=true; + isV4Available=false; + switchToV6at=VoIPController::GetCurrentTime()+ipv6Timeout; + } +} + +void NetworkSocketWinsock::Close(){ + 
closing=true; + failed=true; + if(fd!=INVALID_SOCKET) + closesocket(fd); +} + +void NetworkSocketWinsock::OnActiveInterfaceChanged(){ + needUpdateNat64Prefix=true; + isV4Available=false; + switchToV6at=VoIPController::GetCurrentTime()+ipv6Timeout; +} + +std::string NetworkSocketWinsock::GetLocalInterfaceInfo(IPv4Address *v4addr, IPv6Address *v6addr){ +#if WINAPI_FAMILY==WINAPI_FAMILY_PHONE_APP + Windows::Networking::Connectivity::ConnectionProfile^ profile=Windows::Networking::Connectivity::NetworkInformation::GetInternetConnectionProfile(); + if(profile){ + Windows::Foundation::Collections::IVectorView^ hostnames=Windows::Networking::Connectivity::NetworkInformation::GetHostNames(); + for(unsigned int i=0;iSize;i++){ + Windows::Networking::HostName^ n = hostnames->GetAt(i); + if(n->Type!=Windows::Networking::HostNameType::Ipv4 && n->Type!=Windows::Networking::HostNameType::Ipv6) + continue; + if(n->IPInformation->NetworkAdapter->Equals(profile->NetworkAdapter)){ + if(v4addr && n->Type==Windows::Networking::HostNameType::Ipv4){ + char buf[INET_ADDRSTRLEN]; + WideCharToMultiByte(CP_UTF8, 0, n->RawName->Data(), -1, buf, sizeof(buf), NULL, NULL); + *v4addr=IPv4Address(buf); + }else if(v6addr && n->Type==Windows::Networking::HostNameType::Ipv6){ + char buf[INET6_ADDRSTRLEN]; + WideCharToMultiByte(CP_UTF8, 0, n->RawName->Data(), -1, buf, sizeof(buf), NULL, NULL); + *v6addr=IPv6Address(buf); + } + } + } + char buf[128]; + WideCharToMultiByte(CP_UTF8, 0, profile->NetworkAdapter->NetworkAdapterId.ToString()->Data(), -1, buf, sizeof(buf), NULL, NULL); + return std::string(buf); + } + return ""; +#else + IP_ADAPTER_ADDRESSES* addrs=(IP_ADAPTER_ADDRESSES*)malloc(15*1024); + ULONG size=15*1024; + ULONG flags=GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_SKIP_FRIENDLY_NAME; + + ULONG res=GetAdaptersAddresses(AF_UNSPEC, flags, NULL, addrs, &size); + if(res==ERROR_BUFFER_OVERFLOW){ + addrs=(IP_ADAPTER_ADDRESSES*)realloc(addrs, size); + 
res=GetAdaptersAddresses(AF_UNSPEC, flags, NULL, addrs, &size); + } + + ULONG bestMetric=0; + std::string bestName(""); + + if(res==ERROR_SUCCESS){ + IP_ADAPTER_ADDRESSES* current=addrs; + while(current){ + char* name=current->AdapterName; + LOGV("Adapter '%s':", name); + IP_ADAPTER_UNICAST_ADDRESS* curAddr=current->FirstUnicastAddress; + if(current->OperStatus!=IfOperStatusUp){ + LOGV("-> (down)"); + current=current->Next; + continue; + } + if(current->IfType==IF_TYPE_SOFTWARE_LOOPBACK){ + LOGV("-> (loopback)"); + current=current->Next; + continue; + } + if(isAtLeastVista) + LOGV("v4 metric: %u, v6 metric: %u", current->Ipv4Metric, current->Ipv6Metric); + while(curAddr){ + sockaddr* addr=curAddr->Address.lpSockaddr; + if(addr->sa_family==AF_INET && v4addr){ + sockaddr_in* ipv4=(sockaddr_in*)addr; + LOGV("-> V4: %s", V4AddressToString(ipv4->sin_addr.s_addr).c_str()); + uint32_t ip=ntohl(ipv4->sin_addr.s_addr); + if((ip & 0xFFFF0000)!=0xA9FE0000){ + if(isAtLeastVista){ + if(current->Ipv4Metric>bestMetric){ + bestMetric=current->Ipv4Metric; + bestName=std::string(current->AdapterName); + *v4addr=IPv4Address(ipv4->sin_addr.s_addr); + } + }else{ + bestName=std::string(current->AdapterName); + *v4addr=IPv4Address(ipv4->sin_addr.s_addr); + } + } + }else if(addr->sa_family==AF_INET6 && v6addr){ + sockaddr_in6* ipv6=(sockaddr_in6*)addr; + LOGV("-> V6: %s", V6AddressToString(ipv6->sin6_addr.s6_addr).c_str()); + if(!IN6_IS_ADDR_LINKLOCAL(&ipv6->sin6_addr)){ + *v6addr=IPv6Address(ipv6->sin6_addr.s6_addr); + } + } + curAddr=curAddr->Next; + } + current=current->Next; + } + } + + free(addrs); + return bestName; +#endif +} + +uint16_t NetworkSocketWinsock::GetLocalPort(){ + if(!isAtLeastVista){ + sockaddr_in addr; + size_t addrLen=sizeof(sockaddr_in); + getsockname(fd, (sockaddr*)&addr, (socklen_t*)&addrLen); + return ntohs(addr.sin_port); + } + sockaddr_in6 addr; + size_t addrLen=sizeof(sockaddr_in6); + getsockname(fd, (sockaddr*)&addr, (socklen_t*) &addrLen); + return 
ntohs(addr.sin6_port); +} + +std::string NetworkSocketWinsock::V4AddressToString(uint32_t address){ + char buf[INET_ADDRSTRLEN]; + sockaddr_in addr; + ZeroMemory(&addr, sizeof(addr)); + addr.sin_family=AF_INET; + addr.sin_addr.s_addr=address; + DWORD len=sizeof(buf); +#if WINAPI_FAMILY==WINAPI_FAMILY_PHONE_APP + wchar_t wbuf[INET_ADDRSTRLEN]; + ZeroMemory(wbuf, sizeof(wbuf)); + WSAAddressToStringW((sockaddr*)&addr, sizeof(addr), NULL, wbuf, &len); + WideCharToMultiByte(CP_UTF8, 0, wbuf, -1, buf, sizeof(buf), NULL, NULL); +#else + WSAAddressToStringA((sockaddr*)&addr, sizeof(addr), NULL, buf, &len); +#endif + return std::string(buf); +} + +std::string NetworkSocketWinsock::V6AddressToString(unsigned char *address){ + char buf[INET6_ADDRSTRLEN]; + sockaddr_in6 addr; + ZeroMemory(&addr, sizeof(addr)); + addr.sin6_family=AF_INET6; + memcpy(addr.sin6_addr.s6_addr, address, 16); + DWORD len=sizeof(buf); +#if WINAPI_FAMILY==WINAPI_FAMILY_PHONE_APP + wchar_t wbuf[INET6_ADDRSTRLEN]; + ZeroMemory(wbuf, sizeof(wbuf)); + WSAAddressToStringW((sockaddr*)&addr, sizeof(addr), NULL, wbuf, &len); + WideCharToMultiByte(CP_UTF8, 0, wbuf, -1, buf, sizeof(buf), NULL, NULL); +#else + WSAAddressToStringA((sockaddr*)&addr, sizeof(addr), NULL, buf, &len); +#endif + return std::string(buf); +} + +uint32_t NetworkSocketWinsock::StringToV4Address(std::string address){ + sockaddr_in addr; + ZeroMemory(&addr, sizeof(addr)); + addr.sin_family=AF_INET; + int size=sizeof(addr); +#if WINAPI_FAMILY==WINAPI_FAMILY_PHONE_APP + wchar_t buf[INET_ADDRSTRLEN]; + MultiByteToWideChar(CP_UTF8, 0, address.c_str(), -1, buf, INET_ADDRSTRLEN); + WSAStringToAddressW(buf, AF_INET, NULL, (sockaddr*)&addr, &size); +#else + WSAStringToAddressA((char*)address.c_str(), AF_INET, NULL, (sockaddr*)&addr, &size); +#endif + return addr.sin_addr.s_addr; +} + +void NetworkSocketWinsock::StringToV6Address(std::string address, unsigned char *out){ + sockaddr_in6 addr; + ZeroMemory(&addr, sizeof(addr)); + 
addr.sin6_family=AF_INET6; + int size=sizeof(addr); +#if WINAPI_FAMILY==WINAPI_FAMILY_PHONE_APP + wchar_t buf[INET6_ADDRSTRLEN]; + MultiByteToWideChar(CP_UTF8, 0, address.c_str(), -1, buf, INET6_ADDRSTRLEN); + WSAStringToAddressW(buf, AF_INET, NULL, (sockaddr*)&addr, &size); +#else + WSAStringToAddressA((char*)address.c_str(), AF_INET, NULL, (sockaddr*)&addr, &size); +#endif + memcpy(out, addr.sin6_addr.s6_addr, 16); +} + +void NetworkSocketWinsock::Connect(NetworkAddress *address, uint16_t port){ + IPv4Address* v4addr=dynamic_cast(address); + IPv6Address* v6addr=dynamic_cast(address); + sockaddr_in v4; + sockaddr_in6 v6; + sockaddr* addr=NULL; + size_t addrLen=0; + if(v4addr){ + v4.sin_family=AF_INET; + v4.sin_addr.s_addr=v4addr->GetAddress(); + v4.sin_port=htons(port); + addr=reinterpret_cast(&v4); + addrLen=sizeof(v4); + }else if(v6addr){ + v6.sin6_family=AF_INET6; + memcpy(v6.sin6_addr.s6_addr, v6addr->GetAddress(), 16); + v6.sin6_flowinfo=0; + v6.sin6_scope_id=0; + v6.sin6_port=htons(port); + addr=reinterpret_cast(&v6); + addrLen=sizeof(v6); + }else{ + LOGE("Unknown address type in TCP connect"); + failed=true; + return; + } + fd=socket(addr->sa_family, SOCK_STREAM, IPPROTO_TCP); + if(fd==0){ + LOGE("Error creating TCP socket: %d", WSAGetLastError()); + failed=true; + return; + } + int opt=1; + setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const char*)&opt, sizeof(opt)); + timeval timeout; + timeout.tv_sec=5; + timeout.tv_usec=0; + setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (const char*)&timeout, sizeof(timeout)); + timeout.tv_sec=60; + setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeout, sizeof(timeout)); + int res=connect(fd, (const sockaddr*) addr, addrLen); + if(res!=0){ + LOGW("error connecting TCP socket to %s:%u: %d", address->ToString().c_str(), port, WSAGetLastError()); + closesocket(fd); + failed=true; + return; + } + tcpConnectedAddress=v4addr ? 
(NetworkAddress*)new IPv4Address(*v4addr) : (NetworkAddress*)new IPv6Address(*v6addr); + tcpConnectedPort=port; + LOGI("successfully connected to %s:%d", tcpConnectedAddress->ToString().c_str(), tcpConnectedPort); +} + +IPv4Address *NetworkSocketWinsock::ResolveDomainName(std::string name){ + addrinfo* addr0; + IPv4Address* ret=NULL; + int res=getaddrinfo(name.c_str(), NULL, NULL, &addr0); + if(res!=0){ + LOGW("Error updating NAT64 prefix: %d / %s", res, gai_strerror(res)); + }else{ + addrinfo* addrPtr; + for(addrPtr=addr0;addrPtr;addrPtr=addrPtr->ai_next){ + if(addrPtr->ai_family==AF_INET){ + sockaddr_in* addr=(sockaddr_in*)addrPtr->ai_addr; + ret=new IPv4Address(addr->sin_addr.s_addr); + break; + } + } + freeaddrinfo(addr0); + } + return ret; +} + +NetworkAddress *NetworkSocketWinsock::GetConnectedAddress(){ + return tcpConnectedAddress; +} + +uint16_t NetworkSocketWinsock::GetConnectedPort(){ + return tcpConnectedPort; +} + +void NetworkSocketWinsock::SetTimeouts(int sendTimeout, int recvTimeout){ + timeval timeout; + timeout.tv_sec=sendTimeout; + timeout.tv_usec=0; + setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (const char*)&timeout, sizeof(timeout)); + timeout.tv_sec=recvTimeout; + setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeout, sizeof(timeout)); +} + +bool NetworkSocketWinsock::Select(std::vector &readFds, std::vector &errorFds, SocketSelectCanceller* _canceller){ + fd_set readSet; + fd_set errorSet; + SocketSelectCancellerWin32* canceller=dynamic_cast(_canceller); + timeval timeout={0, 10000}; + bool anyFailed=false; + int res=0; + + do{ + FD_ZERO(&readSet); + FD_ZERO(&errorSet); + + for(std::vector::iterator itr=readFds.begin();itr!=readFds.end();++itr){ + int sfd=GetDescriptorFromSocket(*itr); + if(sfd==0){ + LOGW("can't select on one of sockets because it's not a NetworkSocketWinsock instance"); + continue; + } + FD_SET(sfd, &readSet); + } + + + for(std::vector::iterator itr=errorFds.begin();itr!=errorFds.end();++itr){ + int 
sfd=GetDescriptorFromSocket(*itr); + if(sfd==0){ + LOGW("can't select on one of sockets because it's not a NetworkSocketWinsock instance"); + continue; + } + anyFailed |= (*itr)->IsFailed(); + FD_SET(sfd, &errorSet); + } + if(canceller && canceller->canceled) + break; + res=select(0, &readSet, NULL, &errorSet, &timeout); + //LOGV("select result %d", res); + if(res==SOCKET_ERROR) + LOGE("SELECT ERROR %d", WSAGetLastError()); + }while(res==0); + + + if(canceller && canceller->canceled && !anyFailed){ + canceller->canceled=false; + return false; + }else if(anyFailed){ + FD_ZERO(&readSet); + FD_ZERO(&errorSet); + } + + std::vector::iterator itr=readFds.begin(); + while(itr!=readFds.end()){ + int sfd=GetDescriptorFromSocket(*itr); + if(sfd==0 || !FD_ISSET(sfd, &readSet)){ + itr=readFds.erase(itr); + }else{ + ++itr; + } + } + + itr=errorFds.begin(); + while(itr!=errorFds.end()){ + int sfd=GetDescriptorFromSocket(*itr); + if((sfd==0 || !FD_ISSET(sfd, &errorSet)) && !(*itr)->IsFailed()){ + itr=errorFds.erase(itr); + }else{ + ++itr; + } + } + //LOGV("select fds left: read=%d, error=%d", readFds.size(), errorFds.size()); + + return readFds.size()>0 || errorFds.size()>0; +} + +SocketSelectCancellerWin32::SocketSelectCancellerWin32(){ + canceled=false; +} + +SocketSelectCancellerWin32::~SocketSelectCancellerWin32(){ +} + +void SocketSelectCancellerWin32::CancelSelect(){ + canceled=true; +} + +int NetworkSocketWinsock::GetDescriptorFromSocket(NetworkSocket *socket){ + NetworkSocketWinsock* sp=dynamic_cast(socket); + if(sp) + return sp->fd; + NetworkSocketWrapper* sw=dynamic_cast(socket); + if(sw) + return GetDescriptorFromSocket(sw->GetWrapped()); + return 0; +} diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/NetworkSocketWinsock.h b/Telegram/ThirdParty/libtgvoip/os/windows/NetworkSocketWinsock.h new file mode 100644 index 000000000..101b79298 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/NetworkSocketWinsock.h @@ -0,0 +1,72 @@ +// +// libtgvoip is free and 
unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#ifndef LIBTGVOIP_NETWORKSOCKETWINSOCK_H +#define LIBTGVOIP_NETWORKSOCKETWINSOCK_H + +#include "../../NetworkSocket.h" +#include +#include + +namespace tgvoip { + +class SocketSelectCancellerWin32 : public SocketSelectCanceller{ +friend class NetworkSocketWinsock; +public: + SocketSelectCancellerWin32(); + virtual ~SocketSelectCancellerWin32(); + virtual void CancelSelect(); +private: + bool canceled; +}; + +class NetworkSocketWinsock : public NetworkSocket{ +public: + NetworkSocketWinsock(NetworkProtocol protocol); + virtual ~NetworkSocketWinsock(); + virtual void Send(NetworkPacket* packet); + virtual void Receive(NetworkPacket* packet); + virtual void Open(); + virtual void Close(); + virtual std::string GetLocalInterfaceInfo(IPv4Address* v4addr, IPv6Address* v6addr); + virtual void OnActiveInterfaceChanged(); + virtual uint16_t GetLocalPort(); + virtual void Connect(NetworkAddress* address, uint16_t port); + + static std::string V4AddressToString(uint32_t address); + static std::string V6AddressToString(unsigned char address[16]); + static uint32_t StringToV4Address(std::string address); + static void StringToV6Address(std::string address, unsigned char* out); + static IPv4Address* ResolveDomainName(std::string name); + static bool Select(std::vector& readFds, std::vector& errorFds, SocketSelectCanceller* canceller); + + virtual NetworkAddress *GetConnectedAddress(); + + virtual uint16_t GetConnectedPort(); + + virtual void SetTimeouts(int sendTimeout, int recvTimeout); + +protected: + virtual void SetMaxPriority(); + +private: + static int GetDescriptorFromSocket(NetworkSocket* socket); + uintptr_t fd; + bool needUpdateNat64Prefix; + bool nat64Present; + double switchToV6at; + bool isV4Available; + IPv4Address lastRecvdV4; + IPv6Address lastRecvdV6; + bool isAtLeastVista; + bool 
closing; + NetworkAddress* tcpConnectedAddress; + uint16_t tcpConnectedPort; +}; + +} + +#endif //LIBTGVOIP_NETWORKSOCKETWINSOCK_H diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/WindowsSandboxUtils.cpp b/Telegram/ThirdParty/libtgvoip/os/windows/WindowsSandboxUtils.cpp new file mode 100644 index 000000000..8fbba664e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/WindowsSandboxUtils.cpp @@ -0,0 +1,68 @@ + +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. +// + +#include "WindowsSandboxUtils.h" +#include +#include +#ifdef TGVOIP_WP_SILVERLIGHT +#include +#endif + +using namespace tgvoip; +using namespace Microsoft::WRL; + + +IAudioClient2* WindowsSandboxUtils::ActivateAudioDevice(const wchar_t* devID, HRESULT* callRes, HRESULT* actRes) { +#ifndef TGVOIP_WP_SILVERLIGHT + // Did I say that I hate pointlessly asynchronous things? 
+ HANDLE event = CreateEventEx(NULL, NULL, 0, EVENT_ALL_ACCESS); + ActivationHandler activationHandler(event); + IActivateAudioInterfaceAsyncOperation* actHandler; + HRESULT cr = ActivateAudioInterfaceAsync(devID, __uuidof(IAudioClient2), NULL, (IActivateAudioInterfaceCompletionHandler*)&activationHandler, &actHandler); + if (callRes) + *callRes = cr; + DWORD resulttt = WaitForSingleObjectEx(event, INFINITE, false); + DWORD last = GetLastError(); + CloseHandle(event); + if (actRes) + *actRes = activationHandler.actResult; + return activationHandler.client; +#else + IAudioClient2* client; + HRESULT res=ActivateAudioInterface(devID, __uuidof(IAudioClient2), (void**)&client); + if(callRes) + *callRes=S_OK; + if(actRes) + *actRes=res; + return client; +#endif +} + +#ifndef TGVOIP_WP_SILVERLIGHT +ActivationHandler::ActivationHandler(HANDLE _event) : event(_event) +{ + +} + +STDMETHODIMP ActivationHandler::ActivateCompleted(IActivateAudioInterfaceAsyncOperation * operation) +{ + HRESULT hr = S_OK; + HRESULT hrActivateResult = S_OK; + IUnknown *punkAudioInterface = nullptr; + + hr = operation->GetActivateResult(&hrActivateResult, &punkAudioInterface); + if (SUCCEEDED(hr) && SUCCEEDED(hrActivateResult)) + { + punkAudioInterface->QueryInterface(IID_PPV_ARGS(&client)); + } + + SetEvent(event); + + return hr; +} + +#endif \ No newline at end of file diff --git a/Telegram/ThirdParty/libtgvoip/os/windows/WindowsSandboxUtils.h b/Telegram/ThirdParty/libtgvoip/os/windows/WindowsSandboxUtils.h new file mode 100644 index 000000000..54654f7c8 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/os/windows/WindowsSandboxUtils.h @@ -0,0 +1,38 @@ + +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#include +#include +#ifndef TGVOIP_WP_SILVERLIGHT +#include +#endif +#include +#include + +using namespace Microsoft::WRL; + +namespace tgvoip { + +#ifndef TGVOIP_WP_SILVERLIGHT + class ActivationHandler : + public RuntimeClass< RuntimeClassFlags< ClassicCom >, FtmBase, IActivateAudioInterfaceCompletionHandler > + { + public: + STDMETHOD(ActivateCompleted)(IActivateAudioInterfaceAsyncOperation *operation); + + ActivationHandler(HANDLE _event); + HANDLE event; + IAudioClient2* client; + HRESULT actResult; + }; +#endif + + class WindowsSandboxUtils { + public: + static IAudioClient2* ActivateAudioDevice(const wchar_t* devID, HRESULT* callResult, HRESULT* actResult); + }; +} diff --git a/Telegram/ThirdParty/libtgvoip/threading.h b/Telegram/ThirdParty/libtgvoip/threading.h new file mode 100644 index 000000000..8a6f43842 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/threading.h @@ -0,0 +1,207 @@ +// +// libtgvoip is free and unencumbered public domain software. +// For more information, see http://unlicense.org or the UNLICENSE file +// you should have received with this source code distribution. 
+// + +#ifndef __THREADING_H +#define __THREADING_H + +#if defined(_POSIX_THREADS) || defined(_POSIX_VERSION) || defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__)) + +#include +#include +#include + +typedef pthread_t tgvoip_thread_t; +typedef pthread_mutex_t tgvoip_mutex_t; +typedef pthread_cond_t tgvoip_lock_t; + +#define start_thread(ref, entry, arg) pthread_create(&ref, NULL, entry, arg) +#define join_thread(thread) pthread_join(thread, NULL) +#ifndef __APPLE__ +#define set_thread_name(thread, name) pthread_setname_np(thread, name) +#else +#define set_thread_name(thread, name) +#endif +#define set_thread_priority(thread, priority) {sched_param __param; __param.sched_priority=priority; int __result=pthread_setschedparam(thread, SCHED_RR, &__param); if(__result!=0){LOGE("can't set thread priority: %s", strerror(__result));}}; +#define get_thread_max_priority() sched_get_priority_max(SCHED_RR) +#define get_thread_min_priority() sched_get_priority_min(SCHED_RR) +#define init_mutex(mutex) pthread_mutex_init(&mutex, NULL) +#define free_mutex(mutex) pthread_mutex_destroy(&mutex) +#define lock_mutex(mutex) pthread_mutex_lock(&mutex) +#define unlock_mutex(mutex) pthread_mutex_unlock(&mutex) +#define init_lock(lock) pthread_cond_init(&lock, NULL) +#define free_lock(lock) pthread_cond_destroy(&lock) +#define wait_lock(lock, mutex) pthread_cond_wait(&lock, &mutex) +#define notify_lock(lock) pthread_cond_broadcast(&lock) + +#ifdef __APPLE__ +#include +namespace tgvoip{ +class Semaphore{ +public: + Semaphore(unsigned int maxCount, unsigned int initValue){ + sem = dispatch_semaphore_create(initValue); + } + + ~Semaphore(){ +#if ! 
__has_feature(objc_arc) + dispatch_release(sem); +#endif + } + + void Acquire(){ + dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); + } + + void Release(){ + dispatch_semaphore_signal(sem); + } + + void Acquire(int count){ + for(int i=0;i +#include +typedef HANDLE tgvoip_thread_t; +typedef CRITICAL_SECTION tgvoip_mutex_t; +typedef HANDLE tgvoip_lock_t; // uncomment for XP compatibility +//typedef CONDITION_VARIABLE tgvoip_lock_t; + +#define start_thread(ref, entry, arg) (ref=CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)entry, arg, 0, NULL)) +#if !defined(WINAPI_FAMILY) || WINAPI_FAMILY!=WINAPI_FAMILY_PHONE_APP +#define join_thread(thread) {WaitForSingleObject(thread, INFINITE); CloseHandle(thread);} +#else +#define join_thread(thread) {WaitForSingleObjectEx(thread, INFINITE, false); CloseHandle(thread);} +#endif +#define set_thread_name(thread, name) // threads in Windows don't have names +#define set_thread_priority(thread, priority) SetThreadPriority(thread, priority) +#define get_thread_max_priority() THREAD_PRIORITY_HIGHEST +#define get_thread_min_priority() THREAD_PRIORITY_LOWEST +#if !defined(WINAPI_FAMILY) || WINAPI_FAMILY!=WINAPI_FAMILY_PHONE_APP +#define init_mutex(mutex) InitializeCriticalSection(&mutex) +#else +#define init_mutex(mutex) InitializeCriticalSectionEx(&mutex, 0, 0) +#endif +#define free_mutex(mutex) DeleteCriticalSection(&mutex) +#define lock_mutex(mutex) EnterCriticalSection(&mutex) +#define unlock_mutex(mutex) LeaveCriticalSection(&mutex) +#define init_lock(lock) (lock=CreateEvent(NULL, false, false, NULL)) +#define free_lock(lock) CloseHandle(lock) +#define wait_lock(lock, mutex) {LeaveCriticalSection(&mutex); WaitForSingleObject(lock, INFINITE); EnterCriticalSection(&mutex);} +#define notify_lock(lock) PulseEvent(lock) +//#define init_lock(lock) InitializeConditionVariable(&lock) +//#define free_lock(lock) // ? 
+//#define wait_lock(lock, mutex) SleepConditionVariableCS(&lock, &mutex, INFINITE) +//#define notify_lock(lock) WakeAllConditionVariable(&lock) + +namespace tgvoip{ +class Semaphore{ +public: + Semaphore(unsigned int maxCount, unsigned int initValue){ +#if !defined(WINAPI_FAMILY) || WINAPI_FAMILY!=WINAPI_FAMILY_PHONE_APP + h=CreateSemaphore(NULL, initValue, maxCount, NULL); +#else + h=CreateSemaphoreEx(NULL, initValue, maxCount, NULL, 0, SEMAPHORE_ALL_ACCESS); + assert(h); +#endif + } + + ~Semaphore(){ + CloseHandle(h); + } + + void Acquire(){ +#if !defined(WINAPI_FAMILY) || WINAPI_FAMILY!=WINAPI_FAMILY_PHONE_APP + WaitForSingleObject(h, INFINITE); +#else + WaitForSingleObjectEx(h, INFINITE, false); +#endif + } + + void Release(){ + ReleaseSemaphore(h, 1, NULL); + } + + void Acquire(int count){ + for(int i=0;i. It contains a T pointer (to an array it doesn't +// own) and a count, and supports the basic things you'd expect, such as +// indexing and iteration. It allows us to write our function like this: +// +// bool Contains17(rtc::ArrayView arr) { +// for (auto e : arr) { +// if (e == 17) +// return true; +// } +// return false; +// } +// +// And even better, because a bunch of things will implicitly convert to +// ArrayView, we can call it like this: +// +// Contains17(arr); // C array +// Contains17(arr); // std::vector +// Contains17(rtc::ArrayView(arr, size)); // pointer + size +// Contains17(nullptr); // nullptr -> empty ArrayView +// ... +// +// One important point is that ArrayView and ArrayView are +// different types, which allow and don't allow mutation of the array elements, +// respectively. The implicit conversions work just like you'd hope, so that +// e.g. vector will convert to either ArrayView or ArrayView, but const vector will convert only to ArrayView. +// (ArrayView itself can be the source type in such conversions, so +// ArrayView will convert to ArrayView.) 
+// +// Note: ArrayView is tiny (just a pointer and a count) and trivially copyable, +// so it's probably cheaper to pass it by value than by const reference. +template +class ArrayView final { + public: + using value_type = T; + using const_iterator = const T*; + + // Construct an empty ArrayView. + ArrayView() : ArrayView(static_cast(nullptr), 0) {} + ArrayView(std::nullptr_t) : ArrayView() {} + + // Construct an ArrayView for a (pointer,size) pair. + template + ArrayView(U* data, size_t size) + : data_(size == 0 ? nullptr : data), size_(size) { + CheckInvariant(); + } + + // Construct an ArrayView for an array. + template + ArrayView(U (&array)[N]) : ArrayView(&array[0], N) {} + + // Construct an ArrayView for any type U that has a size() method whose + // return value converts implicitly to size_t, and a data() method whose + // return value converts implicitly to T*. In particular, this means we allow + // conversion from ArrayView to ArrayView, but not the other way + // around. Other allowed conversions include std::vector to ArrayView + // or ArrayView, const std::vector to ArrayView, and + // rtc::Buffer to ArrayView (with the same const behavior as + // std::vector). + template < + typename U, +#if defined(_MSC_VER) && _MCS_VER<=1800 + typename std::enable_if::type* = nullptr> +#else + typename std::enable_if::value>::type* = nullptr> +#endif + ArrayView(U& u) : ArrayView(u.data(), u.size()) {} + + // Indexing, size, and iteration. These allow mutation even if the ArrayView + // is const, because the ArrayView doesn't own the array. (To prevent + // mutation, use ArrayView.) + size_t size() const { return size_; } + bool empty() const { return size_ == 0; } + T* data() const { return data_; } + T& operator[](size_t idx) const { + RTC_DCHECK_LT(idx, size_); + RTC_DCHECK(data_); // Follows from size_ > idx and the class invariant. 
+ return data_[idx]; + } + T* begin() const { return data_; } + T* end() const { return data_ + size_; } + const T* cbegin() const { return data_; } + const T* cend() const { return data_ + size_; } + + ArrayView subview(size_t offset, size_t size) const { + if (offset >= size_) + return ArrayView(); + return ArrayView(data_ + offset, std::min(size, size_ - offset)); + } + ArrayView subview(size_t offset) const { return subview(offset, size_); } + + // Comparing two ArrayViews compares their (pointer,size) pairs; it does + // *not* dereference the pointers. + friend bool operator==(const ArrayView& a, const ArrayView& b) { + return a.data_ == b.data_ && a.size_ == b.size_; + } + friend bool operator!=(const ArrayView& a, const ArrayView& b) { + return !(a == b); + } + + private: + // Invariant: !data_ iff size_ == 0. + void CheckInvariant() const { RTC_DCHECK_EQ(!data_, size_ == 0); } + T* data_; + size_t size_; +}; + +template +inline ArrayView MakeArrayView(T* data, size_t size) { + return ArrayView(data, size); +} + +} // namespace rtc + +#endif // WEBRTC_BASE_ARRAY_VIEW_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/atomicops.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/atomicops.h new file mode 100644 index 000000000..a286bf01c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/atomicops.h @@ -0,0 +1,87 @@ +/* + * Copyright 2011 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_BASE_ATOMICOPS_H_ +#define WEBRTC_BASE_ATOMICOPS_H_ + +#if defined(WEBRTC_WIN) +// Include winsock2.h before including to maintain consistency with +// win32.h. 
We can't include win32.h directly here since it pulls in +// headers such as basictypes.h which causes problems in Chromium where webrtc +// exists as two separate projects, webrtc and libjingle. +#include +#include +#endif // defined(WEBRTC_WIN) + +namespace rtc { +class AtomicOps { + public: +#if defined(WEBRTC_WIN) + // Assumes sizeof(int) == sizeof(LONG), which it is on Win32 and Win64. + static int Increment(volatile int* i) { + return ::InterlockedIncrement(reinterpret_cast(i)); + } + static int Decrement(volatile int* i) { + return ::InterlockedDecrement(reinterpret_cast(i)); + } + static int AcquireLoad(volatile const int* i) { + return *i; + } + static void ReleaseStore(volatile int* i, int value) { + *i = value; + } + static int CompareAndSwap(volatile int* i, int old_value, int new_value) { + return ::InterlockedCompareExchange(reinterpret_cast(i), + new_value, + old_value); + } + // Pointer variants. + template + static T* AcquireLoadPtr(T* volatile* ptr) { + return *ptr; + } + template + static T* CompareAndSwapPtr(T* volatile* ptr, T* old_value, T* new_value) { + return static_cast(::InterlockedCompareExchangePointer( + reinterpret_cast(ptr), new_value, old_value)); + } +#else + static int Increment(volatile int* i) { + return __sync_add_and_fetch(i, 1); + } + static int Decrement(volatile int* i) { + return __sync_sub_and_fetch(i, 1); + } + static int AcquireLoad(volatile const int* i) { + return __atomic_load_n(i, __ATOMIC_ACQUIRE); + } + static void ReleaseStore(volatile int* i, int value) { + __atomic_store_n(i, value, __ATOMIC_RELEASE); + } + static int CompareAndSwap(volatile int* i, int old_value, int new_value) { + return __sync_val_compare_and_swap(i, old_value, new_value); + } + // Pointer variants. 
+ template + static T* AcquireLoadPtr(T* volatile* ptr) { + return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); + } + template + static T* CompareAndSwapPtr(T* volatile* ptr, T* old_value, T* new_value) { + return __sync_val_compare_and_swap(ptr, old_value, new_value); + } +#endif +}; + + + +} + +#endif // WEBRTC_BASE_ATOMICOPS_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/basictypes.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/basictypes.h new file mode 100644 index 000000000..87dcdc6d1 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/basictypes.h @@ -0,0 +1,70 @@ +/* + * Copyright 2004 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_BASE_BASICTYPES_H_ +#define WEBRTC_BASE_BASICTYPES_H_ + +#include // for NULL, size_t +#include // for uintptr_t and (u)int_t types. + +// Detect compiler is for x86 or x64. +#if defined(__x86_64__) || defined(_M_X64) || \ + defined(__i386__) || defined(_M_IX86) +#define CPU_X86 1 +#endif + +// Detect compiler is for arm. +#if defined(__arm__) || defined(_M_ARM) +#define CPU_ARM 1 +#endif + +#if defined(CPU_X86) && defined(CPU_ARM) +#error CPU_X86 and CPU_ARM both defined. 
+#endif + +#if !defined(RTC_ARCH_CPU_BIG_ENDIAN) && !defined(RTC_ARCH_CPU_LITTLE_ENDIAN) +// x86, arm or GCC provided __BYTE_ORDER__ macros +#if defined(CPU_X86) || defined(CPU_ARM) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#define RTC_ARCH_CPU_LITTLE_ENDIAN +#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define RTC_ARCH_CPU_BIG_ENDIAN +#else +#error RTC_ARCH_CPU_BIG_ENDIAN or RTC_ARCH_CPU_LITTLE_ENDIAN should be defined. +#endif +#endif + +#if defined(RTC_ARCH_CPU_BIG_ENDIAN) && defined(RTC_ARCH_CPU_LITTLE_ENDIAN) +#error RTC_ARCH_CPU_BIG_ENDIAN and RTC_ARCH_CPU_LITTLE_ENDIAN both defined. +#endif + +#if defined(WEBRTC_WIN) +typedef int socklen_t; +#endif + +// The following only works for C++ +#ifdef __cplusplus + +#ifndef ALIGNP +#define ALIGNP(p, t) \ + (reinterpret_cast(((reinterpret_cast(p) + \ + ((t) - 1)) & ~((t) - 1)))) +#endif + +#define RTC_IS_ALIGNED(p, a) (!((uintptr_t)(p) & ((a) - 1))) + +// Use these to declare and define a static local variable that gets leaked so +// that its destructors are not called at exit. +#define RTC_DEFINE_STATIC_LOCAL(type, name, arguments) \ + static type& name = *new type arguments + +#endif // __cplusplus + +#endif // WEBRTC_BASE_BASICTYPES_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/checks.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/checks.cc new file mode 100644 index 000000000..8d970b7cc --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/checks.cc @@ -0,0 +1,140 @@ +/* + * Copyright 2006 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +// Most of this was borrowed (with minor modifications) from V8's and Chromium's +// src/base/logging.cc. + +// Use the C++ version to provide __GLIBCXX__. +#include +#include +#include + +#if defined(__GLIBCXX__) && !defined(__UCLIBC__) +#include +//#include +#endif + +#if defined(WEBRTC_ANDROID) +#define RTC_LOG_TAG "rtc" +#include // NOLINT +#endif + +#if defined(WEBRTC_WIN) +#include +#endif + +#include "webrtc/base/checks.h" +//#include "webrtc/base/logging.h" + +#if defined(_MSC_VER) +// Warning C4722: destructor never returns, potential memory leak. +// FatalMessage's dtor very intentionally aborts. +#pragma warning(disable:4722) +#endif + +namespace rtc { + +void VPrintError(const char* format, va_list args) { +#if defined(WEBRTC_ANDROID) + __android_log_vprint(ANDROID_LOG_ERROR, RTC_LOG_TAG, format, args); +#else + vfprintf(stderr, format, args); +#endif +} + +void PrintError(const char* format, ...) { + va_list args; + va_start(args, format); + VPrintError(format, args); + va_end(args); +} + +// TODO(ajm): This works on Mac (although the parsing fails) but I don't seem +// to get usable symbols on Linux. This is copied from V8. Chromium has a more +// advanced stace trace system; also more difficult to copy. +void DumpBacktrace() { +/*#if defined(__GLIBCXX__) && !defined(__UCLIBC__) + void* trace[100]; + int size = backtrace(trace, sizeof(trace) / sizeof(*trace)); + char** symbols = backtrace_symbols(trace, size); + PrintError("\n==== C stack trace ===============================\n\n"); + if (size == 0) { + PrintError("(empty)\n"); + } else if (symbols == NULL) { + PrintError("(no symbols)\n"); + } else { + for (int i = 1; i < size; ++i) { + char mangled[201]; + if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT + PrintError("%2d: ", i); + int status; + size_t length; + char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status); + PrintError("%s\n", demangled != NULL ? 
demangled : mangled); + free(demangled); + } else { + // If parsing failed, at least print the unparsed symbol. + PrintError("%s\n", symbols[i]); + } + } + } + free(symbols); +#endif*/ +} + +FatalMessage::FatalMessage(const char* file, int line) { + Init(file, line); +} + +FatalMessage::FatalMessage(const char* file, int line, std::string* result) { + Init(file, line); + stream_ << "Check failed: " << *result << std::endl << "# "; + delete result; +} + +NO_RETURN FatalMessage::~FatalMessage() { + fflush(stdout); + fflush(stderr); + stream_ << std::endl << "#" << std::endl; + PrintError(stream_.str().c_str()); + DumpBacktrace(); + fflush(stderr); + abort(); +} + +void FatalMessage::Init(const char* file, int line) { + stream_ << std::endl << std::endl + << "#" << std::endl + << "# Fatal error in " << file << ", line " << line << std::endl + // << "# last system error: " << LAST_SYSTEM_ERROR << std::endl + << "# "; +} + +// MSVC doesn't like complex extern templates and DLLs. +#if !defined(COMPILER_MSVC) +// Explicit instantiations for commonly used comparisons. +template std::string* MakeCheckOpString( + const int&, const int&, const char* names); +template std::string* MakeCheckOpString( + const unsigned long&, const unsigned long&, const char* names); +template std::string* MakeCheckOpString( + const unsigned long&, const unsigned int&, const char* names); +template std::string* MakeCheckOpString( + const unsigned int&, const unsigned long&, const char* names); +template std::string* MakeCheckOpString( + const std::string&, const std::string&, const char* name); +#endif + +} // namespace rtc + +// Function to call from the C version of the RTC_CHECK and RTC_DCHECK macros. 
+NO_RETURN void rtc_FatalMessage(const char* file, int line, const char* msg) { + rtc::FatalMessage(file, line).stream() << msg; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/checks.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/checks.h new file mode 100644 index 000000000..653ed322e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/checks.h @@ -0,0 +1,290 @@ +/* + * Copyright 2006 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_BASE_CHECKS_H_ +#define WEBRTC_BASE_CHECKS_H_ + +#include "webrtc/typedefs.h" + +// If you for some reson need to know if DCHECKs are on, test the value of +// RTC_DCHECK_IS_ON. (Test its value, not if it's defined; it'll always be +// defined, to either a true or a false value.) +#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON) +#define RTC_DCHECK_IS_ON 1 +#else +#define RTC_DCHECK_IS_ON 0 +#endif + +#ifdef __cplusplus +extern "C" { +#endif +NO_RETURN void rtc_FatalMessage(const char* file, int line, const char* msg); +#ifdef __cplusplus +} // extern "C" +#endif + +#ifdef __cplusplus +// C++ version. + +#include +#include + +#include "webrtc/base/safe_compare.h" + +// The macros here print a message to stderr and abort under various +// conditions. All will accept additional stream messages. For example: +// RTC_DCHECK_EQ(foo, bar) << "I'm printed when foo != bar."; +// +// - RTC_CHECK(x) is an assertion that x is always true, and that if it isn't, +// it's better to terminate the process than to continue. 
During development, +// the reason that it's better to terminate might simply be that the error +// handling code isn't in place yet; in production, the reason might be that +// the author of the code truly believes that x will always be true, but that +// she recognizes that if she is wrong, abrupt and unpleasant process +// termination is still better than carrying on with the assumption violated. +// +// RTC_CHECK always evaluates its argument, so it's OK for x to have side +// effects. +// +// - RTC_DCHECK(x) is the same as RTC_CHECK(x)---an assertion that x is always +// true---except that x will only be evaluated in debug builds; in production +// builds, x is simply assumed to be true. This is useful if evaluating x is +// expensive and the expected cost of failing to detect the violated +// assumption is acceptable. You should not handle cases where a production +// build fails to spot a violated condition, even those that would result in +// crashes. If the code needs to cope with the error, make it cope, but don't +// call RTC_DCHECK; if the condition really can't occur, but you'd sleep +// better at night knowing that the process will suicide instead of carrying +// on in case you were wrong, use RTC_CHECK instead of RTC_DCHECK. +// +// RTC_DCHECK only evaluates its argument in debug builds, so if x has visible +// side effects, you need to write e.g. +// bool w = x; RTC_DCHECK(w); +// +// - RTC_CHECK_EQ, _NE, _GT, ..., and RTC_DCHECK_EQ, _NE, _GT, ... are +// specialized variants of RTC_CHECK and RTC_DCHECK that print prettier +// messages if the condition doesn't hold. Prefer them to raw RTC_CHECK and +// RTC_DCHECK. +// +// - FATAL() aborts unconditionally. +// +// TODO(ajm): Ideally, checks.h would be combined with logging.h, but +// consolidation with system_wrappers/logging.h should happen first. + +namespace rtc { + +// Helper macro which avoids evaluating the arguments to a stream if +// the condition doesn't hold. 
+#define RTC_LAZY_STREAM(stream, condition) \ + !(condition) ? static_cast(0) : rtc::FatalMessageVoidify() & (stream) + +// The actual stream used isn't important. We reference |ignored| in the code +// but don't evaluate it; this is to avoid "unused variable" warnings (we do so +// in a particularly convoluted way with an extra ?: because that appears to be +// the simplest construct that keeps Visual Studio from complaining about +// condition being unused). +#define RTC_EAT_STREAM_PARAMETERS(ignored) \ + (true ? true : ((void)(ignored), true)) \ + ? static_cast(0) \ + : rtc::FatalMessageVoidify() & rtc::FatalMessage("", 0).stream() + +// Call RTC_EAT_STREAM_PARAMETERS with an argument that fails to compile if +// values of the same types as |a| and |b| can't be compared with the given +// operation, and that would evaluate |a| and |b| if evaluated. +#define RTC_EAT_STREAM_PARAMETERS_OP(op, a, b) \ + RTC_EAT_STREAM_PARAMETERS(((void)rtc::safe_cmp::op(a, b))) + +// RTC_CHECK dies with a fatal error if condition is not true. It is *not* +// controlled by NDEBUG or anything else, so the check will be executed +// regardless of compilation mode. +// +// We make sure RTC_CHECK et al. always evaluates their arguments, as +// doing RTC_CHECK(FunctionWithSideEffect()) is a common idiom. +#define RTC_CHECK(condition) \ + RTC_LAZY_STREAM(rtc::FatalMessage(__FILE__, __LINE__).stream(), \ + !(condition)) \ + << "Check failed: " #condition << std::endl << "# " + +// Helper macro for binary operators. +// Don't use this macro directly in your code, use RTC_CHECK_EQ et al below. +// +// TODO(akalin): Rewrite this so that constructs like if (...) +// RTC_CHECK_EQ(...) else { ... } work properly. +#define RTC_CHECK_OP(name, op, val1, val2) \ + if (std::string* _result = \ + rtc::Check##name##Impl((val1), (val2), #val1 " " #op " " #val2)) \ + rtc::FatalMessage(__FILE__, __LINE__, _result).stream() + +// Build the error message string. 
This is separate from the "Impl" +// function template because it is not performance critical and so can +// be out of line, while the "Impl" code should be inline. Caller +// takes ownership of the returned string. +template +std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) { + std::ostringstream ss; + ss << names << " (" << v1 << " vs. " << v2 << ")"; + std::string* msg = new std::string(ss.str()); + return msg; +} + +// MSVC doesn't like complex extern templates and DLLs. +#if !defined(COMPILER_MSVC) +// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated +// in logging.cc. +extern template std::string* MakeCheckOpString( + const int&, const int&, const char* names); +extern template +std::string* MakeCheckOpString( + const unsigned long&, const unsigned long&, const char* names); +extern template +std::string* MakeCheckOpString( + const unsigned long&, const unsigned int&, const char* names); +extern template +std::string* MakeCheckOpString( + const unsigned int&, const unsigned long&, const char* names); +extern template +std::string* MakeCheckOpString( + const std::string&, const std::string&, const char* name); +#endif + +// Helper functions for RTC_CHECK_OP macro. +// The (int, int) specialization works around the issue that the compiler +// will not instantiate the template version of the function on values of +// unnamed enum type - see comment below. 
+#define DEFINE_RTC_CHECK_OP_IMPL(name) \ + template \ + inline std::string* Check##name##Impl(const t1& v1, const t2& v2, \ + const char* names) { \ + if (rtc::safe_cmp::name(v1, v2)) \ + return NULL; \ + else \ + return rtc::MakeCheckOpString(v1, v2, names); \ + } \ + inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \ + if (rtc::safe_cmp::name(v1, v2)) \ + return NULL; \ + else \ + return rtc::MakeCheckOpString(v1, v2, names); \ + } +DEFINE_RTC_CHECK_OP_IMPL(Eq) +DEFINE_RTC_CHECK_OP_IMPL(Ne) +DEFINE_RTC_CHECK_OP_IMPL(Le) +DEFINE_RTC_CHECK_OP_IMPL(Lt) +DEFINE_RTC_CHECK_OP_IMPL(Ge) +DEFINE_RTC_CHECK_OP_IMPL(Gt) +#undef DEFINE_RTC_CHECK_OP_IMPL + +#define RTC_CHECK_EQ(val1, val2) RTC_CHECK_OP(Eq, ==, val1, val2) +#define RTC_CHECK_NE(val1, val2) RTC_CHECK_OP(Ne, !=, val1, val2) +#define RTC_CHECK_LE(val1, val2) RTC_CHECK_OP(Le, <=, val1, val2) +#define RTC_CHECK_LT(val1, val2) RTC_CHECK_OP(Lt, <, val1, val2) +#define RTC_CHECK_GE(val1, val2) RTC_CHECK_OP(Ge, >=, val1, val2) +#define RTC_CHECK_GT(val1, val2) RTC_CHECK_OP(Gt, >, val1, val2) + +// The RTC_DCHECK macro is equivalent to RTC_CHECK except that it only generates +// code in debug builds. It does reference the condition parameter in all cases, +// though, so callers won't risk getting warnings about unused variables. 
+#if RTC_DCHECK_IS_ON +#define RTC_DCHECK(condition) RTC_CHECK(condition) +#define RTC_DCHECK_EQ(v1, v2) RTC_CHECK_EQ(v1, v2) +#define RTC_DCHECK_NE(v1, v2) RTC_CHECK_NE(v1, v2) +#define RTC_DCHECK_LE(v1, v2) RTC_CHECK_LE(v1, v2) +#define RTC_DCHECK_LT(v1, v2) RTC_CHECK_LT(v1, v2) +#define RTC_DCHECK_GE(v1, v2) RTC_CHECK_GE(v1, v2) +#define RTC_DCHECK_GT(v1, v2) RTC_CHECK_GT(v1, v2) +#else +#define RTC_DCHECK(condition) RTC_EAT_STREAM_PARAMETERS(condition) +#define RTC_DCHECK_EQ(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Eq, v1, v2) +#define RTC_DCHECK_NE(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Ne, v1, v2) +#define RTC_DCHECK_LE(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Le, v1, v2) +#define RTC_DCHECK_LT(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Lt, v1, v2) +#define RTC_DCHECK_GE(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Ge, v1, v2) +#define RTC_DCHECK_GT(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Gt, v1, v2) +#endif + +// This is identical to LogMessageVoidify but in name. +class FatalMessageVoidify { + public: + FatalMessageVoidify() { } + // This has to be an operator with a precedence lower than << but + // higher than ?: + void operator&(std::ostream&) { } +}; + +#define RTC_UNREACHABLE_CODE_HIT false +#define RTC_NOTREACHED() RTC_DCHECK(RTC_UNREACHABLE_CODE_HIT) + +#define FATAL() rtc::FatalMessage(__FILE__, __LINE__).stream() +// TODO(ajm): Consider adding RTC_NOTIMPLEMENTED macro when +// base/logging.h and system_wrappers/logging.h are consolidated such that we +// can match the Chromium behavior. + +// Like a stripped-down LogMessage from logging.h, except that it aborts. +class FatalMessage { + public: + FatalMessage(const char* file, int line); + // Used for RTC_CHECK_EQ(), etc. Takes ownership of the given string. 
+ FatalMessage(const char* file, int line, std::string* result); + NO_RETURN ~FatalMessage(); + + std::ostream& stream() { return stream_; } + + private: + void Init(const char* file, int line); + + std::ostringstream stream_; +}; + +// Performs the integer division a/b and returns the result. CHECKs that the +// remainder is zero. +template +inline T CheckedDivExact(T a, T b) { + RTC_CHECK_EQ(a % b, static_cast(0)) << a << " is not evenly divisible by " + << b; + return a / b; +} + +} // namespace rtc + +#else // __cplusplus not defined +// C version. Lacks many features compared to the C++ version, but usage +// guidelines are the same. + +#define RTC_CHECK(condition) \ + do { \ + if (!(condition)) { \ + rtc_FatalMessage(__FILE__, __LINE__, "CHECK failed: " #condition); \ + } \ + } while (0) + +#define RTC_CHECK_EQ(a, b) RTC_CHECK((a) == (b)) +#define RTC_CHECK_NE(a, b) RTC_CHECK((a) != (b)) +#define RTC_CHECK_LE(a, b) RTC_CHECK((a) <= (b)) +#define RTC_CHECK_LT(a, b) RTC_CHECK((a) < (b)) +#define RTC_CHECK_GE(a, b) RTC_CHECK((a) >= (b)) +#define RTC_CHECK_GT(a, b) RTC_CHECK((a) > (b)) + +#define RTC_DCHECK(condition) \ + do { \ + if (RTC_DCHECK_IS_ON && !(condition)) { \ + rtc_FatalMessage(__FILE__, __LINE__, "DCHECK failed: " #condition); \ + } \ + } while (0) + +#define RTC_DCHECK_EQ(a, b) RTC_DCHECK((a) == (b)) +#define RTC_DCHECK_NE(a, b) RTC_DCHECK((a) != (b)) +#define RTC_DCHECK_LE(a, b) RTC_DCHECK((a) <= (b)) +#define RTC_DCHECK_LT(a, b) RTC_DCHECK((a) < (b)) +#define RTC_DCHECK_GE(a, b) RTC_DCHECK((a) >= (b)) +#define RTC_DCHECK_GT(a, b) RTC_DCHECK((a) > (b)) + +#endif // __cplusplus + +#endif // WEBRTC_BASE_CHECKS_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/constructormagic.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/constructormagic.h new file mode 100644 index 000000000..6ef782650 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/constructormagic.h @@ -0,0 +1,34 @@ +/* + * Copyright 2004 The 
WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_BASE_CONSTRUCTORMAGIC_H_ +#define WEBRTC_BASE_CONSTRUCTORMAGIC_H_ + +// Put this in the declarations for a class to be unassignable. +#define RTC_DISALLOW_ASSIGN(TypeName) \ + void operator=(const TypeName&) = delete + +// A macro to disallow the copy constructor and operator= functions. This should +// be used in the declarations for a class. +#define RTC_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + RTC_DISALLOW_ASSIGN(TypeName) + +// A macro to disallow all the implicit constructors, namely the default +// constructor, copy constructor and operator= functions. +// +// This should be used in the declarations for a class that wants to prevent +// anyone from instantiating it. This is especially useful for classes +// containing only static methods. +#define RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ + TypeName() = delete; \ + RTC_DISALLOW_COPY_AND_ASSIGN(TypeName) + +#endif // WEBRTC_BASE_CONSTRUCTORMAGIC_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_compare.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_compare.h new file mode 100644 index 000000000..09954247f --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_compare.h @@ -0,0 +1,184 @@ +/* + * Copyright 2016 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This file defines six functions: +// +// rtc::safe_cmp::Eq // == +// rtc::safe_cmp::Ne // != +// rtc::safe_cmp::Lt // < +// rtc::safe_cmp::Le // <= +// rtc::safe_cmp::Gt // > +// rtc::safe_cmp::Ge // >= +// +// They each accept two arguments of arbitrary types, and in almost all cases, +// they simply call the appropriate comparison operator. However, if both +// arguments are integers, they don't compare them using C++'s quirky rules, +// but instead adhere to the true mathematical definitions. It is as if the +// arguments were first converted to infinite-range signed integers, and then +// compared, although of course nothing expensive like that actually takes +// place. In practice, for signed/signed and unsigned/unsigned comparisons and +// some mixed-signed comparisons with a compile-time constant, the overhead is +// zero; in the remaining cases, it is just a few machine instructions (no +// branches). + +#ifndef WEBRTC_BASE_SAFE_COMPARE_H_ +#define WEBRTC_BASE_SAFE_COMPARE_H_ + +#if defined(_MSC_VER) && _MSC_VER<=1800 +#define constexpr const // Older MSVC used for WP app doesn't support this thing but we can ignore it +#endif + +#include +#include + +#include +#include + +namespace rtc { +namespace safe_cmp { + +namespace safe_cmp_impl { + +template +struct LargerIntImpl : std::false_type {}; +template <> +struct LargerIntImpl : std::true_type { + using type = int16_t; +}; +template <> +struct LargerIntImpl : std::true_type { + using type = int32_t; +}; +template <> +struct LargerIntImpl : std::true_type { + using type = int64_t; +}; + +// LargerInt::value is true iff there's a signed type that's larger +// than T1 (and no larger than the larger of T2 and int*, for performance +// reasons); and if there is such a type, LargerInt::type is an alias +// for it. 
+template +struct LargerInt + : LargerIntImpl {}; + +template +inline typename std::make_unsigned::type MakeUnsigned(T a) { + return static_cast::type>(a); +} + +// Overload for when both T1 and T2 have the same signedness. +template ::value == + std::is_signed::value>::type* = nullptr> +inline bool Cmp(T1 a, T2 b) { + return Op::Op(a, b); +} + +// Overload for signed - unsigned comparison that can be promoted to a bigger +// signed type. +template ::value && + std::is_unsigned::value && + LargerInt::value>::type* = nullptr> +inline bool Cmp(T1 a, T2 b) { + return Op::Op(a, static_cast::type>(b)); +} + +// Overload for unsigned - signed comparison that can be promoted to a bigger +// signed type. +template ::value && + std::is_signed::value && + LargerInt::value>::type* = nullptr> +inline bool Cmp(T1 a, T2 b) { + return Op::Op(static_cast::type>(a), b); +} + +// Overload for signed - unsigned comparison that can't be promoted to a bigger +// signed type. +template ::value && + std::is_unsigned::value && + !LargerInt::value>::type* = nullptr> +inline bool Cmp(T1 a, T2 b) { + return a < 0 ? Op::Op(-1, 0) : Op::Op(safe_cmp_impl::MakeUnsigned(a), b); +} + +// Overload for unsigned - signed comparison that can't be promoted to a bigger +// signed type. +template ::value && + std::is_signed::value && + !LargerInt::value>::type* = nullptr> +inline bool Cmp(T1 a, T2 b) { + return b < 0 ? 
Op::Op(0, -1) : Op::Op(a, safe_cmp_impl::MakeUnsigned(b)); +} + +#define RTC_SAFECMP_MAKE_OP(name, op) \ + struct name { \ + template \ + static constexpr bool Op(T1 a, T2 b) { \ + return a op b; \ + } \ + }; +RTC_SAFECMP_MAKE_OP(EqOp, ==) +RTC_SAFECMP_MAKE_OP(NeOp, !=) +RTC_SAFECMP_MAKE_OP(LtOp, <) +RTC_SAFECMP_MAKE_OP(LeOp, <=) +RTC_SAFECMP_MAKE_OP(GtOp, >) +RTC_SAFECMP_MAKE_OP(GeOp, >=) +#undef RTC_SAFECMP_MAKE_OP + +} // namespace safe_cmp_impl + +#define RTC_SAFECMP_MAKE_FUN(name) \ + template < \ + typename T1, typename T2, \ + typename std::enable_if< \ + std::is_integral::type>::value && \ + std::is_integral::type>::value>:: \ + type* = nullptr> \ + inline bool name(T1 a, T2 b) { \ + return safe_cmp_impl::Cmp(a, b); \ + } \ + template ::type>::value || \ + !std::is_integral::type>:: \ + value>::type* = nullptr> \ + inline bool name(T1&& a, T2&& b) { \ + return safe_cmp_impl::name##Op::Op(a, b); \ + } +RTC_SAFECMP_MAKE_FUN(Eq) +RTC_SAFECMP_MAKE_FUN(Ne) +RTC_SAFECMP_MAKE_FUN(Lt) +RTC_SAFECMP_MAKE_FUN(Le) +RTC_SAFECMP_MAKE_FUN(Gt) +RTC_SAFECMP_MAKE_FUN(Ge) +#undef RTC_SAFECMP_MAKE_FUN + +} // namespace safe_cmp +} // namespace rtc + +#endif // WEBRTC_BASE_SAFE_COMPARE_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_conversions.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_conversions.h new file mode 100644 index 000000000..51239bc65 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_conversions.h @@ -0,0 +1,70 @@ +/* + * Copyright 2014 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Borrowed from Chromium's src/base/numerics/safe_conversions.h. 
+ +#ifndef WEBRTC_BASE_SAFE_CONVERSIONS_H_ +#define WEBRTC_BASE_SAFE_CONVERSIONS_H_ + +#include + +#include "webrtc/base/checks.h" +#include "webrtc/base/safe_conversions_impl.h" + +namespace rtc { + +// Convenience function that returns true if the supplied value is in range +// for the destination type. +template +inline bool IsValueInRangeForNumericType(Src value) { + return internal::RangeCheck(value) == internal::TYPE_VALID; +} + +// checked_cast<> is analogous to static_cast<> for numeric types, +// except that it CHECKs that the specified numeric conversion will not +// overflow or underflow. NaN source will always trigger a CHECK. +template +inline Dst checked_cast(Src value) { + RTC_CHECK(IsValueInRangeForNumericType(value)); + return static_cast(value); +} + +// saturated_cast<> is analogous to static_cast<> for numeric types, except +// that the specified numeric conversion will saturate rather than overflow or +// underflow. NaN assignment to an integral will trigger a RTC_CHECK condition. +template +inline Dst saturated_cast(Src value) { + // Optimization for floating point values, which already saturate. + if (std::numeric_limits::is_iec559) + return static_cast(value); + + switch (internal::RangeCheck(value)) { + case internal::TYPE_VALID: + return static_cast(value); + + case internal::TYPE_UNDERFLOW: + return std::numeric_limits::min(); + + case internal::TYPE_OVERFLOW: + return std::numeric_limits::max(); + + // Should fail only on attempting to assign NaN to a saturated integer. 
+ case internal::TYPE_INVALID: + FATAL(); + return std::numeric_limits::max(); + } + + FATAL(); + return static_cast(value); +} + +} // namespace rtc + +#endif // WEBRTC_BASE_SAFE_CONVERSIONS_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_conversions_impl.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_conversions_impl.h new file mode 100644 index 000000000..52e52eff8 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/safe_conversions_impl.h @@ -0,0 +1,188 @@ +/* + * Copyright 2014 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Borrowed from Chromium's src/base/numerics/safe_conversions_impl.h. + +#ifndef WEBRTC_BASE_SAFE_CONVERSIONS_IMPL_H_ +#define WEBRTC_BASE_SAFE_CONVERSIONS_IMPL_H_ + +#include + +namespace rtc { +namespace internal { + +enum DstSign { + DST_UNSIGNED, + DST_SIGNED +}; + +enum SrcSign { + SRC_UNSIGNED, + SRC_SIGNED +}; + +enum DstRange { + OVERLAPS_RANGE, + CONTAINS_RANGE +}; + +// Helper templates to statically determine if our destination type can contain +// all values represented by the source type. + +template ::is_signed ? + DST_SIGNED : DST_UNSIGNED, + SrcSign IsSrcSigned = std::numeric_limits::is_signed ? + SRC_SIGNED : SRC_UNSIGNED> +struct StaticRangeCheck {}; + +template +struct StaticRangeCheck { + typedef std::numeric_limits DstLimits; + typedef std::numeric_limits SrcLimits; + // Compare based on max_exponent, which we must compute for integrals. + static const size_t kDstMaxExponent = DstLimits::is_iec559 ? + DstLimits::max_exponent : + (sizeof(Dst) * 8 - 1); + static const size_t kSrcMaxExponent = SrcLimits::is_iec559 ? 
+ SrcLimits::max_exponent : + (sizeof(Src) * 8 - 1); + static const DstRange value = kDstMaxExponent >= kSrcMaxExponent ? + CONTAINS_RANGE : OVERLAPS_RANGE; +}; + +template +struct StaticRangeCheck { + static const DstRange value = sizeof(Dst) >= sizeof(Src) ? + CONTAINS_RANGE : OVERLAPS_RANGE; +}; + +template +struct StaticRangeCheck { + typedef std::numeric_limits DstLimits; + typedef std::numeric_limits SrcLimits; + // Compare based on max_exponent, which we must compute for integrals. + static const size_t kDstMaxExponent = DstLimits::is_iec559 ? + DstLimits::max_exponent : + (sizeof(Dst) * 8 - 1); + static const size_t kSrcMaxExponent = sizeof(Src) * 8; + static const DstRange value = kDstMaxExponent >= kSrcMaxExponent ? + CONTAINS_RANGE : OVERLAPS_RANGE; +}; + +template +struct StaticRangeCheck { + static const DstRange value = OVERLAPS_RANGE; +}; + + +enum RangeCheckResult { + TYPE_VALID = 0, // Value can be represented by the destination type. + TYPE_UNDERFLOW = 1, // Value would overflow. + TYPE_OVERFLOW = 2, // Value would underflow. + TYPE_INVALID = 3 // Source value is invalid (i.e. NaN). +}; + +// This macro creates a RangeCheckResult from an upper and lower bound +// check by taking advantage of the fact that only NaN can be out of range in +// both directions at once. +#define BASE_NUMERIC_RANGE_CHECK_RESULT(is_in_upper_bound, is_in_lower_bound) \ + RangeCheckResult(((is_in_upper_bound) ? 0 : TYPE_OVERFLOW) | \ + ((is_in_lower_bound) ? 0 : TYPE_UNDERFLOW)) + +template ::is_signed ? + DST_SIGNED : DST_UNSIGNED, + SrcSign IsSrcSigned = std::numeric_limits::is_signed ? + SRC_SIGNED : SRC_UNSIGNED, + DstRange IsSrcRangeContained = StaticRangeCheck::value> +struct RangeCheckImpl {}; + +// The following templates are for ranges that must be verified at runtime. We +// split it into checks based on signedness to avoid confusing casts and +// compiler warnings on signed an unsigned comparisons. + +// Dst range always contains the result: nothing to check. 
+template +struct RangeCheckImpl { + static RangeCheckResult Check(Src value) { + return TYPE_VALID; + } +}; + +// Signed to signed narrowing. +template +struct RangeCheckImpl { + static RangeCheckResult Check(Src value) { + typedef std::numeric_limits DstLimits; + return DstLimits::is_iec559 ? + BASE_NUMERIC_RANGE_CHECK_RESULT( + value <= static_cast(DstLimits::max()), + value >= static_cast(DstLimits::max() * -1)) : + BASE_NUMERIC_RANGE_CHECK_RESULT( + value <= static_cast(DstLimits::max()), + value >= static_cast(DstLimits::min())); + } +}; + +// Unsigned to unsigned narrowing. +template +struct RangeCheckImpl { + static RangeCheckResult Check(Src value) { + typedef std::numeric_limits DstLimits; + return BASE_NUMERIC_RANGE_CHECK_RESULT( + value <= static_cast(DstLimits::max()), true); + } +}; + +// Unsigned to signed. +template +struct RangeCheckImpl { + static RangeCheckResult Check(Src value) { + typedef std::numeric_limits DstLimits; + return sizeof(Dst) > sizeof(Src) ? TYPE_VALID : + BASE_NUMERIC_RANGE_CHECK_RESULT( + value <= static_cast(DstLimits::max()), true); + } +}; + +// Signed to unsigned. +template +struct RangeCheckImpl { + static RangeCheckResult Check(Src value) { + typedef std::numeric_limits DstLimits; + typedef std::numeric_limits SrcLimits; + // Compare based on max_exponent, which we must compute for integrals. + static const size_t kDstMaxExponent = sizeof(Dst) * 8; + static const size_t kSrcMaxExponent = SrcLimits::is_iec559 ? + SrcLimits::max_exponent : + (sizeof(Src) * 8 - 1); + return (kDstMaxExponent >= kSrcMaxExponent) ? 
+ BASE_NUMERIC_RANGE_CHECK_RESULT(true, value >= static_cast(0)) : + BASE_NUMERIC_RANGE_CHECK_RESULT( + value <= static_cast(DstLimits::max()), + value >= static_cast(0)); + } +}; + +template +inline RangeCheckResult RangeCheck(Src value) { + static_assert(std::numeric_limits::is_specialized, + "argument must be numeric"); + static_assert(std::numeric_limits::is_specialized, + "result must be numeric"); + return RangeCheckImpl::Check(value); +} + +} // namespace internal +} // namespace rtc + +#endif // WEBRTC_BASE_SAFE_CONVERSIONS_IMPL_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/sanitizer.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/sanitizer.h new file mode 100644 index 000000000..e27a69216 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/sanitizer.h @@ -0,0 +1,116 @@ +/* + * Copyright 2016 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_BASE_SANITIZER_H_ +#define WEBRTC_BASE_SANITIZER_H_ + +#if defined(__has_feature) +#if __has_feature(address_sanitizer) +#define RTC_HAS_ASAN 1 +#endif +#if __has_feature(memory_sanitizer) +#define RTC_HAS_MSAN 1 +#endif +#endif +#ifndef RTC_HAS_ASAN +#define RTC_HAS_ASAN 0 +#endif +#ifndef RTC_HAS_MSAN +#define RTC_HAS_MSAN 0 +#endif + +#if RTC_HAS_ASAN +#include +#endif +#if RTC_HAS_MSAN +#include +#endif + +#ifdef __has_attribute +#if __has_attribute(no_sanitize) +#define RTC_NO_SANITIZE(what) __attribute__((no_sanitize(what))) +#endif +#endif +#ifndef RTC_NO_SANITIZE +#define RTC_NO_SANITIZE(what) +#endif + +// Ask ASan to mark the memory range [ptr, ptr + element_size * num_elements) +// as being unaddressable, so that reads and writes are not allowed. ASan may +// narrow the range to the nearest alignment boundaries. +static inline void rtc_AsanPoison(const volatile void* ptr, + size_t element_size, + size_t num_elements) { +#if RTC_HAS_ASAN + ASAN_POISON_MEMORY_REGION(ptr, element_size * num_elements); +#endif +} + +// Ask ASan to mark the memory range [ptr, ptr + element_size * num_elements) +// as being addressable, so that reads and writes are allowed. ASan may widen +// the range to the nearest alignment boundaries. +static inline void rtc_AsanUnpoison(const volatile void* ptr, + size_t element_size, + size_t num_elements) { +#if RTC_HAS_ASAN + ASAN_UNPOISON_MEMORY_REGION(ptr, element_size * num_elements); +#endif +} + +// Ask MSan to mark the memory range [ptr, ptr + element_size * num_elements) +// as being uninitialized. +static inline void rtc_MsanMarkUninitialized(const volatile void* ptr, + size_t element_size, + size_t num_elements) { +#if RTC_HAS_MSAN + __msan_poison(ptr, element_size * num_elements); +#endif +} + +// Force an MSan check (if any bits in the memory range [ptr, ptr + +// element_size * num_elements) are uninitialized the call will crash with an +// MSan report). 
+static inline void rtc_MsanCheckInitialized(const volatile void* ptr, + size_t element_size, + size_t num_elements) { +#if RTC_HAS_MSAN + __msan_check_mem_is_initialized(ptr, element_size * num_elements); +#endif +} + +#ifdef __cplusplus + +namespace rtc { + +template +inline void AsanPoison(const T& mem) { + rtc_AsanPoison(mem.data(), sizeof(mem.data()[0]), mem.size()); +} + +template +inline void AsanUnpoison(const T& mem) { + rtc_AsanUnpoison(mem.data(), sizeof(mem.data()[0]), mem.size()); +} + +template +inline void MsanMarkUninitialized(const T& mem) { + rtc_MsanMarkUninitialized(mem.data(), sizeof(mem.data()[0]), mem.size()); +} + +template +inline void MsanCheckInitialized(const T& mem) { + rtc_MsanCheckInitialized(mem.data(), sizeof(mem.data()[0]), mem.size()); +} + +} // namespace rtc + +#endif // __cplusplus + +#endif // WEBRTC_BASE_SANITIZER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/stringutils.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/stringutils.cc new file mode 100644 index 000000000..eca136216 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/stringutils.cc @@ -0,0 +1,133 @@ +/* + * Copyright 2004 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/base/checks.h" +#include "webrtc/base/stringutils.h" + +namespace rtc { + +bool memory_check(const void* memory, int c, size_t count) { + const char* char_memory = static_cast(memory); + char char_c = static_cast(c); + for (size_t i = 0; i < count; ++i) { + if (char_memory[i] != char_c) { + return false; + } + } + return true; +} + +bool string_match(const char* target, const char* pattern) { + while (*pattern) { + if (*pattern == '*') { + if (!*++pattern) { + return true; + } + while (*target) { + if ((toupper(*pattern) == toupper(*target)) + && string_match(target + 1, pattern + 1)) { + return true; + } + ++target; + } + return false; + } else { + if (toupper(*pattern) != toupper(*target)) { + return false; + } + ++target; + ++pattern; + } + } + return !*target; +} + +#if defined(WEBRTC_WIN) +int ascii_string_compare(const wchar_t* s1, const char* s2, size_t n, + CharacterTransformation transformation) { + wchar_t c1, c2; + while (true) { + if (n-- == 0) return 0; + c1 = transformation(*s1); + // Double check that characters are not UTF-8 + RTC_DCHECK_LT(*s2, 128); + // Note: *s2 gets implicitly promoted to wchar_t + c2 = transformation(*s2); + if (c1 != c2) return (c1 < c2) ? 
-1 : 1; + if (!c1) return 0; + ++s1; + ++s2; + } +} + +size_t asccpyn(wchar_t* buffer, size_t buflen, + const char* source, size_t srclen) { + if (buflen <= 0) + return 0; + + if (srclen == SIZE_UNKNOWN) { + srclen = strlenn(source, buflen - 1); + } else if (srclen >= buflen) { + srclen = buflen - 1; + } +#if RTC_DCHECK_IS_ON + // Double check that characters are not UTF-8 + for (size_t pos = 0; pos < srclen; ++pos) + RTC_DCHECK_LT(source[pos], 128); +#endif + std::copy(source, source + srclen, buffer); + buffer[srclen] = 0; + return srclen; +} + +#endif // WEBRTC_WIN + +void replace_substrs(const char *search, + size_t search_len, + const char *replace, + size_t replace_len, + std::string *s) { + size_t pos = 0; + while ((pos = s->find(search, pos, search_len)) != std::string::npos) { + s->replace(pos, search_len, replace, replace_len); + pos += replace_len; + } +} + +bool starts_with(const char *s1, const char *s2) { + return strncmp(s1, s2, strlen(s2)) == 0; +} + +bool ends_with(const char *s1, const char *s2) { + size_t s1_length = strlen(s1); + size_t s2_length = strlen(s2); + + if (s2_length > s1_length) { + return false; + } + + const char* start = s1 + (s1_length - s2_length); + return strncmp(start, s2, s2_length) == 0; +} + +static const char kWhitespace[] = " \n\r\t"; + +std::string string_trim(const std::string& s) { + std::string::size_type first = s.find_first_not_of(kWhitespace); + std::string::size_type last = s.find_last_not_of(kWhitespace); + + if (first == std::string::npos || last == std::string::npos) { + return std::string(""); + } + + return s.substr(first, last - first + 1); +} + +} // namespace rtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/stringutils.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/stringutils.h new file mode 100644 index 000000000..51fd4885c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/stringutils.h @@ -0,0 +1,318 @@ +/* + * Copyright 2004 The WebRTC Project 
Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_BASE_STRINGUTILS_H__ +#define WEBRTC_BASE_STRINGUTILS_H__ + +#include +#include +#include +#include + +#if defined(WEBRTC_WIN) +#include +#include +#define alloca _alloca +#endif // WEBRTC_WIN + +#if defined(WEBRTC_POSIX) +#ifdef BSD +#include +#else // BSD +#include +#endif // !BSD +#endif // WEBRTC_POSIX + +#include + +#include "webrtc/base/basictypes.h" + +/////////////////////////////////////////////////////////////////////////////// +// Generic string/memory utilities +/////////////////////////////////////////////////////////////////////////////// + +#define STACK_ARRAY(TYPE, LEN) static_cast(::alloca((LEN)*sizeof(TYPE))) + +namespace rtc { + +// Complement to memset. Verifies memory consists of count bytes of value c. +bool memory_check(const void* memory, int c, size_t count); + +// Determines whether the simple wildcard pattern matches target. +// Alpha characters in pattern match case-insensitively. +// Asterisks in pattern match 0 or more characters. +// Ex: string_match("www.TEST.GOOGLE.COM", "www.*.com") -> true +bool string_match(const char* target, const char* pattern); + +} // namespace rtc + +/////////////////////////////////////////////////////////////////////////////// +// Rename a bunch of common string functions so they are consistent across +// platforms and between char and wchar_t variants. 
+// Here is the full list of functions that are unified: +// strlen, strcmp, stricmp, strncmp, strnicmp +// strchr, vsnprintf, strtoul, tolowercase +// tolowercase is like tolower, but not compatible with end-of-file value +// +// It's not clear if we will ever use wchar_t strings on unix. In theory, +// all strings should be Utf8 all the time, except when interfacing with Win32 +// APIs that require Utf16. +/////////////////////////////////////////////////////////////////////////////// + +inline char tolowercase(char c) { + return static_cast(tolower(c)); +} + +#if defined(WEBRTC_WIN) + +inline size_t strlen(const wchar_t* s) { + return wcslen(s); +} +inline int strcmp(const wchar_t* s1, const wchar_t* s2) { + return wcscmp(s1, s2); +} +inline int stricmp(const wchar_t* s1, const wchar_t* s2) { + return _wcsicmp(s1, s2); +} +inline int strncmp(const wchar_t* s1, const wchar_t* s2, size_t n) { + return wcsncmp(s1, s2, n); +} +inline int strnicmp(const wchar_t* s1, const wchar_t* s2, size_t n) { + return _wcsnicmp(s1, s2, n); +} +inline const wchar_t* strchr(const wchar_t* s, wchar_t c) { + return wcschr(s, c); +} +inline const wchar_t* strstr(const wchar_t* haystack, const wchar_t* needle) { + return wcsstr(haystack, needle); +} +#ifndef vsnprintf +inline int vsnprintf(wchar_t* buf, size_t n, const wchar_t* fmt, va_list args) { + return _vsnwprintf(buf, n, fmt, args); +} +#endif // !vsnprintf +inline unsigned long strtoul(const wchar_t* snum, wchar_t** end, int base) { + return wcstoul(snum, end, base); +} +inline wchar_t tolowercase(wchar_t c) { + return static_cast(towlower(c)); +} + +#endif // WEBRTC_WIN + +#if defined(WEBRTC_POSIX) + +inline int _stricmp(const char* s1, const char* s2) { + return strcasecmp(s1, s2); +} +inline int _strnicmp(const char* s1, const char* s2, size_t n) { + return strncasecmp(s1, s2, n); +} + +#endif // WEBRTC_POSIX + +/////////////////////////////////////////////////////////////////////////////// +// Traits simplifies porting 
string functions to be CTYPE-agnostic +/////////////////////////////////////////////////////////////////////////////// + +namespace rtc { + +const size_t SIZE_UNKNOWN = static_cast(-1); + +template +struct Traits { + // STL string type + //typedef XXX string; + // Null-terminated string + //inline static const CTYPE* empty_str(); +}; + +/////////////////////////////////////////////////////////////////////////////// +// String utilities which work with char or wchar_t +/////////////////////////////////////////////////////////////////////////////// + +template +inline const CTYPE* nonnull(const CTYPE* str, const CTYPE* def_str = NULL) { + return str ? str : (def_str ? def_str : Traits::empty_str()); +} + +template +const CTYPE* strchr(const CTYPE* str, const CTYPE* chs) { + for (size_t i=0; str[i]; ++i) { + for (size_t j=0; chs[j]; ++j) { + if (str[i] == chs[j]) { + return str + i; + } + } + } + return 0; +} + +template +const CTYPE* strchrn(const CTYPE* str, size_t slen, CTYPE ch) { + for (size_t i=0; i +size_t strlenn(const CTYPE* buffer, size_t buflen) { + size_t bufpos = 0; + while (buffer[bufpos] && (bufpos < buflen)) { + ++bufpos; + } + return bufpos; +} + +// Safe versions of strncpy, strncat, snprintf and vsnprintf that always +// null-terminate. 
+ +template +size_t strcpyn(CTYPE* buffer, size_t buflen, + const CTYPE* source, size_t srclen = SIZE_UNKNOWN) { + if (buflen <= 0) + return 0; + + if (srclen == SIZE_UNKNOWN) { + srclen = strlenn(source, buflen - 1); + } else if (srclen >= buflen) { + srclen = buflen - 1; + } + memcpy(buffer, source, srclen * sizeof(CTYPE)); + buffer[srclen] = 0; + return srclen; +} + +template +size_t strcatn(CTYPE* buffer, size_t buflen, + const CTYPE* source, size_t srclen = SIZE_UNKNOWN) { + if (buflen <= 0) + return 0; + + size_t bufpos = strlenn(buffer, buflen - 1); + return bufpos + strcpyn(buffer + bufpos, buflen - bufpos, source, srclen); +} + +// Some compilers (clang specifically) require vsprintfn be defined before +// sprintfn. +template +size_t vsprintfn(CTYPE* buffer, size_t buflen, const CTYPE* format, + va_list args) { + int len = vsnprintf(buffer, buflen, format, args); + if ((len < 0) || (static_cast(len) >= buflen)) { + len = static_cast(buflen - 1); + buffer[len] = 0; + } + return len; +} + +template +size_t sprintfn(CTYPE* buffer, size_t buflen, const CTYPE* format, ...); +template +size_t sprintfn(CTYPE* buffer, size_t buflen, const CTYPE* format, ...) { + va_list args; + va_start(args, format); + size_t len = vsprintfn(buffer, buflen, format, args); + va_end(args); + return len; +} + +/////////////////////////////////////////////////////////////////////////////// +// Allow safe comparing and copying ascii (not UTF-8) with both wide and +// non-wide character strings. 
+/////////////////////////////////////////////////////////////////////////////// + +inline int asccmp(const char* s1, const char* s2) { + return strcmp(s1, s2); +} +inline int ascicmp(const char* s1, const char* s2) { + return strcmp(s1, s2); +} +inline int ascncmp(const char* s1, const char* s2, size_t n) { + return strncmp(s1, s2, n); +} +inline int ascnicmp(const char* s1, const char* s2, size_t n) { + return strncmp(s1, s2, n); +} +inline size_t asccpyn(char* buffer, size_t buflen, + const char* source, size_t srclen = SIZE_UNKNOWN) { + return strcpyn(buffer, buflen, source, srclen); +} + +#if defined(WEBRTC_WIN) + +typedef wchar_t(*CharacterTransformation)(wchar_t); +inline wchar_t identity(wchar_t c) { return c; } +int ascii_string_compare(const wchar_t* s1, const char* s2, size_t n, + CharacterTransformation transformation); + +inline int asccmp(const wchar_t* s1, const char* s2) { + return ascii_string_compare(s1, s2, static_cast(-1), identity); +} +inline int ascicmp(const wchar_t* s1, const char* s2) { + return ascii_string_compare(s1, s2, static_cast(-1), tolowercase); +} +inline int ascncmp(const wchar_t* s1, const char* s2, size_t n) { + return ascii_string_compare(s1, s2, n, identity); +} +inline int ascnicmp(const wchar_t* s1, const char* s2, size_t n) { + return ascii_string_compare(s1, s2, n, tolowercase); +} +size_t asccpyn(wchar_t* buffer, size_t buflen, + const char* source, size_t srclen = SIZE_UNKNOWN); + +#endif // WEBRTC_WIN + +/////////////////////////////////////////////////////////////////////////////// +// Traits specializations +/////////////////////////////////////////////////////////////////////////////// + +template<> +struct Traits { + typedef std::string string; + inline static const char* empty_str() { return ""; } +}; + +/////////////////////////////////////////////////////////////////////////////// +// Traits specializations (Windows only, currently) 
+/////////////////////////////////////////////////////////////////////////////// + +#if defined(WEBRTC_WIN) + +template<> +struct Traits { + typedef std::wstring string; + inline static const wchar_t* empty_str() { return L""; } +}; + +#endif // WEBRTC_WIN + +// Replaces all occurrences of "search" with "replace". +void replace_substrs(const char *search, + size_t search_len, + const char *replace, + size_t replace_len, + std::string *s); + +// True iff s1 starts with s2. +bool starts_with(const char *s1, const char *s2); + +// True iff s1 ends with s2. +bool ends_with(const char *s1, const char *s2); + +// Remove leading and trailing whitespaces. +std::string string_trim(const std::string& s); + +} // namespace rtc + +#endif // WEBRTC_BASE_STRINGUTILS_H__ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/type_traits.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/type_traits.h new file mode 100644 index 000000000..9617486b6 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/base/type_traits.h @@ -0,0 +1,79 @@ +/* + * Copyright 2016 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#if (!defined(_MSC_VER) || _MSC_VER>1800) +#ifndef WEBRTC_BASE_TYPE_TRAITS_H_ +#define WEBRTC_BASE_TYPE_TRAITS_H_ + +#include +#include + +namespace rtc { + +// Determines if the given class has zero-argument .data() and .size() methods +// whose return values are convertible to T* and size_t, respectively. 
+template +class HasDataAndSize { + private: + template < + typename C, + typename std::enable_if< + std::is_convertible().data()), T*>::value && + std::is_convertible().size()), + std::size_t>::value>::type* = nullptr> + static int Test(int); + + template + static char Test(...); + + public: + static constexpr bool value = std::is_same(0)), int>::value; +}; + +namespace test_has_data_and_size { + +template +struct Test1 { + DR data(); + SR size(); +}; +static_assert(HasDataAndSize, int>::value, ""); +static_assert(HasDataAndSize, const int>::value, ""); +static_assert(HasDataAndSize, const int>::value, ""); +static_assert(!HasDataAndSize, int>::value, + "implicit cast of const int* to int*"); +static_assert(!HasDataAndSize, int>::value, + "implicit cast of char* to int*"); + +struct Test2 { + int* data; + size_t size; +}; +static_assert(!HasDataAndSize::value, + ".data and .size aren't functions"); + +struct Test3 { + int* data(); +}; +static_assert(!HasDataAndSize::value, ".size() is missing"); + +class Test4 { + int* data(); + size_t size(); +}; +static_assert(!HasDataAndSize::value, + ".data() and .size() are private"); + +} // namespace test_has_data_and_size + +} // namespace rtc + +#endif // WEBRTC_BASE_TYPE_TRAITS_H_ +#endif diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/audio_util.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/audio_util.cc new file mode 100644 index 000000000..2ce2eba99 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/audio_util.cc @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/common_audio/include/audio_util.h" + +#include "webrtc/typedefs.h" + +namespace webrtc { + +void FloatToS16(const float* src, size_t size, int16_t* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = FloatToS16(src[i]); +} + +void S16ToFloat(const int16_t* src, size_t size, float* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = S16ToFloat(src[i]); +} + +void FloatS16ToS16(const float* src, size_t size, int16_t* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = FloatS16ToS16(src[i]); +} + +void FloatToFloatS16(const float* src, size_t size, float* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = FloatToFloatS16(src[i]); +} + +void FloatS16ToFloat(const float* src, size_t size, float* dest) { + for (size_t i = 0; i < size; ++i) + dest[i] = FloatS16ToFloat(src[i]); +} + +template <> +void DownmixInterleavedToMono(const int16_t* interleaved, + size_t num_frames, + int num_channels, + int16_t* deinterleaved) { + DownmixInterleavedToMonoImpl(interleaved, num_frames, + num_channels, deinterleaved); +} + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/channel_buffer.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/channel_buffer.cc new file mode 100644 index 000000000..0f364114c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/channel_buffer.cc @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/common_audio/channel_buffer.h" + +#include "webrtc/base/checks.h" + +namespace webrtc { + +IFChannelBuffer::IFChannelBuffer(size_t num_frames, + size_t num_channels, + size_t num_bands) + : ivalid_(true), + ibuf_(num_frames, num_channels, num_bands), + fvalid_(true), + fbuf_(num_frames, num_channels, num_bands) {} + +IFChannelBuffer::~IFChannelBuffer() = default; + +ChannelBuffer* IFChannelBuffer::ibuf() { + RefreshI(); + fvalid_ = false; + return &ibuf_; +} + +ChannelBuffer* IFChannelBuffer::fbuf() { + RefreshF(); + ivalid_ = false; + return &fbuf_; +} + +const ChannelBuffer* IFChannelBuffer::ibuf_const() const { + RefreshI(); + return &ibuf_; +} + +const ChannelBuffer* IFChannelBuffer::fbuf_const() const { + RefreshF(); + return &fbuf_; +} + +void IFChannelBuffer::RefreshF() const { + if (!fvalid_) { + RTC_DCHECK(ivalid_); + fbuf_.set_num_channels(ibuf_.num_channels()); + const int16_t* const* int_channels = ibuf_.channels(); + float* const* float_channels = fbuf_.channels(); + for (size_t i = 0; i < ibuf_.num_channels(); ++i) { + for (size_t j = 0; j < ibuf_.num_frames(); ++j) { + float_channels[i][j] = int_channels[i][j]; + } + } + fvalid_ = true; + } +} + +void IFChannelBuffer::RefreshI() const { + if (!ivalid_) { + RTC_DCHECK(fvalid_); + int16_t* const* int_channels = ibuf_.channels(); + ibuf_.set_num_channels(fbuf_.num_channels()); + const float* const* float_channels = fbuf_.channels(); + for (size_t i = 0; i < fbuf_.num_channels(); ++i) { + FloatS16ToS16(float_channels[i], + ibuf_.num_frames(), + int_channels[i]); + } + ivalid_ = true; + } +} + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/channel_buffer.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/channel_buffer.h new file mode 100644 index 000000000..201482f4d --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/channel_buffer.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2014 The 
WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_CHANNEL_BUFFER_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_CHANNEL_BUFFER_H_ + +#include + +#include + +#include "webrtc/base/checks.h" +//#include "webrtc/base/gtest_prod_util.h" +#include "webrtc/common_audio/include/audio_util.h" + +namespace webrtc { + +// Helper to encapsulate a contiguous data buffer, full or split into frequency +// bands, with access to a pointer arrays of the deinterleaved channels and +// bands. The buffer is zero initialized at creation. +// +// The buffer structure is showed below for a 2 channel and 2 bands case: +// +// |data_|: +// { [ --- b1ch1 --- ] [ --- b2ch1 --- ] [ --- b1ch2 --- ] [ --- b2ch2 --- ] } +// +// The pointer arrays for the same example are as follows: +// +// |channels_|: +// { [ b1ch1* ] [ b1ch2* ] [ b2ch1* ] [ b2ch2* ] } +// +// |bands_|: +// { [ b1ch1* ] [ b2ch1* ] [ b1ch2* ] [ b2ch2* ] } +template +class ChannelBuffer { + public: + ChannelBuffer(size_t num_frames, + size_t num_channels, + size_t num_bands = 1) + : data_(new T[num_frames * num_channels]()), + channels_(new T*[num_channels * num_bands]), + bands_(new T*[num_channels * num_bands]), + num_frames_(num_frames), + num_frames_per_band_(num_frames / num_bands), + num_allocated_channels_(num_channels), + num_channels_(num_channels), + num_bands_(num_bands) { + for (size_t i = 0; i < num_allocated_channels_; ++i) { + for (size_t j = 0; j < num_bands_; ++j) { + channels_[j * num_allocated_channels_ + i] = + &data_[i * num_frames_ + j * num_frames_per_band_]; + bands_[i * num_bands_ + j] = channels_[j * num_allocated_channels_ + i]; + } + 
} + } + + // Returns a pointer array to the full-band channels (or lower band channels). + // Usage: + // channels()[channel][sample]. + // Where: + // 0 <= channel < |num_allocated_channels_| + // 0 <= sample < |num_frames_| + T* const* channels() { return channels(0); } + const T* const* channels() const { return channels(0); } + + // Returns a pointer array to the channels for a specific band. + // Usage: + // channels(band)[channel][sample]. + // Where: + // 0 <= band < |num_bands_| + // 0 <= channel < |num_allocated_channels_| + // 0 <= sample < |num_frames_per_band_| + const T* const* channels(size_t band) const { + RTC_DCHECK_LT(band, num_bands_); + return &channels_[band * num_allocated_channels_]; + } + T* const* channels(size_t band) { + const ChannelBuffer* t = this; + return const_cast(t->channels(band)); + } + + // Returns a pointer array to the bands for a specific channel. + // Usage: + // bands(channel)[band][sample]. + // Where: + // 0 <= channel < |num_channels_| + // 0 <= band < |num_bands_| + // 0 <= sample < |num_frames_per_band_| + const T* const* bands(size_t channel) const { + RTC_DCHECK_LT(channel, num_channels_); + RTC_DCHECK_GE(channel, 0); + return &bands_[channel * num_bands_]; + } + T* const* bands(size_t channel) { + const ChannelBuffer* t = this; + return const_cast(t->bands(channel)); + } + + // Sets the |slice| pointers to the |start_frame| position for each channel. + // Returns |slice| for convenience. 
+ const T* const* Slice(T** slice, size_t start_frame) const { + RTC_DCHECK_LT(start_frame, num_frames_); + for (size_t i = 0; i < num_channels_; ++i) + slice[i] = &channels_[i][start_frame]; + return slice; + } + T** Slice(T** slice, size_t start_frame) { + const ChannelBuffer* t = this; + return const_cast(t->Slice(slice, start_frame)); + } + + size_t num_frames() const { return num_frames_; } + size_t num_frames_per_band() const { return num_frames_per_band_; } + size_t num_channels() const { return num_channels_; } + size_t num_bands() const { return num_bands_; } + size_t size() const {return num_frames_ * num_allocated_channels_; } + + void set_num_channels(size_t num_channels) { + RTC_DCHECK_LE(num_channels, num_allocated_channels_); + num_channels_ = num_channels; + } + + void SetDataForTesting(const T* data, size_t size) { + RTC_CHECK_EQ(size, this->size()); + memcpy(data_.get(), data, size * sizeof(*data)); + } + + private: + std::unique_ptr data_; + std::unique_ptr channels_; + std::unique_ptr bands_; + const size_t num_frames_; + const size_t num_frames_per_band_; + // Number of channels the internal buffer holds. + const size_t num_allocated_channels_; + // Number of channels the user sees. + size_t num_channels_; + const size_t num_bands_; +}; + +// One int16_t and one float ChannelBuffer that are kept in sync. The sync is +// broken when someone requests write access to either ChannelBuffer, and +// reestablished when someone requests the outdated ChannelBuffer. It is +// therefore safe to use the return value of ibuf_const() and fbuf_const() +// until the next call to ibuf() or fbuf(), and the return value of ibuf() and +// fbuf() until the next call to any of the other functions. 
+class IFChannelBuffer { + public: + IFChannelBuffer(size_t num_frames, size_t num_channels, size_t num_bands = 1); + ~IFChannelBuffer(); + + ChannelBuffer* ibuf(); + ChannelBuffer* fbuf(); + const ChannelBuffer* ibuf_const() const; + const ChannelBuffer* fbuf_const() const; + + size_t num_frames() const { return ibuf_.num_frames(); } + size_t num_frames_per_band() const { return ibuf_.num_frames_per_band(); } + size_t num_channels() const { + return ivalid_ ? ibuf_.num_channels() : fbuf_.num_channels(); + } + void set_num_channels(size_t num_channels) { + ibuf_.set_num_channels(num_channels); + fbuf_.set_num_channels(num_channels); + } + size_t num_bands() const { return ibuf_.num_bands(); } + + private: + void RefreshF() const; + void RefreshI() const; + + mutable bool ivalid_; + mutable ChannelBuffer ibuf_; + mutable bool fvalid_; + mutable ChannelBuffer fbuf_; +}; + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_CHANNEL_BUFFER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/fft4g.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/fft4g.c new file mode 100644 index 000000000..9cf7b9f6c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/fft4g.c @@ -0,0 +1,1332 @@ +/* + * http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html + * Copyright Takuya OOURA, 1996-2001 + * + * You may use, copy, modify and distribute this code for any purpose (include + * commercial use) and without fee. Please refer to this package when you modify + * this code. + * + * Changes: + * Trivial type modifications by the WebRTC authors. 
+ */ + +/* +Fast Fourier/Cosine/Sine Transform + dimension :one + data length :power of 2 + decimation :frequency + radix :4, 2 + data :inplace + table :use +functions + cdft: Complex Discrete Fourier Transform + rdft: Real Discrete Fourier Transform + ddct: Discrete Cosine Transform + ddst: Discrete Sine Transform + dfct: Cosine Transform of RDFT (Real Symmetric DFT) + dfst: Sine Transform of RDFT (Real Anti-symmetric DFT) +function prototypes + void cdft(int, int, float *, int *, float *); + void rdft(size_t, int, float *, size_t *, float *); + void ddct(int, int, float *, int *, float *); + void ddst(int, int, float *, int *, float *); + void dfct(int, float *, float *, int *, float *); + void dfst(int, float *, float *, int *, float *); + + +-------- Complex DFT (Discrete Fourier Transform) -------- + [definition] + + X[k] = sum_j=0^n-1 x[j]*exp(2*pi*i*j*k/n), 0<=k + X[k] = sum_j=0^n-1 x[j]*exp(-2*pi*i*j*k/n), 0<=k + ip[0] = 0; // first time only + cdft(2*n, 1, a, ip, w); + + ip[0] = 0; // first time only + cdft(2*n, -1, a, ip, w); + [parameters] + 2*n :data length (int) + n >= 1, n = power of 2 + a[0...2*n-1] :input/output data (float *) + input data + a[2*j] = Re(x[j]), + a[2*j+1] = Im(x[j]), 0<=j= 2+sqrt(n) + strictly, + length of ip >= + 2+(1<<(int)(log(n+0.5)/log(2))/2). + ip[0],ip[1] are pointers of the cos/sin table. + w[0...n/2-1] :cos/sin table (float *) + w[],ip[] are initialized if ip[0] == 0. + [remark] + Inverse of + cdft(2*n, -1, a, ip, w); + is + cdft(2*n, 1, a, ip, w); + for (j = 0; j <= 2 * n - 1; j++) { + a[j] *= 1.0 / n; + } + . 
+ + +-------- Real DFT / Inverse of Real DFT -------- + [definition] + RDFT + R[k] = sum_j=0^n-1 a[j]*cos(2*pi*j*k/n), 0<=k<=n/2 + I[k] = sum_j=0^n-1 a[j]*sin(2*pi*j*k/n), 0 IRDFT (excluding scale) + a[k] = (R[0] + R[n/2]*cos(pi*k))/2 + + sum_j=1^n/2-1 R[j]*cos(2*pi*j*k/n) + + sum_j=1^n/2-1 I[j]*sin(2*pi*j*k/n), 0<=k + ip[0] = 0; // first time only + rdft(n, 1, a, ip, w); + + ip[0] = 0; // first time only + rdft(n, -1, a, ip, w); + [parameters] + n :data length (size_t) + n >= 2, n = power of 2 + a[0...n-1] :input/output data (float *) + + output data + a[2*k] = R[k], 0<=k + input data + a[2*j] = R[j], 0<=j= 2+sqrt(n/2) + strictly, + length of ip >= + 2+(1<<(int)(log(n/2+0.5)/log(2))/2). + ip[0],ip[1] are pointers of the cos/sin table. + w[0...n/2-1] :cos/sin table (float *) + w[],ip[] are initialized if ip[0] == 0. + [remark] + Inverse of + rdft(n, 1, a, ip, w); + is + rdft(n, -1, a, ip, w); + for (j = 0; j <= n - 1; j++) { + a[j] *= 2.0 / n; + } + . + + +-------- DCT (Discrete Cosine Transform) / Inverse of DCT -------- + [definition] + IDCT (excluding scale) + C[k] = sum_j=0^n-1 a[j]*cos(pi*j*(k+1/2)/n), 0<=k DCT + C[k] = sum_j=0^n-1 a[j]*cos(pi*(j+1/2)*k/n), 0<=k + ip[0] = 0; // first time only + ddct(n, 1, a, ip, w); + + ip[0] = 0; // first time only + ddct(n, -1, a, ip, w); + [parameters] + n :data length (int) + n >= 2, n = power of 2 + a[0...n-1] :input/output data (float *) + output data + a[k] = C[k], 0<=k= 2+sqrt(n/2) + strictly, + length of ip >= + 2+(1<<(int)(log(n/2+0.5)/log(2))/2). + ip[0],ip[1] are pointers of the cos/sin table. + w[0...n*5/4-1] :cos/sin table (float *) + w[],ip[] are initialized if ip[0] == 0. + [remark] + Inverse of + ddct(n, -1, a, ip, w); + is + a[0] *= 0.5; + ddct(n, 1, a, ip, w); + for (j = 0; j <= n - 1; j++) { + a[j] *= 2.0 / n; + } + . 
+ + +-------- DST (Discrete Sine Transform) / Inverse of DST -------- + [definition] + IDST (excluding scale) + S[k] = sum_j=1^n A[j]*sin(pi*j*(k+1/2)/n), 0<=k DST + S[k] = sum_j=0^n-1 a[j]*sin(pi*(j+1/2)*k/n), 0 + ip[0] = 0; // first time only + ddst(n, 1, a, ip, w); + + ip[0] = 0; // first time only + ddst(n, -1, a, ip, w); + [parameters] + n :data length (int) + n >= 2, n = power of 2 + a[0...n-1] :input/output data (float *) + + input data + a[j] = A[j], 0 + output data + a[k] = S[k], 0= 2+sqrt(n/2) + strictly, + length of ip >= + 2+(1<<(int)(log(n/2+0.5)/log(2))/2). + ip[0],ip[1] are pointers of the cos/sin table. + w[0...n*5/4-1] :cos/sin table (float *) + w[],ip[] are initialized if ip[0] == 0. + [remark] + Inverse of + ddst(n, -1, a, ip, w); + is + a[0] *= 0.5; + ddst(n, 1, a, ip, w); + for (j = 0; j <= n - 1; j++) { + a[j] *= 2.0 / n; + } + . + + +-------- Cosine Transform of RDFT (Real Symmetric DFT) -------- + [definition] + C[k] = sum_j=0^n a[j]*cos(pi*j*k/n), 0<=k<=n + [usage] + ip[0] = 0; // first time only + dfct(n, a, t, ip, w); + [parameters] + n :data length - 1 (int) + n >= 2, n = power of 2 + a[0...n] :input/output data (float *) + output data + a[k] = C[k], 0<=k<=n + t[0...n/2] :work area (float *) + ip[0...*] :work area for bit reversal (int *) + length of ip >= 2+sqrt(n/4) + strictly, + length of ip >= + 2+(1<<(int)(log(n/4+0.5)/log(2))/2). + ip[0],ip[1] are pointers of the cos/sin table. + w[0...n*5/8-1] :cos/sin table (float *) + w[],ip[] are initialized if ip[0] == 0. + [remark] + Inverse of + a[0] *= 0.5; + a[n] *= 0.5; + dfct(n, a, t, ip, w); + is + a[0] *= 0.5; + a[n] *= 0.5; + dfct(n, a, t, ip, w); + for (j = 0; j <= n; j++) { + a[j] *= 2.0 / n; + } + . 
+ + +-------- Sine Transform of RDFT (Real Anti-symmetric DFT) -------- + [definition] + S[k] = sum_j=1^n-1 a[j]*sin(pi*j*k/n), 0= 2, n = power of 2 + a[0...n-1] :input/output data (float *) + output data + a[k] = S[k], 0= 2+sqrt(n/4) + strictly, + length of ip >= + 2+(1<<(int)(log(n/4+0.5)/log(2))/2). + ip[0],ip[1] are pointers of the cos/sin table. + w[0...n*5/8-1] :cos/sin table (float *) + w[],ip[] are initialized if ip[0] == 0. + [remark] + Inverse of + dfst(n, a, t, ip, w); + is + dfst(n, a, t, ip, w); + for (j = 1; j <= n - 1; j++) { + a[j] *= 2.0 / n; + } + . + + +Appendix : + The cos/sin table is recalculated when the larger table required. + w[] and ip[] are compatible with all routines. +*/ + +#include + +static void makewt(size_t nw, size_t *ip, float *w); +static void makect(size_t nc, size_t *ip, float *c); +static void bitrv2(size_t n, size_t *ip, float *a); +#if 0 // Not used. +static void bitrv2conj(int n, int *ip, float *a); +#endif +static void cftfsub(size_t n, float *a, float *w); +static void cftbsub(size_t n, float *a, float *w); +static void cft1st(size_t n, float *a, float *w); +static void cftmdl(size_t n, size_t l, float *a, float *w); +static void rftfsub(size_t n, float *a, size_t nc, float *c); +static void rftbsub(size_t n, float *a, size_t nc, float *c); +#if 0 // Not used. +static void dctsub(int n, float *a, int nc, float *c) +static void dstsub(int n, float *a, int nc, float *c) +#endif + + +#if 0 // Not used. 
+void WebRtc_cdft(int n, int isgn, float *a, int *ip, float *w) +{ + if (n > (ip[0] << 2)) { + makewt(n >> 2, ip, w); + } + if (n > 4) { + if (isgn >= 0) { + bitrv2(n, ip + 2, a); + cftfsub(n, a, w); + } else { + bitrv2conj(n, ip + 2, a); + cftbsub(n, a, w); + } + } else if (n == 4) { + cftfsub(n, a, w); + } +} +#endif + + +void WebRtc_rdft(size_t n, int isgn, float *a, size_t *ip, float *w) +{ + size_t nw, nc; + float xi; + + nw = ip[0]; + if (n > (nw << 2)) { + nw = n >> 2; + makewt(nw, ip, w); + } + nc = ip[1]; + if (n > (nc << 2)) { + nc = n >> 2; + makect(nc, ip, w + nw); + } + if (isgn >= 0) { + if (n > 4) { + bitrv2(n, ip + 2, a); + cftfsub(n, a, w); + rftfsub(n, a, nc, w + nw); + } else if (n == 4) { + cftfsub(n, a, w); + } + xi = a[0] - a[1]; + a[0] += a[1]; + a[1] = xi; + } else { + a[1] = 0.5f * (a[0] - a[1]); + a[0] -= a[1]; + if (n > 4) { + rftbsub(n, a, nc, w + nw); + bitrv2(n, ip + 2, a); + cftbsub(n, a, w); + } else if (n == 4) { + cftfsub(n, a, w); + } + } +} + +#if 0 // Not used. 
+static void ddct(int n, int isgn, float *a, int *ip, float *w) +{ + int j, nw, nc; + float xr; + + nw = ip[0]; + if (n > (nw << 2)) { + nw = n >> 2; + makewt(nw, ip, w); + } + nc = ip[1]; + if (n > nc) { + nc = n; + makect(nc, ip, w + nw); + } + if (isgn < 0) { + xr = a[n - 1]; + for (j = n - 2; j >= 2; j -= 2) { + a[j + 1] = a[j] - a[j - 1]; + a[j] += a[j - 1]; + } + a[1] = a[0] - xr; + a[0] += xr; + if (n > 4) { + rftbsub(n, a, nc, w + nw); + bitrv2(n, ip + 2, a); + cftbsub(n, a, w); + } else if (n == 4) { + cftfsub(n, a, w); + } + } + dctsub(n, a, nc, w + nw); + if (isgn >= 0) { + if (n > 4) { + bitrv2(n, ip + 2, a); + cftfsub(n, a, w); + rftfsub(n, a, nc, w + nw); + } else if (n == 4) { + cftfsub(n, a, w); + } + xr = a[0] - a[1]; + a[0] += a[1]; + for (j = 2; j < n; j += 2) { + a[j - 1] = a[j] - a[j + 1]; + a[j] += a[j + 1]; + } + a[n - 1] = xr; + } +} + + +static void ddst(int n, int isgn, float *a, int *ip, float *w) +{ + int j, nw, nc; + float xr; + + nw = ip[0]; + if (n > (nw << 2)) { + nw = n >> 2; + makewt(nw, ip, w); + } + nc = ip[1]; + if (n > nc) { + nc = n; + makect(nc, ip, w + nw); + } + if (isgn < 0) { + xr = a[n - 1]; + for (j = n - 2; j >= 2; j -= 2) { + a[j + 1] = -a[j] - a[j - 1]; + a[j] -= a[j - 1]; + } + a[1] = a[0] + xr; + a[0] -= xr; + if (n > 4) { + rftbsub(n, a, nc, w + nw); + bitrv2(n, ip + 2, a); + cftbsub(n, a, w); + } else if (n == 4) { + cftfsub(n, a, w); + } + } + dstsub(n, a, nc, w + nw); + if (isgn >= 0) { + if (n > 4) { + bitrv2(n, ip + 2, a); + cftfsub(n, a, w); + rftfsub(n, a, nc, w + nw); + } else if (n == 4) { + cftfsub(n, a, w); + } + xr = a[0] - a[1]; + a[0] += a[1]; + for (j = 2; j < n; j += 2) { + a[j - 1] = -a[j] - a[j + 1]; + a[j] -= a[j + 1]; + } + a[n - 1] = -xr; + } +} + + +static void dfct(int n, float *a, float *t, int *ip, float *w) +{ + int j, k, l, m, mh, nw, nc; + float xr, xi, yr, yi; + + nw = ip[0]; + if (n > (nw << 3)) { + nw = n >> 3; + makewt(nw, ip, w); + } + nc = ip[1]; + if (n > (nc << 1)) { + nc = n >> 
1; + makect(nc, ip, w + nw); + } + m = n >> 1; + yi = a[m]; + xi = a[0] + a[n]; + a[0] -= a[n]; + t[0] = xi - yi; + t[m] = xi + yi; + if (n > 2) { + mh = m >> 1; + for (j = 1; j < mh; j++) { + k = m - j; + xr = a[j] - a[n - j]; + xi = a[j] + a[n - j]; + yr = a[k] - a[n - k]; + yi = a[k] + a[n - k]; + a[j] = xr; + a[k] = yr; + t[j] = xi - yi; + t[k] = xi + yi; + } + t[mh] = a[mh] + a[n - mh]; + a[mh] -= a[n - mh]; + dctsub(m, a, nc, w + nw); + if (m > 4) { + bitrv2(m, ip + 2, a); + cftfsub(m, a, w); + rftfsub(m, a, nc, w + nw); + } else if (m == 4) { + cftfsub(m, a, w); + } + a[n - 1] = a[0] - a[1]; + a[1] = a[0] + a[1]; + for (j = m - 2; j >= 2; j -= 2) { + a[2 * j + 1] = a[j] + a[j + 1]; + a[2 * j - 1] = a[j] - a[j + 1]; + } + l = 2; + m = mh; + while (m >= 2) { + dctsub(m, t, nc, w + nw); + if (m > 4) { + bitrv2(m, ip + 2, t); + cftfsub(m, t, w); + rftfsub(m, t, nc, w + nw); + } else if (m == 4) { + cftfsub(m, t, w); + } + a[n - l] = t[0] - t[1]; + a[l] = t[0] + t[1]; + k = 0; + for (j = 2; j < m; j += 2) { + k += l << 2; + a[k - l] = t[j] - t[j + 1]; + a[k + l] = t[j] + t[j + 1]; + } + l <<= 1; + mh = m >> 1; + for (j = 0; j < mh; j++) { + k = m - j; + t[j] = t[m + k] - t[m + j]; + t[k] = t[m + k] + t[m + j]; + } + t[mh] = t[m + mh]; + m = mh; + } + a[l] = t[0]; + a[n] = t[2] - t[1]; + a[0] = t[2] + t[1]; + } else { + a[1] = a[0]; + a[2] = t[0]; + a[0] = t[1]; + } +} + +static void dfst(int n, float *a, float *t, int *ip, float *w) +{ + int j, k, l, m, mh, nw, nc; + float xr, xi, yr, yi; + + nw = ip[0]; + if (n > (nw << 3)) { + nw = n >> 3; + makewt(nw, ip, w); + } + nc = ip[1]; + if (n > (nc << 1)) { + nc = n >> 1; + makect(nc, ip, w + nw); + } + if (n > 2) { + m = n >> 1; + mh = m >> 1; + for (j = 1; j < mh; j++) { + k = m - j; + xr = a[j] + a[n - j]; + xi = a[j] - a[n - j]; + yr = a[k] + a[n - k]; + yi = a[k] - a[n - k]; + a[j] = xr; + a[k] = yr; + t[j] = xi + yi; + t[k] = xi - yi; + } + t[0] = a[mh] - a[n - mh]; + a[mh] += a[n - mh]; + a[0] = a[m]; + 
dstsub(m, a, nc, w + nw); + if (m > 4) { + bitrv2(m, ip + 2, a); + cftfsub(m, a, w); + rftfsub(m, a, nc, w + nw); + } else if (m == 4) { + cftfsub(m, a, w); + } + a[n - 1] = a[1] - a[0]; + a[1] = a[0] + a[1]; + for (j = m - 2; j >= 2; j -= 2) { + a[2 * j + 1] = a[j] - a[j + 1]; + a[2 * j - 1] = -a[j] - a[j + 1]; + } + l = 2; + m = mh; + while (m >= 2) { + dstsub(m, t, nc, w + nw); + if (m > 4) { + bitrv2(m, ip + 2, t); + cftfsub(m, t, w); + rftfsub(m, t, nc, w + nw); + } else if (m == 4) { + cftfsub(m, t, w); + } + a[n - l] = t[1] - t[0]; + a[l] = t[0] + t[1]; + k = 0; + for (j = 2; j < m; j += 2) { + k += l << 2; + a[k - l] = -t[j] - t[j + 1]; + a[k + l] = t[j] - t[j + 1]; + } + l <<= 1; + mh = m >> 1; + for (j = 1; j < mh; j++) { + k = m - j; + t[j] = t[m + k] + t[m + j]; + t[k] = t[m + k] - t[m + j]; + } + t[0] = t[m + mh]; + m = mh; + } + a[l] = t[0]; + } + a[0] = 0; +} +#endif // Not used. + + +/* -------- initializing routines -------- */ + + +#include + +static void makewt(size_t nw, size_t *ip, float *w) +{ + size_t j, nwh; + float delta, x, y; + + ip[0] = nw; + ip[1] = 1; + if (nw > 2) { + nwh = nw >> 1; + delta = atanf(1.0f) / nwh; + w[0] = 1; + w[1] = 0; + w[nwh] = (float)cos(delta * nwh); + w[nwh + 1] = w[nwh]; + if (nwh > 2) { + for (j = 2; j < nwh; j += 2) { + x = (float)cos(delta * j); + y = (float)sin(delta * j); + w[j] = x; + w[j + 1] = y; + w[nw - j] = y; + w[nw - j + 1] = x; + } + bitrv2(nw, ip + 2, w); + } + } +} + + +static void makect(size_t nc, size_t *ip, float *c) +{ + size_t j, nch; + float delta; + + ip[1] = nc; + if (nc > 1) { + nch = nc >> 1; + delta = atanf(1.0f) / nch; + c[0] = (float)cos(delta * nch); + c[nch] = 0.5f * c[0]; + for (j = 1; j < nch; j++) { + c[j] = 0.5f * (float)cos(delta * j); + c[nc - j] = 0.5f * (float)sin(delta * j); + } + } +} + + +/* -------- child routines -------- */ + + +static void bitrv2(size_t n, size_t *ip, float *a) +{ + size_t j, j1, k, k1, l, m, m2; + float xr, xi, yr, yi; + + ip[0] = 0; + l = n; + m = 
1; + while ((m << 3) < l) { + l >>= 1; + for (j = 0; j < m; j++) { + ip[m + j] = ip[j] + l; + } + m <<= 1; + } + m2 = 2 * m; + if ((m << 3) == l) { + for (k = 0; k < m; k++) { + for (j = 0; j < k; j++) { + j1 = 2 * j + ip[k]; + k1 = 2 * k + ip[j]; + xr = a[j1]; + xi = a[j1 + 1]; + yr = a[k1]; + yi = a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + j1 += m2; + k1 += 2 * m2; + xr = a[j1]; + xi = a[j1 + 1]; + yr = a[k1]; + yi = a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + j1 += m2; + k1 -= m2; + xr = a[j1]; + xi = a[j1 + 1]; + yr = a[k1]; + yi = a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + j1 += m2; + k1 += 2 * m2; + xr = a[j1]; + xi = a[j1 + 1]; + yr = a[k1]; + yi = a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + } + j1 = 2 * k + m2 + ip[k]; + k1 = j1 + m2; + xr = a[j1]; + xi = a[j1 + 1]; + yr = a[k1]; + yi = a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + } + } else { + for (k = 1; k < m; k++) { + for (j = 0; j < k; j++) { + j1 = 2 * j + ip[k]; + k1 = 2 * k + ip[j]; + xr = a[j1]; + xi = a[j1 + 1]; + yr = a[k1]; + yi = a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + j1 += m2; + k1 += m2; + xr = a[j1]; + xi = a[j1 + 1]; + yr = a[k1]; + yi = a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + } + } + } +} + +#if 0 // Not used. 
+static void bitrv2conj(int n, int *ip, float *a) +{ + int j, j1, k, k1, l, m, m2; + float xr, xi, yr, yi; + + ip[0] = 0; + l = n; + m = 1; + while ((m << 3) < l) { + l >>= 1; + for (j = 0; j < m; j++) { + ip[m + j] = ip[j] + l; + } + m <<= 1; + } + m2 = 2 * m; + if ((m << 3) == l) { + for (k = 0; k < m; k++) { + for (j = 0; j < k; j++) { + j1 = 2 * j + ip[k]; + k1 = 2 * k + ip[j]; + xr = a[j1]; + xi = -a[j1 + 1]; + yr = a[k1]; + yi = -a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + j1 += m2; + k1 += 2 * m2; + xr = a[j1]; + xi = -a[j1 + 1]; + yr = a[k1]; + yi = -a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + j1 += m2; + k1 -= m2; + xr = a[j1]; + xi = -a[j1 + 1]; + yr = a[k1]; + yi = -a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + j1 += m2; + k1 += 2 * m2; + xr = a[j1]; + xi = -a[j1 + 1]; + yr = a[k1]; + yi = -a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + } + k1 = 2 * k + ip[k]; + a[k1 + 1] = -a[k1 + 1]; + j1 = k1 + m2; + k1 = j1 + m2; + xr = a[j1]; + xi = -a[j1 + 1]; + yr = a[k1]; + yi = -a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + k1 += m2; + a[k1 + 1] = -a[k1 + 1]; + } + } else { + a[1] = -a[1]; + a[m2 + 1] = -a[m2 + 1]; + for (k = 1; k < m; k++) { + for (j = 0; j < k; j++) { + j1 = 2 * j + ip[k]; + k1 = 2 * k + ip[j]; + xr = a[j1]; + xi = -a[j1 + 1]; + yr = a[k1]; + yi = -a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + j1 += m2; + k1 += m2; + xr = a[j1]; + xi = -a[j1 + 1]; + yr = a[k1]; + yi = -a[k1 + 1]; + a[j1] = yr; + a[j1 + 1] = yi; + a[k1] = xr; + a[k1 + 1] = xi; + } + k1 = 2 * k + ip[k]; + a[k1 + 1] = -a[k1 + 1]; + a[k1 + m2 + 1] = -a[k1 + m2 + 1]; + } + } +} +#endif + +static void cftfsub(size_t n, float *a, float *w) +{ + size_t j, j1, j2, j3, l; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + l = 2; + if (n > 8) { + cft1st(n, a, w); + l = 8; + while ((l << 2) < n) { + 
cftmdl(n, l, a, w); + l <<= 2; + } + } + if ((l << 2) == n) { + for (j = 0; j < l; j += 2) { + j1 = j + l; + j2 = j1 + l; + j3 = j2 + l; + x0r = a[j] + a[j1]; + x0i = a[j + 1] + a[j1 + 1]; + x1r = a[j] - a[j1]; + x1i = a[j + 1] - a[j1 + 1]; + x2r = a[j2] + a[j3]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2] - a[j3]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j] = x0r + x2r; + a[j + 1] = x0i + x2i; + a[j2] = x0r - x2r; + a[j2 + 1] = x0i - x2i; + a[j1] = x1r - x3i; + a[j1 + 1] = x1i + x3r; + a[j3] = x1r + x3i; + a[j3 + 1] = x1i - x3r; + } + } else { + for (j = 0; j < l; j += 2) { + j1 = j + l; + x0r = a[j] - a[j1]; + x0i = a[j + 1] - a[j1 + 1]; + a[j] += a[j1]; + a[j + 1] += a[j1 + 1]; + a[j1] = x0r; + a[j1 + 1] = x0i; + } + } +} + + +static void cftbsub(size_t n, float *a, float *w) +{ + size_t j, j1, j2, j3, l; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + l = 2; + if (n > 8) { + cft1st(n, a, w); + l = 8; + while ((l << 2) < n) { + cftmdl(n, l, a, w); + l <<= 2; + } + } + if ((l << 2) == n) { + for (j = 0; j < l; j += 2) { + j1 = j + l; + j2 = j1 + l; + j3 = j2 + l; + x0r = a[j] + a[j1]; + x0i = -a[j + 1] - a[j1 + 1]; + x1r = a[j] - a[j1]; + x1i = -a[j + 1] + a[j1 + 1]; + x2r = a[j2] + a[j3]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2] - a[j3]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j] = x0r + x2r; + a[j + 1] = x0i - x2i; + a[j2] = x0r - x2r; + a[j2 + 1] = x0i + x2i; + a[j1] = x1r - x3i; + a[j1 + 1] = x1i - x3r; + a[j3] = x1r + x3i; + a[j3 + 1] = x1i + x3r; + } + } else { + for (j = 0; j < l; j += 2) { + j1 = j + l; + x0r = a[j] - a[j1]; + x0i = -a[j + 1] + a[j1 + 1]; + a[j] += a[j1]; + a[j + 1] = -a[j + 1] - a[j1 + 1]; + a[j1] = x0r; + a[j1 + 1] = x0i; + } + } +} + + +static void cft1st(size_t n, float *a, float *w) +{ + size_t j, k1, k2; + float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + x0r = a[0] + a[2]; + x0i = a[1] + a[3]; + x1r = a[0] - a[2]; + x1i = a[1] - a[3]; + x2r = a[4] + a[6]; + x2i = a[5] + a[7]; + x3r = a[4] - 
a[6]; + x3i = a[5] - a[7]; + a[0] = x0r + x2r; + a[1] = x0i + x2i; + a[4] = x0r - x2r; + a[5] = x0i - x2i; + a[2] = x1r - x3i; + a[3] = x1i + x3r; + a[6] = x1r + x3i; + a[7] = x1i - x3r; + wk1r = w[2]; + x0r = a[8] + a[10]; + x0i = a[9] + a[11]; + x1r = a[8] - a[10]; + x1i = a[9] - a[11]; + x2r = a[12] + a[14]; + x2i = a[13] + a[15]; + x3r = a[12] - a[14]; + x3i = a[13] - a[15]; + a[8] = x0r + x2r; + a[9] = x0i + x2i; + a[12] = x2i - x0i; + a[13] = x0r - x2r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[10] = wk1r * (x0r - x0i); + a[11] = wk1r * (x0r + x0i); + x0r = x3i + x1r; + x0i = x3r - x1i; + a[14] = wk1r * (x0i - x0r); + a[15] = wk1r * (x0i + x0r); + k1 = 0; + for (j = 16; j < n; j += 16) { + k1 += 2; + k2 = 2 * k1; + wk2r = w[k1]; + wk2i = w[k1 + 1]; + wk1r = w[k2]; + wk1i = w[k2 + 1]; + wk3r = wk1r - 2 * wk2i * wk1i; + wk3i = 2 * wk2i * wk1r - wk1i; + x0r = a[j] + a[j + 2]; + x0i = a[j + 1] + a[j + 3]; + x1r = a[j] - a[j + 2]; + x1i = a[j + 1] - a[j + 3]; + x2r = a[j + 4] + a[j + 6]; + x2i = a[j + 5] + a[j + 7]; + x3r = a[j + 4] - a[j + 6]; + x3i = a[j + 5] - a[j + 7]; + a[j] = x0r + x2r; + a[j + 1] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j + 4] = wk2r * x0r - wk2i * x0i; + a[j + 5] = wk2r * x0i + wk2i * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j + 2] = wk1r * x0r - wk1i * x0i; + a[j + 3] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j + 6] = wk3r * x0r - wk3i * x0i; + a[j + 7] = wk3r * x0i + wk3i * x0r; + wk1r = w[k2 + 2]; + wk1i = w[k2 + 3]; + wk3r = wk1r - 2 * wk2r * wk1i; + wk3i = 2 * wk2r * wk1r - wk1i; + x0r = a[j + 8] + a[j + 10]; + x0i = a[j + 9] + a[j + 11]; + x1r = a[j + 8] - a[j + 10]; + x1i = a[j + 9] - a[j + 11]; + x2r = a[j + 12] + a[j + 14]; + x2i = a[j + 13] + a[j + 15]; + x3r = a[j + 12] - a[j + 14]; + x3i = a[j + 13] - a[j + 15]; + a[j + 8] = x0r + x2r; + a[j + 9] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j + 12] = -wk2i * x0r - wk2r * x0i; + a[j + 13] = -wk2i * x0i + wk2r * x0r; + x0r = x1r - x3i; + x0i = 
x1i + x3r; + a[j + 10] = wk1r * x0r - wk1i * x0i; + a[j + 11] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j + 14] = wk3r * x0r - wk3i * x0i; + a[j + 15] = wk3r * x0i + wk3i * x0r; + } +} + + +static void cftmdl(size_t n, size_t l, float *a, float *w) +{ + size_t j, j1, j2, j3, k, k1, k2, m, m2; + float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + m = l << 2; + for (j = 0; j < l; j += 2) { + j1 = j + l; + j2 = j1 + l; + j3 = j2 + l; + x0r = a[j] + a[j1]; + x0i = a[j + 1] + a[j1 + 1]; + x1r = a[j] - a[j1]; + x1i = a[j + 1] - a[j1 + 1]; + x2r = a[j2] + a[j3]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2] - a[j3]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j] = x0r + x2r; + a[j + 1] = x0i + x2i; + a[j2] = x0r - x2r; + a[j2 + 1] = x0i - x2i; + a[j1] = x1r - x3i; + a[j1 + 1] = x1i + x3r; + a[j3] = x1r + x3i; + a[j3 + 1] = x1i - x3r; + } + wk1r = w[2]; + for (j = m; j < l + m; j += 2) { + j1 = j + l; + j2 = j1 + l; + j3 = j2 + l; + x0r = a[j] + a[j1]; + x0i = a[j + 1] + a[j1 + 1]; + x1r = a[j] - a[j1]; + x1i = a[j + 1] - a[j1 + 1]; + x2r = a[j2] + a[j3]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2] - a[j3]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j] = x0r + x2r; + a[j + 1] = x0i + x2i; + a[j2] = x2i - x0i; + a[j2 + 1] = x0r - x2r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j1] = wk1r * (x0r - x0i); + a[j1 + 1] = wk1r * (x0r + x0i); + x0r = x3i + x1r; + x0i = x3r - x1i; + a[j3] = wk1r * (x0i - x0r); + a[j3 + 1] = wk1r * (x0i + x0r); + } + k1 = 0; + m2 = 2 * m; + for (k = m2; k < n; k += m2) { + k1 += 2; + k2 = 2 * k1; + wk2r = w[k1]; + wk2i = w[k1 + 1]; + wk1r = w[k2]; + wk1i = w[k2 + 1]; + wk3r = wk1r - 2 * wk2i * wk1i; + wk3i = 2 * wk2i * wk1r - wk1i; + for (j = k; j < l + k; j += 2) { + j1 = j + l; + j2 = j1 + l; + j3 = j2 + l; + x0r = a[j] + a[j1]; + x0i = a[j + 1] + a[j1 + 1]; + x1r = a[j] - a[j1]; + x1i = a[j + 1] - a[j1 + 1]; + x2r = a[j2] + a[j3]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2] - a[j3]; + x3i = a[j2 
+ 1] - a[j3 + 1]; + a[j] = x0r + x2r; + a[j + 1] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j2] = wk2r * x0r - wk2i * x0i; + a[j2 + 1] = wk2r * x0i + wk2i * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j1] = wk1r * x0r - wk1i * x0i; + a[j1 + 1] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j3] = wk3r * x0r - wk3i * x0i; + a[j3 + 1] = wk3r * x0i + wk3i * x0r; + } + wk1r = w[k2 + 2]; + wk1i = w[k2 + 3]; + wk3r = wk1r - 2 * wk2r * wk1i; + wk3i = 2 * wk2r * wk1r - wk1i; + for (j = k + m; j < l + (k + m); j += 2) { + j1 = j + l; + j2 = j1 + l; + j3 = j2 + l; + x0r = a[j] + a[j1]; + x0i = a[j + 1] + a[j1 + 1]; + x1r = a[j] - a[j1]; + x1i = a[j + 1] - a[j1 + 1]; + x2r = a[j2] + a[j3]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2] - a[j3]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j] = x0r + x2r; + a[j + 1] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j2] = -wk2i * x0r - wk2r * x0i; + a[j2 + 1] = -wk2i * x0i + wk2r * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j1] = wk1r * x0r - wk1i * x0i; + a[j1 + 1] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j3] = wk3r * x0r - wk3i * x0i; + a[j3 + 1] = wk3r * x0i + wk3i * x0r; + } + } +} + + +static void rftfsub(size_t n, float *a, size_t nc, float *c) +{ + size_t j, k, kk, ks, m; + float wkr, wki, xr, xi, yr, yi; + + m = n >> 1; + ks = 2 * nc / m; + kk = 0; + for (j = 2; j < m; j += 2) { + k = n - j; + kk += ks; + wkr = 0.5f - c[nc - kk]; + wki = c[kk]; + xr = a[j] - a[k]; + xi = a[j + 1] + a[k + 1]; + yr = wkr * xr - wki * xi; + yi = wkr * xi + wki * xr; + a[j] -= yr; + a[j + 1] -= yi; + a[k] += yr; + a[k + 1] -= yi; + } +} + + +static void rftbsub(size_t n, float *a, size_t nc, float *c) +{ + size_t j, k, kk, ks, m; + float wkr, wki, xr, xi, yr, yi; + + a[1] = -a[1]; + m = n >> 1; + ks = 2 * nc / m; + kk = 0; + for (j = 2; j < m; j += 2) { + k = n - j; + kk += ks; + wkr = 0.5f - c[nc - kk]; + wki = c[kk]; + xr = a[j] - a[k]; + xi = a[j + 1] + a[k + 1]; + yr = wkr * xr + wki * xi; + yi = 
wkr * xi - wki * xr; + a[j] -= yr; + a[j + 1] = yi - a[j + 1]; + a[k] += yr; + a[k + 1] = yi - a[k + 1]; + } + a[m + 1] = -a[m + 1]; +} + +#if 0 // Not used. +static void dctsub(int n, float *a, int nc, float *c) +{ + int j, k, kk, ks, m; + float wkr, wki, xr; + + m = n >> 1; + ks = nc / n; + kk = 0; + for (j = 1; j < m; j++) { + k = n - j; + kk += ks; + wkr = c[kk] - c[nc - kk]; + wki = c[kk] + c[nc - kk]; + xr = wki * a[j] - wkr * a[k]; + a[j] = wkr * a[j] + wki * a[k]; + a[k] = xr; + } + a[m] *= c[0]; +} + + +static void dstsub(int n, float *a, int nc, float *c) +{ + int j, k, kk, ks, m; + float wkr, wki, xr; + + m = n >> 1; + ks = nc / n; + kk = 0; + for (j = 1; j < m; j++) { + k = n - j; + kk += ks; + wkr = c[kk] - c[nc - kk]; + wki = c[kk] + c[nc - kk]; + xr = wki * a[k] - wkr * a[j]; + a[k] = wkr * a[k] + wki * a[j]; + a[j] = xr; + } + a[m] *= c[0]; +} +#endif // Not used. diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/fft4g.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/fft4g.h new file mode 100644 index 000000000..6dd792f63 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/fft4g.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_COMMON_AUDIO_FFT4G_H_ +#define WEBRTC_COMMON_AUDIO_FFT4G_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +// Refer to fft4g.c for documentation. 
+void WebRtc_rdft(size_t n, int isgn, float *a, size_t *ip, float *w); + +#if defined(__cplusplus) +} +#endif + +#endif // WEBRTC_COMMON_AUDIO_FFT4G_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/include/audio_util.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/include/audio_util.h new file mode 100644 index 000000000..1601c7fd1 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/include/audio_util.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_ +#define WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_ + +#include +#include + +#include "webrtc/base/checks.h" +#include "webrtc/typedefs.h" + +namespace webrtc { + +typedef std::numeric_limits limits_int16; + +// The conversion functions use the following naming convention: +// S16: int16_t [-32768, 32767] +// Float: float [-1.0, 1.0] +// FloatS16: float [-32768.0, 32767.0] +static inline int16_t FloatToS16(float v) { + if (v > 0) + return v >= 1 ? limits_int16::max() + : static_cast(v * limits_int16::max() + 0.5f); + return v <= -1 ? limits_int16::min() + : static_cast(-v * limits_int16::min() - 0.5f); +} + +static inline float S16ToFloat(int16_t v) { + static const float kMaxInt16Inverse = 1.f / limits_int16::max(); + static const float kMinInt16Inverse = 1.f / limits_int16::min(); + return v * (v > 0 ? 
kMaxInt16Inverse : -kMinInt16Inverse); +} + +static inline int16_t FloatS16ToS16(float v) { + static const float kMaxRound = limits_int16::max() - 0.5f; + static const float kMinRound = limits_int16::min() + 0.5f; + if (v > 0) + return v >= kMaxRound ? limits_int16::max() + : static_cast(v + 0.5f); + return v <= kMinRound ? limits_int16::min() : static_cast(v - 0.5f); +} + +static inline float FloatToFloatS16(float v) { + return v * (v > 0 ? limits_int16::max() : -limits_int16::min()); +} + +static inline float FloatS16ToFloat(float v) { + static const float kMaxInt16Inverse = 1.f / limits_int16::max(); + static const float kMinInt16Inverse = 1.f / limits_int16::min(); + return v * (v > 0 ? kMaxInt16Inverse : -kMinInt16Inverse); +} + +void FloatToS16(const float* src, size_t size, int16_t* dest); +void S16ToFloat(const int16_t* src, size_t size, float* dest); +void FloatS16ToS16(const float* src, size_t size, int16_t* dest); +void FloatToFloatS16(const float* src, size_t size, float* dest); +void FloatS16ToFloat(const float* src, size_t size, float* dest); + +// Copy audio from |src| channels to |dest| channels unless |src| and |dest| +// point to the same address. |src| and |dest| must have the same number of +// channels, and there must be sufficient space allocated in |dest|. +template +void CopyAudioIfNeeded(const T* const* src, + int num_frames, + int num_channels, + T* const* dest) { + for (int i = 0; i < num_channels; ++i) { + if (src[i] != dest[i]) { + std::copy(src[i], src[i] + num_frames, dest[i]); + } + } +} + +// Deinterleave audio from |interleaved| to the channel buffers pointed to +// by |deinterleaved|. There must be sufficient space allocated in the +// |deinterleaved| buffers (|num_channel| buffers with |samples_per_channel| +// per buffer). 
+template +void Deinterleave(const T* interleaved, + size_t samples_per_channel, + size_t num_channels, + T* const* deinterleaved) { + for (size_t i = 0; i < num_channels; ++i) { + T* channel = deinterleaved[i]; + size_t interleaved_idx = i; + for (size_t j = 0; j < samples_per_channel; ++j) { + channel[j] = interleaved[interleaved_idx]; + interleaved_idx += num_channels; + } + } +} + +// Interleave audio from the channel buffers pointed to by |deinterleaved| to +// |interleaved|. There must be sufficient space allocated in |interleaved| +// (|samples_per_channel| * |num_channels|). +template +void Interleave(const T* const* deinterleaved, + size_t samples_per_channel, + size_t num_channels, + T* interleaved) { + for (size_t i = 0; i < num_channels; ++i) { + const T* channel = deinterleaved[i]; + size_t interleaved_idx = i; + for (size_t j = 0; j < samples_per_channel; ++j) { + interleaved[interleaved_idx] = channel[j]; + interleaved_idx += num_channels; + } + } +} + +// Copies audio from a single channel buffer pointed to by |mono| to each +// channel of |interleaved|. There must be sufficient space allocated in +// |interleaved| (|samples_per_channel| * |num_channels|). +template +void UpmixMonoToInterleaved(const T* mono, + int num_frames, + int num_channels, + T* interleaved) { + int interleaved_idx = 0; + for (int i = 0; i < num_frames; ++i) { + for (int j = 0; j < num_channels; ++j) { + interleaved[interleaved_idx++] = mono[i]; + } + } +} + +template +void DownmixToMono(const T* const* input_channels, + size_t num_frames, + int num_channels, + T* out) { + for (size_t i = 0; i < num_frames; ++i) { + Intermediate value = input_channels[0][i]; + for (int j = 1; j < num_channels; ++j) { + value += input_channels[j][i]; + } + out[i] = value / num_channels; + } +} + +// Downmixes an interleaved multichannel signal to a single channel by averaging +// all channels. 
+template +void DownmixInterleavedToMonoImpl(const T* interleaved, + size_t num_frames, + int num_channels, + T* deinterleaved) { + RTC_DCHECK_GT(num_channels, 0); + RTC_DCHECK_GT(num_frames, 0); + + const T* const end = interleaved + num_frames * num_channels; + + while (interleaved < end) { + const T* const frame_end = interleaved + num_channels; + + Intermediate value = *interleaved++; + while (interleaved < frame_end) { + value += *interleaved++; + } + + *deinterleaved++ = value / num_channels; + } +} + +template +void DownmixInterleavedToMono(const T* interleaved, + size_t num_frames, + int num_channels, + T* deinterleaved); + +template <> +void DownmixInterleavedToMono(const int16_t* interleaved, + size_t num_frames, + int num_channels, + int16_t* deinterleaved); + +} // namespace webrtc + +#endif // WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/ring_buffer.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/ring_buffer.c new file mode 100644 index 000000000..5fc653bd5 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/ring_buffer.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// A ring buffer to hold arbitrary data. Provides no thread safety. Unless +// otherwise specified, functions return 0 on success and -1 on error. + +#include "webrtc/common_audio/ring_buffer.h" + +#include // size_t +#include +#include + +// Get address of region(s) from which we can read data. +// If the region is contiguous, |data_ptr_bytes_2| will be zero. 
+// If non-contiguous, |data_ptr_bytes_2| will be the size in bytes of the second +// region. Returns room available to be read or |element_count|, whichever is +// smaller. +static size_t GetBufferReadRegions(RingBuffer* buf, + size_t element_count, + void** data_ptr_1, + size_t* data_ptr_bytes_1, + void** data_ptr_2, + size_t* data_ptr_bytes_2) { + + const size_t readable_elements = WebRtc_available_read(buf); + const size_t read_elements = (readable_elements < element_count ? + readable_elements : element_count); + const size_t margin = buf->element_count - buf->read_pos; + + // Check to see if read is not contiguous. + if (read_elements > margin) { + // Write data in two blocks that wrap the buffer. + *data_ptr_1 = buf->data + buf->read_pos * buf->element_size; + *data_ptr_bytes_1 = margin * buf->element_size; + *data_ptr_2 = buf->data; + *data_ptr_bytes_2 = (read_elements - margin) * buf->element_size; + } else { + *data_ptr_1 = buf->data + buf->read_pos * buf->element_size; + *data_ptr_bytes_1 = read_elements * buf->element_size; + *data_ptr_2 = NULL; + *data_ptr_bytes_2 = 0; + } + + return read_elements; +} + +RingBuffer* WebRtc_CreateBuffer(size_t element_count, size_t element_size) { + RingBuffer* self = NULL; + if (element_count == 0 || element_size == 0) { + return NULL; + } + + self = malloc(sizeof(RingBuffer)); + if (!self) { + return NULL; + } + + self->data = malloc(element_count * element_size); + if (!self->data) { + free(self); + self = NULL; + return NULL; + } + + self->element_count = element_count; + self->element_size = element_size; + WebRtc_InitBuffer(self); + + return self; +} + +void WebRtc_InitBuffer(RingBuffer* self) { + self->read_pos = 0; + self->write_pos = 0; + self->rw_wrap = SAME_WRAP; + + // Initialize buffer to zeros + memset(self->data, 0, self->element_count * self->element_size); +} + +void WebRtc_FreeBuffer(void* handle) { + RingBuffer* self = (RingBuffer*)handle; + if (!self) { + return; + } + + free(self->data); + 
free(self); +} + +size_t WebRtc_ReadBuffer(RingBuffer* self, + void** data_ptr, + void* data, + size_t element_count) { + + if (self == NULL) { + return 0; + } + if (data == NULL) { + return 0; + } + + { + void* buf_ptr_1 = NULL; + void* buf_ptr_2 = NULL; + size_t buf_ptr_bytes_1 = 0; + size_t buf_ptr_bytes_2 = 0; + const size_t read_count = GetBufferReadRegions(self, + element_count, + &buf_ptr_1, + &buf_ptr_bytes_1, + &buf_ptr_2, + &buf_ptr_bytes_2); + + if (buf_ptr_bytes_2 > 0) { + // We have a wrap around when reading the buffer. Copy the buffer data to + // |data| and point to it. + memcpy(data, buf_ptr_1, buf_ptr_bytes_1); + memcpy(((char*) data) + buf_ptr_bytes_1, buf_ptr_2, buf_ptr_bytes_2); + buf_ptr_1 = data; + } else if (!data_ptr) { + // No wrap, but a memcpy was requested. + memcpy(data, buf_ptr_1, buf_ptr_bytes_1); + } + if (data_ptr) { + // |buf_ptr_1| == |data| in the case of a wrap. + *data_ptr = buf_ptr_1; + } + + // Update read position + WebRtc_MoveReadPtr(self, (int) read_count); + + return read_count; + } +} + +size_t WebRtc_WriteBuffer(RingBuffer* self, + const void* data, + size_t element_count) { + if (!self) { + return 0; + } + if (!data) { + return 0; + } + + { + const size_t free_elements = WebRtc_available_write(self); + const size_t write_elements = (free_elements < element_count ? free_elements + : element_count); + size_t n = write_elements; + const size_t margin = self->element_count - self->write_pos; + + if (write_elements > margin) { + // Buffer wrap around when writing. 
+ memcpy(self->data + self->write_pos * self->element_size, + data, margin * self->element_size); + self->write_pos = 0; + n -= margin; + self->rw_wrap = DIFF_WRAP; + } + memcpy(self->data + self->write_pos * self->element_size, + ((const char*) data) + ((write_elements - n) * self->element_size), + n * self->element_size); + self->write_pos += n; + + return write_elements; + } +} + +int WebRtc_MoveReadPtr(RingBuffer* self, int element_count) { + if (!self) { + return 0; + } + + { + // We need to be able to take care of negative changes, hence use "int" + // instead of "size_t". + const int free_elements = (int) WebRtc_available_write(self); + const int readable_elements = (int) WebRtc_available_read(self); + int read_pos = (int) self->read_pos; + + if (element_count > readable_elements) { + element_count = readable_elements; + } + if (element_count < -free_elements) { + element_count = -free_elements; + } + + read_pos += element_count; + if (read_pos > (int) self->element_count) { + // Buffer wrap around. Restart read position and wrap indicator. + read_pos -= (int) self->element_count; + self->rw_wrap = SAME_WRAP; + } + if (read_pos < 0) { + // Buffer wrap around. Restart read position and wrap indicator. 
+ read_pos += (int) self->element_count; + self->rw_wrap = DIFF_WRAP; + } + + self->read_pos = (size_t) read_pos; + + return element_count; + } +} + +size_t WebRtc_available_read(const RingBuffer* self) { + if (!self) { + return 0; + } + + if (self->rw_wrap == SAME_WRAP) { + return self->write_pos - self->read_pos; + } else { + return self->element_count - self->read_pos + self->write_pos; + } +} + +size_t WebRtc_available_write(const RingBuffer* self) { + if (!self) { + return 0; + } + + return self->element_count - WebRtc_available_read(self); +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/ring_buffer.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/ring_buffer.h new file mode 100644 index 000000000..74951a8b2 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/ring_buffer.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// A ring buffer to hold arbitrary data. Provides no thread safety. Unless +// otherwise specified, functions return 0 on success and -1 on error. + +#ifndef WEBRTC_COMMON_AUDIO_RING_BUFFER_H_ +#define WEBRTC_COMMON_AUDIO_RING_BUFFER_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include // size_t + +enum Wrap { SAME_WRAP, DIFF_WRAP }; + +typedef struct RingBuffer { + size_t read_pos; + size_t write_pos; + size_t element_count; + size_t element_size; + enum Wrap rw_wrap; + char* data; +} RingBuffer; + +// Creates and initializes the buffer. Returns NULL on failure. 
+RingBuffer* WebRtc_CreateBuffer(size_t element_count, size_t element_size); +void WebRtc_InitBuffer(RingBuffer* handle); +void WebRtc_FreeBuffer(void* handle); + +// Reads data from the buffer. The |data_ptr| will point to the address where +// it is located. If all |element_count| data are feasible to read without +// buffer wrap around |data_ptr| will point to the location in the buffer. +// Otherwise, the data will be copied to |data| (memory allocation done by the +// user) and |data_ptr| points to the address of |data|. |data_ptr| is only +// guaranteed to be valid until the next call to WebRtc_WriteBuffer(). +// +// To force a copying to |data|, pass a NULL |data_ptr|. +// +// Returns number of elements read. +size_t WebRtc_ReadBuffer(RingBuffer* handle, + void** data_ptr, + void* data, + size_t element_count); + +// Writes |data| to buffer and returns the number of elements written. +size_t WebRtc_WriteBuffer(RingBuffer* handle, const void* data, + size_t element_count); + +// Moves the buffer read position and returns the number of elements moved. +// Positive |element_count| moves the read position towards the write position, +// that is, flushing the buffer. Negative |element_count| moves the read +// position away from the the write position, that is, stuffing the buffer. +// Returns number of elements moved. +int WebRtc_MoveReadPtr(RingBuffer* handle, int element_count); + +// Returns number of available elements to read. +size_t WebRtc_available_read(const RingBuffer* handle); + +// Returns number of available elements for write. 
+size_t WebRtc_available_write(const RingBuffer* handle); + +#ifdef __cplusplus +} +#endif + +#endif // WEBRTC_COMMON_AUDIO_RING_BUFFER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/auto_corr_to_refl_coef.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/auto_corr_to_refl_coef.c new file mode 100644 index 000000000..f99dd62b8 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/auto_corr_to_refl_coef.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_AutoCorrToReflCoef(). + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +void WebRtcSpl_AutoCorrToReflCoef(const int32_t *R, int use_order, int16_t *K) +{ + int i, n; + int16_t tmp; + const int32_t *rptr; + int32_t L_num, L_den; + int16_t *acfptr, *pptr, *wptr, *p1ptr, *w1ptr, ACF[WEBRTC_SPL_MAX_LPC_ORDER], + P[WEBRTC_SPL_MAX_LPC_ORDER], W[WEBRTC_SPL_MAX_LPC_ORDER]; + + // Initialize loop and pointers. + acfptr = ACF; + rptr = R; + pptr = P; + p1ptr = &P[1]; + w1ptr = &W[1]; + wptr = w1ptr; + + // First loop; n=0. Determine shifting. + tmp = WebRtcSpl_NormW32(*R); + *acfptr = (int16_t)((*rptr++ << tmp) >> 16); + *pptr++ = *acfptr++; + + // Initialize ACF, P and W. + for (i = 1; i <= use_order; i++) + { + *acfptr = (int16_t)((*rptr++ << tmp) >> 16); + *wptr++ = *acfptr; + *pptr++ = *acfptr++; + } + + // Compute reflection coefficients. 
+ for (n = 1; n <= use_order; n++, K++) + { + tmp = WEBRTC_SPL_ABS_W16(*p1ptr); + if (*P < tmp) + { + for (i = n; i <= use_order; i++) + *K++ = 0; + + return; + } + + // Division: WebRtcSpl_div(tmp, *P) + *K = 0; + if (tmp != 0) + { + L_num = tmp; + L_den = *P; + i = 15; + while (i--) + { + (*K) <<= 1; + L_num <<= 1; + if (L_num >= L_den) + { + L_num -= L_den; + (*K)++; + } + } + if (*p1ptr > 0) + *K = -*K; + } + + // Last iteration; don't do Schur recursion. + if (n == use_order) + return; + + // Schur recursion. + pptr = P; + wptr = w1ptr; + tmp = (int16_t)(((int32_t)*p1ptr * (int32_t)*K + 16384) >> 15); + *pptr = WebRtcSpl_AddSatW16(*pptr, tmp); + pptr++; + for (i = 1; i <= use_order - n; i++) + { + tmp = (int16_t)(((int32_t)*wptr * (int32_t)*K + 16384) >> 15); + *pptr = WebRtcSpl_AddSatW16(*(pptr + 1), tmp); + pptr++; + tmp = (int16_t)(((int32_t)*pptr * (int32_t)*K + 16384) >> 15); + *wptr = WebRtcSpl_AddSatW16(*wptr, tmp); + wptr++; + } + } +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/auto_correlation.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/auto_correlation.c new file mode 100644 index 000000000..58e6d6e0a --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/auto_correlation.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +#include "webrtc/base/checks.h" + +size_t WebRtcSpl_AutoCorrelation(const int16_t* in_vector, + size_t in_vector_length, + size_t order, + int32_t* result, + int* scale) { + int32_t sum = 0; + size_t i = 0, j = 0; + int16_t smax = 0; + int scaling = 0; + + RTC_DCHECK_LE(order, in_vector_length); + + // Find the maximum absolute value of the samples. + smax = WebRtcSpl_MaxAbsValueW16(in_vector, in_vector_length); + + // In order to avoid overflow when computing the sum we should scale the + // samples so that (in_vector_length * smax * smax) will not overflow. + if (smax == 0) { + scaling = 0; + } else { + // Number of bits in the sum loop. + int nbits = WebRtcSpl_GetSizeInBits((uint32_t)in_vector_length); + // Number of bits to normalize smax. + int t = WebRtcSpl_NormW32(WEBRTC_SPL_MUL(smax, smax)); + + if (t > nbits) { + scaling = 0; + } else { + scaling = nbits - t; + } + } + + // Perform the actual correlation calculation. + for (i = 0; i < order + 1; i++) { + sum = 0; + /* Unroll the loop to improve performance. 
*/ + for (j = 0; i + j + 3 < in_vector_length; j += 4) { + sum += (in_vector[j + 0] * in_vector[i + j + 0]) >> scaling; + sum += (in_vector[j + 1] * in_vector[i + j + 1]) >> scaling; + sum += (in_vector[j + 2] * in_vector[i + j + 2]) >> scaling; + sum += (in_vector[j + 3] * in_vector[i + j + 3]) >> scaling; + } + for (; j < in_vector_length - i; j++) { + sum += (in_vector[j] * in_vector[i + j]) >> scaling; + } + *result++ = sum; + } + + *scale = scaling; + return order + 1; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse.c new file mode 100644 index 000000000..c8bd2dc45 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse.c @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +/* Tables for data buffer indexes that are bit reversed and thus need to be + * swapped. Note that, index_7[{0, 2, 4, ...}] are for the left side of the swap + * operations, while index_7[{1, 3, 5, ...}] are for the right side of the + * operation. Same for index_8. + */ + +/* Indexes for the case of stages == 7. 
*/ +static const int16_t index_7[112] = { + 1, 64, 2, 32, 3, 96, 4, 16, 5, 80, 6, 48, 7, 112, 9, 72, 10, 40, 11, 104, + 12, 24, 13, 88, 14, 56, 15, 120, 17, 68, 18, 36, 19, 100, 21, 84, 22, 52, + 23, 116, 25, 76, 26, 44, 27, 108, 29, 92, 30, 60, 31, 124, 33, 66, 35, 98, + 37, 82, 38, 50, 39, 114, 41, 74, 43, 106, 45, 90, 46, 58, 47, 122, 49, 70, + 51, 102, 53, 86, 55, 118, 57, 78, 59, 110, 61, 94, 63, 126, 67, 97, 69, + 81, 71, 113, 75, 105, 77, 89, 79, 121, 83, 101, 87, 117, 91, 109, 95, 125, + 103, 115, 111, 123 +}; + +/* Indexes for the case of stages == 8. */ +static const int16_t index_8[240] = { + 1, 128, 2, 64, 3, 192, 4, 32, 5, 160, 6, 96, 7, 224, 8, 16, 9, 144, 10, 80, + 11, 208, 12, 48, 13, 176, 14, 112, 15, 240, 17, 136, 18, 72, 19, 200, 20, + 40, 21, 168, 22, 104, 23, 232, 25, 152, 26, 88, 27, 216, 28, 56, 29, 184, + 30, 120, 31, 248, 33, 132, 34, 68, 35, 196, 37, 164, 38, 100, 39, 228, 41, + 148, 42, 84, 43, 212, 44, 52, 45, 180, 46, 116, 47, 244, 49, 140, 50, 76, + 51, 204, 53, 172, 54, 108, 55, 236, 57, 156, 58, 92, 59, 220, 61, 188, 62, + 124, 63, 252, 65, 130, 67, 194, 69, 162, 70, 98, 71, 226, 73, 146, 74, 82, + 75, 210, 77, 178, 78, 114, 79, 242, 81, 138, 83, 202, 85, 170, 86, 106, 87, + 234, 89, 154, 91, 218, 93, 186, 94, 122, 95, 250, 97, 134, 99, 198, 101, + 166, 103, 230, 105, 150, 107, 214, 109, 182, 110, 118, 111, 246, 113, 142, + 115, 206, 117, 174, 119, 238, 121, 158, 123, 222, 125, 190, 127, 254, 131, + 193, 133, 161, 135, 225, 137, 145, 139, 209, 141, 177, 143, 241, 147, 201, + 149, 169, 151, 233, 155, 217, 157, 185, 159, 249, 163, 197, 167, 229, 171, + 213, 173, 181, 175, 245, 179, 205, 183, 237, 187, 221, 191, 253, 199, 227, + 203, 211, 207, 243, 215, 235, 223, 251, 239, 247 +}; + +void WebRtcSpl_ComplexBitReverse(int16_t* __restrict complex_data, int stages) { + /* For any specific value of stages, we know exactly the indexes that are + * bit reversed. Currently (Feb. 
2012) in WebRTC the only possible values of + * stages are 7 and 8, so we use tables to save unnecessary iterations and + * calculations for these two cases. + */ + if (stages == 7 || stages == 8) { + int m = 0; + int length = 112; + const int16_t* index = index_7; + + if (stages == 8) { + length = 240; + index = index_8; + } + + /* Decimation in time. Swap the elements with bit-reversed indexes. */ + for (m = 0; m < length; m += 2) { + /* We declare a int32_t* type pointer, to load both the 16-bit real + * and imaginary elements from complex_data in one instruction, reducing + * complexity. + */ + int32_t* complex_data_ptr = (int32_t*)complex_data; + int32_t temp = 0; + + temp = complex_data_ptr[index[m]]; /* Real and imaginary */ + complex_data_ptr[index[m]] = complex_data_ptr[index[m + 1]]; + complex_data_ptr[index[m + 1]] = temp; + } + } + else { + int m = 0, mr = 0, l = 0; + int n = 1 << stages; + int nn = n - 1; + + /* Decimation in time - re-order data */ + for (m = 1; m <= nn; ++m) { + int32_t* complex_data_ptr = (int32_t*)complex_data; + int32_t temp = 0; + + /* Find out indexes that are bit-reversed. */ + l = n; + do { + l >>= 1; + } while (l > nn - mr); + mr = (mr & (l - 1)) + l; + + if (mr <= m) { + continue; + } + + /* Swap the elements with bit-reversed indexes. + * This is similar to the loop in the stages == 7 or 8 cases. + */ + temp = complex_data_ptr[m]; /* Real and imaginary */ + complex_data_ptr[m] = complex_data_ptr[mr]; + complex_data_ptr[mr] = temp; + } + } +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse_arm.S b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse_arm.S new file mode 100644 index 000000000..93de99f51 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_bit_reverse_arm.S @@ -0,0 +1,119 @@ +@ +@ Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
+@ +@ Use of this source code is governed by a BSD-style license +@ that can be found in the LICENSE file in the root of the source +@ tree. An additional intellectual property rights grant can be found +@ in the file PATENTS. All contributing project authors may +@ be found in the AUTHORS file in the root of the source tree. +@ + +@ This file contains the function WebRtcSpl_ComplexBitReverse(), optimized +@ for ARMv5 platforms. +@ Reference C code is in file complex_bit_reverse.c. Bit-exact. + +#include "webrtc/system_wrappers/include/asm_defines.h" + +GLOBAL_FUNCTION WebRtcSpl_ComplexBitReverse +.align 2 +DEFINE_FUNCTION WebRtcSpl_ComplexBitReverse + push {r4-r7} + + cmp r1, #7 + adr r3, index_7 @ Table pointer. + mov r4, #112 @ Number of interations. + beq PRE_LOOP_STAGES_7_OR_8 + + cmp r1, #8 + adr r3, index_8 @ Table pointer. + mov r4, #240 @ Number of interations. + beq PRE_LOOP_STAGES_7_OR_8 + + mov r3, #1 @ Initialize m. + mov r1, r3, asl r1 @ n = 1 << stages; + subs r6, r1, #1 @ nn = n - 1; + ble END + + mov r5, r0 @ &complex_data + mov r4, #0 @ ml + +LOOP_GENERIC: + rsb r12, r4, r6 @ l > nn - mr + mov r2, r1 @ n + +LOOP_SHIFT: + asr r2, #1 @ l >>= 1; + cmp r2, r12 + bgt LOOP_SHIFT + + sub r12, r2, #1 + and r4, r12, r4 + add r4, r2 @ mr = (mr & (l - 1)) + l; + cmp r4, r3 @ mr <= m ? + ble UPDATE_REGISTERS + + mov r12, r4, asl #2 + ldr r7, [r5, #4] @ complex_data[2 * m, 2 * m + 1]. + @ Offset 4 due to m incrementing from 1. + ldr r2, [r0, r12] @ complex_data[2 * mr, 2 * mr + 1]. 
+ str r7, [r0, r12] + str r2, [r5, #4] + +UPDATE_REGISTERS: + add r3, r3, #1 + add r5, #4 + cmp r3, r1 + bne LOOP_GENERIC + + b END + +PRE_LOOP_STAGES_7_OR_8: + add r4, r3, r4, asl #1 + +LOOP_STAGES_7_OR_8: + ldrsh r2, [r3], #2 @ index[m] + ldrsh r5, [r3], #2 @ index[m + 1] + ldr r1, [r0, r2] @ complex_data[index[m], index[m] + 1] + ldr r12, [r0, r5] @ complex_data[index[m + 1], index[m + 1] + 1] + cmp r3, r4 + str r1, [r0, r5] + str r12, [r0, r2] + bne LOOP_STAGES_7_OR_8 + +END: + pop {r4-r7} + bx lr + +@ The index tables. Note the values are doubles of the actual indexes for 16-bit +@ elements, different from the generic C code. It actually provides byte offsets +@ for the indexes. + +.align 2 +index_7: @ Indexes for stages == 7. + .short 4, 256, 8, 128, 12, 384, 16, 64, 20, 320, 24, 192, 28, 448, 36, 288 + .short 40, 160, 44, 416, 48, 96, 52, 352, 56, 224, 60, 480, 68, 272, 72, 144 + .short 76, 400, 84, 336, 88, 208, 92, 464, 100, 304, 104, 176, 108, 432, 116 + .short 368, 120, 240, 124, 496, 132, 264, 140, 392, 148, 328, 152, 200, 156 + .short 456, 164, 296, 172, 424, 180, 360, 184, 232, 188, 488, 196, 280, 204 + .short 408, 212, 344, 220, 472, 228, 312, 236, 440, 244, 376, 252, 504, 268 + .short 388, 276, 324, 284, 452, 300, 420, 308, 356, 316, 484, 332, 404, 348 + .short 468, 364, 436, 380, 500, 412, 460, 444, 492 + +index_8: @ Indexes for stages == 8. 
+ .short 4, 512, 8, 256, 12, 768, 16, 128, 20, 640, 24, 384, 28, 896, 32, 64 + .short 36, 576, 40, 320, 44, 832, 48, 192, 52, 704, 56, 448, 60, 960, 68, 544 + .short 72, 288, 76, 800, 80, 160, 84, 672, 88, 416, 92, 928, 100, 608, 104 + .short 352, 108, 864, 112, 224, 116, 736, 120, 480, 124, 992, 132, 528, 136 + .short 272, 140, 784, 148, 656, 152, 400, 156, 912, 164, 592, 168, 336, 172 + .short 848, 176, 208, 180, 720, 184, 464, 188, 976, 196, 560, 200, 304, 204 + .short 816, 212, 688, 216, 432, 220, 944, 228, 624, 232, 368, 236, 880, 244 + .short 752, 248, 496, 252, 1008, 260, 520, 268, 776, 276, 648, 280, 392, 284 + .short 904, 292, 584, 296, 328, 300, 840, 308, 712, 312, 456, 316, 968, 324 + .short 552, 332, 808, 340, 680, 344, 424, 348, 936, 356, 616, 364, 872, 372 + .short 744, 376, 488, 380, 1000, 388, 536, 396, 792, 404, 664, 412, 920, 420 + .short 600, 428, 856, 436, 728, 440, 472, 444, 984, 452, 568, 460, 824, 468 + .short 696, 476, 952, 484, 632, 492, 888, 500, 760, 508, 1016, 524, 772, 532 + .short 644, 540, 900, 548, 580, 556, 836, 564, 708, 572, 964, 588, 804, 596 + .short 676, 604, 932, 620, 868, 628, 740, 636, 996, 652, 788, 668, 916, 684 + .short 852, 692, 724, 700, 980, 716, 820, 732, 948, 748, 884, 764, 1012, 796 + .short 908, 812, 844, 828, 972, 860, 940, 892, 1004, 956, 988 diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft.c new file mode 100644 index 000000000..97ebacc49 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft.c @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_ComplexFFT(). + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/complex_fft_tables.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +#define CFFTSFT 14 +#define CFFTRND 1 +#define CFFTRND2 16384 + +#define CIFFTSFT 14 +#define CIFFTRND 1 + + +int WebRtcSpl_ComplexFFT(int16_t frfi[], int stages, int mode) +{ + int i, j, l, k, istep, n, m; + int16_t wr, wi; + int32_t tr32, ti32, qr32, qi32; + + /* The 1024-value is a constant given from the size of kSinTable1024[], + * and should not be changed depending on the input parameter 'stages' + */ + n = 1 << stages; + if (n > 1024) + return -1; + + l = 1; + k = 10 - 1; /* Constant for given kSinTable1024[]. Do not change + depending on the input parameter 'stages' */ + + if (mode == 0) + { + // mode==0: Low-complexity and Low-accuracy mode + while (l < n) + { + istep = l << 1; + + for (m = 0; m < l; ++m) + { + j = m << k; + + /* The 256-value is a constant given as 1/4 of the size of + * kSinTable1024[], and should not be changed depending on the input + * parameter 'stages'. 
It will result in 0 <= j < N_SINE_WAVE/2 + */ + wr = kSinTable1024[j + 256]; + wi = -kSinTable1024[j]; + + for (i = m; i < n; i += istep) + { + j = i + l; + + tr32 = (wr * frfi[2 * j] - wi * frfi[2 * j + 1]) >> 15; + + ti32 = (wr * frfi[2 * j + 1] + wi * frfi[2 * j]) >> 15; + + qr32 = (int32_t)frfi[2 * i]; + qi32 = (int32_t)frfi[2 * i + 1]; + frfi[2 * j] = (int16_t)((qr32 - tr32) >> 1); + frfi[2 * j + 1] = (int16_t)((qi32 - ti32) >> 1); + frfi[2 * i] = (int16_t)((qr32 + tr32) >> 1); + frfi[2 * i + 1] = (int16_t)((qi32 + ti32) >> 1); + } + } + + --k; + l = istep; + + } + + } else + { + // mode==1: High-complexity and High-accuracy mode + while (l < n) + { + istep = l << 1; + + for (m = 0; m < l; ++m) + { + j = m << k; + + /* The 256-value is a constant given as 1/4 of the size of + * kSinTable1024[], and should not be changed depending on the input + * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2 + */ + wr = kSinTable1024[j + 256]; + wi = -kSinTable1024[j]; + +#ifdef WEBRTC_ARCH_ARM_V7 + int32_t wri = 0; + __asm __volatile("pkhbt %0, %1, %2, lsl #16" : "=r"(wri) : + "r"((int32_t)wr), "r"((int32_t)wi)); +#endif + + for (i = m; i < n; i += istep) + { + j = i + l; + +#ifdef WEBRTC_ARCH_ARM_V7 + register int32_t frfi_r; + __asm __volatile( + "pkhbt %[frfi_r], %[frfi_even], %[frfi_odd]," + " lsl #16\n\t" + "smlsd %[tr32], %[wri], %[frfi_r], %[cfftrnd]\n\t" + "smladx %[ti32], %[wri], %[frfi_r], %[cfftrnd]\n\t" + :[frfi_r]"=&r"(frfi_r), + [tr32]"=&r"(tr32), + [ti32]"=r"(ti32) + :[frfi_even]"r"((int32_t)frfi[2*j]), + [frfi_odd]"r"((int32_t)frfi[2*j +1]), + [wri]"r"(wri), + [cfftrnd]"r"(CFFTRND)); +#else + tr32 = wr * frfi[2 * j] - wi * frfi[2 * j + 1] + CFFTRND; + + ti32 = wr * frfi[2 * j + 1] + wi * frfi[2 * j] + CFFTRND; +#endif + + tr32 >>= 15 - CFFTSFT; + ti32 >>= 15 - CFFTSFT; + + qr32 = ((int32_t)frfi[2 * i]) << CFFTSFT; + qi32 = ((int32_t)frfi[2 * i + 1]) << CFFTSFT; + + frfi[2 * j] = (int16_t)( + (qr32 - tr32 + CFFTRND2) >> (1 + CFFTSFT)); + frfi[2 * 
j + 1] = (int16_t)( + (qi32 - ti32 + CFFTRND2) >> (1 + CFFTSFT)); + frfi[2 * i] = (int16_t)( + (qr32 + tr32 + CFFTRND2) >> (1 + CFFTSFT)); + frfi[2 * i + 1] = (int16_t)( + (qi32 + ti32 + CFFTRND2) >> (1 + CFFTSFT)); + } + } + + --k; + l = istep; + } + } + return 0; +} + +int WebRtcSpl_ComplexIFFT(int16_t frfi[], int stages, int mode) +{ + size_t i, j, l, istep, n, m; + int k, scale, shift; + int16_t wr, wi; + int32_t tr32, ti32, qr32, qi32; + int32_t tmp32, round2; + + /* The 1024-value is a constant given from the size of kSinTable1024[], + * and should not be changed depending on the input parameter 'stages' + */ + n = 1 << stages; + if (n > 1024) + return -1; + + scale = 0; + + l = 1; + k = 10 - 1; /* Constant for given kSinTable1024[]. Do not change + depending on the input parameter 'stages' */ + + while (l < n) + { + // variable scaling, depending upon data + shift = 0; + round2 = 8192; + + tmp32 = WebRtcSpl_MaxAbsValueW16(frfi, 2 * n); + if (tmp32 > 13573) + { + shift++; + scale++; + round2 <<= 1; + } + if (tmp32 > 27146) + { + shift++; + scale++; + round2 <<= 1; + } + + istep = l << 1; + + if (mode == 0) + { + // mode==0: Low-complexity and Low-accuracy mode + for (m = 0; m < l; ++m) + { + j = m << k; + + /* The 256-value is a constant given as 1/4 of the size of + * kSinTable1024[], and should not be changed depending on the input + * parameter 'stages'. 
It will result in 0 <= j < N_SINE_WAVE/2 + */ + wr = kSinTable1024[j + 256]; + wi = kSinTable1024[j]; + + for (i = m; i < n; i += istep) + { + j = i + l; + + tr32 = (wr * frfi[2 * j] - wi * frfi[2 * j + 1]) >> 15; + + ti32 = (wr * frfi[2 * j + 1] + wi * frfi[2 * j]) >> 15; + + qr32 = (int32_t)frfi[2 * i]; + qi32 = (int32_t)frfi[2 * i + 1]; + frfi[2 * j] = (int16_t)((qr32 - tr32) >> shift); + frfi[2 * j + 1] = (int16_t)((qi32 - ti32) >> shift); + frfi[2 * i] = (int16_t)((qr32 + tr32) >> shift); + frfi[2 * i + 1] = (int16_t)((qi32 + ti32) >> shift); + } + } + } else + { + // mode==1: High-complexity and High-accuracy mode + + for (m = 0; m < l; ++m) + { + j = m << k; + + /* The 256-value is a constant given as 1/4 of the size of + * kSinTable1024[], and should not be changed depending on the input + * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2 + */ + wr = kSinTable1024[j + 256]; + wi = kSinTable1024[j]; + +#ifdef WEBRTC_ARCH_ARM_V7 + int32_t wri = 0; + __asm __volatile("pkhbt %0, %1, %2, lsl #16" : "=r"(wri) : + "r"((int32_t)wr), "r"((int32_t)wi)); +#endif + + for (i = m; i < n; i += istep) + { + j = i + l; + +#ifdef WEBRTC_ARCH_ARM_V7 + register int32_t frfi_r; + __asm __volatile( + "pkhbt %[frfi_r], %[frfi_even], %[frfi_odd], lsl #16\n\t" + "smlsd %[tr32], %[wri], %[frfi_r], %[cifftrnd]\n\t" + "smladx %[ti32], %[wri], %[frfi_r], %[cifftrnd]\n\t" + :[frfi_r]"=&r"(frfi_r), + [tr32]"=&r"(tr32), + [ti32]"=r"(ti32) + :[frfi_even]"r"((int32_t)frfi[2*j]), + [frfi_odd]"r"((int32_t)frfi[2*j +1]), + [wri]"r"(wri), + [cifftrnd]"r"(CIFFTRND) + ); +#else + + tr32 = wr * frfi[2 * j] - wi * frfi[2 * j + 1] + CIFFTRND; + + ti32 = wr * frfi[2 * j + 1] + wi * frfi[2 * j] + CIFFTRND; +#endif + tr32 >>= 15 - CIFFTSFT; + ti32 >>= 15 - CIFFTSFT; + + qr32 = ((int32_t)frfi[2 * i]) << CIFFTSFT; + qi32 = ((int32_t)frfi[2 * i + 1]) << CIFFTSFT; + + frfi[2 * j] = (int16_t)( + (qr32 - tr32 + round2) >> (shift + CIFFTSFT)); + frfi[2 * j + 1] = (int16_t)( + (qi32 - ti32 + 
round2) >> (shift + CIFFTSFT)); + frfi[2 * i] = (int16_t)( + (qr32 + tr32 + round2) >> (shift + CIFFTSFT)); + frfi[2 * i + 1] = (int16_t)( + (qi32 + ti32 + round2) >> (shift + CIFFTSFT)); + } + } + + } + --k; + l = istep; + } + return scale; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft_tables.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft_tables.h new file mode 100644 index 000000000..ca7b7fe39 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/complex_fft_tables.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + + +#ifndef WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_COMPLEX_FFT_TABLES_H_ +#define WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_COMPLEX_FFT_TABLES_H_ + +#include "webrtc/typedefs.h" + +static const int16_t kSinTable1024[] = { + 0, 201, 402, 603, 804, 1005, 1206, 1406, + 1607, 1808, 2009, 2209, 2410, 2610, 2811, 3011, + 3211, 3411, 3611, 3811, 4011, 4210, 4409, 4608, + 4807, 5006, 5205, 5403, 5601, 5799, 5997, 6195, + 6392, 6589, 6786, 6982, 7179, 7375, 7571, 7766, + 7961, 8156, 8351, 8545, 8739, 8932, 9126, 9319, + 9511, 9703, 9895, 10087, 10278, 10469, 10659, 10849, + 11038, 11227, 11416, 11604, 11792, 11980, 12166, 12353, + 12539, 12724, 12909, 13094, 13278, 13462, 13645, 13827, + 14009, 14191, 14372, 14552, 14732, 14911, 15090, 15268, + 15446, 15623, 15799, 15975, 16150, 16325, 16499, 16672, + 16845, 17017, 17189, 17360, 17530, 17699, 17868, 18036, + 18204, 18371, 18537, 18702, 18867, 19031, 19194, 19357, + 19519, 19680, 19840, 20000, 20159, 20317, 20474, 20631, + 20787, 20942, 21096, 21249, 21402, 21554, 21705, 21855, + 22004, 22153, 22301, 22448, 22594, 22739, 22883, 23027, + 23169, 23311, 23452, 23592, 23731, 23869, 24006, 24143, + 24278, 24413, 24546, 24679, 24811, 24942, 25072, 25201, + 25329, 25456, 25582, 25707, 25831, 25954, 26077, 26198, + 26318, 26437, 26556, 26673, 26789, 26905, 27019, 27132, + 27244, 27355, 27466, 27575, 27683, 27790, 27896, 28001, + 28105, 28208, 28309, 28410, 28510, 28608, 28706, 28802, + 28897, 28992, 29085, 29177, 29268, 29358, 29446, 29534, + 29621, 29706, 29790, 29873, 29955, 30036, 30116, 30195, + 30272, 30349, 30424, 30498, 30571, 30643, 30713, 30783, + 30851, 30918, 30984, 31049, 31113, 31175, 31236, 31297, + 31356, 31413, 31470, 31525, 31580, 31633, 31684, 31735, + 31785, 31833, 31880, 31926, 31970, 32014, 32056, 32097, + 32137, 32176, 32213, 32249, 32284, 32318, 32350, 32382, + 32412, 32441, 32468, 32495, 32520, 32544, 32567, 32588, + 32609, 32628, 32646, 32662, 32678, 32692, 32705, 32717, + 32727, 32736, 32744, 32751, 32757, 
32761, 32764, 32766, + 32767, 32766, 32764, 32761, 32757, 32751, 32744, 32736, + 32727, 32717, 32705, 32692, 32678, 32662, 32646, 32628, + 32609, 32588, 32567, 32544, 32520, 32495, 32468, 32441, + 32412, 32382, 32350, 32318, 32284, 32249, 32213, 32176, + 32137, 32097, 32056, 32014, 31970, 31926, 31880, 31833, + 31785, 31735, 31684, 31633, 31580, 31525, 31470, 31413, + 31356, 31297, 31236, 31175, 31113, 31049, 30984, 30918, + 30851, 30783, 30713, 30643, 30571, 30498, 30424, 30349, + 30272, 30195, 30116, 30036, 29955, 29873, 29790, 29706, + 29621, 29534, 29446, 29358, 29268, 29177, 29085, 28992, + 28897, 28802, 28706, 28608, 28510, 28410, 28309, 28208, + 28105, 28001, 27896, 27790, 27683, 27575, 27466, 27355, + 27244, 27132, 27019, 26905, 26789, 26673, 26556, 26437, + 26318, 26198, 26077, 25954, 25831, 25707, 25582, 25456, + 25329, 25201, 25072, 24942, 24811, 24679, 24546, 24413, + 24278, 24143, 24006, 23869, 23731, 23592, 23452, 23311, + 23169, 23027, 22883, 22739, 22594, 22448, 22301, 22153, + 22004, 21855, 21705, 21554, 21402, 21249, 21096, 20942, + 20787, 20631, 20474, 20317, 20159, 20000, 19840, 19680, + 19519, 19357, 19194, 19031, 18867, 18702, 18537, 18371, + 18204, 18036, 17868, 17699, 17530, 17360, 17189, 17017, + 16845, 16672, 16499, 16325, 16150, 15975, 15799, 15623, + 15446, 15268, 15090, 14911, 14732, 14552, 14372, 14191, + 14009, 13827, 13645, 13462, 13278, 13094, 12909, 12724, + 12539, 12353, 12166, 11980, 11792, 11604, 11416, 11227, + 11038, 10849, 10659, 10469, 10278, 10087, 9895, 9703, + 9511, 9319, 9126, 8932, 8739, 8545, 8351, 8156, + 7961, 7766, 7571, 7375, 7179, 6982, 6786, 6589, + 6392, 6195, 5997, 5799, 5601, 5403, 5205, 5006, + 4807, 4608, 4409, 4210, 4011, 3811, 3611, 3411, + 3211, 3011, 2811, 2610, 2410, 2209, 2009, 1808, + 1607, 1406, 1206, 1005, 804, 603, 402, 201, + 0, -201, -402, -603, -804, -1005, -1206, -1406, + -1607, -1808, -2009, -2209, -2410, -2610, -2811, -3011, + -3211, -3411, -3611, -3811, -4011, -4210, -4409, -4608, + -4807, 
-5006, -5205, -5403, -5601, -5799, -5997, -6195, + -6392, -6589, -6786, -6982, -7179, -7375, -7571, -7766, + -7961, -8156, -8351, -8545, -8739, -8932, -9126, -9319, + -9511, -9703, -9895, -10087, -10278, -10469, -10659, -10849, + -11038, -11227, -11416, -11604, -11792, -11980, -12166, -12353, + -12539, -12724, -12909, -13094, -13278, -13462, -13645, -13827, + -14009, -14191, -14372, -14552, -14732, -14911, -15090, -15268, + -15446, -15623, -15799, -15975, -16150, -16325, -16499, -16672, + -16845, -17017, -17189, -17360, -17530, -17699, -17868, -18036, + -18204, -18371, -18537, -18702, -18867, -19031, -19194, -19357, + -19519, -19680, -19840, -20000, -20159, -20317, -20474, -20631, + -20787, -20942, -21096, -21249, -21402, -21554, -21705, -21855, + -22004, -22153, -22301, -22448, -22594, -22739, -22883, -23027, + -23169, -23311, -23452, -23592, -23731, -23869, -24006, -24143, + -24278, -24413, -24546, -24679, -24811, -24942, -25072, -25201, + -25329, -25456, -25582, -25707, -25831, -25954, -26077, -26198, + -26318, -26437, -26556, -26673, -26789, -26905, -27019, -27132, + -27244, -27355, -27466, -27575, -27683, -27790, -27896, -28001, + -28105, -28208, -28309, -28410, -28510, -28608, -28706, -28802, + -28897, -28992, -29085, -29177, -29268, -29358, -29446, -29534, + -29621, -29706, -29790, -29873, -29955, -30036, -30116, -30195, + -30272, -30349, -30424, -30498, -30571, -30643, -30713, -30783, + -30851, -30918, -30984, -31049, -31113, -31175, -31236, -31297, + -31356, -31413, -31470, -31525, -31580, -31633, -31684, -31735, + -31785, -31833, -31880, -31926, -31970, -32014, -32056, -32097, + -32137, -32176, -32213, -32249, -32284, -32318, -32350, -32382, + -32412, -32441, -32468, -32495, -32520, -32544, -32567, -32588, + -32609, -32628, -32646, -32662, -32678, -32692, -32705, -32717, + -32727, -32736, -32744, -32751, -32757, -32761, -32764, -32766, + -32767, -32766, -32764, -32761, -32757, -32751, -32744, -32736, + -32727, -32717, -32705, -32692, -32678, -32662, 
-32646, -32628, + -32609, -32588, -32567, -32544, -32520, -32495, -32468, -32441, + -32412, -32382, -32350, -32318, -32284, -32249, -32213, -32176, + -32137, -32097, -32056, -32014, -31970, -31926, -31880, -31833, + -31785, -31735, -31684, -31633, -31580, -31525, -31470, -31413, + -31356, -31297, -31236, -31175, -31113, -31049, -30984, -30918, + -30851, -30783, -30713, -30643, -30571, -30498, -30424, -30349, + -30272, -30195, -30116, -30036, -29955, -29873, -29790, -29706, + -29621, -29534, -29446, -29358, -29268, -29177, -29085, -28992, + -28897, -28802, -28706, -28608, -28510, -28410, -28309, -28208, + -28105, -28001, -27896, -27790, -27683, -27575, -27466, -27355, + -27244, -27132, -27019, -26905, -26789, -26673, -26556, -26437, + -26318, -26198, -26077, -25954, -25831, -25707, -25582, -25456, + -25329, -25201, -25072, -24942, -24811, -24679, -24546, -24413, + -24278, -24143, -24006, -23869, -23731, -23592, -23452, -23311, + -23169, -23027, -22883, -22739, -22594, -22448, -22301, -22153, + -22004, -21855, -21705, -21554, -21402, -21249, -21096, -20942, + -20787, -20631, -20474, -20317, -20159, -20000, -19840, -19680, + -19519, -19357, -19194, -19031, -18867, -18702, -18537, -18371, + -18204, -18036, -17868, -17699, -17530, -17360, -17189, -17017, + -16845, -16672, -16499, -16325, -16150, -15975, -15799, -15623, + -15446, -15268, -15090, -14911, -14732, -14552, -14372, -14191, + -14009, -13827, -13645, -13462, -13278, -13094, -12909, -12724, + -12539, -12353, -12166, -11980, -11792, -11604, -11416, -11227, + -11038, -10849, -10659, -10469, -10278, -10087, -9895, -9703, + -9511, -9319, -9126, -8932, -8739, -8545, -8351, -8156, + -7961, -7766, -7571, -7375, -7179, -6982, -6786, -6589, + -6392, -6195, -5997, -5799, -5601, -5403, -5205, -5006, + -4807, -4608, -4409, -4210, -4011, -3811, -3611, -3411, + -3211, -3011, -2811, -2610, -2410, -2209, -2009, -1808, + -1607, -1406, -1206, -1005, -804, -603, -402, -201 +}; + +#endif // 
WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_COMPLEX_FFT_TABLES_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/copy_set_operations.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/copy_set_operations.c new file mode 100644 index 000000000..9d7cf47e3 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/copy_set_operations.c @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the implementation of functions + * WebRtcSpl_MemSetW16() + * WebRtcSpl_MemSetW32() + * WebRtcSpl_MemCpyReversedOrder() + * WebRtcSpl_CopyFromEndW16() + * WebRtcSpl_ZerosArrayW16() + * WebRtcSpl_ZerosArrayW32() + * + * The description header can be found in signal_processing_library.h + * + */ + +#include +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + + +void WebRtcSpl_MemSetW16(int16_t *ptr, int16_t set_value, size_t length) +{ + size_t j; + int16_t *arrptr = ptr; + + for (j = length; j > 0; j--) + { + *arrptr++ = set_value; + } +} + +void WebRtcSpl_MemSetW32(int32_t *ptr, int32_t set_value, size_t length) +{ + size_t j; + int32_t *arrptr = ptr; + + for (j = length; j > 0; j--) + { + *arrptr++ = set_value; + } +} + +void WebRtcSpl_MemCpyReversedOrder(int16_t* dest, + int16_t* source, + size_t length) +{ + size_t j; + int16_t* destPtr = dest; + int16_t* sourcePtr = source; + + for (j = 0; j < length; j++) + { + *destPtr-- = *sourcePtr++; + } +} + +void WebRtcSpl_CopyFromEndW16(const int16_t *vector_in, + size_t length, + size_t 
samples, + int16_t *vector_out) +{ + // Copy the last of the input vector to vector_out + WEBRTC_SPL_MEMCPY_W16(vector_out, &vector_in[length - samples], samples); +} + +void WebRtcSpl_ZerosArrayW16(int16_t *vector, size_t length) +{ + WebRtcSpl_MemSetW16(vector, 0, length); +} + +void WebRtcSpl_ZerosArrayW32(int32_t *vector, size_t length) +{ + WebRtcSpl_MemSetW32(vector, 0, length); +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation.c new file mode 100644 index 000000000..d7c9f2b9a --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation.c @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +/* C version of WebRtcSpl_CrossCorrelation() for generic platforms. 
*/ +void WebRtcSpl_CrossCorrelationC(int32_t* cross_correlation, + const int16_t* seq1, + const int16_t* seq2, + size_t dim_seq, + size_t dim_cross_correlation, + int right_shifts, + int step_seq2) { + size_t i = 0, j = 0; + + for (i = 0; i < dim_cross_correlation; i++) { + int32_t corr = 0; + for (j = 0; j < dim_seq; j++) + corr += (seq1[j] * seq2[j]) >> right_shifts; + seq2 += step_seq2; + *cross_correlation++ = corr; + } +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation_neon.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation_neon.c new file mode 100644 index 000000000..44b3b0eab --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/cross_correlation_neon.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +#ifdef WEBRTC_ARCH_ARM_FAMILY + +#include + +static inline void DotProductWithScaleNeon(int32_t* cross_correlation, + const int16_t* vector1, + const int16_t* vector2, + size_t length, + int scaling) { + size_t i = 0; + size_t len1 = length >> 3; + size_t len2 = length & 7; + int64x2_t sum0 = vdupq_n_s64(0); + int64x2_t sum1 = vdupq_n_s64(0); + + for (i = len1; i > 0; i -= 1) { + int16x8_t seq1_16x8 = vld1q_s16(vector1); + int16x8_t seq2_16x8 = vld1q_s16(vector2); +#if defined(WEBRTC_ARCH_ARM64) + int32x4_t tmp0 = vmull_s16(vget_low_s16(seq1_16x8), + vget_low_s16(seq2_16x8)); + int32x4_t tmp1 = vmull_high_s16(seq1_16x8, seq2_16x8); +#else + int32x4_t tmp0 = vmull_s16(vget_low_s16(seq1_16x8), + vget_low_s16(seq2_16x8)); + int32x4_t tmp1 = vmull_s16(vget_high_s16(seq1_16x8), + vget_high_s16(seq2_16x8)); +#endif + sum0 = vpadalq_s32(sum0, tmp0); + sum1 = vpadalq_s32(sum1, tmp1); + vector1 += 8; + vector2 += 8; + } + + // Calculate the rest of the samples. + int64_t sum_res = 0; + for (i = len2; i > 0; i -= 1) { + sum_res += WEBRTC_SPL_MUL_16_16(*vector1, *vector2); + vector1++; + vector2++; + } + + sum0 = vaddq_s64(sum0, sum1); +#if defined(WEBRTC_ARCH_ARM64) + int64_t sum2 = vaddvq_s64(sum0); + *cross_correlation = (int32_t)((sum2 + sum_res) >> scaling); +#else + int64x1_t shift = vdup_n_s64(-scaling); + int64x1_t sum2 = vadd_s64(vget_low_s64(sum0), vget_high_s64(sum0)); + sum2 = vadd_s64(sum2, vdup_n_s64(sum_res)); + sum2 = vshl_s64(sum2, shift); + vst1_lane_s32(cross_correlation, vreinterpret_s32_s64(sum2), 0); +#endif +} + +/* NEON version of WebRtcSpl_CrossCorrelation() for ARM32/64 platforms. 
*/ +void WebRtcSpl_CrossCorrelationNeon(int32_t* cross_correlation, + const int16_t* seq1, + const int16_t* seq2, + size_t dim_seq, + size_t dim_cross_correlation, + int right_shifts, + int step_seq2) { + size_t i = 0; + + for (i = 0; i < dim_cross_correlation; i++) { + const int16_t* seq1_ptr = seq1; + const int16_t* seq2_ptr = seq2 + (step_seq2 * i); + + DotProductWithScaleNeon(cross_correlation, + seq1_ptr, + seq2_ptr, + dim_seq, + right_shifts); + cross_correlation++; + } +} + +#endif // WEBRTC_ARCH_ARM_FAMILY diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/division_operations.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/division_operations.c new file mode 100644 index 000000000..eaa06a1ff --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/division_operations.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + + +/* + * This file contains implementations of the divisions + * WebRtcSpl_DivU32U16() + * WebRtcSpl_DivW32W16() + * WebRtcSpl_DivW32W16ResW16() + * WebRtcSpl_DivResultInQ31() + * WebRtcSpl_DivW32HiLow() + * + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +uint32_t WebRtcSpl_DivU32U16(uint32_t num, uint16_t den) +{ + // Guard against division with 0 + if (den != 0) + { + return (uint32_t)(num / den); + } else + { + return (uint32_t)0xFFFFFFFF; + } +} + +int32_t WebRtcSpl_DivW32W16(int32_t num, int16_t den) +{ + // Guard against division with 0 + if (den != 0) + { + return (int32_t)(num / den); + } else + { + return (int32_t)0x7FFFFFFF; + } +} + +int16_t WebRtcSpl_DivW32W16ResW16(int32_t num, int16_t den) +{ + // Guard against division with 0 + if (den != 0) + { + return (int16_t)(num / den); + } else + { + return (int16_t)0x7FFF; + } +} + +int32_t WebRtcSpl_DivResultInQ31(int32_t num, int32_t den) +{ + int32_t L_num = num; + int32_t L_den = den; + int32_t div = 0; + int k = 31; + int change_sign = 0; + + if (num == 0) + return 0; + + if (num < 0) + { + change_sign++; + L_num = -num; + } + if (den < 0) + { + change_sign++; + L_den = -den; + } + while (k--) + { + div <<= 1; + L_num <<= 1; + if (L_num >= L_den) + { + L_num -= L_den; + div++; + } + } + if (change_sign == 1) + { + div = -div; + } + return div; +} + +int32_t WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low) +{ + int16_t approx, tmp_hi, tmp_low, num_hi, num_low; + int32_t tmpW32; + + approx = (int16_t)WebRtcSpl_DivW32W16((int32_t)0x1FFFFFFF, den_hi); + // result in Q14 (Note: 3FFFFFFF = 0.5 in Q30) + + // tmpW32 = 1/den = approx * (2.0 - den * approx) (in Q30) + tmpW32 = (den_hi * approx << 1) + ((den_low * approx >> 15) << 1); + // tmpW32 = den * approx + + tmpW32 = (int32_t)0x7fffffffL - tmpW32; // result in Q30 (tmpW32 = 2.0-(den*approx)) + + // Store 
tmpW32 in hi and low format + tmp_hi = (int16_t)(tmpW32 >> 16); + tmp_low = (int16_t)((tmpW32 - ((int32_t)tmp_hi << 16)) >> 1); + + // tmpW32 = 1/den in Q29 + tmpW32 = (tmp_hi * approx + (tmp_low * approx >> 15)) << 1; + + // 1/den in hi and low format + tmp_hi = (int16_t)(tmpW32 >> 16); + tmp_low = (int16_t)((tmpW32 - ((int32_t)tmp_hi << 16)) >> 1); + + // Store num in hi and low format + num_hi = (int16_t)(num >> 16); + num_low = (int16_t)((num - ((int32_t)num_hi << 16)) >> 1); + + // num * (1/den) by 32 bit multiplication (result in Q28) + + tmpW32 = num_hi * tmp_hi + (num_hi * tmp_low >> 15) + + (num_low * tmp_hi >> 15); + + // Put result in Q31 (convert from Q28) + tmpW32 = WEBRTC_SPL_LSHIFT_W32(tmpW32, 3); + + return tmpW32; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/dot_product_with_scale.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/dot_product_with_scale.c new file mode 100644 index 000000000..1302d6254 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/dot_product_with_scale.c @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1, + const int16_t* vector2, + size_t length, + int scaling) { + int32_t sum = 0; + size_t i = 0; + + /* Unroll the loop to improve performance. 
*/ + for (i = 0; i + 3 < length; i += 4) { + sum += (vector1[i + 0] * vector2[i + 0]) >> scaling; + sum += (vector1[i + 1] * vector2[i + 1]) >> scaling; + sum += (vector1[i + 2] * vector2[i + 2]) >> scaling; + sum += (vector1[i + 3] * vector2[i + 3]) >> scaling; + } + for (; i < length; i++) { + sum += (vector1[i] * vector2[i]) >> scaling; + } + + return sum; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast.c new file mode 100644 index 000000000..3cbc3c111 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast.c @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +#include "webrtc/base/checks.h" +#include "webrtc/base/sanitizer.h" + +// TODO(Bjornv): Change the function parameter order to WebRTC code style. +// C version of WebRtcSpl_DownsampleFast() for generic platforms. +int WebRtcSpl_DownsampleFastC(const int16_t* data_in, + size_t data_in_length, + int16_t* data_out, + size_t data_out_length, + const int16_t* __restrict coefficients, + size_t coefficients_length, + int factor, + size_t delay) { + int16_t* const original_data_out = data_out; + size_t i = 0; + size_t j = 0; + int32_t out_s32 = 0; + size_t endpos = delay + factor * (data_out_length - 1) + 1; + + // Return error if any of the running conditions doesn't meet. 
+ if (data_out_length == 0 || coefficients_length == 0 + || data_in_length < endpos) { + return -1; + } + + rtc_MsanCheckInitialized(coefficients, sizeof(coefficients[0]), + coefficients_length); + + for (i = delay; i < endpos; i += factor) { + out_s32 = 2048; // Round value, 0.5 in Q12. + + for (j = 0; j < coefficients_length; j++) { + rtc_MsanCheckInitialized(&data_in[i - j], sizeof(data_in[0]), 1); + out_s32 += coefficients[j] * data_in[i - j]; // Q12. + } + + out_s32 >>= 12; // Q0. + + // Saturate and store the output. + *data_out++ = WebRtcSpl_SatW32ToW16(out_s32); + } + + RTC_DCHECK_EQ(original_data_out + data_out_length, data_out); + rtc_MsanCheckInitialized(original_data_out, sizeof(original_data_out[0]), + data_out_length); + + return 0; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast_neon.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast_neon.c new file mode 100644 index 000000000..071478f44 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/downsample_fast_neon.c @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +#ifdef WEBRTC_ARCH_ARM_FAMILY + +#include + +// NEON intrinsics version of WebRtcSpl_DownsampleFast() +// for ARM 32-bit/64-bit platforms. 
+int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in, + size_t data_in_length, + int16_t* data_out, + size_t data_out_length, + const int16_t* __restrict coefficients, + size_t coefficients_length, + int factor, + size_t delay) { + size_t i = 0; + size_t j = 0; + int32_t out_s32 = 0; + size_t endpos = delay + factor * (data_out_length - 1) + 1; + size_t res = data_out_length & 0x7; + size_t endpos1 = endpos - factor * res; + + // Return error if any of the running conditions doesn't meet. + if (data_out_length == 0 || coefficients_length == 0 + || data_in_length < endpos) { + return -1; + } + + // First part, unroll the loop 8 times, with 3 subcases + // (factor == 2, 4, others). + switch (factor) { + case 2: { + for (i = delay; i < endpos1; i += 16) { + // Round value, 0.5 in Q12. + int32x4_t out32x4_0 = vdupq_n_s32(2048); + int32x4_t out32x4_1 = vdupq_n_s32(2048); + +#if defined(WEBRTC_ARCH_ARM64) + // Unroll the loop 2 times. + for (j = 0; j < coefficients_length - 1; j += 2) { + int32x2_t coeff32 = vld1_dup_s32((int32_t*)&coefficients[j]); + int16x4_t coeff16x4 = vreinterpret_s16_s32(coeff32); + int16x8x2_t in16x8x2 = vld2q_s16(&data_in[i - j - 1]); + + // Mul and accumulate low 64-bit data. + int16x4_t in16x4_0 = vget_low_s16(in16x8x2.val[0]); + int16x4_t in16x4_1 = vget_low_s16(in16x8x2.val[1]); + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 1); + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_1, coeff16x4, 0); + + // Mul and accumulate high 64-bit data. + // TODO: vget_high_s16 need extra cost on ARM64. This could be + // replaced by vmlal_high_lane_s16. But for the interface of + // vmlal_high_lane_s16, there is a bug in gcc 4.9. + // This issue need to be tracked in the future. 
+ int16x4_t in16x4_2 = vget_high_s16(in16x8x2.val[0]); + int16x4_t in16x4_3 = vget_high_s16(in16x8x2.val[1]); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_2, coeff16x4, 1); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_3, coeff16x4, 0); + } + + for (; j < coefficients_length; j++) { + int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]); + int16x8x2_t in16x8x2 = vld2q_s16(&data_in[i - j]); + + // Mul and accumulate low 64-bit data. + int16x4_t in16x4_0 = vget_low_s16(in16x8x2.val[0]); + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 0); + + // Mul and accumulate high 64-bit data. + // TODO: vget_high_s16 need extra cost on ARM64. This could be + // replaced by vmlal_high_lane_s16. But for the interface of + // vmlal_high_lane_s16, there is a bug in gcc 4.9. + // This issue need to be tracked in the future. + int16x4_t in16x4_1 = vget_high_s16(in16x8x2.val[0]); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 0); + } +#else + // On ARMv7, the loop unrolling 2 times results in performance + // regression. + for (j = 0; j < coefficients_length; j++) { + int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]); + int16x8x2_t in16x8x2 = vld2q_s16(&data_in[i - j]); + + // Mul and accumulate. + int16x4_t in16x4_0 = vget_low_s16(in16x8x2.val[0]); + int16x4_t in16x4_1 = vget_high_s16(in16x8x2.val[0]); + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 0); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 0); + } +#endif + + // Saturate and store the output. + int16x4_t out16x4_0 = vqshrn_n_s32(out32x4_0, 12); + int16x4_t out16x4_1 = vqshrn_n_s32(out32x4_1, 12); + vst1q_s16(data_out, vcombine_s16(out16x4_0, out16x4_1)); + data_out += 8; + } + break; + } + case 4: { + for (i = delay; i < endpos1; i += 32) { + // Round value, 0.5 in Q12. + int32x4_t out32x4_0 = vdupq_n_s32(2048); + int32x4_t out32x4_1 = vdupq_n_s32(2048); + + // Unroll the loop 4 times. 
+ for (j = 0; j < coefficients_length - 3; j += 4) { + int16x4_t coeff16x4 = vld1_s16(&coefficients[j]); + int16x8x4_t in16x8x4 = vld4q_s16(&data_in[i - j - 3]); + + // Mul and accumulate low 64-bit data. + int16x4_t in16x4_0 = vget_low_s16(in16x8x4.val[0]); + int16x4_t in16x4_2 = vget_low_s16(in16x8x4.val[1]); + int16x4_t in16x4_4 = vget_low_s16(in16x8x4.val[2]); + int16x4_t in16x4_6 = vget_low_s16(in16x8x4.val[3]); + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 3); + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_2, coeff16x4, 2); + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_4, coeff16x4, 1); + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_6, coeff16x4, 0); + + // Mul and accumulate high 64-bit data. + // TODO: vget_high_s16 need extra cost on ARM64. This could be + // replaced by vmlal_high_lane_s16. But for the interface of + // vmlal_high_lane_s16, there is a bug in gcc 4.9. + // This issue need to be tracked in the future. + int16x4_t in16x4_1 = vget_high_s16(in16x8x4.val[0]); + int16x4_t in16x4_3 = vget_high_s16(in16x8x4.val[1]); + int16x4_t in16x4_5 = vget_high_s16(in16x8x4.val[2]); + int16x4_t in16x4_7 = vget_high_s16(in16x8x4.val[3]); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 3); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_3, coeff16x4, 2); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_5, coeff16x4, 1); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_7, coeff16x4, 0); + } + + for (; j < coefficients_length; j++) { + int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]); + int16x8x4_t in16x8x4 = vld4q_s16(&data_in[i - j]); + + // Mul and accumulate low 64-bit data. + int16x4_t in16x4_0 = vget_low_s16(in16x8x4.val[0]); + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 0); + + // Mul and accumulate high 64-bit data. + // TODO: vget_high_s16 need extra cost on ARM64. This could be + // replaced by vmlal_high_lane_s16. But for the interface of + // vmlal_high_lane_s16, there is a bug in gcc 4.9. 
+ // This issue need to be tracked in the future. + int16x4_t in16x4_1 = vget_high_s16(in16x8x4.val[0]); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 0); + } + + // Saturate and store the output. + int16x4_t out16x4_0 = vqshrn_n_s32(out32x4_0, 12); + int16x4_t out16x4_1 = vqshrn_n_s32(out32x4_1, 12); + vst1q_s16(data_out, vcombine_s16(out16x4_0, out16x4_1)); + data_out += 8; + } + break; + } + default: { + for (i = delay; i < endpos1; i += factor * 8) { + // Round value, 0.5 in Q12. + int32x4_t out32x4_0 = vdupq_n_s32(2048); + int32x4_t out32x4_1 = vdupq_n_s32(2048); + + for (j = 0; j < coefficients_length; j++) { + int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]); + int16x4_t in16x4_0 = vld1_dup_s16(&data_in[i - j]); + in16x4_0 = vld1_lane_s16(&data_in[i + factor - j], in16x4_0, 1); + in16x4_0 = vld1_lane_s16(&data_in[i + factor * 2 - j], in16x4_0, 2); + in16x4_0 = vld1_lane_s16(&data_in[i + factor * 3 - j], in16x4_0, 3); + int16x4_t in16x4_1 = vld1_dup_s16(&data_in[i + factor * 4 - j]); + in16x4_1 = vld1_lane_s16(&data_in[i + factor * 5 - j], in16x4_1, 1); + in16x4_1 = vld1_lane_s16(&data_in[i + factor * 6 - j], in16x4_1, 2); + in16x4_1 = vld1_lane_s16(&data_in[i + factor * 7 - j], in16x4_1, 3); + + // Mul and accumulate. + out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 0); + out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 0); + } + + // Saturate and store the output. + int16x4_t out16x4_0 = vqshrn_n_s32(out32x4_0, 12); + int16x4_t out16x4_1 = vqshrn_n_s32(out32x4_1, 12); + vst1q_s16(data_out, vcombine_s16(out16x4_0, out16x4_1)); + data_out += 8; + } + break; + } + } + + // Second part, do the rest iterations (if any). + for (; i < endpos; i += factor) { + out_s32 = 2048; // Round value, 0.5 in Q12. + + for (j = 0; j < coefficients_length; j++) { + out_s32 = WebRtc_MulAccumW16(coefficients[j], data_in[i - j], out_s32); + } + + // Saturate and store the output. 
+ out_s32 >>= 12; + *data_out++ = WebRtcSpl_SatW32ToW16(out_s32); + } + + return 0; +} + +#endif diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/energy.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/energy.c new file mode 100644 index 000000000..e83f1a698 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/energy.c @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_Energy(). + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +int32_t WebRtcSpl_Energy(int16_t* vector, + size_t vector_length, + int* scale_factor) +{ + int32_t en = 0; + size_t i; + int scaling = + WebRtcSpl_GetScalingSquare(vector, vector_length, vector_length); + size_t looptimes = vector_length; + int16_t *vectorptr = vector; + + for (i = 0; i < looptimes; i++) + { + en += (*vectorptr * *vectorptr) >> scaling; + vectorptr++; + } + *scale_factor = scaling; + + return en; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar.c new file mode 100644 index 000000000..d389ee47e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_FilterAR(). + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +size_t WebRtcSpl_FilterAR(const int16_t* a, + size_t a_length, + const int16_t* x, + size_t x_length, + int16_t* state, + size_t state_length, + int16_t* state_low, + size_t state_low_length, + int16_t* filtered, + int16_t* filtered_low, + size_t filtered_low_length) +{ + int32_t o; + int32_t oLOW; + size_t i, j, stop; + const int16_t* x_ptr = &x[0]; + int16_t* filteredFINAL_ptr = filtered; + int16_t* filteredFINAL_LOW_ptr = filtered_low; + + for (i = 0; i < x_length; i++) + { + // Calculate filtered[i] and filtered_low[i] + const int16_t* a_ptr = &a[1]; + int16_t* filtered_ptr = &filtered[i - 1]; + int16_t* filtered_low_ptr = &filtered_low[i - 1]; + int16_t* state_ptr = &state[state_length - 1]; + int16_t* state_low_ptr = &state_low[state_length - 1]; + + o = (int32_t)(*x_ptr++) * (1 << 12); + oLOW = (int32_t)0; + + stop = (i < a_length) ? 
i + 1 : a_length; + for (j = 1; j < stop; j++) + { + o -= *a_ptr * *filtered_ptr--; + oLOW -= *a_ptr++ * *filtered_low_ptr--; + } + for (j = i + 1; j < a_length; j++) + { + o -= *a_ptr * *state_ptr--; + oLOW -= *a_ptr++ * *state_low_ptr--; + } + + o += (oLOW >> 12); + *filteredFINAL_ptr = (int16_t)((o + (int32_t)2048) >> 12); + *filteredFINAL_LOW_ptr++ = + (int16_t)(o - ((int32_t)(*filteredFINAL_ptr++) * (1 << 12))); + } + + // Save the filter state + if (x_length >= state_length) + { + WebRtcSpl_CopyFromEndW16(filtered, x_length, a_length - 1, state); + WebRtcSpl_CopyFromEndW16(filtered_low, x_length, a_length - 1, state_low); + } else + { + for (i = 0; i < state_length - x_length; i++) + { + state[i] = state[i + x_length]; + state_low[i] = state_low[i + x_length]; + } + for (i = 0; i < x_length; i++) + { + state[state_length - x_length + i] = filtered[i]; + state[state_length - x_length + i] = filtered_low[i]; + } + } + + return x_length; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12.c new file mode 100644 index 000000000..53e800bc6 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12.c @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/base/checks.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +// TODO(bjornv): Change the return type to report errors. 
+ +void WebRtcSpl_FilterARFastQ12(const int16_t* data_in, + int16_t* data_out, + const int16_t* __restrict coefficients, + size_t coefficients_length, + size_t data_length) { + size_t i = 0; + size_t j = 0; + + RTC_DCHECK_GT(data_length, 0); + RTC_DCHECK_GT(coefficients_length, 1); + + for (i = 0; i < data_length; i++) { + int32_t output = 0; + int32_t sum = 0; + + for (j = coefficients_length - 1; j > 0; j--) { + sum += coefficients[j] * data_out[i - j]; + } + + output = coefficients[0] * data_in[i]; + output -= sum; + + // Saturate and store the output. + output = WEBRTC_SPL_SAT(134215679, output, -134217728); + data_out[i] = (int16_t)((output + 2048) >> 12); + } +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12_armv7.S b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12_armv7.S new file mode 100644 index 000000000..f16362738 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ar_fast_q12_armv7.S @@ -0,0 +1,218 @@ +@ +@ Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. +@ +@ Use of this source code is governed by a BSD-style license +@ that can be found in the LICENSE file in the root of the source +@ tree. An additional intellectual property rights grant can be found +@ in the file PATENTS. All contributing project authors may +@ be found in the AUTHORS file in the root of the source tree. +@ + +@ This file contains the function WebRtcSpl_FilterARFastQ12(), optimized for +@ ARMv7 platform. The description header can be found in +@ signal_processing_library.h +@ +@ Output is bit-exact with the generic C code as in filter_ar_fast_q12.c, and +@ the reference C code at end of this file. 
+ +@ Assumptions: +@ (1) data_length > 0 +@ (2) coefficients_length > 1 + +@ Register usage: +@ +@ r0: &data_in[i] +@ r1: &data_out[i], for result ouput +@ r2: &coefficients[0] +@ r3: coefficients_length +@ r4: Iteration counter for the outer loop. +@ r5: data_out[j] as multiplication inputs +@ r6: Calculated value for output data_out[]; interation counter for inner loop +@ r7: Partial sum of a filtering multiplication results +@ r8: Partial sum of a filtering multiplication results +@ r9: &data_out[], for filtering input; data_in[i] +@ r10: coefficients[j] +@ r11: Scratch +@ r12: &coefficients[j] + +#include "webrtc/system_wrappers/include/asm_defines.h" + +GLOBAL_FUNCTION WebRtcSpl_FilterARFastQ12 +.align 2 +DEFINE_FUNCTION WebRtcSpl_FilterARFastQ12 + push {r4-r11} + + ldrsh r12, [sp, #32] @ data_length + subs r4, r12, #1 + beq ODD_LENGTH @ jump if data_length == 1 + +LOOP_LENGTH: + add r12, r2, r3, lsl #1 + sub r12, #4 @ &coefficients[coefficients_length - 2] + sub r9, r1, r3, lsl #1 + add r9, #2 @ &data_out[i - coefficients_length + 1] + ldr r5, [r9], #4 @ data_out[i - coefficients_length + {1,2}] + + mov r7, #0 @ sum1 + mov r8, #0 @ sum2 + subs r6, r3, #3 @ Iteration counter for inner loop. 
+ beq ODD_A_LENGTH @ branch if coefficients_length == 3
+ blt POST_LOOP_A_LENGTH @ branch if coefficients_length == 2
+
+LOOP_A_LENGTH:
+ ldr r10, [r12], #-4 @ coefficients[j - 1], coefficients[j]
+ subs r6, #2
+ smlatt r8, r10, r5, r8 @ sum2 += coefficients[j] * data_out[i - j + 1];
+ smlatb r7, r10, r5, r7 @ sum1 += coefficients[j] * data_out[i - j];
+ smlabt r7, r10, r5, r7 @ coefficients[j - 1] * data_out[i - j + 1];
+ ldr r5, [r9], #4 @ data_out[i - j + 2], data_out[i - j + 3]
+ smlabb r8, r10, r5, r8 @ coefficients[j - 1] * data_out[i - j + 2];
+ bgt LOOP_A_LENGTH
+ blt POST_LOOP_A_LENGTH
+
+ODD_A_LENGTH:
+ ldrsh r10, [r12, #2] @ Filter coefficients coefficients[2]
+ sub r12, #2 @ &coefficients[0]
+ smlabb r7, r10, r5, r7 @ sum1 += coefficients[2] * data_out[i - 2];
+ smlabt r8, r10, r5, r8 @ sum2 += coefficients[2] * data_out[i - 1];
+ ldr r5, [r9, #-2] @ data_out[i - 1], data_out[i]
+
+POST_LOOP_A_LENGTH:
+ ldr r10, [r12] @ coefficients[0], coefficients[1]
+ smlatb r7, r10, r5, r7 @ sum1 += coefficients[1] * data_out[i - 1];
+
+ ldr r9, [r0], #4 @ data_in[i], data_in[i + 1]
+ smulbb r6, r10, r9 @ output1 = coefficients[0] * data_in[i];
+ sub r6, r7 @ output1 -= sum1;
+
+ sbfx r11, r6, #12, #16
+ ssat r7, #16, r6, asr #12
+ cmp r7, r11
+ addeq r6, r6, #2048
+ ssat r6, #16, r6, asr #12
+ strh r6, [r1], #2 @ Store data_out[i]
+
+ smlatb r8, r10, r6, r8 @ sum2 += coefficients[1] * data_out[i];
+ smulbt r6, r10, r9 @ output2 = coefficients[0] * data_in[i + 1];
+ sub r6, r8 @ output2 -= sum2;
+
+ sbfx r11, r6, #12, #16
+ ssat r7, #16, r6, asr #12
+ cmp r7, r11
+ addeq r6, r6, #2048
+ ssat r6, #16, r6, asr #12
+ strh r6, [r1], #2 @ Store data_out[i + 1]
+
+ subs r4, #2
+ bgt LOOP_LENGTH
+ blt END @ For even data_length, it's done. Jump to END.
+
+@ Process i = data_length -1, for the case of an odd length.
+ODD_LENGTH: + add r12, r2, r3, lsl #1 + sub r12, #4 @ &coefficients[coefficients_length - 2] + sub r9, r1, r3, lsl #1 + add r9, #2 @ &data_out[i - coefficients_length + 1] + mov r7, #0 @ sum1 + mov r8, #0 @ sum1 + subs r6, r3, #2 @ inner loop counter + beq EVEN_A_LENGTH @ branch if coefficients_length == 2 + +LOOP2_A_LENGTH: + ldr r10, [r12], #-4 @ coefficients[j - 1], coefficients[j] + ldr r5, [r9], #4 @ data_out[i - j], data_out[i - j + 1] + subs r6, #2 + smlatb r7, r10, r5, r7 @ sum1 += coefficients[j] * data_out[i - j]; + smlabt r8, r10, r5, r8 @ coefficients[j - 1] * data_out[i - j + 1]; + bgt LOOP2_A_LENGTH + addlt r12, #2 + blt POST_LOOP2_A_LENGTH + +EVEN_A_LENGTH: + ldrsh r10, [r12, #2] @ Filter coefficients coefficients[1] + ldrsh r5, [r9] @ data_out[i - 1] + smlabb r7, r10, r5, r7 @ sum1 += coefficients[1] * data_out[i - 1]; + +POST_LOOP2_A_LENGTH: + ldrsh r10, [r12] @ Filter coefficients coefficients[0] + ldrsh r9, [r0] @ data_in[i] + smulbb r6, r10, r9 @ output1 = coefficients[0] * data_in[i]; + sub r6, r7 @ output1 -= sum1; + sub r6, r8 @ output1 -= sum1; + sbfx r8, r6, #12, #16 + ssat r7, #16, r6, asr #12 + cmp r7, r8 + addeq r6, r6, #2048 + ssat r6, #16, r6, asr #12 + strh r6, [r1] @ Store the data_out[i] + +END: + pop {r4-r11} + bx lr + +@Reference C code: +@ +@void WebRtcSpl_FilterARFastQ12(int16_t* data_in, +@ int16_t* data_out, +@ int16_t* __restrict coefficients, +@ size_t coefficients_length, +@ size_t data_length) { +@ size_t i = 0; +@ size_t j = 0; +@ +@ assert(data_length > 0); +@ assert(coefficients_length > 1); +@ +@ for (i = 0; i < data_length - 1; i += 2) { +@ int32_t output1 = 0; +@ int32_t sum1 = 0; +@ int32_t output2 = 0; +@ int32_t sum2 = 0; +@ +@ for (j = coefficients_length - 1; j > 2; j -= 2) { +@ sum1 += coefficients[j] * data_out[i - j]; +@ sum1 += coefficients[j - 1] * data_out[i - j + 1]; +@ sum2 += coefficients[j] * data_out[i - j + 1]; +@ sum2 += coefficients[j - 1] * data_out[i - j + 2]; +@ } +@ +@ if (j == 2) { +@ sum1 += 
coefficients[2] * data_out[i - 2]; +@ sum2 += coefficients[2] * data_out[i - 1]; +@ } +@ +@ sum1 += coefficients[1] * data_out[i - 1]; +@ output1 = coefficients[0] * data_in[i]; +@ output1 -= sum1; +@ // Saturate and store the output. +@ output1 = WEBRTC_SPL_SAT(134215679, output1, -134217728); +@ data_out[i] = (int16_t)((output1 + 2048) >> 12); +@ +@ sum2 += coefficients[1] * data_out[i]; +@ output2 = coefficients[0] * data_in[i + 1]; +@ output2 -= sum2; +@ // Saturate and store the output. +@ output2 = WEBRTC_SPL_SAT(134215679, output2, -134217728); +@ data_out[i + 1] = (int16_t)((output2 + 2048) >> 12); +@ } +@ +@ if (i == data_length - 1) { +@ int32_t output1 = 0; +@ int32_t sum1 = 0; +@ +@ for (j = coefficients_length - 1; j > 1; j -= 2) { +@ sum1 += coefficients[j] * data_out[i - j]; +@ sum1 += coefficients[j - 1] * data_out[i - j + 1]; +@ } +@ +@ if (j == 1) { +@ sum1 += coefficients[1] * data_out[i - 1]; +@ } +@ +@ output1 = coefficients[0] * data_in[i]; +@ output1 -= sum1; +@ // Saturate and store the output. +@ output1 = WEBRTC_SPL_SAT(134215679, output1, -134217728); +@ data_out[i] = (int16_t)((output1 + 2048) >> 12); +@ } +@} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ma_fast_q12.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ma_fast_q12.c new file mode 100644 index 000000000..98f5b3cb6 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/filter_ma_fast_q12.c @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + + +/* + * This file contains the function WebRtcSpl_FilterMAFastQ12(). + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +#include "webrtc/base/sanitizer.h" + +void WebRtcSpl_FilterMAFastQ12(const int16_t* in_ptr, + int16_t* out_ptr, + const int16_t* B, + size_t B_length, + size_t length) +{ + size_t i, j; + + rtc_MsanCheckInitialized(B, sizeof(B[0]), B_length); + rtc_MsanCheckInitialized(in_ptr - B_length + 1, sizeof(in_ptr[0]), + B_length + length - 1); + + for (i = 0; i < length; i++) + { + int32_t o = 0; + + for (j = 0; j < B_length; j++) + { + o += B[j] * in_ptr[i - j]; + } + + // If output is higher than 32768, saturate it. Same with negative side + // 2^27 = 134217728, which corresponds to 32768 in Q12 + + // Saturate the output + o = WEBRTC_SPL_SAT((int32_t)134215679, o, (int32_t)-134217728); + + *out_ptr++ = (int16_t)((o + (int32_t)2048) >> 12); + } + return; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/get_hanning_window.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/get_hanning_window.c new file mode 100644 index 000000000..d83ac2168 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/get_hanning_window.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_GetHanningWindow(). 
+ * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +// Hanning table with 256 entries +static const int16_t kHanningTable[] = { + 1, 2, 6, 10, 15, 22, 30, 39, + 50, 62, 75, 89, 104, 121, 138, 157, + 178, 199, 222, 246, 271, 297, 324, 353, + 383, 413, 446, 479, 513, 549, 586, 624, + 663, 703, 744, 787, 830, 875, 920, 967, + 1015, 1064, 1114, 1165, 1218, 1271, 1325, 1381, + 1437, 1494, 1553, 1612, 1673, 1734, 1796, 1859, + 1924, 1989, 2055, 2122, 2190, 2259, 2329, 2399, + 2471, 2543, 2617, 2691, 2765, 2841, 2918, 2995, + 3073, 3152, 3232, 3312, 3393, 3475, 3558, 3641, + 3725, 3809, 3895, 3980, 4067, 4154, 4242, 4330, + 4419, 4509, 4599, 4689, 4781, 4872, 4964, 5057, + 5150, 5244, 5338, 5432, 5527, 5622, 5718, 5814, + 5910, 6007, 6104, 6202, 6299, 6397, 6495, 6594, + 6693, 6791, 6891, 6990, 7090, 7189, 7289, 7389, + 7489, 7589, 7690, 7790, 7890, 7991, 8091, 8192, + 8293, 8393, 8494, 8594, 8694, 8795, 8895, 8995, + 9095, 9195, 9294, 9394, 9493, 9593, 9691, 9790, + 9889, 9987, 10085, 10182, 10280, 10377, 10474, 10570, +10666, 10762, 10857, 10952, 11046, 11140, 11234, 11327, +11420, 11512, 11603, 11695, 11785, 11875, 11965, 12054, +12142, 12230, 12317, 12404, 12489, 12575, 12659, 12743, +12826, 12909, 12991, 13072, 13152, 13232, 13311, 13389, +13466, 13543, 13619, 13693, 13767, 13841, 13913, 13985, +14055, 14125, 14194, 14262, 14329, 14395, 14460, 14525, +14588, 14650, 14711, 14772, 14831, 14890, 14947, 15003, +15059, 15113, 15166, 15219, 15270, 15320, 15369, 15417, +15464, 15509, 15554, 15597, 15640, 15681, 15721, 15760, +15798, 15835, 15871, 15905, 15938, 15971, 16001, 16031, +16060, 16087, 16113, 16138, 16162, 16185, 16206, 16227, +16246, 16263, 16280, 16295, 16309, 16322, 16334, 16345, +16354, 16362, 16369, 16374, 16378, 16382, 16383, 16384 +}; + +void WebRtcSpl_GetHanningWindow(int16_t *v, size_t size) +{ + size_t jj; + int16_t *vptr1; + + int32_t 
index; + int32_t factor = ((int32_t)0x40000000); + + factor = WebRtcSpl_DivW32W16(factor, (int16_t)size); + if (size < 513) + index = (int32_t)-0x200000; + else + index = (int32_t)-0x100000; + vptr1 = v; + + for (jj = 0; jj < size; jj++) + { + index += factor; + (*vptr1++) = kHanningTable[index >> 22]; + } + +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/get_scaling_square.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/get_scaling_square.c new file mode 100644 index 000000000..82e3c8b09 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/get_scaling_square.c @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_GetScalingSquare(). + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +int16_t WebRtcSpl_GetScalingSquare(int16_t* in_vector, + size_t in_vector_length, + size_t times) +{ + int16_t nbits = WebRtcSpl_GetSizeInBits((uint32_t)times); + size_t i; + int16_t smax = -1; + int16_t sabs; + int16_t *sptr = in_vector; + int16_t t; + size_t looptimes = in_vector_length; + + for (i = looptimes; i > 0; i--) + { + sabs = (*sptr > 0 ? *sptr++ : -*sptr++); + smax = (sabs > smax ? sabs : smax); + } + t = WebRtcSpl_NormW32(WEBRTC_SPL_MUL(smax, smax)); + + if (smax == 0) + { + return 0; // Since norm(0) returns 0 + } else + { + return (t > nbits) ? 
0 : nbits - t; + } +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/ilbc_specific_functions.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/ilbc_specific_functions.c new file mode 100644 index 000000000..301a922d7 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/ilbc_specific_functions.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains implementations of the iLBC specific functions + * WebRtcSpl_ReverseOrderMultArrayElements() + * WebRtcSpl_ElementwiseVectorMult() + * WebRtcSpl_AddVectorsAndShift() + * WebRtcSpl_AddAffineVectorToVector() + * WebRtcSpl_AffineTransformVector() + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +void WebRtcSpl_ReverseOrderMultArrayElements(int16_t *out, const int16_t *in, + const int16_t *win, + size_t vector_length, + int16_t right_shifts) +{ + size_t i; + int16_t *outptr = out; + const int16_t *inptr = in; + const int16_t *winptr = win; + for (i = 0; i < vector_length; i++) + { + *outptr++ = (int16_t)((*inptr++ * *winptr--) >> right_shifts); + } +} + +void WebRtcSpl_ElementwiseVectorMult(int16_t *out, const int16_t *in, + const int16_t *win, size_t vector_length, + int16_t right_shifts) +{ + size_t i; + int16_t *outptr = out; + const int16_t *inptr = in; + const int16_t *winptr = win; + for (i = 0; i < vector_length; i++) + { + *outptr++ = (int16_t)((*inptr++ * *winptr++) >> right_shifts); + } +} + +void WebRtcSpl_AddVectorsAndShift(int16_t *out, 
const int16_t *in1, + const int16_t *in2, size_t vector_length, + int16_t right_shifts) +{ + size_t i; + int16_t *outptr = out; + const int16_t *in1ptr = in1; + const int16_t *in2ptr = in2; + for (i = vector_length; i > 0; i--) + { + (*outptr++) = (int16_t)(((*in1ptr++) + (*in2ptr++)) >> right_shifts); + } +} + +void WebRtcSpl_AddAffineVectorToVector(int16_t *out, int16_t *in, + int16_t gain, int32_t add_constant, + int16_t right_shifts, + size_t vector_length) +{ + size_t i; + + for (i = 0; i < vector_length; i++) + { + out[i] += (int16_t)((in[i] * gain + add_constant) >> right_shifts); + } +} + +void WebRtcSpl_AffineTransformVector(int16_t *out, int16_t *in, + int16_t gain, int32_t add_constant, + int16_t right_shifts, size_t vector_length) +{ + size_t i; + + for (i = 0; i < vector_length; i++) + { + out[i] = (int16_t)((in[i] * gain + add_constant) >> right_shifts); + } +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/real_fft.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/real_fft.h new file mode 100644 index 000000000..e7942f04c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/real_fft.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_REAL_FFT_H_ +#define WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_REAL_FFT_H_ + +#include "webrtc/typedefs.h" + +// For ComplexFFT(), the maximum fft order is 10; +// for OpenMax FFT in ARM, it is 12; +// WebRTC APM uses orders of only 7 and 8. 
+enum {kMaxFFTOrder = 10}; + +struct RealFFT; + +#ifdef __cplusplus +extern "C" { +#endif + +struct RealFFT* WebRtcSpl_CreateRealFFT(int order); +void WebRtcSpl_FreeRealFFT(struct RealFFT* self); + +// Compute an FFT for a real-valued signal of length of 2^order, +// where 1 < order <= MAX_FFT_ORDER. Transform length is determined by the +// specification structure, which must be initialized prior to calling the FFT +// function with WebRtcSpl_CreateRealFFT(). +// The relationship between the input and output sequences can +// be expressed in terms of the DFT, i.e.: +// x[n] = (2^(-scalefactor)/N) . SUM[k=0,...,N-1] X[k].e^(jnk.2.pi/N) +// n=0,1,2,...N-1 +// N=2^order. +// The conjugate-symmetric output sequence is represented using a CCS vector, +// which is of length N+2, and is organized as follows: +// Index: 0 1 2 3 4 5 . . . N-2 N-1 N N+1 +// Component: R0 0 R1 I1 R2 I2 . . . R[N/2-1] I[N/2-1] R[N/2] 0 +// where R[n] and I[n], respectively, denote the real and imaginary components +// for FFT bin 'n'. Bins are numbered from 0 to N/2, where N is the FFT length. +// Bin index 0 corresponds to the DC component, and bin index N/2 corresponds to +// the foldover frequency. +// +// Input Arguments: +// self - pointer to preallocated and initialized FFT specification structure. +// real_data_in - the input signal. For an ARM Neon platform, it must be +// aligned on a 32-byte boundary. +// +// Output Arguments: +// complex_data_out - the output complex signal with (2^order + 2) 16-bit +// elements. For an ARM Neon platform, it must be different +// from real_data_in, and aligned on a 32-byte boundary. +// +// Return Value: +// 0 - FFT calculation is successful. +// -1 - Error with bad arguments (NULL pointers). +int WebRtcSpl_RealForwardFFT(struct RealFFT* self, + const int16_t* real_data_in, + int16_t* complex_data_out); + +// Compute the inverse FFT for a conjugate-symmetric input sequence of length of +// 2^order, where 1 < order <= MAX_FFT_ORDER. 
Transform length is determined by +// the specification structure, which must be initialized prior to calling the +// FFT function with WebRtcSpl_CreateRealFFT(). +// For a transform of length M, the input sequence is represented using a packed +// CCS vector of length M+2, which is explained in the comments for +// WebRtcSpl_RealForwardFFTC above. +// +// Input Arguments: +// self - pointer to preallocated and initialized FFT specification structure. +// complex_data_in - the input complex signal with (2^order + 2) 16-bit +// elements. For an ARM Neon platform, it must be aligned on +// a 32-byte boundary. +// +// Output Arguments: +// real_data_out - the output real signal. For an ARM Neon platform, it must +// be different to complex_data_in, and aligned on a 32-byte +// boundary. +// +// Return Value: +// 0 or a positive number - a value that the elements in the |real_data_out| +// should be shifted left with in order to get +// correct physical values. +// -1 - Error with bad arguments (NULL pointers). +int WebRtcSpl_RealInverseFFT(struct RealFFT* self, + const int16_t* complex_data_in, + int16_t* real_data_out); + +#ifdef __cplusplus +} +#endif + +#endif // WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_REAL_FFT_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/signal_processing_library.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/signal_processing_library.h new file mode 100644 index 000000000..89a281f41 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/signal_processing_library.h @@ -0,0 +1,1642 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This header file includes all of the fix point signal processing library (SPL) function + * descriptions and declarations. + * For specific function calls, see bottom of file. + */ + +#ifndef WEBRTC_SPL_SIGNAL_PROCESSING_LIBRARY_H_ +#define WEBRTC_SPL_SIGNAL_PROCESSING_LIBRARY_H_ + +#include +#include "webrtc/typedefs.h" + +// Macros specific for the fixed point implementation +#define WEBRTC_SPL_WORD16_MAX 32767 +#define WEBRTC_SPL_WORD16_MIN -32768 +#define WEBRTC_SPL_WORD32_MAX (int32_t)0x7fffffff +#define WEBRTC_SPL_WORD32_MIN (int32_t)0x80000000 +#define WEBRTC_SPL_MAX_LPC_ORDER 14 +#define WEBRTC_SPL_MIN(A, B) (A < B ? A : B) // Get min value +#define WEBRTC_SPL_MAX(A, B) (A > B ? A : B) // Get max value +// TODO(kma/bjorn): For the next two macros, investigate how to correct the code +// for inputs of a = WEBRTC_SPL_WORD16_MIN or WEBRTC_SPL_WORD32_MIN. +#define WEBRTC_SPL_ABS_W16(a) \ + (((int16_t)a >= 0) ? ((int16_t)a) : -((int16_t)a)) +#define WEBRTC_SPL_ABS_W32(a) \ + (((int32_t)a >= 0) ? 
((int32_t)a) : -((int32_t)a)) + +#define WEBRTC_SPL_MUL(a, b) \ + ((int32_t) ((int32_t)(a) * (int32_t)(b))) +#define WEBRTC_SPL_UMUL(a, b) \ + ((uint32_t) ((uint32_t)(a) * (uint32_t)(b))) +#define WEBRTC_SPL_UMUL_32_16(a, b) \ + ((uint32_t) ((uint32_t)(a) * (uint16_t)(b))) +#define WEBRTC_SPL_MUL_16_U16(a, b) \ + ((int32_t)(int16_t)(a) * (uint16_t)(b)) + +#ifndef WEBRTC_ARCH_ARM_V7 +// For ARMv7 platforms, these are inline functions in spl_inl_armv7.h +#ifndef MIPS32_LE +// For MIPS platforms, these are inline functions in spl_inl_mips.h +#define WEBRTC_SPL_MUL_16_16(a, b) \ + ((int32_t) (((int16_t)(a)) * ((int16_t)(b)))) +#define WEBRTC_SPL_MUL_16_32_RSFT16(a, b) \ + (WEBRTC_SPL_MUL_16_16(a, b >> 16) \ + + ((WEBRTC_SPL_MUL_16_16(a, (b & 0xffff) >> 1) + 0x4000) >> 15)) +#endif +#endif + +#define WEBRTC_SPL_MUL_16_32_RSFT11(a, b) \ + (WEBRTC_SPL_MUL_16_16(a, (b) >> 16) * (1 << 5) + \ + (((WEBRTC_SPL_MUL_16_U16(a, (uint16_t)(b)) >> 1) + 0x0200) >> 10)) +#define WEBRTC_SPL_MUL_16_32_RSFT14(a, b) \ + (WEBRTC_SPL_MUL_16_16(a, (b) >> 16) * (1 << 2) + \ + (((WEBRTC_SPL_MUL_16_U16(a, (uint16_t)(b)) >> 1) + 0x1000) >> 13)) +#define WEBRTC_SPL_MUL_16_32_RSFT15(a, b) \ + ((WEBRTC_SPL_MUL_16_16(a, (b) >> 16) * (1 << 1)) + \ + (((WEBRTC_SPL_MUL_16_U16(a, (uint16_t)(b)) >> 1) + 0x2000) >> 14)) + +#define WEBRTC_SPL_MUL_16_16_RSFT(a, b, c) \ + (WEBRTC_SPL_MUL_16_16(a, b) >> (c)) + +#define WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(a, b, c) \ + ((WEBRTC_SPL_MUL_16_16(a, b) + ((int32_t) \ + (((int32_t)1) << ((c) - 1)))) >> (c)) + +// C + the 32 most significant bits of A * B +#define WEBRTC_SPL_SCALEDIFF32(A, B, C) \ + (C + (B >> 16) * A + (((uint32_t)(0x0000FFFF & B) * A) >> 16)) + +#define WEBRTC_SPL_SAT(a, b, c) (b > a ? a : b < c ? c : b) + +// Shifting with negative numbers allowed +// Positive means left shift +#define WEBRTC_SPL_SHIFT_W32(x, c) ((c) >= 0 ? 
(x) * (1 << (c)) : (x) >> -(c)) + +// Shifting with negative numbers not allowed +// We cannot do casting here due to signed/unsigned problem +#define WEBRTC_SPL_LSHIFT_W32(x, c) ((x) << (c)) + +#define WEBRTC_SPL_RSHIFT_U32(x, c) ((uint32_t)(x) >> (c)) + +#define WEBRTC_SPL_RAND(a) \ + ((int16_t)((((int16_t)a * 18816) >> 7) & 0x00007fff)) + +#ifdef __cplusplus +extern "C" { +#endif + +#define WEBRTC_SPL_MEMCPY_W16(v1, v2, length) \ + memcpy(v1, v2, (length) * sizeof(int16_t)) + +// inline functions: +#include "webrtc/common_audio/signal_processing/include/spl_inl.h" + +// Initialize SPL. Currently it contains only function pointer initialization. +// If the underlying platform is known to be ARM-Neon (WEBRTC_HAS_NEON defined), +// the pointers will be assigned to code optimized for Neon; otherwise, generic +// C code will be assigned. +// Note that this function MUST be called in any application that uses SPL +// functions. +void WebRtcSpl_Init(); + +int16_t WebRtcSpl_GetScalingSquare(int16_t* in_vector, + size_t in_vector_length, + size_t times); + +// Copy and set operations. Implementation in copy_set_operations.c. +// Descriptions at bottom of file. +void WebRtcSpl_MemSetW16(int16_t* vector, + int16_t set_value, + size_t vector_length); +void WebRtcSpl_MemSetW32(int32_t* vector, + int32_t set_value, + size_t vector_length); +void WebRtcSpl_MemCpyReversedOrder(int16_t* out_vector, + int16_t* in_vector, + size_t vector_length); +void WebRtcSpl_CopyFromEndW16(const int16_t* in_vector, + size_t in_vector_length, + size_t samples, + int16_t* out_vector); +void WebRtcSpl_ZerosArrayW16(int16_t* vector, + size_t vector_length); +void WebRtcSpl_ZerosArrayW32(int32_t* vector, + size_t vector_length); +// End: Copy and set operations. + + +// Minimum and maximum operation functions and their pointers. +// Implementation in min_max_operations.c. + +// Returns the largest absolute value in a signed 16-bit vector. +// +// Input: +// - vector : 16-bit input vector. 
+// - length : Number of samples in vector. +// +// Return value : Maximum absolute value in vector. +typedef int16_t (*MaxAbsValueW16)(const int16_t* vector, size_t length); +extern MaxAbsValueW16 WebRtcSpl_MaxAbsValueW16; +int16_t WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, size_t length); +#if defined(WEBRTC_HAS_NEON) +int16_t WebRtcSpl_MaxAbsValueW16Neon(const int16_t* vector, size_t length); +#endif +#if defined(MIPS32_LE) +int16_t WebRtcSpl_MaxAbsValueW16_mips(const int16_t* vector, size_t length); +#endif + +// Returns the largest absolute value in a signed 32-bit vector. +// +// Input: +// - vector : 32-bit input vector. +// - length : Number of samples in vector. +// +// Return value : Maximum absolute value in vector. +typedef int32_t (*MaxAbsValueW32)(const int32_t* vector, size_t length); +extern MaxAbsValueW32 WebRtcSpl_MaxAbsValueW32; +int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, size_t length); +#if defined(WEBRTC_HAS_NEON) +int32_t WebRtcSpl_MaxAbsValueW32Neon(const int32_t* vector, size_t length); +#endif +#if defined(MIPS_DSP_R1_LE) +int32_t WebRtcSpl_MaxAbsValueW32_mips(const int32_t* vector, size_t length); +#endif + +// Returns the maximum value of a 16-bit vector. +// +// Input: +// - vector : 16-bit input vector. +// - length : Number of samples in vector. +// +// Return value : Maximum sample value in |vector|. +typedef int16_t (*MaxValueW16)(const int16_t* vector, size_t length); +extern MaxValueW16 WebRtcSpl_MaxValueW16; +int16_t WebRtcSpl_MaxValueW16C(const int16_t* vector, size_t length); +#if defined(WEBRTC_HAS_NEON) +int16_t WebRtcSpl_MaxValueW16Neon(const int16_t* vector, size_t length); +#endif +#if defined(MIPS32_LE) +int16_t WebRtcSpl_MaxValueW16_mips(const int16_t* vector, size_t length); +#endif + +// Returns the maximum value of a 32-bit vector. +// +// Input: +// - vector : 32-bit input vector. +// - length : Number of samples in vector. +// +// Return value : Maximum sample value in |vector|. 
+typedef int32_t (*MaxValueW32)(const int32_t* vector, size_t length); +extern MaxValueW32 WebRtcSpl_MaxValueW32; +int32_t WebRtcSpl_MaxValueW32C(const int32_t* vector, size_t length); +#if defined(WEBRTC_HAS_NEON) +int32_t WebRtcSpl_MaxValueW32Neon(const int32_t* vector, size_t length); +#endif +#if defined(MIPS32_LE) +int32_t WebRtcSpl_MaxValueW32_mips(const int32_t* vector, size_t length); +#endif + +// Returns the minimum value of a 16-bit vector. +// +// Input: +// - vector : 16-bit input vector. +// - length : Number of samples in vector. +// +// Return value : Minimum sample value in |vector|. +typedef int16_t (*MinValueW16)(const int16_t* vector, size_t length); +extern MinValueW16 WebRtcSpl_MinValueW16; +int16_t WebRtcSpl_MinValueW16C(const int16_t* vector, size_t length); +#if defined(WEBRTC_HAS_NEON) +int16_t WebRtcSpl_MinValueW16Neon(const int16_t* vector, size_t length); +#endif +#if defined(MIPS32_LE) +int16_t WebRtcSpl_MinValueW16_mips(const int16_t* vector, size_t length); +#endif + +// Returns the minimum value of a 32-bit vector. +// +// Input: +// - vector : 32-bit input vector. +// - length : Number of samples in vector. +// +// Return value : Minimum sample value in |vector|. +typedef int32_t (*MinValueW32)(const int32_t* vector, size_t length); +extern MinValueW32 WebRtcSpl_MinValueW32; +int32_t WebRtcSpl_MinValueW32C(const int32_t* vector, size_t length); +#if defined(WEBRTC_HAS_NEON) +int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length); +#endif +#if defined(MIPS32_LE) +int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, size_t length); +#endif + +// Returns the vector index to the largest absolute value of a 16-bit vector. +// +// Input: +// - vector : 16-bit input vector. +// - length : Number of samples in vector. +// +// Return value : Index to the maximum absolute value in vector. +// If there are multiple equal maxima, return the index of the +// first. 
-32768 will always have precedence over 32767 (despite
+// -32768 presenting an int16 absolute value of 32767).
+size_t WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length);
+
+// Returns the vector index to the maximum sample value of a 16-bit vector.
+//
+// Input:
+// - vector : 16-bit input vector.
+// - length : Number of samples in vector.
+//
+// Return value : Index to the maximum value in vector (if multiple
+// indexes have the maximum, return the first).
+size_t WebRtcSpl_MaxIndexW16(const int16_t* vector, size_t length);
+
+// Returns the vector index to the maximum sample value of a 32-bit vector.
+//
+// Input:
+// - vector : 32-bit input vector.
+// - length : Number of samples in vector.
+//
+// Return value : Index to the maximum value in vector (if multiple
+// indexes have the maximum, return the first).
+size_t WebRtcSpl_MaxIndexW32(const int32_t* vector, size_t length);
+
+// Returns the vector index to the minimum sample value of a 16-bit vector.
+//
+// Input:
+// - vector : 16-bit input vector.
+// - length : Number of samples in vector.
+//
+// Return value : Index to the minimum value in vector (if multiple
+// indexes have the minimum, return the first).
+size_t WebRtcSpl_MinIndexW16(const int16_t* vector, size_t length);
+
+// Returns the vector index to the minimum sample value of a 32-bit vector.
+//
+// Input:
+// - vector : 32-bit input vector.
+// - length : Number of samples in vector.
+//
+// Return value : Index to the minimum value in vector (if multiple
+// indexes have the minimum, return the first).
+size_t WebRtcSpl_MinIndexW32(const int32_t* vector, size_t length);
+
+// End: Minimum and maximum operations.
+
+
+// Vector scaling operations. Implementation in vector_scaling_operations.c.
+// Description at bottom of file.
+void WebRtcSpl_VectorBitShiftW16(int16_t* out_vector, + size_t vector_length, + const int16_t* in_vector, + int16_t right_shifts); +void WebRtcSpl_VectorBitShiftW32(int32_t* out_vector, + size_t vector_length, + const int32_t* in_vector, + int16_t right_shifts); +void WebRtcSpl_VectorBitShiftW32ToW16(int16_t* out_vector, + size_t vector_length, + const int32_t* in_vector, + int right_shifts); +void WebRtcSpl_ScaleVector(const int16_t* in_vector, + int16_t* out_vector, + int16_t gain, + size_t vector_length, + int16_t right_shifts); +void WebRtcSpl_ScaleVectorWithSat(const int16_t* in_vector, + int16_t* out_vector, + int16_t gain, + size_t vector_length, + int16_t right_shifts); +void WebRtcSpl_ScaleAndAddVectors(const int16_t* in_vector1, + int16_t gain1, int right_shifts1, + const int16_t* in_vector2, + int16_t gain2, int right_shifts2, + int16_t* out_vector, + size_t vector_length); + +// The functions (with related pointer) perform the vector operation: +// out_vector[k] = ((scale1 * in_vector1[k]) + (scale2 * in_vector2[k]) +// + round_value) >> right_shifts, +// where round_value = (1 << right_shifts) >> 1. +// +// Input: +// - in_vector1 : Input vector 1 +// - in_vector1_scale : Gain to be used for vector 1 +// - in_vector2 : Input vector 2 +// - in_vector2_scale : Gain to be used for vector 2 +// - right_shifts : Number of right bit shifts to be applied +// - length : Number of elements in the input vectors +// +// Output: +// - out_vector : Output vector +// Return value : 0 if OK, -1 if (in_vector1 == NULL +// || in_vector2 == NULL || out_vector == NULL +// || length <= 0 || right_shift < 0). 
+typedef int (*ScaleAndAddVectorsWithRound)(const int16_t* in_vector1, + int16_t in_vector1_scale, + const int16_t* in_vector2, + int16_t in_vector2_scale, + int right_shifts, + int16_t* out_vector, + size_t length); +extern ScaleAndAddVectorsWithRound WebRtcSpl_ScaleAndAddVectorsWithRound; +int WebRtcSpl_ScaleAndAddVectorsWithRoundC(const int16_t* in_vector1, + int16_t in_vector1_scale, + const int16_t* in_vector2, + int16_t in_vector2_scale, + int right_shifts, + int16_t* out_vector, + size_t length); +#if defined(MIPS_DSP_R1_LE) +int WebRtcSpl_ScaleAndAddVectorsWithRound_mips(const int16_t* in_vector1, + int16_t in_vector1_scale, + const int16_t* in_vector2, + int16_t in_vector2_scale, + int right_shifts, + int16_t* out_vector, + size_t length); +#endif +// End: Vector scaling operations. + +// iLBC specific functions. Implementations in ilbc_specific_functions.c. +// Description at bottom of file. +void WebRtcSpl_ReverseOrderMultArrayElements(int16_t* out_vector, + const int16_t* in_vector, + const int16_t* window, + size_t vector_length, + int16_t right_shifts); +void WebRtcSpl_ElementwiseVectorMult(int16_t* out_vector, + const int16_t* in_vector, + const int16_t* window, + size_t vector_length, + int16_t right_shifts); +void WebRtcSpl_AddVectorsAndShift(int16_t* out_vector, + const int16_t* in_vector1, + const int16_t* in_vector2, + size_t vector_length, + int16_t right_shifts); +void WebRtcSpl_AddAffineVectorToVector(int16_t* out_vector, + int16_t* in_vector, + int16_t gain, + int32_t add_constant, + int16_t right_shifts, + size_t vector_length); +void WebRtcSpl_AffineTransformVector(int16_t* out_vector, + int16_t* in_vector, + int16_t gain, + int32_t add_constant, + int16_t right_shifts, + size_t vector_length); +// End: iLBC specific functions. + +// Signal processing operations. 
+ +// A 32-bit fix-point implementation of auto-correlation computation +// +// Input: +// - in_vector : Vector to calculate autocorrelation upon +// - in_vector_length : Length (in samples) of |vector| +// - order : The order up to which the autocorrelation should be +// calculated +// +// Output: +// - result : auto-correlation values (values should be seen +// relative to each other since the absolute values +// might have been down shifted to avoid overflow) +// +// - scale : The number of left shifts required to obtain the +// auto-correlation in Q0 +// +// Return value : Number of samples in |result|, i.e. (order+1) +size_t WebRtcSpl_AutoCorrelation(const int16_t* in_vector, + size_t in_vector_length, + size_t order, + int32_t* result, + int* scale); + +// A 32-bit fix-point implementation of the Levinson-Durbin algorithm that +// does NOT use the 64 bit class +// +// Input: +// - auto_corr : Vector with autocorrelation values of length >= |order|+1 +// - order : The LPC filter order (support up to order 20) +// +// Output: +// - lpc_coef : lpc_coef[0..order] LPC coefficients in Q12 +// - refl_coef : refl_coef[0...order-1]| Reflection coefficients in Q15 +// +// Return value : 1 for stable 0 for unstable +int16_t WebRtcSpl_LevinsonDurbin(const int32_t* auto_corr, + int16_t* lpc_coef, + int16_t* refl_coef, + size_t order); + +// Converts reflection coefficients |refl_coef| to LPC coefficients |lpc_coef|. +// This version is a 16 bit operation. +// +// NOTE: The 16 bit refl_coef -> lpc_coef conversion might result in a +// "slightly unstable" filter (i.e., a pole just outside the unit circle) in +// "rare" cases even if the reflection coefficients are stable. 
+// +// Input: +// - refl_coef : Reflection coefficients in Q15 that should be converted +// to LPC coefficients +// - use_order : Number of coefficients in |refl_coef| +// +// Output: +// - lpc_coef : LPC coefficients in Q12 +void WebRtcSpl_ReflCoefToLpc(const int16_t* refl_coef, + int use_order, + int16_t* lpc_coef); + +// Converts LPC coefficients |lpc_coef| to reflection coefficients |refl_coef|. +// This version is a 16 bit operation. +// The conversion is implemented by the step-down algorithm. +// +// Input: +// - lpc_coef : LPC coefficients in Q12, that should be converted to +// reflection coefficients +// - use_order : Number of coefficients in |lpc_coef| +// +// Output: +// - refl_coef : Reflection coefficients in Q15. +void WebRtcSpl_LpcToReflCoef(int16_t* lpc_coef, + int use_order, + int16_t* refl_coef); + +// Calculates reflection coefficients (16 bit) from auto-correlation values +// +// Input: +// - auto_corr : Auto-correlation values +// - use_order : Number of coefficients wanted be calculated +// +// Output: +// - refl_coef : Reflection coefficients in Q15. +void WebRtcSpl_AutoCorrToReflCoef(const int32_t* auto_corr, + int use_order, + int16_t* refl_coef); + +// The functions (with related pointer) calculate the cross-correlation between +// two sequences |seq1| and |seq2|. +// |seq1| is fixed and |seq2| slides as the pointer is increased with the +// amount |step_seq2|. Note the arguments should obey the relationship: +// |dim_seq| - 1 + |step_seq2| * (|dim_cross_correlation| - 1) < +// buffer size of |seq2| +// +// Input: +// - seq1 : First sequence (fixed throughout the correlation) +// - seq2 : Second sequence (slides |step_vector2| for each +// new correlation) +// - dim_seq : Number of samples to use in the cross-correlation +// - dim_cross_correlation : Number of cross-correlations to calculate (the +// start position for |vector2| is updated for each +// new one) +// - right_shifts : Number of right bit shifts to use. 
This will +// become the output Q-domain. +// - step_seq2 : How many (positive or negative) steps the +// |vector2| pointer should be updated for each new +// cross-correlation value. +// +// Output: +// - cross_correlation : The cross-correlation in Q(-right_shifts) +typedef void (*CrossCorrelation)(int32_t* cross_correlation, + const int16_t* seq1, + const int16_t* seq2, + size_t dim_seq, + size_t dim_cross_correlation, + int right_shifts, + int step_seq2); +extern CrossCorrelation WebRtcSpl_CrossCorrelation; +void WebRtcSpl_CrossCorrelationC(int32_t* cross_correlation, + const int16_t* seq1, + const int16_t* seq2, + size_t dim_seq, + size_t dim_cross_correlation, + int right_shifts, + int step_seq2); +#if defined(WEBRTC_HAS_NEON) +void WebRtcSpl_CrossCorrelationNeon(int32_t* cross_correlation, + const int16_t* seq1, + const int16_t* seq2, + size_t dim_seq, + size_t dim_cross_correlation, + int right_shifts, + int step_seq2); +#endif +#if defined(MIPS32_LE) +void WebRtcSpl_CrossCorrelation_mips(int32_t* cross_correlation, + const int16_t* seq1, + const int16_t* seq2, + size_t dim_seq, + size_t dim_cross_correlation, + int right_shifts, + int step_seq2); +#endif + +// Creates (the first half of) a Hanning window. Size must be at least 1 and +// at most 512. +// +// Input: +// - size : Length of the requested Hanning window (1 to 512) +// +// Output: +// - window : Hanning vector in Q14. +void WebRtcSpl_GetHanningWindow(int16_t* window, size_t size); + +// Calculates y[k] = sqrt(1 - x[k]^2) for each element of the input vector +// |in_vector|. Input and output values are in Q15. +// +// Inputs: +// - in_vector : Values to calculate sqrt(1 - x^2) of +// - vector_length : Length of vector |in_vector| +// +// Output: +// - out_vector : Output values in Q15 +void WebRtcSpl_SqrtOfOneMinusXSquared(int16_t* in_vector, + size_t vector_length, + int16_t* out_vector); +// End: Signal processing operations. + +// Randomization functions. 
Implementations collected in +// randomization_functions.c and descriptions at bottom of this file. +int16_t WebRtcSpl_RandU(uint32_t* seed); +int16_t WebRtcSpl_RandN(uint32_t* seed); +int16_t WebRtcSpl_RandUArray(int16_t* vector, + int16_t vector_length, + uint32_t* seed); +// End: Randomization functions. + +// Math functions +int32_t WebRtcSpl_Sqrt(int32_t value); +int32_t WebRtcSpl_SqrtFloor(int32_t value); + +// Divisions. Implementations collected in division_operations.c and +// descriptions at bottom of this file. +uint32_t WebRtcSpl_DivU32U16(uint32_t num, uint16_t den); +int32_t WebRtcSpl_DivW32W16(int32_t num, int16_t den); +int16_t WebRtcSpl_DivW32W16ResW16(int32_t num, int16_t den); +int32_t WebRtcSpl_DivResultInQ31(int32_t num, int32_t den); +int32_t WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low); +// End: Divisions. + +int32_t WebRtcSpl_Energy(int16_t* vector, + size_t vector_length, + int* scale_factor); + +// Calculates the dot product between two (int16_t) vectors. +// +// Input: +// - vector1 : Vector 1 +// - vector2 : Vector 2 +// - vector_length : Number of samples used in the dot product +// - scaling : The number of right bit shifts to apply on each term +// during calculation to avoid overflow, i.e., the +// output will be in Q(-|scaling|) +// +// Return value : The dot product in Q(-scaling) +int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1, + const int16_t* vector2, + size_t length, + int scaling); + +// Filter operations. +size_t WebRtcSpl_FilterAR(const int16_t* ar_coef, + size_t ar_coef_length, + const int16_t* in_vector, + size_t in_vector_length, + int16_t* filter_state, + size_t filter_state_length, + int16_t* filter_state_low, + size_t filter_state_low_length, + int16_t* out_vector, + int16_t* out_vector_low, + size_t out_vector_low_length); + +// WebRtcSpl_FilterMAFastQ12(...) 
+// +// Performs a MA filtering on a vector in Q12 +// +// Input: +// - in_vector : Input samples (state in positions +// in_vector[-order] .. in_vector[-1]) +// - ma_coef : Filter coefficients (in Q12) +// - ma_coef_length : Number of B coefficients (order+1) +// - vector_length : Number of samples to be filtered +// +// Output: +// - out_vector : Filtered samples +// +void WebRtcSpl_FilterMAFastQ12(const int16_t* in_vector, + int16_t* out_vector, + const int16_t* ma_coef, + size_t ma_coef_length, + size_t vector_length); + +// Performs a AR filtering on a vector in Q12 +// Input: +// - data_in : Input samples +// - data_out : State information in positions +// data_out[-order] .. data_out[-1] +// - coefficients : Filter coefficients (in Q12) +// - coefficients_length: Number of coefficients (order+1) +// - data_length : Number of samples to be filtered +// Output: +// - data_out : Filtered samples +void WebRtcSpl_FilterARFastQ12(const int16_t* data_in, + int16_t* data_out, + const int16_t* __restrict coefficients, + size_t coefficients_length, + size_t data_length); + +// The functions (with related pointer) perform a MA down sampling filter +// on a vector. +// Input: +// - data_in : Input samples (state in positions +// data_in[-order] .. data_in[-1]) +// - data_in_length : Number of samples in |data_in| to be filtered. 
+// This must be at least +// |delay| + |factor|*(|out_vector_length|-1) + 1) +// - data_out_length : Number of down sampled samples desired +// - coefficients : Filter coefficients (in Q12) +// - coefficients_length: Number of coefficients (order+1) +// - factor : Decimation factor +// - delay : Delay of filter (compensated for in out_vector) +// Output: +// - data_out : Filtered samples +// Return value : 0 if OK, -1 if |in_vector| is too short +typedef int (*DownsampleFast)(const int16_t* data_in, + size_t data_in_length, + int16_t* data_out, + size_t data_out_length, + const int16_t* __restrict coefficients, + size_t coefficients_length, + int factor, + size_t delay); +extern DownsampleFast WebRtcSpl_DownsampleFast; +int WebRtcSpl_DownsampleFastC(const int16_t* data_in, + size_t data_in_length, + int16_t* data_out, + size_t data_out_length, + const int16_t* __restrict coefficients, + size_t coefficients_length, + int factor, + size_t delay); +#if defined(WEBRTC_HAS_NEON) +int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in, + size_t data_in_length, + int16_t* data_out, + size_t data_out_length, + const int16_t* __restrict coefficients, + size_t coefficients_length, + int factor, + size_t delay); +#endif +#if defined(MIPS32_LE) +int WebRtcSpl_DownsampleFast_mips(const int16_t* data_in, + size_t data_in_length, + int16_t* data_out, + size_t data_out_length, + const int16_t* __restrict coefficients, + size_t coefficients_length, + int factor, + size_t delay); +#endif + +// End: Filter operations. + +// FFT operations + +int WebRtcSpl_ComplexFFT(int16_t vector[], int stages, int mode); +int WebRtcSpl_ComplexIFFT(int16_t vector[], int stages, int mode); + +// Treat a 16-bit complex data buffer |complex_data| as an array of 32-bit +// values, and swap elements whose indexes are bit-reverses of each other. 
+// +// Input: +// - complex_data : Complex data buffer containing 2^|stages| real +// elements interleaved with 2^|stages| imaginary +// elements: [Re Im Re Im Re Im....] +// - stages : Number of FFT stages. Must be at least 3 and at most +// 10, since the table WebRtcSpl_kSinTable1024[] is 1024 +// elements long. +// +// Output: +// - complex_data : The complex data buffer. + +void WebRtcSpl_ComplexBitReverse(int16_t* __restrict complex_data, int stages); + +// End: FFT operations + +/************************************************************ + * + * RESAMPLING FUNCTIONS AND THEIR STRUCTS ARE DEFINED BELOW + * + ************************************************************/ + +/******************************************************************* + * resample.c + * + * Includes the following resampling combinations + * 22 kHz -> 16 kHz + * 16 kHz -> 22 kHz + * 22 kHz -> 8 kHz + * 8 kHz -> 22 kHz + * + ******************************************************************/ + +// state structure for 22 -> 16 resampler +typedef struct { + int32_t S_22_44[8]; + int32_t S_44_32[8]; + int32_t S_32_16[8]; +} WebRtcSpl_State22khzTo16khz; + +void WebRtcSpl_Resample22khzTo16khz(const int16_t* in, + int16_t* out, + WebRtcSpl_State22khzTo16khz* state, + int32_t* tmpmem); + +void WebRtcSpl_ResetResample22khzTo16khz(WebRtcSpl_State22khzTo16khz* state); + +// state structure for 16 -> 22 resampler +typedef struct { + int32_t S_16_32[8]; + int32_t S_32_22[8]; +} WebRtcSpl_State16khzTo22khz; + +void WebRtcSpl_Resample16khzTo22khz(const int16_t* in, + int16_t* out, + WebRtcSpl_State16khzTo22khz* state, + int32_t* tmpmem); + +void WebRtcSpl_ResetResample16khzTo22khz(WebRtcSpl_State16khzTo22khz* state); + +// state structure for 22 -> 8 resampler +typedef struct { + int32_t S_22_22[16]; + int32_t S_22_16[8]; + int32_t S_16_8[8]; +} WebRtcSpl_State22khzTo8khz; + +void WebRtcSpl_Resample22khzTo8khz(const int16_t* in, int16_t* out, + WebRtcSpl_State22khzTo8khz* state, + int32_t* tmpmem); + 
+void WebRtcSpl_ResetResample22khzTo8khz(WebRtcSpl_State22khzTo8khz* state); + +// state structure for 8 -> 22 resampler +typedef struct { + int32_t S_8_16[8]; + int32_t S_16_11[8]; + int32_t S_11_22[8]; +} WebRtcSpl_State8khzTo22khz; + +void WebRtcSpl_Resample8khzTo22khz(const int16_t* in, int16_t* out, + WebRtcSpl_State8khzTo22khz* state, + int32_t* tmpmem); + +void WebRtcSpl_ResetResample8khzTo22khz(WebRtcSpl_State8khzTo22khz* state); + +/******************************************************************* + * resample_fractional.c + * Functions for internal use in the other resample functions + * + * Includes the following resampling combinations + * 48 kHz -> 32 kHz + * 32 kHz -> 24 kHz + * 44 kHz -> 32 kHz + * + ******************************************************************/ + +void WebRtcSpl_Resample48khzTo32khz(const int32_t* In, int32_t* Out, size_t K); + +void WebRtcSpl_Resample32khzTo24khz(const int32_t* In, int32_t* Out, size_t K); + +void WebRtcSpl_Resample44khzTo32khz(const int32_t* In, int32_t* Out, size_t K); + +/******************************************************************* + * resample_48khz.c + * + * Includes the following resampling combinations + * 48 kHz -> 16 kHz + * 16 kHz -> 48 kHz + * 48 kHz -> 8 kHz + * 8 kHz -> 48 kHz + * + ******************************************************************/ + +typedef struct { + int32_t S_48_48[16]; + int32_t S_48_32[8]; + int32_t S_32_16[8]; +} WebRtcSpl_State48khzTo16khz; + +void WebRtcSpl_Resample48khzTo16khz(const int16_t* in, int16_t* out, + WebRtcSpl_State48khzTo16khz* state, + int32_t* tmpmem); + +void WebRtcSpl_ResetResample48khzTo16khz(WebRtcSpl_State48khzTo16khz* state); + +typedef struct { + int32_t S_16_32[8]; + int32_t S_32_24[8]; + int32_t S_24_48[8]; +} WebRtcSpl_State16khzTo48khz; + +void WebRtcSpl_Resample16khzTo48khz(const int16_t* in, int16_t* out, + WebRtcSpl_State16khzTo48khz* state, + int32_t* tmpmem); + +void WebRtcSpl_ResetResample16khzTo48khz(WebRtcSpl_State16khzTo48khz* 
state); + +typedef struct { + int32_t S_48_24[8]; + int32_t S_24_24[16]; + int32_t S_24_16[8]; + int32_t S_16_8[8]; +} WebRtcSpl_State48khzTo8khz; + +void WebRtcSpl_Resample48khzTo8khz(const int16_t* in, int16_t* out, + WebRtcSpl_State48khzTo8khz* state, + int32_t* tmpmem); + +void WebRtcSpl_ResetResample48khzTo8khz(WebRtcSpl_State48khzTo8khz* state); + +typedef struct { + int32_t S_8_16[8]; + int32_t S_16_12[8]; + int32_t S_12_24[8]; + int32_t S_24_48[8]; +} WebRtcSpl_State8khzTo48khz; + +void WebRtcSpl_Resample8khzTo48khz(const int16_t* in, int16_t* out, + WebRtcSpl_State8khzTo48khz* state, + int32_t* tmpmem); + +void WebRtcSpl_ResetResample8khzTo48khz(WebRtcSpl_State8khzTo48khz* state); + +/******************************************************************* + * resample_by_2.c + * + * Includes down and up sampling by a factor of two. + * + ******************************************************************/ + +void WebRtcSpl_DownsampleBy2(const int16_t* in, size_t len, + int16_t* out, int32_t* filtState); + +void WebRtcSpl_UpsampleBy2(const int16_t* in, size_t len, + int16_t* out, int32_t* filtState); + +/************************************************************ + * END OF RESAMPLING FUNCTIONS + ************************************************************/ +void WebRtcSpl_AnalysisQMF(const int16_t* in_data, + size_t in_data_length, + int16_t* low_band, + int16_t* high_band, + int32_t* filter_state1, + int32_t* filter_state2); +void WebRtcSpl_SynthesisQMF(const int16_t* low_band, + const int16_t* high_band, + size_t band_length, + int16_t* out_data, + int32_t* filter_state1, + int32_t* filter_state2); + +#ifdef __cplusplus +} +#endif // __cplusplus +#endif // WEBRTC_SPL_SIGNAL_PROCESSING_LIBRARY_H_ + +// +// WebRtcSpl_AddSatW16(...) +// WebRtcSpl_AddSatW32(...) +// +// Returns the result of a saturated 16-bit, respectively 32-bit, addition of +// the numbers specified by the |var1| and |var2| parameters. 
+//
+// Input:
+// - var1 : Input variable 1
+// - var2 : Input variable 2
+//
+// Return value : Added and saturated value
+//
+
+//
+// WebRtcSpl_SubSatW16(...)
+// WebRtcSpl_SubSatW32(...)
+//
+// Returns the result of a saturated 16-bit, respectively 32-bit, subtraction
+// of the numbers specified by the |var1| and |var2| parameters.
+//
+// Input:
+// - var1 : Input variable 1
+// - var2 : Input variable 2
+//
+// Returned value : Subtracted and saturated value
+//
+
+//
+// WebRtcSpl_GetSizeInBits(...)
+//
+// Returns the # of bits that are needed at the most to represent the number
+// specified by the |value| parameter.
+//
+// Input:
+// - value : Input value
+//
+// Return value : Number of bits needed to represent |value|
+//
+
+//
+// WebRtcSpl_NormW32(...)
+//
+// Norm returns the # of left shifts required to 32-bit normalize the 32-bit
+// signed number specified by the |value| parameter.
+//
+// Input:
+// - value : Input value
+//
+// Return value : Number of bit shifts needed to 32-bit normalize |value|
+//
+
+//
+// WebRtcSpl_NormW16(...)
+//
+// Norm returns the # of left shifts required to 16-bit normalize the 16-bit
+// signed number specified by the |value| parameter.
+//
+// Input:
+// - value : Input value
+//
+// Return value : Number of bit shifts needed to 16-bit normalize |value|
+//
+
+//
+// WebRtcSpl_NormU32(...)
+//
+// Norm returns the # of left shifts required to 32-bit normalize the unsigned
+// 32-bit number specified by the |value| parameter.
+//
+// Input:
+// - value : Input value
+//
+// Return value : Number of bit shifts needed to 32-bit normalize |value|
+//
+
+//
+// WebRtcSpl_GetScalingSquare(...)
+//
+// Returns the # of bits required to scale the samples specified in the
+// |in_vector| parameter so that, if the squares of the samples are added the
+// # of times specified by the |times| parameter, the 32-bit addition will not
+// overflow (result in int32_t).
+//
+// Input:
+// - in_vector : Input vector to check scaling on
+// - in_vector_length : Samples in |in_vector|
+// - times : Number of additions to be performed
+//
+// Return value : Number of right bit shifts needed to avoid
+// overflow in the addition calculation
+//
+
+//
+// WebRtcSpl_MemSetW16(...)
+//
+// Sets all the values in the int16_t vector |vector| of length
+// |vector_length| to the specified value |set_value|
+//
+// Input:
+// - vector : Pointer to the int16_t vector
+// - set_value : Value specified
+// - vector_length : Length of vector
+//
+
+//
+// WebRtcSpl_MemSetW32(...)
+//
+// Sets all the values in the int32_t vector |vector| of length
+// |vector_length| to the specified value |set_value|
+//
+// Input:
+// - vector : Pointer to the int32_t vector
+// - set_value : Value specified
+// - vector_length : Length of vector
+//
+
+//
+// WebRtcSpl_MemCpyReversedOrder(...)
+//
+// Copies all the values from the source int16_t vector |in_vector| to a
+// destination int16_t vector |out_vector|. It is done in reversed order,
+// meaning that the first sample of |in_vector| is copied to the last sample of
+// the |out_vector|. The procedure continues until the last sample of
+// |in_vector| has been copied to the first sample of |out_vector|. This
+// creates a reversed vector. Used in e.g. prediction in iLBC.
+//
+// Input:
+// - in_vector : Pointer to the first sample in a int16_t vector
+// of length |length|
+// - vector_length : Number of elements to copy
+//
+// Output:
+// - out_vector : Pointer to the last sample in a int16_t vector
+// of length |length|
+//
+
+//
+// WebRtcSpl_CopyFromEndW16(...)
+//
+// Copies the rightmost |samples| of |in_vector| (of length |in_vector_length|)
+// to the vector |out_vector|.
+// +// Input: +// - in_vector : Input vector +// - in_vector_length : Number of samples in |in_vector| +// - samples : Number of samples to extract (from right side) +// from |in_vector| +// +// Output: +// - out_vector : Vector with the requested samples +// + +// +// WebRtcSpl_ZerosArrayW16(...) +// WebRtcSpl_ZerosArrayW32(...) +// +// Inserts the value "zero" in all positions of a w16 and a w32 vector +// respectively. +// +// Input: +// - vector_length : Number of samples in vector +// +// Output: +// - vector : Vector containing all zeros +// + +// +// WebRtcSpl_VectorBitShiftW16(...) +// WebRtcSpl_VectorBitShiftW32(...) +// +// Bit shifts all the values in a vector up or downwards. Different calls for +// int16_t and int32_t vectors respectively. +// +// Input: +// - vector_length : Length of vector +// - in_vector : Pointer to the vector that should be bit shifted +// - right_shifts : Number of right bit shifts (negative value gives left +// shifts) +// +// Output: +// - out_vector : Pointer to the result vector (can be the same as +// |in_vector|) +// + +// +// WebRtcSpl_VectorBitShiftW32ToW16(...) +// +// Bit shifts all the values in a int32_t vector up or downwards and +// stores the result as an int16_t vector. The function will saturate the +// signal if needed, before storing in the output vector. +// +// Input: +// - vector_length : Length of vector +// - in_vector : Pointer to the vector that should be bit shifted +// - right_shifts : Number of right bit shifts (negative value gives left +// shifts) +// +// Output: +// - out_vector : Pointer to the result vector (can be the same as +// |in_vector|) +// + +// +// WebRtcSpl_ScaleVector(...) 
+// +// Performs the vector operation: +// out_vector[k] = (gain*in_vector[k])>>right_shifts +// +// Input: +// - in_vector : Input vector +// - gain : Scaling gain +// - vector_length : Elements in the |in_vector| +// - right_shifts : Number of right bit shifts applied +// +// Output: +// - out_vector : Output vector (can be the same as |in_vector|) +// + +// +// WebRtcSpl_ScaleVectorWithSat(...) +// +// Performs the vector operation: +// out_vector[k] = SATURATE( (gain*in_vector[k])>>right_shifts ) +// +// Input: +// - in_vector : Input vector +// - gain : Scaling gain +// - vector_length : Elements in the |in_vector| +// - right_shifts : Number of right bit shifts applied +// +// Output: +// - out_vector : Output vector (can be the same as |in_vector|) +// + +// +// WebRtcSpl_ScaleAndAddVectors(...) +// +// Performs the vector operation: +// out_vector[k] = (gain1*in_vector1[k])>>right_shifts1 +// + (gain2*in_vector2[k])>>right_shifts2 +// +// Input: +// - in_vector1 : Input vector 1 +// - gain1 : Gain to be used for vector 1 +// - right_shifts1 : Right bit shift to be used for vector 1 +// - in_vector2 : Input vector 2 +// - gain2 : Gain to be used for vector 2 +// - right_shifts2 : Right bit shift to be used for vector 2 +// - vector_length : Elements in the input vectors +// +// Output: +// - out_vector : Output vector +// + +// +// WebRtcSpl_ReverseOrderMultArrayElements(...) +// +// Performs the vector operation: +// out_vector[n] = (in_vector[n]*window[-n])>>right_shifts +// +// Input: +// - in_vector : Input vector +// - window : Window vector (should be reversed). The pointer +// should be set to the last value in the vector +// - right_shifts : Number of right bit shift to be applied after the +// multiplication +// - vector_length : Number of elements in |in_vector| +// +// Output: +// - out_vector : Output vector (can be same as |in_vector|) +// + +// +// WebRtcSpl_ElementwiseVectorMult(...) 
+// +// Performs the vector operation: +// out_vector[n] = (in_vector[n]*window[n])>>right_shifts +// +// Input: +// - in_vector : Input vector +// - window : Window vector. +// - right_shifts : Number of right bit shift to be applied after the +// multiplication +// - vector_length : Number of elements in |in_vector| +// +// Output: +// - out_vector : Output vector (can be same as |in_vector|) +// + +// +// WebRtcSpl_AddVectorsAndShift(...) +// +// Performs the vector operation: +// out_vector[k] = (in_vector1[k] + in_vector2[k])>>right_shifts +// +// Input: +// - in_vector1 : Input vector 1 +// - in_vector2 : Input vector 2 +// - right_shifts : Number of right bit shift to be applied after the +// multiplication +// - vector_length : Number of elements in |in_vector1| and |in_vector2| +// +// Output: +// - out_vector : Output vector (can be same as |in_vector1|) +// + +// +// WebRtcSpl_AddAffineVectorToVector(...) +// +// Adds an affine transformed vector to another vector |out_vector|, i.e, +// performs +// out_vector[k] += (in_vector[k]*gain+add_constant)>>right_shifts +// +// Input: +// - in_vector : Input vector +// - gain : Gain value, used to multiply the in vector with +// - add_constant : Constant value to add (usually 1<<(right_shifts-1), +// but others can be used as well +// - right_shifts : Number of right bit shifts (0-16) +// - vector_length : Number of samples in |in_vector| and |out_vector| +// +// Output: +// - out_vector : Vector with the output +// + +// +// WebRtcSpl_AffineTransformVector(...) 
+// +// Affine transforms a vector, i.e, performs +// out_vector[k] = (in_vector[k]*gain+add_constant)>>right_shifts +// +// Input: +// - in_vector : Input vector +// - gain : Gain value, used to multiply the in vector with +// - add_constant : Constant value to add (usually 1<<(right_shifts-1), +// but others can be used as well +// - right_shifts : Number of right bit shifts (0-16) +// - vector_length : Number of samples in |in_vector| and |out_vector| +// +// Output: +// - out_vector : Vector with the output +// + +// +// WebRtcSpl_IncreaseSeed(...) +// +// Increases the seed (and returns the new value) +// +// Input: +// - seed : Seed for random calculation +// +// Output: +// - seed : Updated seed value +// +// Return value : The new seed value +// + +// +// WebRtcSpl_RandU(...) +// +// Produces a uniformly distributed value in the int16_t range +// +// Input: +// - seed : Seed for random calculation +// +// Output: +// - seed : Updated seed value +// +// Return value : Uniformly distributed value in the range +// [Word16_MIN...Word16_MAX] +// + +// +// WebRtcSpl_RandN(...) +// +// Produces a normal distributed value in the int16_t range +// +// Input: +// - seed : Seed for random calculation +// +// Output: +// - seed : Updated seed value +// +// Return value : N(0,1) value in the Q13 domain +// + +// +// WebRtcSpl_RandUArray(...) +// +// Produces a uniformly distributed vector with elements in the int16_t +// range +// +// Input: +// - vector_length : Samples wanted in the vector +// - seed : Seed for random calculation +// +// Output: +// - vector : Vector with the uniform values +// - seed : Updated seed value +// +// Return value : Number of samples in vector, i.e., |vector_length| +// + +// +// WebRtcSpl_Sqrt(...) +// +// Returns the square root of the input value |value|. The precision of this +// function is integer precision, i.e., sqrt(8) gives 2 as answer. +// If |value| is a negative number then 0 is returned. 
+//
+// Algorithm:
+//
+// A sixth order Taylor Series expansion is used here to compute the square
+// root of a number y^0.5 = (1+x)^0.5
+// where
+// x = y-1
+// (1+x)^0.5 = 1 + (x/2) - 0.5*(x/2)^2 + 0.5*(x/2)^3 - 0.625*(x/2)^4 + 0.875*(x/2)^5
+// 0.5 <= x < 1
+//
+// Input:
+// - value : Value to calculate sqrt of
+//
+// Return value : Result of the sqrt calculation
+//
+
+//
+// WebRtcSpl_SqrtFloor(...)
+//
+// Returns the square root of the input value |value|. The precision of this
+// function is rounding down integer precision, i.e., sqrt(8) gives 2 as answer.
+// If |value| is a negative number then 0 is returned.
+//
+// Algorithm:
+//
+// An iterative 4 cycle/bit routine
+//
+// Input:
+// - value : Value to calculate sqrt of
+//
+// Return value : Result of the sqrt calculation
+//
+
+//
+// WebRtcSpl_DivU32U16(...)
+//
+// Divides a uint32_t |num| by a uint16_t |den|.
+//
+// If |den|==0, (uint32_t)0xFFFFFFFF is returned.
+//
+// Input:
+// - num : Numerator
+// - den : Denominator
+//
+// Return value : Result of the division (as a uint32_t), i.e., the
+// integer part of num/den.
+//
+
+//
+// WebRtcSpl_DivW32W16(...)
+//
+// Divides a int32_t |num| by a int16_t |den|.
+//
+// If |den|==0, (int32_t)0x7FFFFFFF is returned.
+//
+// Input:
+// - num : Numerator
+// - den : Denominator
+//
+// Return value : Result of the division (as a int32_t), i.e., the
+// integer part of num/den.
+//
+
+//
+// WebRtcSpl_DivW32W16ResW16(...)
+//
+// Divides a int32_t |num| by a int16_t |den|, assuming that the
+// result is less than 32768, otherwise an unpredictable result will occur.
+//
+// If |den|==0, (int16_t)0x7FFF is returned.
+//
+// Input:
+// - num : Numerator
+// - den : Denominator
+//
+// Return value : Result of the division (as a int16_t), i.e., the
+// integer part of num/den.
+//
+
+//
+// WebRtcSpl_DivResultInQ31(...)
+// +// Divides a int32_t |num| by a int16_t |den|, assuming that the +// absolute value of the denominator is larger than the numerator, otherwise +// an unpredictable result will occur. +// +// Input: +// - num : Numerator +// - den : Denominator +// +// Return value : Result of the division in Q31. +// + +// +// WebRtcSpl_DivW32HiLow(...) +// +// Divides a int32_t |num| by a denominator in hi, low format. The +// absolute value of the denominator has to be larger (or equal to) the +// numerator. +// +// Input: +// - num : Numerator +// - den_hi : High part of denominator +// - den_low : Low part of denominator +// +// Return value : Divided value in Q31 +// + +// +// WebRtcSpl_Energy(...) +// +// Calculates the energy of a vector +// +// Input: +// - vector : Vector which the energy should be calculated on +// - vector_length : Number of samples in vector +// +// Output: +// - scale_factor : Number of left bit shifts needed to get the physical +// energy value, i.e, to get the Q0 value +// +// Return value : Energy value in Q(-|scale_factor|) +// + +// +// WebRtcSpl_FilterAR(...) +// +// Performs a 32-bit AR filtering on a vector in Q12 +// +// Input: +// - ar_coef : AR-coefficient vector (values in Q12), +// ar_coef[0] must be 4096. +// - ar_coef_length : Number of coefficients in |ar_coef|. +// - in_vector : Vector to be filtered. +// - in_vector_length : Number of samples in |in_vector|. +// - filter_state : Current state (higher part) of the filter. +// - filter_state_length : Length (in samples) of |filter_state|. +// - filter_state_low : Current state (lower part) of the filter. +// - filter_state_low_length : Length (in samples) of |filter_state_low|. +// - out_vector_low_length : Maximum length (in samples) of +// |out_vector_low|. +// +// Output: +// - filter_state : Updated state (upper part) vector. +// - filter_state_low : Updated state (lower part) vector. +// - out_vector : Vector containing the upper part of the +// filtered values. 
+// - out_vector_low : Vector containing the lower part of the +// filtered values. +// +// Return value : Number of samples in the |out_vector|. +// + +// +// WebRtcSpl_ComplexIFFT(...) +// +// Complex Inverse FFT +// +// Computes an inverse complex 2^|stages|-point FFT on the input vector, which +// is in bit-reversed order. The original content of the vector is destroyed in +// the process, since the input is overwritten by the output, normal-ordered, +// FFT vector. With X as the input complex vector, y as the output complex +// vector and with M = 2^|stages|, the following is computed: +// +// M-1 +// y(k) = sum[X(i)*[cos(2*pi*i*k/M) + j*sin(2*pi*i*k/M)]] +// i=0 +// +// The implementations are optimized for speed, not for code size. It uses the +// decimation-in-time algorithm with radix-2 butterfly technique. +// +// Input: +// - vector : In pointer to complex vector containing 2^|stages| +// real elements interleaved with 2^|stages| imaginary +// elements. +// [ReImReImReIm....] +// The elements are in Q(-scale) domain, see more on Return +// Value below. +// +// - stages : Number of FFT stages. Must be at least 3 and at most 10, +// since the table WebRtcSpl_kSinTable1024[] is 1024 +// elements long. +// +// - mode : This parameter gives the user to choose how the FFT +// should work. +// mode==0: Low-complexity and Low-accuracy mode +// mode==1: High-complexity and High-accuracy mode +// +// Output: +// - vector : Out pointer to the FFT vector (the same as input). +// +// Return Value : The scale value that tells the number of left bit shifts +// that the elements in the |vector| should be shifted with +// in order to get Q0 values, i.e. the physically correct +// values. The scale parameter is always 0 or positive, +// except if N>1024 (|stages|>10), which returns a scale +// value of -1, indicating error. +// + +// +// WebRtcSpl_ComplexFFT(...) 
+// +// Complex FFT +// +// Computes a complex 2^|stages|-point FFT on the input vector, which is in +// bit-reversed order. The original content of the vector is destroyed in +// the process, since the input is overwritten by the output, normal-ordered, +// FFT vector. With x as the input complex vector, Y as the output complex +// vector and with M = 2^|stages|, the following is computed: +// +// M-1 +// Y(k) = 1/M * sum[x(i)*[cos(2*pi*i*k/M) + j*sin(2*pi*i*k/M)]] +// i=0 +// +// The implementations are optimized for speed, not for code size. It uses the +// decimation-in-time algorithm with radix-2 butterfly technique. +// +// This routine prevents overflow by scaling by 2 before each FFT stage. This is +// a fixed scaling, for proper normalization - there will be log2(n) passes, so +// this results in an overall factor of 1/n, distributed to maximize arithmetic +// accuracy. +// +// Input: +// - vector : In pointer to complex vector containing 2^|stages| real +// elements interleaved with 2^|stages| imaginary elements. +// [ReImReImReIm....] +// The output is in the Q0 domain. +// +// - stages : Number of FFT stages. Must be at least 3 and at most 10, +// since the table WebRtcSpl_kSinTable1024[] is 1024 +// elements long. +// +// - mode : This parameter gives the user to choose how the FFT +// should work. +// mode==0: Low-complexity and Low-accuracy mode +// mode==1: High-complexity and High-accuracy mode +// +// Output: +// - vector : The output FFT vector is in the Q0 domain. +// +// Return value : The scale parameter is always 0, except if N>1024, +// which returns a scale value of -1, indicating error. +// + +// +// WebRtcSpl_AnalysisQMF(...) +// +// Splits a 0-2*F Hz signal into two sub bands: 0-F Hz and F-2*F Hz. The +// current version has F = 8000, therefore, a super-wideband audio signal is +// split to lower-band 0-8 kHz and upper-band 8-16 kHz. 
+//
+// Input:
+// - in_data : Wide band speech signal, 320 samples (10 ms)
+//
+// Input & Output:
+// - filter_state1 : Filter state for first All-pass filter
+// - filter_state2 : Filter state for second All-pass filter
+//
+// Output:
+// - low_band : Lower-band signal 0-8 kHz band, 160 samples (10 ms)
+// - high_band : Upper-band signal 8-16 kHz band (flipped in frequency
+// domain), 160 samples (10 ms)
+//
+
+//
+// WebRtcSpl_SynthesisQMF(...)
+//
+// Combines the two sub bands (0-F and F-2*F Hz) into a signal of 0-2*F
+// Hz, (current version has F = 8000 Hz). So the filter combines lower-band
+// (0-8 kHz) and upper-band (8-16 kHz) channels to obtain super-wideband 0-16
+// kHz audio.
+//
+// Input:
+// - low_band : The signal with the 0-8 kHz band, 160 samples (10 ms)
+// - high_band : The signal with the 8-16 kHz band, 160 samples (10 ms)
+//
+// Input & Output:
+// - filter_state1 : Filter state for first All-pass filter
+// - filter_state2 : Filter state for second All-pass filter
+//
+// Output:
+// - out_data : Super-wideband speech signal, 0-16 kHz
+//
+
+// int16_t WebRtcSpl_SatW32ToW16(...)
+//
+// This function saturates a 32-bit word into a 16-bit word.
+//
+// Input:
+// - value32 : The value of a 32-bit word.
+//
+// Output:
+// - out16 : the saturated 16-bit word.
+//
+
+// int32_t WebRtc_MulAccumW16(...)
+//
+// This function multiplies a 16-bit word by a 16-bit word, and accumulates this
+// value to a 32-bit integer.
+//
+// Input:
+// - a : The value of the first 16-bit word.
+// - b : The value of the second 16-bit word.
+// - c : The value of a 32-bit integer.
+//
+// Return Value: The value of a * b + c.
+// diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl.h new file mode 100644 index 000000000..90098caaa --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +// This header file includes the inline functions in +// the fix point signal processing library. + +#ifndef WEBRTC_SPL_SPL_INL_H_ +#define WEBRTC_SPL_SPL_INL_H_ + +#include "webrtc/system_wrappers/include/compile_assert_c.h" + +extern const int8_t kWebRtcSpl_CountLeadingZeros32_Table[64]; + +// Don't call this directly except in tests! +static __inline int WebRtcSpl_CountLeadingZeros32_NotBuiltin(uint32_t n) { + // Normalize n by rounding up to the nearest number that is a sequence of 0 + // bits followed by a sequence of 1 bits. This number has the same number of + // leading zeros as the original n. There are exactly 33 such values. + n |= n >> 1; + n |= n >> 2; + n |= n >> 4; + n |= n >> 8; + n |= n >> 16; + + // Multiply the modified n with a constant selected (by exhaustive search) + // such that each of the 33 possible values of n give a product whose 6 most + // significant bits are unique. Then look up the answer in the table. + return kWebRtcSpl_CountLeadingZeros32_Table[(n * 0x8c0b2891) >> 26]; +} + +// Don't call this directly except in tests! 
+static __inline int WebRtcSpl_CountLeadingZeros64_NotBuiltin(uint64_t n) { + const int leading_zeros = n >> 32 == 0 ? 32 : 0; + return leading_zeros + WebRtcSpl_CountLeadingZeros32_NotBuiltin( + (uint32_t)(n >> (32 - leading_zeros))); +} + +// Returns the number of leading zero bits in the argument. +static __inline int WebRtcSpl_CountLeadingZeros32(uint32_t n) { +#ifdef __GNUC__ + COMPILE_ASSERT(sizeof(unsigned int) == sizeof(uint32_t)); + return n == 0 ? 32 : __builtin_clz(n); +#else + return WebRtcSpl_CountLeadingZeros32_NotBuiltin(n); +#endif +} + +// Returns the number of leading zero bits in the argument. +static __inline int WebRtcSpl_CountLeadingZeros64(uint64_t n) { +#ifdef __GNUC__ + COMPILE_ASSERT(sizeof(unsigned long long) == sizeof(uint64_t)); + return n == 0 ? 64 : __builtin_clzll(n); +#else + return WebRtcSpl_CountLeadingZeros64_NotBuiltin(n); +#endif +} + +#ifdef WEBRTC_ARCH_ARM_V7 +#include "webrtc/common_audio/signal_processing/include/spl_inl_armv7.h" +#else + +#if defined(MIPS32_LE) +#include "webrtc/common_audio/signal_processing/include/spl_inl_mips.h" +#endif + +#if !defined(MIPS_DSP_R1_LE) +static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) { + int16_t out16 = (int16_t) value32; + + if (value32 > 32767) + out16 = 32767; + else if (value32 < -32768) + out16 = -32768; + + return out16; +} + +static __inline int32_t WebRtcSpl_AddSatW32(int32_t a, int32_t b) { + // Do the addition in unsigned numbers, since signed overflow is undefined + // behavior. + const int32_t sum = (int32_t)((uint32_t)a + (uint32_t)b); + + // a + b can't overflow if a and b have different signs. If they have the + // same sign, a + b also has the same sign iff it didn't overflow. + if ((a < 0) == (b < 0) && (a < 0) != (sum < 0)) { + // The direction of the overflow is obvious from the sign of a + b. + return sum < 0 ? 
INT32_MAX : INT32_MIN; + } + return sum; +} + +static __inline int32_t WebRtcSpl_SubSatW32(int32_t a, int32_t b) { + // Do the subtraction in unsigned numbers, since signed overflow is undefined + // behavior. + const int32_t diff = (int32_t)((uint32_t)a - (uint32_t)b); + + // a - b can't overflow if a and b have the same sign. If they have different + // signs, a - b has the same sign as a iff it didn't overflow. + if ((a < 0) != (b < 0) && (a < 0) != (diff < 0)) { + // The direction of the overflow is obvious from the sign of a - b. + return diff < 0 ? INT32_MAX : INT32_MIN; + } + return diff; +} + +static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) { + return WebRtcSpl_SatW32ToW16((int32_t) a + (int32_t) b); +} + +static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) { + return WebRtcSpl_SatW32ToW16((int32_t) var1 - (int32_t) var2); +} +#endif // #if !defined(MIPS_DSP_R1_LE) + +#if !defined(MIPS32_LE) +static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) { + return 32 - WebRtcSpl_CountLeadingZeros32(n); +} + +// Return the number of steps a can be left-shifted without overflow, +// or 0 if a == 0. +static __inline int16_t WebRtcSpl_NormW32(int32_t a) { + return a == 0 ? 0 : WebRtcSpl_CountLeadingZeros32(a < 0 ? ~a : a) - 1; +} + +// Return the number of steps a can be left-shifted without overflow, +// or 0 if a == 0. +static __inline int16_t WebRtcSpl_NormU32(uint32_t a) { + return a == 0 ? 0 : WebRtcSpl_CountLeadingZeros32(a); +} + +// Return the number of steps a can be left-shifted without overflow, +// or 0 if a == 0. +static __inline int16_t WebRtcSpl_NormW16(int16_t a) { + const int32_t a32 = a; + return a == 0 ? 0 : WebRtcSpl_CountLeadingZeros32(a < 0 ? 
~a32 : a32) - 17;
+}
+
+static __inline int32_t WebRtc_MulAccumW16(int16_t a, int16_t b, int32_t c) {
+  return (a * b + c);
+}
+#endif  // #if !defined(MIPS32_LE)
+
+#endif  // WEBRTC_ARCH_ARM_V7
+
+#endif  // WEBRTC_SPL_SPL_INL_H_
diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_armv7.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_armv7.h
new file mode 100644
index 000000000..271880115
--- /dev/null
+++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_armv7.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/* This header file includes the inline functions for ARM processors in
+ * the fix point signal processing library.
+ */
+
+#ifndef WEBRTC_SPL_SPL_INL_ARMV7_H_
+#define WEBRTC_SPL_SPL_INL_ARMV7_H_
+
+/* TODO(kma): Replace some assembly code with GCC intrinsics
+ * (e.g. __builtin_clz).
+ */
+
+/* This function produces a result that is not bit exact with that by the generic
+ * C version in some cases, although the former is at least as accurate as the
+ * latter.
+ */
+static __inline int32_t WEBRTC_SPL_MUL_16_32_RSFT16(int16_t a, int32_t b) {
+  int32_t tmp = 0;
+  __asm __volatile ("smulwb %0, %1, %2":"=r"(tmp):"r"(b), "r"(a));
+  return tmp;
+}
+
+static __inline int32_t WEBRTC_SPL_MUL_16_16(int16_t a, int16_t b) {
+  int32_t tmp = 0;
+  __asm __volatile ("smulbb %0, %1, %2":"=r"(tmp):"r"(a), "r"(b));
+  return tmp;
+}
+
+// TODO(kma): add unit test.
+static __inline int32_t WebRtc_MulAccumW16(int16_t a, int16_t b, int32_t c) { + int32_t tmp = 0; + __asm __volatile ("smlabb %0, %1, %2, %3":"=r"(tmp):"r"(a), "r"(b), "r"(c)); + return tmp; +} + +static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) { + int32_t s_sum = 0; + + __asm __volatile ("qadd16 %0, %1, %2":"=r"(s_sum):"r"(a), "r"(b)); + + return (int16_t) s_sum; +} + +static __inline int32_t WebRtcSpl_AddSatW32(int32_t l_var1, int32_t l_var2) { + int32_t l_sum = 0; + + __asm __volatile ("qadd %0, %1, %2":"=r"(l_sum):"r"(l_var1), "r"(l_var2)); + + return l_sum; +} + +static __inline int32_t WebRtcSpl_SubSatW32(int32_t l_var1, int32_t l_var2) { + int32_t l_sub = 0; + + __asm __volatile ("qsub %0, %1, %2":"=r"(l_sub):"r"(l_var1), "r"(l_var2)); + + return l_sub; +} + +static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) { + int32_t s_sub = 0; + + __asm __volatile ("qsub16 %0, %1, %2":"=r"(s_sub):"r"(var1), "r"(var2)); + + return (int16_t)s_sub; +} + +static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) { + int32_t tmp = 0; + + __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(n)); + + return (int16_t)(32 - tmp); +} + +static __inline int16_t WebRtcSpl_NormW32(int32_t a) { + int32_t tmp = 0; + + if (a == 0) { + return 0; + } + else if (a < 0) { + a ^= 0xFFFFFFFF; + } + + __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(a)); + + return (int16_t)(tmp - 1); +} + +static __inline int16_t WebRtcSpl_NormU32(uint32_t a) { + int tmp = 0; + + if (a == 0) return 0; + + __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(a)); + + return (int16_t)tmp; +} + +static __inline int16_t WebRtcSpl_NormW16(int16_t a) { + int32_t tmp = 0; + int32_t a_32 = a; + + if (a_32 == 0) { + return 0; + } + else if (a_32 < 0) { + a_32 ^= 0xFFFFFFFF; + } + + __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(a_32)); + + return (int16_t)(tmp - 17); +} + +// TODO(kma): add unit test. 
+static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) { + int32_t out = 0; + + __asm __volatile ("ssat %0, #16, %1" : "=r"(out) : "r"(value32)); + + return (int16_t)out; +} + +#endif // WEBRTC_SPL_SPL_INL_ARMV7_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_mips.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_mips.h new file mode 100644 index 000000000..cd04bddcf --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/include/spl_inl_mips.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +// This header file includes the inline functions in +// the fix point signal processing library. 
+ +#ifndef WEBRTC_SPL_SPL_INL_MIPS_H_ +#define WEBRTC_SPL_SPL_INL_MIPS_H_ + +static __inline int32_t WEBRTC_SPL_MUL_16_16(int32_t a, + int32_t b) { + int32_t value32 = 0; + int32_t a1 = 0, b1 = 0; + + __asm __volatile( +#if defined(MIPS32_R2_LE) + "seh %[a1], %[a] \n\t" + "seh %[b1], %[b] \n\t" +#else + "sll %[a1], %[a], 16 \n\t" + "sll %[b1], %[b], 16 \n\t" + "sra %[a1], %[a1], 16 \n\t" + "sra %[b1], %[b1], 16 \n\t" +#endif + "mul %[value32], %[a1], %[b1] \n\t" + : [value32] "=r" (value32), [a1] "=&r" (a1), [b1] "=&r" (b1) + : [a] "r" (a), [b] "r" (b) + : "hi", "lo" + ); + return value32; +} + +static __inline int32_t WEBRTC_SPL_MUL_16_32_RSFT16(int16_t a, + int32_t b) { + int32_t value32 = 0, b1 = 0, b2 = 0; + int32_t a1 = 0; + + __asm __volatile( +#if defined(MIPS32_R2_LE) + "seh %[a1], %[a] \n\t" +#else + "sll %[a1], %[a], 16 \n\t" + "sra %[a1], %[a1], 16 \n\t" +#endif + "andi %[b2], %[b], 0xFFFF \n\t" + "sra %[b1], %[b], 16 \n\t" + "sra %[b2], %[b2], 1 \n\t" + "mul %[value32], %[a1], %[b1] \n\t" + "mul %[b2], %[a1], %[b2] \n\t" + "addiu %[b2], %[b2], 0x4000 \n\t" + "sra %[b2], %[b2], 15 \n\t" + "addu %[value32], %[value32], %[b2] \n\t" + : [value32] "=&r" (value32), [b1] "=&r" (b1), [b2] "=&r" (b2), + [a1] "=&r" (a1) + : [a] "r" (a), [b] "r" (b) + : "hi", "lo" + ); + return value32; +} + +#if defined(MIPS_DSP_R1_LE) +static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) { + __asm __volatile( + "shll_s.w %[value32], %[value32], 16 \n\t" + "sra %[value32], %[value32], 16 \n\t" + : [value32] "+r" (value32) + : + ); + int16_t out16 = (int16_t)value32; + return out16; +} + +static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) { + int32_t value32 = 0; + + __asm __volatile( + "addq_s.ph %[value32], %[a], %[b] \n\t" + : [value32] "=r" (value32) + : [a] "r" (a), [b] "r" (b) + ); + return (int16_t)value32; +} + +static __inline int32_t WebRtcSpl_AddSatW32(int32_t l_var1, int32_t l_var2) { + int32_t l_sum; + + __asm __volatile( + "addq_s.w %[l_sum], 
%[l_var1], %[l_var2] \n\t" + : [l_sum] "=r" (l_sum) + : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2) + ); + + return l_sum; +} + +static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) { + int32_t value32; + + __asm __volatile( + "subq_s.ph %[value32], %[var1], %[var2] \n\t" + : [value32] "=r" (value32) + : [var1] "r" (var1), [var2] "r" (var2) + ); + + return (int16_t)value32; +} + +static __inline int32_t WebRtcSpl_SubSatW32(int32_t l_var1, int32_t l_var2) { + int32_t l_diff; + + __asm __volatile( + "subq_s.w %[l_diff], %[l_var1], %[l_var2] \n\t" + : [l_diff] "=r" (l_diff) + : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2) + ); + + return l_diff; +} +#endif + +static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) { + int bits = 0; + int i32 = 32; + + __asm __volatile( + "clz %[bits], %[n] \n\t" + "subu %[bits], %[i32], %[bits] \n\t" + : [bits] "=&r" (bits) + : [n] "r" (n), [i32] "r" (i32) + ); + + return (int16_t)bits; +} + +static __inline int16_t WebRtcSpl_NormW32(int32_t a) { + int zeros = 0; + + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + "bnez %[a], 1f \n\t" + " sra %[zeros], %[a], 31 \n\t" + "b 2f \n\t" + " move %[zeros], $zero \n\t" + "1: \n\t" + "xor %[zeros], %[a], %[zeros] \n\t" + "clz %[zeros], %[zeros] \n\t" + "addiu %[zeros], %[zeros], -1 \n\t" + "2: \n\t" + ".set pop \n\t" + : [zeros]"=&r"(zeros) + : [a] "r" (a) + ); + + return (int16_t)zeros; +} + +static __inline int16_t WebRtcSpl_NormU32(uint32_t a) { + int zeros = 0; + + __asm __volatile( + "clz %[zeros], %[a] \n\t" + : [zeros] "=r" (zeros) + : [a] "r" (a) + ); + + return (int16_t)(zeros & 0x1f); +} + +static __inline int16_t WebRtcSpl_NormW16(int16_t a) { + int zeros = 0; + int a0 = a << 16; + + __asm __volatile( + ".set push \n\t" + ".set noreorder \n\t" + "bnez %[a0], 1f \n\t" + " sra %[zeros], %[a0], 31 \n\t" + "b 2f \n\t" + " move %[zeros], $zero \n\t" + "1: \n\t" + "xor %[zeros], %[a0], %[zeros] \n\t" + "clz %[zeros], %[zeros] \n\t" + "addiu %[zeros], 
%[zeros], -1 \n\t" + "2: \n\t" + ".set pop \n\t" + : [zeros]"=&r"(zeros) + : [a0] "r" (a0) + ); + + return (int16_t)zeros; +} + +static __inline int32_t WebRtc_MulAccumW16(int16_t a, + int16_t b, + int32_t c) { + int32_t res = 0, c1 = 0; + __asm __volatile( +#if defined(MIPS32_R2_LE) + "seh %[a], %[a] \n\t" + "seh %[b], %[b] \n\t" +#else + "sll %[a], %[a], 16 \n\t" + "sll %[b], %[b], 16 \n\t" + "sra %[a], %[a], 16 \n\t" + "sra %[b], %[b], 16 \n\t" +#endif + "mul %[res], %[a], %[b] \n\t" + "addu %[c1], %[c], %[res] \n\t" + : [c1] "=r" (c1), [res] "=&r" (res) + : [a] "r" (a), [b] "r" (b), [c] "r" (c) + : "hi", "lo" + ); + return (c1); +} + +#endif // WEBRTC_SPL_SPL_INL_MIPS_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/levinson_durbin.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/levinson_durbin.c new file mode 100644 index 000000000..d46e55136 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/levinson_durbin.c @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_LevinsonDurbin(). 
+ * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +#define SPL_LEVINSON_MAXORDER 20 + +int16_t WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K, + size_t order) +{ + size_t i, j; + // Auto-correlation coefficients in high precision + int16_t R_hi[SPL_LEVINSON_MAXORDER + 1], R_low[SPL_LEVINSON_MAXORDER + 1]; + // LPC coefficients in high precision + int16_t A_hi[SPL_LEVINSON_MAXORDER + 1], A_low[SPL_LEVINSON_MAXORDER + 1]; + // LPC coefficients for next iteration + int16_t A_upd_hi[SPL_LEVINSON_MAXORDER + 1], A_upd_low[SPL_LEVINSON_MAXORDER + 1]; + // Reflection coefficient in high precision + int16_t K_hi, K_low; + // Prediction gain Alpha in high precision and with scale factor + int16_t Alpha_hi, Alpha_low, Alpha_exp; + int16_t tmp_hi, tmp_low; + int32_t temp1W32, temp2W32, temp3W32; + int16_t norm; + + // Normalize the autocorrelation R[0]...R[order+1] + + norm = WebRtcSpl_NormW32(R[0]); + + for (i = 0; i <= order; ++i) + { + temp1W32 = WEBRTC_SPL_LSHIFT_W32(R[i], norm); + // Put R in hi and low format + R_hi[i] = (int16_t)(temp1W32 >> 16); + R_low[i] = (int16_t)((temp1W32 - ((int32_t)R_hi[i] << 16)) >> 1); + } + + // K = A[1] = -R[1] / R[0] + + temp2W32 = WEBRTC_SPL_LSHIFT_W32((int32_t)R_hi[1],16) + + WEBRTC_SPL_LSHIFT_W32((int32_t)R_low[1],1); // R[1] in Q31 + temp3W32 = WEBRTC_SPL_ABS_W32(temp2W32); // abs R[1] + temp1W32 = WebRtcSpl_DivW32HiLow(temp3W32, R_hi[0], R_low[0]); // abs(R[1])/R[0] in Q31 + // Put back the sign on R[1] + if (temp2W32 > 0) + { + temp1W32 = -temp1W32; + } + + // Put K in hi and low format + K_hi = (int16_t)(temp1W32 >> 16); + K_low = (int16_t)((temp1W32 - ((int32_t)K_hi << 16)) >> 1); + + // Store first reflection coefficient + K[0] = K_hi; + + temp1W32 >>= 4; // A[1] in Q27. 
+ + // Put A[1] in hi and low format + A_hi[1] = (int16_t)(temp1W32 >> 16); + A_low[1] = (int16_t)((temp1W32 - ((int32_t)A_hi[1] << 16)) >> 1); + + // Alpha = R[0] * (1-K^2) + + temp1W32 = ((K_hi * K_low >> 14) + K_hi * K_hi) << 1; // = k^2 in Q31 + + temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32); // Guard against <0 + temp1W32 = (int32_t)0x7fffffffL - temp1W32; // temp1W32 = (1 - K[0]*K[0]) in Q31 + + // Store temp1W32 = 1 - K[0]*K[0] on hi and low format + tmp_hi = (int16_t)(temp1W32 >> 16); + tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1); + + // Calculate Alpha in Q31 + temp1W32 = (R_hi[0] * tmp_hi + (R_hi[0] * tmp_low >> 15) + + (R_low[0] * tmp_hi >> 15)) << 1; + + // Normalize Alpha and put it in hi and low format + + Alpha_exp = WebRtcSpl_NormW32(temp1W32); + temp1W32 = WEBRTC_SPL_LSHIFT_W32(temp1W32, Alpha_exp); + Alpha_hi = (int16_t)(temp1W32 >> 16); + Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi << 16)) >> 1); + + // Perform the iterative calculations in the Levinson-Durbin algorithm + + for (i = 2; i <= order; i++) + { + /* ---- + temp1W32 = R[i] + > R[j]*A[i-j] + / + ---- + j=1..i-1 + */ + + temp1W32 = 0; + + for (j = 1; j < i; j++) + { + // temp1W32 is in Q31 + temp1W32 += (R_hi[j] * A_hi[i - j] << 1) + + (((R_hi[j] * A_low[i - j] >> 15) + + (R_low[j] * A_hi[i - j] >> 15)) << 1); + } + + temp1W32 = WEBRTC_SPL_LSHIFT_W32(temp1W32, 4); + temp1W32 += (WEBRTC_SPL_LSHIFT_W32((int32_t)R_hi[i], 16) + + WEBRTC_SPL_LSHIFT_W32((int32_t)R_low[i], 1)); + + // K = -temp1W32 / Alpha + temp2W32 = WEBRTC_SPL_ABS_W32(temp1W32); // abs(temp1W32) + temp3W32 = WebRtcSpl_DivW32HiLow(temp2W32, Alpha_hi, Alpha_low); // abs(temp1W32)/Alpha + + // Put the sign of temp1W32 back again + if (temp1W32 > 0) + { + temp3W32 = -temp3W32; + } + + // Use the Alpha shifts from earlier to de-normalize + norm = WebRtcSpl_NormW32(temp3W32); + if ((Alpha_exp <= norm) || (temp3W32 == 0)) + { + temp3W32 = WEBRTC_SPL_LSHIFT_W32(temp3W32, Alpha_exp); + } else + { + if (temp3W32 
> 0) + { + temp3W32 = (int32_t)0x7fffffffL; + } else + { + temp3W32 = (int32_t)0x80000000L; + } + } + + // Put K on hi and low format + K_hi = (int16_t)(temp3W32 >> 16); + K_low = (int16_t)((temp3W32 - ((int32_t)K_hi << 16)) >> 1); + + // Store Reflection coefficient in Q15 + K[i - 1] = K_hi; + + // Test for unstable filter. + // If unstable return 0 and let the user decide what to do in that case + + if ((int32_t)WEBRTC_SPL_ABS_W16(K_hi) > (int32_t)32750) + { + return 0; // Unstable filter + } + + /* + Compute updated LPC coefficient: Anew[i] + Anew[j]= A[j] + K*A[i-j] for j=1..i-1 + Anew[i]= K + */ + + for (j = 1; j < i; j++) + { + // temp1W32 = A[j] in Q27 + temp1W32 = WEBRTC_SPL_LSHIFT_W32((int32_t)A_hi[j],16) + + WEBRTC_SPL_LSHIFT_W32((int32_t)A_low[j],1); + + // temp1W32 += K*A[i-j] in Q27 + temp1W32 += (K_hi * A_hi[i - j] + (K_hi * A_low[i - j] >> 15) + + (K_low * A_hi[i - j] >> 15)) << 1; + + // Put Anew in hi and low format + A_upd_hi[j] = (int16_t)(temp1W32 >> 16); + A_upd_low[j] = (int16_t)( + (temp1W32 - ((int32_t)A_upd_hi[j] << 16)) >> 1); + } + + // temp3W32 = K in Q27 (Convert from Q31 to Q27) + temp3W32 >>= 4; + + // Store Anew in hi and low format + A_upd_hi[i] = (int16_t)(temp3W32 >> 16); + A_upd_low[i] = (int16_t)( + (temp3W32 - ((int32_t)A_upd_hi[i] << 16)) >> 1); + + // Alpha = Alpha * (1-K^2) + + temp1W32 = ((K_hi * K_low >> 14) + K_hi * K_hi) << 1; // K*K in Q31 + + temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32); // Guard against <0 + temp1W32 = (int32_t)0x7fffffffL - temp1W32; // 1 - K*K in Q31 + + // Convert 1- K^2 in hi and low format + tmp_hi = (int16_t)(temp1W32 >> 16); + tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1); + + // Calculate Alpha = Alpha * (1-K^2) in Q31 + temp1W32 = (Alpha_hi * tmp_hi + (Alpha_hi * tmp_low >> 15) + + (Alpha_low * tmp_hi >> 15)) << 1; + + // Normalize Alpha and store it on hi and low format + + norm = WebRtcSpl_NormW32(temp1W32); + temp1W32 = WEBRTC_SPL_LSHIFT_W32(temp1W32, norm); + + Alpha_hi = 
(int16_t)(temp1W32 >> 16); + Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi << 16)) >> 1); + + // Update the total normalization of Alpha + Alpha_exp = Alpha_exp + norm; + + // Update A[] + + for (j = 1; j <= i; j++) + { + A_hi[j] = A_upd_hi[j]; + A_low[j] = A_upd_low[j]; + } + } + + /* + Set A[0] to 1.0 and store the A[i] i=1...order in Q12 + (Convert from Q27 and use rounding) + */ + + A[0] = 4096; + + for (i = 1; i <= order; i++) + { + // temp1W32 in Q27 + temp1W32 = WEBRTC_SPL_LSHIFT_W32((int32_t)A_hi[i], 16) + + WEBRTC_SPL_LSHIFT_W32((int32_t)A_low[i], 1); + // Round and store upper word + A[i] = (int16_t)(((temp1W32 << 1) + 32768) >> 16); + } + return 1; // Stable filters +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/lpc_to_refl_coef.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/lpc_to_refl_coef.c new file mode 100644 index 000000000..edcebd4e6 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/lpc_to_refl_coef.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_LpcToReflCoef(). 
+ * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +#define SPL_LPC_TO_REFL_COEF_MAX_AR_MODEL_ORDER 50 + +void WebRtcSpl_LpcToReflCoef(int16_t* a16, int use_order, int16_t* k16) +{ + int m, k; + int32_t tmp32[SPL_LPC_TO_REFL_COEF_MAX_AR_MODEL_ORDER]; + int32_t tmp_inv_denom32; + int16_t tmp_inv_denom16; + + k16[use_order - 1] = a16[use_order] << 3; // Q12<<3 => Q15 + for (m = use_order - 1; m > 0; m--) + { + // (1 - k^2) in Q30 + tmp_inv_denom32 = 1073741823 - k16[m] * k16[m]; + // (1 - k^2) in Q15 + tmp_inv_denom16 = (int16_t)(tmp_inv_denom32 >> 15); + + for (k = 1; k <= m; k++) + { + // tmp[k] = (a[k] - RC[m] * a[m-k+1]) / (1.0 - RC[m]*RC[m]); + + // [Q12<<16 - (Q15*Q12)<<1] = [Q28 - Q28] = Q28 + tmp32[k] = (a16[k] << 16) - (k16[m] * a16[m - k + 1] << 1); + + tmp32[k] = WebRtcSpl_DivW32W16(tmp32[k], tmp_inv_denom16); //Q28/Q15 = Q13 + } + + for (k = 1; k < m; k++) + { + a16[k] = (int16_t)(tmp32[k] >> 1); // Q13>>1 => Q12 + } + + tmp32[m] = WEBRTC_SPL_SAT(8191, tmp32[m], -8191); + k16[m - 1] = (int16_t)WEBRTC_SPL_LSHIFT_W32(tmp32[m], 2); //Q13<<2 => Q15 + } + return; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations.c new file mode 100644 index 000000000..bc23a9c04 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains the implementation of functions
+ * WebRtcSpl_MaxAbsValueW16C()
+ * WebRtcSpl_MaxAbsValueW32C()
+ * WebRtcSpl_MaxValueW16C()
+ * WebRtcSpl_MaxValueW32C()
+ * WebRtcSpl_MinValueW16C()
+ * WebRtcSpl_MinValueW32C()
+ * WebRtcSpl_MaxAbsIndexW16()
+ * WebRtcSpl_MaxIndexW16()
+ * WebRtcSpl_MaxIndexW32()
+ * WebRtcSpl_MinIndexW16()
+ * WebRtcSpl_MinIndexW32()
+ *
+ */
+
+#include <stdlib.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+
+// TODO(bjorn/kma): Consolidate function pairs (e.g. combine
+// WebRtcSpl_MaxAbsValueW16C and WebRtcSpl_MaxAbsIndexW16 into a single one.)
+// TODO(kma): Move the next six functions into min_max_operations_c.c.
+
+// Maximum absolute value of word16 vector. C version for generic platforms.
+int16_t WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, size_t length) {
+  size_t i = 0;
+  int absolute = 0, maximum = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    absolute = abs((int)vector[i]);
+
+    if (absolute > maximum) {
+      maximum = absolute;
+    }
+  }
+
+  // Guard the case for abs(-32768).
+  if (maximum > WEBRTC_SPL_WORD16_MAX) {
+    maximum = WEBRTC_SPL_WORD16_MAX;
+  }
+
+  return (int16_t)maximum;
+}
+
+// Maximum absolute value of word32 vector. C version for generic platforms.
+int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, size_t length) {
+  // Use uint32_t for the local variables, to accommodate the return value
+  // of abs(0x80000000), which is 0x80000000.
+
+  uint32_t absolute = 0, maximum = 0;
+  size_t i = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    absolute = abs((int)vector[i]);
+    if (absolute > maximum) {
+      maximum = absolute;
+    }
+  }
+
+  maximum = WEBRTC_SPL_MIN(maximum, WEBRTC_SPL_WORD32_MAX);
+
+  return (int32_t)maximum;
+}
+
+// Maximum value of word16 vector.
C version for generic platforms. +int16_t WebRtcSpl_MaxValueW16C(const int16_t* vector, size_t length) { + int16_t maximum = WEBRTC_SPL_WORD16_MIN; + size_t i = 0; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + if (vector[i] > maximum) + maximum = vector[i]; + } + return maximum; +} + +// Maximum value of word32 vector. C version for generic platforms. +int32_t WebRtcSpl_MaxValueW32C(const int32_t* vector, size_t length) { + int32_t maximum = WEBRTC_SPL_WORD32_MIN; + size_t i = 0; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + if (vector[i] > maximum) + maximum = vector[i]; + } + return maximum; +} + +// Minimum value of word16 vector. C version for generic platforms. +int16_t WebRtcSpl_MinValueW16C(const int16_t* vector, size_t length) { + int16_t minimum = WEBRTC_SPL_WORD16_MAX; + size_t i = 0; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + if (vector[i] < minimum) + minimum = vector[i]; + } + return minimum; +} + +// Minimum value of word32 vector. C version for generic platforms. +int32_t WebRtcSpl_MinValueW32C(const int32_t* vector, size_t length) { + int32_t minimum = WEBRTC_SPL_WORD32_MAX; + size_t i = 0; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + if (vector[i] < minimum) + minimum = vector[i]; + } + return minimum; +} + +// Index of maximum absolute value in a word16 vector. +size_t WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length) { + // Use type int for local variables, to accomodate the value of abs(-32768). + + size_t i = 0, index = 0; + int absolute = 0, maximum = 0; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + absolute = abs((int)vector[i]); + + if (absolute > maximum) { + maximum = absolute; + index = i; + } + } + + return index; +} + +// Index of maximum value in a word16 vector. 
+size_t WebRtcSpl_MaxIndexW16(const int16_t* vector, size_t length) { + size_t i = 0, index = 0; + int16_t maximum = WEBRTC_SPL_WORD16_MIN; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + if (vector[i] > maximum) { + maximum = vector[i]; + index = i; + } + } + + return index; +} + +// Index of maximum value in a word32 vector. +size_t WebRtcSpl_MaxIndexW32(const int32_t* vector, size_t length) { + size_t i = 0, index = 0; + int32_t maximum = WEBRTC_SPL_WORD32_MIN; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + if (vector[i] > maximum) { + maximum = vector[i]; + index = i; + } + } + + return index; +} + +// Index of minimum value in a word16 vector. +size_t WebRtcSpl_MinIndexW16(const int16_t* vector, size_t length) { + size_t i = 0, index = 0; + int16_t minimum = WEBRTC_SPL_WORD16_MAX; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + if (vector[i] < minimum) { + minimum = vector[i]; + index = i; + } + } + + return index; +} + +// Index of minimum value in a word32 vector. +size_t WebRtcSpl_MinIndexW32(const int32_t* vector, size_t length) { + size_t i = 0, index = 0; + int32_t minimum = WEBRTC_SPL_WORD32_MAX; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + if (vector[i] < minimum) { + minimum = vector[i]; + index = i; + } + } + + return index; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations_neon.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations_neon.c new file mode 100644 index 000000000..7a001086f --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/min_max_operations_neon.c @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifdef WEBRTC_ARCH_ARM_FAMILY + +#include +#include + +#include "webrtc/base/checks.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +// Maximum absolute value of word16 vector. C version for generic platforms. +int16_t WebRtcSpl_MaxAbsValueW16Neon(const int16_t* vector, size_t length) { + int absolute = 0, maximum = 0; + + RTC_DCHECK_GT(length, 0); + + const int16_t* p_start = vector; + size_t rest = length & 7; + const int16_t* p_end = vector + length - rest; + + int16x8_t v; + uint16x8_t max_qv; + max_qv = vdupq_n_u16(0); + + while (p_start < p_end) { + v = vld1q_s16(p_start); + // Note vabs doesn't change the value of -32768. + v = vabsq_s16(v); + // Use u16 so we don't lose the value -32768. + max_qv = vmaxq_u16(max_qv, vreinterpretq_u16_s16(v)); + p_start += 8; + } + +#ifdef WEBRTC_ARCH_ARM64 + maximum = (int)vmaxvq_u16(max_qv); +#else + uint16x4_t max_dv; + max_dv = vmax_u16(vget_low_u16(max_qv), vget_high_u16(max_qv)); + max_dv = vpmax_u16(max_dv, max_dv); + max_dv = vpmax_u16(max_dv, max_dv); + + maximum = (int)vget_lane_u16(max_dv, 0); +#endif + + p_end = vector + length; + while (p_start < p_end) { + absolute = abs((int)(*p_start)); + + if (absolute > maximum) { + maximum = absolute; + } + p_start++; + } + + // Guard the case for abs(-32768). + if (maximum > WEBRTC_SPL_WORD16_MAX) { + maximum = WEBRTC_SPL_WORD16_MAX; + } + + return (int16_t)maximum; +} + +// Maximum absolute value of word32 vector. NEON intrinsics version for +// ARM 32-bit/64-bit platforms. +int32_t WebRtcSpl_MaxAbsValueW32Neon(const int32_t* vector, size_t length) { + // Use uint32_t for the local variables, to accommodate the return value + // of abs(0x80000000), which is 0x80000000. 
+ + uint32_t absolute = 0, maximum = 0; + size_t i = 0; + size_t residual = length & 0x7; + + RTC_DCHECK_GT(length, 0); + + const int32_t* p_start = vector; + uint32x4_t max32x4_0 = vdupq_n_u32(0); + uint32x4_t max32x4_1 = vdupq_n_u32(0); + + // First part, unroll the loop 8 times. + for (i = 0; i < length - residual; i += 8) { + int32x4_t in32x4_0 = vld1q_s32(p_start); + p_start += 4; + int32x4_t in32x4_1 = vld1q_s32(p_start); + p_start += 4; + in32x4_0 = vabsq_s32(in32x4_0); + in32x4_1 = vabsq_s32(in32x4_1); + // vabs doesn't change the value of 0x80000000. + // Use u32 so we don't lose the value 0x80000000. + max32x4_0 = vmaxq_u32(max32x4_0, vreinterpretq_u32_s32(in32x4_0)); + max32x4_1 = vmaxq_u32(max32x4_1, vreinterpretq_u32_s32(in32x4_1)); + } + + uint32x4_t max32x4 = vmaxq_u32(max32x4_0, max32x4_1); +#if defined(WEBRTC_ARCH_ARM64) + maximum = vmaxvq_u32(max32x4); +#else + uint32x2_t max32x2 = vmax_u32(vget_low_u32(max32x4), vget_high_u32(max32x4)); + max32x2 = vpmax_u32(max32x2, max32x2); + + maximum = vget_lane_u32(max32x2, 0); +#endif + + // Second part, do the remaining iterations (if any). + for (i = residual; i > 0; i--) { + absolute = abs((int)(*p_start)); + if (absolute > maximum) { + maximum = absolute; + } + p_start++; + } + + // Guard against the case for 0x80000000. + maximum = WEBRTC_SPL_MIN(maximum, WEBRTC_SPL_WORD32_MAX); + + return (int32_t)maximum; +} + +// Maximum value of word16 vector. NEON intrinsics version for +// ARM 32-bit/64-bit platforms. +int16_t WebRtcSpl_MaxValueW16Neon(const int16_t* vector, size_t length) { + int16_t maximum = WEBRTC_SPL_WORD16_MIN; + size_t i = 0; + size_t residual = length & 0x7; + + RTC_DCHECK_GT(length, 0); + + const int16_t* p_start = vector; + int16x8_t max16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MIN); + + // First part, unroll the loop 8 times. 
+ for (i = 0; i < length - residual; i += 8) { + int16x8_t in16x8 = vld1q_s16(p_start); + max16x8 = vmaxq_s16(max16x8, in16x8); + p_start += 8; + } + +#if defined(WEBRTC_ARCH_ARM64) + maximum = vmaxvq_s16(max16x8); +#else + int16x4_t max16x4 = vmax_s16(vget_low_s16(max16x8), vget_high_s16(max16x8)); + max16x4 = vpmax_s16(max16x4, max16x4); + max16x4 = vpmax_s16(max16x4, max16x4); + + maximum = vget_lane_s16(max16x4, 0); +#endif + + // Second part, do the remaining iterations (if any). + for (i = residual; i > 0; i--) { + if (*p_start > maximum) + maximum = *p_start; + p_start++; + } + return maximum; +} + +// Maximum value of word32 vector. NEON intrinsics version for +// ARM 32-bit/64-bit platforms. +int32_t WebRtcSpl_MaxValueW32Neon(const int32_t* vector, size_t length) { + int32_t maximum = WEBRTC_SPL_WORD32_MIN; + size_t i = 0; + size_t residual = length & 0x7; + + RTC_DCHECK_GT(length, 0); + + const int32_t* p_start = vector; + int32x4_t max32x4_0 = vdupq_n_s32(WEBRTC_SPL_WORD32_MIN); + int32x4_t max32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MIN); + + // First part, unroll the loop 8 times. + for (i = 0; i < length - residual; i += 8) { + int32x4_t in32x4_0 = vld1q_s32(p_start); + p_start += 4; + int32x4_t in32x4_1 = vld1q_s32(p_start); + p_start += 4; + max32x4_0 = vmaxq_s32(max32x4_0, in32x4_0); + max32x4_1 = vmaxq_s32(max32x4_1, in32x4_1); + } + + int32x4_t max32x4 = vmaxq_s32(max32x4_0, max32x4_1); +#if defined(WEBRTC_ARCH_ARM64) + maximum = vmaxvq_s32(max32x4); +#else + int32x2_t max32x2 = vmax_s32(vget_low_s32(max32x4), vget_high_s32(max32x4)); + max32x2 = vpmax_s32(max32x2, max32x2); + + maximum = vget_lane_s32(max32x2, 0); +#endif + + // Second part, do the remaining iterations (if any). + for (i = residual; i > 0; i--) { + if (*p_start > maximum) + maximum = *p_start; + p_start++; + } + return maximum; +} + +// Minimum value of word16 vector. NEON intrinsics version for +// ARM 32-bit/64-bit platforms. 
+int16_t WebRtcSpl_MinValueW16Neon(const int16_t* vector, size_t length) { + int16_t minimum = WEBRTC_SPL_WORD16_MAX; + size_t i = 0; + size_t residual = length & 0x7; + + RTC_DCHECK_GT(length, 0); + + const int16_t* p_start = vector; + int16x8_t min16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MAX); + + // First part, unroll the loop 8 times. + for (i = 0; i < length - residual; i += 8) { + int16x8_t in16x8 = vld1q_s16(p_start); + min16x8 = vminq_s16(min16x8, in16x8); + p_start += 8; + } + +#if defined(WEBRTC_ARCH_ARM64) + minimum = vminvq_s16(min16x8); +#else + int16x4_t min16x4 = vmin_s16(vget_low_s16(min16x8), vget_high_s16(min16x8)); + min16x4 = vpmin_s16(min16x4, min16x4); + min16x4 = vpmin_s16(min16x4, min16x4); + + minimum = vget_lane_s16(min16x4, 0); +#endif + + // Second part, do the remaining iterations (if any). + for (i = residual; i > 0; i--) { + if (*p_start < minimum) + minimum = *p_start; + p_start++; + } + return minimum; +} + +// Minimum value of word32 vector. NEON intrinsics version for +// ARM 32-bit/64-bit platforms. +int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length) { + int32_t minimum = WEBRTC_SPL_WORD32_MAX; + size_t i = 0; + size_t residual = length & 0x7; + + RTC_DCHECK_GT(length, 0); + + const int32_t* p_start = vector; + int32x4_t min32x4_0 = vdupq_n_s32(WEBRTC_SPL_WORD32_MAX); + int32x4_t min32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MAX); + + // First part, unroll the loop 8 times. 
+ for (i = 0; i < length - residual; i += 8) { + int32x4_t in32x4_0 = vld1q_s32(p_start); + p_start += 4; + int32x4_t in32x4_1 = vld1q_s32(p_start); + p_start += 4; + min32x4_0 = vminq_s32(min32x4_0, in32x4_0); + min32x4_1 = vminq_s32(min32x4_1, in32x4_1); + } + + int32x4_t min32x4 = vminq_s32(min32x4_0, min32x4_1); +#if defined(WEBRTC_ARCH_ARM64) + minimum = vminvq_s32(min32x4); +#else + int32x2_t min32x2 = vmin_s32(vget_low_s32(min32x4), vget_high_s32(min32x4)); + min32x2 = vpmin_s32(min32x2, min32x2); + + minimum = vget_lane_s32(min32x2, 0); +#endif + + // Second part, do the remaining iterations (if any). + for (i = residual; i > 0; i--) { + if (*p_start < minimum) + minimum = *p_start; + p_start++; + } + return minimum; +} + +#endif diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/randomization_functions.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/randomization_functions.c new file mode 100644 index 000000000..73f24093c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/randomization_functions.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + + +/* + * This file contains implementations of the randomization functions + * WebRtcSpl_RandU() + * WebRtcSpl_RandN() + * WebRtcSpl_RandUArray() + * + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +static const uint32_t kMaxSeedUsed = 0x80000000; + +static const int16_t kRandNTable[] = { + 9178, -7260, 40, 10189, 4894, -3531, -13779, 14764, + -4008, -8884, -8990, 1008, 7368, 5184, 3251, -5817, + -9786, 5963, 1770, 8066, -7135, 10772, -2298, 1361, + 6484, 2241, -8633, 792, 199, -3344, 6553, -10079, + -15040, 95, 11608, -12469, 14161, -4176, 2476, 6403, + 13685, -16005, 6646, 2239, 10916, -3004, -602, -3141, + 2142, 14144, -5829, 5305, 8209, 4713, 2697, -5112, + 16092, -1210, -2891, -6631, -5360, -11878, -6781, -2739, + -6392, 536, 10923, 10872, 5059, -4748, -7770, 5477, + 38, -1025, -2892, 1638, 6304, 14375, -11028, 1553, + -1565, 10762, -393, 4040, 5257, 12310, 6554, -4799, + 4899, -6354, 1603, -1048, -2220, 8247, -186, -8944, + -12004, 2332, 4801, -4933, 6371, 131, 8614, -5927, + -8287, -22760, 4033, -15162, 3385, 3246, 3153, -5250, + 3766, 784, 6494, -62, 3531, -1582, 15572, 662, + -3952, -330, -3196, 669, 7236, -2678, -6569, 23319, + -8645, -741, 14830, -15976, 4903, 315, -11342, 10311, + 1858, -7777, 2145, 5436, 5677, -113, -10033, 826, + -1353, 17210, 7768, 986, -1471, 8291, -4982, 8207, + -14911, -6255, -2449, -11881, -7059, -11703, -4338, 8025, + 7538, -2823, -12490, 9470, -1613, -2529, -10092, -7807, + 9480, 6970, -12844, 5123, 3532, 4816, 4803, -8455, + -5045, 14032, -4378, -1643, 5756, -11041, -2732, -16618, + -6430, -18375, -3320, 6098, 5131, -4269, -8840, 2482, + -7048, 1547, -21890, -6505, -7414, -424, -11722, 7955, + 1653, -17299, 1823, 473, -9232, 3337, 1111, 873, + 4018, -8982, 9889, 3531, -11763, -3799, 7373, -4539, + 3231, 7054, -8537, 7616, 6244, 16635, 447, -2915, + 13967, 705, -2669, -1520, -1771, -16188, 5956, 
5117, + 6371, -9936, -1448, 2480, 5128, 7550, -8130, 5236, + 8213, -6443, 7707, -1950, -13811, 7218, 7031, -3883, + 67, 5731, -2874, 13480, -3743, 9298, -3280, 3552, + -4425, -18, -3785, -9988, -5357, 5477, -11794, 2117, + 1416, -9935, 3376, 802, -5079, -8243, 12652, 66, + 3653, -2368, 6781, -21895, -7227, 2487, 7839, -385, + 6646, -7016, -4658, 5531, -1705, 834, 129, 3694, + -1343, 2238, -22640, -6417, -11139, 11301, -2945, -3494, + -5626, 185, -3615, -2041, -7972, -3106, -60, -23497, + -1566, 17064, 3519, 2518, 304, -6805, -10269, 2105, + 1936, -426, -736, -8122, -1467, 4238, -6939, -13309, + 360, 7402, -7970, 12576, 3287, 12194, -6289, -16006, + 9171, 4042, -9193, 9123, -2512, 6388, -4734, -8739, + 1028, -5406, -1696, 5889, -666, -4736, 4971, 3565, + 9362, -6292, 3876, -3652, -19666, 7523, -4061, 391, + -11773, 7502, -3763, 4929, -9478, 13278, 2805, 4496, + 7814, 16419, 12455, -14773, 2127, -2746, 3763, 4847, + 3698, 6978, 4751, -6957, -3581, -45, 6252, 1513, + -4797, -7925, 11270, 16188, -2359, -5269, 9376, -10777, + 7262, 20031, -6515, -2208, -5353, 8085, -1341, -1303, + 7333, 5576, 3625, 5763, -7931, 9833, -3371, -10305, + 6534, -13539, -9971, 997, 8464, -4064, -1495, 1857, + 13624, 5458, 9490, -11086, -4524, 12022, -550, -198, + 408, -8455, -7068, 10289, 9712, -3366, 9028, -7621, + -5243, 2362, 6909, 4672, -4933, -1799, 4709, -4563, + -62, -566, 1624, -7010, 14730, -17791, -3697, -2344, + -1741, 7099, -9509, -6855, -1989, 3495, -2289, 2031, + 12784, 891, 14189, -3963, -5683, 421, -12575, 1724, + -12682, -5970, -8169, 3143, -1824, -5488, -5130, 8536, + 12799, 794, 5738, 3459, -11689, -258, -3738, -3775, + -8742, 2333, 8312, -9383, 10331, 13119, 8398, 10644, + -19433, -6446, -16277, -11793, 16284, 9345, 15222, 15834, + 2009, -7349, 130, -14547, 338, -5998, 3337, 21492, + 2406, 7703, -951, 11196, -564, 3406, 2217, 4806, + 2374, -5797, 11839, 8940, -11874, 18213, 2855, 10492 +}; + +static uint32_t IncreaseSeed(uint32_t* seed) { + seed[0] = (seed[0] * 
((int32_t)69069) + 1) & (kMaxSeedUsed - 1); + return seed[0]; +} + +int16_t WebRtcSpl_RandU(uint32_t* seed) { + return (int16_t)(IncreaseSeed(seed) >> 16); +} + +int16_t WebRtcSpl_RandN(uint32_t* seed) { + return kRandNTable[IncreaseSeed(seed) >> 23]; +} + +// Creates an array of uniformly distributed variables. +int16_t WebRtcSpl_RandUArray(int16_t* vector, + int16_t vector_length, + uint32_t* seed) { + int i; + for (i = 0; i < vector_length; i++) { + vector[i] = WebRtcSpl_RandU(seed); + } + return vector_length; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/real_fft.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/real_fft.c new file mode 100644 index 000000000..92daae4d3 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/real_fft.c @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/common_audio/signal_processing/include/real_fft.h" + +#include + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +struct RealFFT { + int order; +}; + +struct RealFFT* WebRtcSpl_CreateRealFFT(int order) { + struct RealFFT* self = NULL; + + if (order > kMaxFFTOrder || order < 0) { + return NULL; + } + + self = malloc(sizeof(struct RealFFT)); + if (self == NULL) { + return NULL; + } + self->order = order; + + return self; +} + +void WebRtcSpl_FreeRealFFT(struct RealFFT* self) { + if (self != NULL) { + free(self); + } +} + +// The C version FFT functions (i.e. 
WebRtcSpl_RealForwardFFT and +// WebRtcSpl_RealInverseFFT) are real-valued FFT wrappers for complex-valued +// FFT implementation in SPL. + +int WebRtcSpl_RealForwardFFT(struct RealFFT* self, + const int16_t* real_data_in, + int16_t* complex_data_out) { + int i = 0; + int j = 0; + int result = 0; + int n = 1 << self->order; + // The complex-value FFT implementation needs a buffer to hold 2^order + // 16-bit COMPLEX numbers, for both time and frequency data. + int16_t complex_buffer[2 << kMaxFFTOrder]; + + // Insert zeros to the imaginary parts for complex forward FFT input. + for (i = 0, j = 0; i < n; i += 1, j += 2) { + complex_buffer[j] = real_data_in[i]; + complex_buffer[j + 1] = 0; + }; + + WebRtcSpl_ComplexBitReverse(complex_buffer, self->order); + result = WebRtcSpl_ComplexFFT(complex_buffer, self->order, 1); + + // For real FFT output, use only the first N + 2 elements from + // complex forward FFT. + memcpy(complex_data_out, complex_buffer, sizeof(int16_t) * (n + 2)); + + return result; +} + +int WebRtcSpl_RealInverseFFT(struct RealFFT* self, + const int16_t* complex_data_in, + int16_t* real_data_out) { + int i = 0; + int j = 0; + int result = 0; + int n = 1 << self->order; + // Create the buffer specific to complex-valued FFT implementation. + int16_t complex_buffer[2 << kMaxFFTOrder]; + + // For n-point FFT, first copy the first n + 2 elements into complex + // FFT, then construct the remaining n - 2 elements by real FFT's + // conjugate-symmetric properties. + memcpy(complex_buffer, complex_data_in, sizeof(int16_t) * (n + 2)); + for (i = n + 2; i < 2 * n; i += 2) { + complex_buffer[i] = complex_data_in[2 * n - i]; + complex_buffer[i + 1] = -complex_data_in[2 * n - i + 1]; + } + + WebRtcSpl_ComplexBitReverse(complex_buffer, self->order); + result = WebRtcSpl_ComplexIFFT(complex_buffer, self->order, 1); + + // Strip out the imaginary parts of the complex inverse FFT output. 
+ for (i = 0, j = 0; i < n; i += 1, j += 2) { + real_data_out[i] = complex_buffer[j]; + } + + return result; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/refl_coef_to_lpc.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/refl_coef_to_lpc.c new file mode 100644 index 000000000..06a29b663 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/refl_coef_to_lpc.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_ReflCoefToLpc(). + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +void WebRtcSpl_ReflCoefToLpc(const int16_t *k, int use_order, int16_t *a) +{ + int16_t any[WEBRTC_SPL_MAX_LPC_ORDER + 1]; + int16_t *aptr, *aptr2, *anyptr; + const int16_t *kptr; + int m, i; + + kptr = k; + *a = 4096; // i.e., (Word16_MAX >> 3)+1. 
+ *any = *a; + a[1] = *k >> 3; + + for (m = 1; m < use_order; m++) + { + kptr++; + aptr = a; + aptr++; + aptr2 = &a[m]; + anyptr = any; + anyptr++; + + any[m + 1] = *kptr >> 3; + for (i = 0; i < m; i++) + { + *anyptr = *aptr + (int16_t)((*aptr2 * *kptr) >> 15); + anyptr++; + aptr++; + aptr2--; + } + + aptr = a; + anyptr = any; + for (i = 0; i < (m + 2); i++) + { + *aptr = *anyptr; + aptr++; + anyptr++; + } + } +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample.c new file mode 100644 index 000000000..45fe52aa9 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample.c @@ -0,0 +1,505 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the resampling functions for 22 kHz. 
+ * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +#include "webrtc/common_audio/signal_processing/resample_by_2_internal.h" + +// Declaration of internally used functions +static void WebRtcSpl_32khzTo22khzIntToShort(const int32_t *In, int16_t *Out, + int32_t K); + +void WebRtcSpl_32khzTo22khzIntToInt(const int32_t *In, int32_t *Out, + int32_t K); + +// interpolation coefficients +static const int16_t kCoefficients32To22[5][9] = { + {127, -712, 2359, -6333, 23456, 16775, -3695, 945, -154}, + {-39, 230, -830, 2785, 32366, -2324, 760, -218, 38}, + {117, -663, 2222, -6133, 26634, 13070, -3174, 831, -137}, + {-77, 457, -1677, 5958, 31175, -4136, 1405, -408, 71}, + { 98, -560, 1900, -5406, 29240, 9423, -2480, 663, -110} +}; + +////////////////////// +// 22 kHz -> 16 kHz // +////////////////////// + +// number of subblocks; options: 1, 2, 4, 5, 10 +#define SUB_BLOCKS_22_16 5 + +// 22 -> 16 resampler +void WebRtcSpl_Resample22khzTo16khz(const int16_t* in, int16_t* out, + WebRtcSpl_State22khzTo16khz* state, int32_t* tmpmem) +{ + int k; + + // process two blocks of 10/SUB_BLOCKS_22_16 ms (to reduce temp buffer size) + for (k = 0; k < SUB_BLOCKS_22_16; k++) + { + ///// 22 --> 44 ///// + // int16_t in[220/SUB_BLOCKS_22_16] + // int32_t out[440/SUB_BLOCKS_22_16] + ///// + WebRtcSpl_UpBy2ShortToInt(in, 220 / SUB_BLOCKS_22_16, tmpmem + 16, state->S_22_44); + + ///// 44 --> 32 ///// + // int32_t in[440/SUB_BLOCKS_22_16] + // int32_t out[320/SUB_BLOCKS_22_16] + ///// + // copy state to and from input array + tmpmem[8] = state->S_44_32[0]; + tmpmem[9] = state->S_44_32[1]; + tmpmem[10] = state->S_44_32[2]; + tmpmem[11] = state->S_44_32[3]; + tmpmem[12] = state->S_44_32[4]; + tmpmem[13] = state->S_44_32[5]; + tmpmem[14] = state->S_44_32[6]; + tmpmem[15] = state->S_44_32[7]; + state->S_44_32[0] = tmpmem[440 / SUB_BLOCKS_22_16 + 8]; + state->S_44_32[1] = tmpmem[440 / 
SUB_BLOCKS_22_16 + 9]; + state->S_44_32[2] = tmpmem[440 / SUB_BLOCKS_22_16 + 10]; + state->S_44_32[3] = tmpmem[440 / SUB_BLOCKS_22_16 + 11]; + state->S_44_32[4] = tmpmem[440 / SUB_BLOCKS_22_16 + 12]; + state->S_44_32[5] = tmpmem[440 / SUB_BLOCKS_22_16 + 13]; + state->S_44_32[6] = tmpmem[440 / SUB_BLOCKS_22_16 + 14]; + state->S_44_32[7] = tmpmem[440 / SUB_BLOCKS_22_16 + 15]; + + WebRtcSpl_Resample44khzTo32khz(tmpmem + 8, tmpmem, 40 / SUB_BLOCKS_22_16); + + ///// 32 --> 16 ///// + // int32_t in[320/SUB_BLOCKS_22_16] + // int32_t out[160/SUB_BLOCKS_22_16] + ///// + WebRtcSpl_DownBy2IntToShort(tmpmem, 320 / SUB_BLOCKS_22_16, out, state->S_32_16); + + // move input/output pointers 10/SUB_BLOCKS_22_16 ms seconds ahead + in += 220 / SUB_BLOCKS_22_16; + out += 160 / SUB_BLOCKS_22_16; + } +} + +// initialize state of 22 -> 16 resampler +void WebRtcSpl_ResetResample22khzTo16khz(WebRtcSpl_State22khzTo16khz* state) +{ + int k; + for (k = 0; k < 8; k++) + { + state->S_22_44[k] = 0; + state->S_44_32[k] = 0; + state->S_32_16[k] = 0; + } +} + +////////////////////// +// 16 kHz -> 22 kHz // +////////////////////// + +// number of subblocks; options: 1, 2, 4, 5, 10 +#define SUB_BLOCKS_16_22 4 + +// 16 -> 22 resampler +void WebRtcSpl_Resample16khzTo22khz(const int16_t* in, int16_t* out, + WebRtcSpl_State16khzTo22khz* state, int32_t* tmpmem) +{ + int k; + + // process two blocks of 10/SUB_BLOCKS_16_22 ms (to reduce temp buffer size) + for (k = 0; k < SUB_BLOCKS_16_22; k++) + { + ///// 16 --> 32 ///// + // int16_t in[160/SUB_BLOCKS_16_22] + // int32_t out[320/SUB_BLOCKS_16_22] + ///// + WebRtcSpl_UpBy2ShortToInt(in, 160 / SUB_BLOCKS_16_22, tmpmem + 8, state->S_16_32); + + ///// 32 --> 22 ///// + // int32_t in[320/SUB_BLOCKS_16_22] + // int32_t out[220/SUB_BLOCKS_16_22] + ///// + // copy state to and from input array + tmpmem[0] = state->S_32_22[0]; + tmpmem[1] = state->S_32_22[1]; + tmpmem[2] = state->S_32_22[2]; + tmpmem[3] = state->S_32_22[3]; + tmpmem[4] = state->S_32_22[4]; + 
tmpmem[5] = state->S_32_22[5]; + tmpmem[6] = state->S_32_22[6]; + tmpmem[7] = state->S_32_22[7]; + state->S_32_22[0] = tmpmem[320 / SUB_BLOCKS_16_22]; + state->S_32_22[1] = tmpmem[320 / SUB_BLOCKS_16_22 + 1]; + state->S_32_22[2] = tmpmem[320 / SUB_BLOCKS_16_22 + 2]; + state->S_32_22[3] = tmpmem[320 / SUB_BLOCKS_16_22 + 3]; + state->S_32_22[4] = tmpmem[320 / SUB_BLOCKS_16_22 + 4]; + state->S_32_22[5] = tmpmem[320 / SUB_BLOCKS_16_22 + 5]; + state->S_32_22[6] = tmpmem[320 / SUB_BLOCKS_16_22 + 6]; + state->S_32_22[7] = tmpmem[320 / SUB_BLOCKS_16_22 + 7]; + + WebRtcSpl_32khzTo22khzIntToShort(tmpmem, out, 20 / SUB_BLOCKS_16_22); + + // move input/output pointers 10/SUB_BLOCKS_16_22 ms seconds ahead + in += 160 / SUB_BLOCKS_16_22; + out += 220 / SUB_BLOCKS_16_22; + } +} + +// initialize state of 16 -> 22 resampler +void WebRtcSpl_ResetResample16khzTo22khz(WebRtcSpl_State16khzTo22khz* state) +{ + int k; + for (k = 0; k < 8; k++) + { + state->S_16_32[k] = 0; + state->S_32_22[k] = 0; + } +} + +////////////////////// +// 22 kHz -> 8 kHz // +////////////////////// + +// number of subblocks; options: 1, 2, 5, 10 +#define SUB_BLOCKS_22_8 2 + +// 22 -> 8 resampler +void WebRtcSpl_Resample22khzTo8khz(const int16_t* in, int16_t* out, + WebRtcSpl_State22khzTo8khz* state, int32_t* tmpmem) +{ + int k; + + // process two blocks of 10/SUB_BLOCKS_22_8 ms (to reduce temp buffer size) + for (k = 0; k < SUB_BLOCKS_22_8; k++) + { + ///// 22 --> 22 lowpass ///// + // int16_t in[220/SUB_BLOCKS_22_8] + // int32_t out[220/SUB_BLOCKS_22_8] + ///// + WebRtcSpl_LPBy2ShortToInt(in, 220 / SUB_BLOCKS_22_8, tmpmem + 16, state->S_22_22); + + ///// 22 --> 16 ///// + // int32_t in[220/SUB_BLOCKS_22_8] + // int32_t out[160/SUB_BLOCKS_22_8] + ///// + // copy state to and from input array + tmpmem[8] = state->S_22_16[0]; + tmpmem[9] = state->S_22_16[1]; + tmpmem[10] = state->S_22_16[2]; + tmpmem[11] = state->S_22_16[3]; + tmpmem[12] = state->S_22_16[4]; + tmpmem[13] = state->S_22_16[5]; + tmpmem[14] = 
state->S_22_16[6]; + tmpmem[15] = state->S_22_16[7]; + state->S_22_16[0] = tmpmem[220 / SUB_BLOCKS_22_8 + 8]; + state->S_22_16[1] = tmpmem[220 / SUB_BLOCKS_22_8 + 9]; + state->S_22_16[2] = tmpmem[220 / SUB_BLOCKS_22_8 + 10]; + state->S_22_16[3] = tmpmem[220 / SUB_BLOCKS_22_8 + 11]; + state->S_22_16[4] = tmpmem[220 / SUB_BLOCKS_22_8 + 12]; + state->S_22_16[5] = tmpmem[220 / SUB_BLOCKS_22_8 + 13]; + state->S_22_16[6] = tmpmem[220 / SUB_BLOCKS_22_8 + 14]; + state->S_22_16[7] = tmpmem[220 / SUB_BLOCKS_22_8 + 15]; + + WebRtcSpl_Resample44khzTo32khz(tmpmem + 8, tmpmem, 20 / SUB_BLOCKS_22_8); + + ///// 16 --> 8 ///// + // int32_t in[160/SUB_BLOCKS_22_8] + // int32_t out[80/SUB_BLOCKS_22_8] + ///// + WebRtcSpl_DownBy2IntToShort(tmpmem, 160 / SUB_BLOCKS_22_8, out, state->S_16_8); + + // move input/output pointers 10/SUB_BLOCKS_22_8 ms seconds ahead + in += 220 / SUB_BLOCKS_22_8; + out += 80 / SUB_BLOCKS_22_8; + } +} + +// initialize state of 22 -> 8 resampler +void WebRtcSpl_ResetResample22khzTo8khz(WebRtcSpl_State22khzTo8khz* state) +{ + int k; + for (k = 0; k < 8; k++) + { + state->S_22_22[k] = 0; + state->S_22_22[k + 8] = 0; + state->S_22_16[k] = 0; + state->S_16_8[k] = 0; + } +} + +////////////////////// +// 8 kHz -> 22 kHz // +////////////////////// + +// number of subblocks; options: 1, 2, 5, 10 +#define SUB_BLOCKS_8_22 2 + +// 8 -> 22 resampler +void WebRtcSpl_Resample8khzTo22khz(const int16_t* in, int16_t* out, + WebRtcSpl_State8khzTo22khz* state, int32_t* tmpmem) +{ + int k; + + // process two blocks of 10/SUB_BLOCKS_8_22 ms (to reduce temp buffer size) + for (k = 0; k < SUB_BLOCKS_8_22; k++) + { + ///// 8 --> 16 ///// + // int16_t in[80/SUB_BLOCKS_8_22] + // int32_t out[160/SUB_BLOCKS_8_22] + ///// + WebRtcSpl_UpBy2ShortToInt(in, 80 / SUB_BLOCKS_8_22, tmpmem + 18, state->S_8_16); + + ///// 16 --> 11 ///// + // int32_t in[160/SUB_BLOCKS_8_22] + // int32_t out[110/SUB_BLOCKS_8_22] + ///// + // copy state to and from input array + tmpmem[10] = state->S_16_11[0]; + 
        // (tail of WebRtcSpl_Resample8khzTo22khz; the function header is above this chunk)
        // Restore the 16->11 polyphase filter history into the scratch area,
        // then save the newest samples back as the state for the next call.
        tmpmem[11] = state->S_16_11[1];
        tmpmem[12] = state->S_16_11[2];
        tmpmem[13] = state->S_16_11[3];
        tmpmem[14] = state->S_16_11[4];
        tmpmem[15] = state->S_16_11[5];
        tmpmem[16] = state->S_16_11[6];
        tmpmem[17] = state->S_16_11[7];
        state->S_16_11[0] = tmpmem[160 / SUB_BLOCKS_8_22 + 10];
        state->S_16_11[1] = tmpmem[160 / SUB_BLOCKS_8_22 + 11];
        state->S_16_11[2] = tmpmem[160 / SUB_BLOCKS_8_22 + 12];
        state->S_16_11[3] = tmpmem[160 / SUB_BLOCKS_8_22 + 13];
        state->S_16_11[4] = tmpmem[160 / SUB_BLOCKS_8_22 + 14];
        state->S_16_11[5] = tmpmem[160 / SUB_BLOCKS_8_22 + 15];
        state->S_16_11[6] = tmpmem[160 / SUB_BLOCKS_8_22 + 16];
        state->S_16_11[7] = tmpmem[160 / SUB_BLOCKS_8_22 + 17];

        WebRtcSpl_32khzTo22khzIntToInt(tmpmem + 10, tmpmem, 10 / SUB_BLOCKS_8_22);

        ///// 11 --> 22 /////
        // int32_t  in[110/SUB_BLOCKS_8_22]
        // int16_t out[220/SUB_BLOCKS_8_22]
        /////
        WebRtcSpl_UpBy2IntToShort(tmpmem, 110 / SUB_BLOCKS_8_22, out, state->S_11_22);

        // move input/output pointers 10/SUB_BLOCKS_8_22 ms seconds ahead
        in += 80 / SUB_BLOCKS_8_22;
        out += 220 / SUB_BLOCKS_8_22;
    }
}

// initialize state of 8 -> 22 resampler
// Zeroes all three cascaded filter states (8->16 upsampler, 16->11 fractional
// stage, 11->22 upsampler) so the next call starts from silence.
void WebRtcSpl_ResetResample8khzTo22khz(WebRtcSpl_State8khzTo22khz* state)
{
    int k;
    for (k = 0; k < 8; k++)
    {
        state->S_8_16[k] = 0;
        state->S_16_11[k] = 0;
        state->S_11_22[k] = 0;
    }
}

// compute two inner-products and store them to output array
// in1 is walked forward and in2 backward over the same 9 coefficients, so a
// single call produces a symmetric pair of polyphase outputs. The initial
// 16384 (= 1 << 14) is the rounding offset of the documented output format
// ("shifted 15 positions to the left, + offset 16384").
static void WebRtcSpl_DotProdIntToInt(const int32_t* in1, const int32_t* in2,
                                      const int16_t* coef_ptr, int32_t* out1,
                                      int32_t* out2)
{
    int32_t tmp1 = 16384;
    int32_t tmp2 = 16384;
    int16_t coef;

    coef = coef_ptr[0];
    tmp1 += coef * in1[0];
    tmp2 += coef * in2[-0];

    coef = coef_ptr[1];
    tmp1 += coef * in1[1];
    tmp2 += coef * in2[-1];

    coef = coef_ptr[2];
    tmp1 += coef * in1[2];
    tmp2 += coef * in2[-2];

    coef = coef_ptr[3];
    tmp1 += coef * in1[3];
    tmp2 += coef * in2[-3];

    coef = coef_ptr[4];
    tmp1 += coef * in1[4];
    tmp2 += coef * in2[-4];

    coef = coef_ptr[5];
    tmp1 += coef * in1[5];
    tmp2 += coef * in2[-5];

    coef = coef_ptr[6];
    tmp1 += coef * in1[6];
    tmp2 += coef * in2[-6];

    coef = coef_ptr[7];
    tmp1 += coef * in1[7];
    tmp2 += coef * in2[-7];

    coef = coef_ptr[8];
    *out1 = tmp1 + coef * in1[8];
    *out2 = tmp2 + coef * in2[-8];
}

// compute two inner-products and store them to output array
// Same dual dot-product as WebRtcSpl_DotProdIntToInt, but the accumulators are
// scaled back from Q15, rounded (via the initial 16384) and saturated to
// int16_t before being stored.
static void WebRtcSpl_DotProdIntToShort(const int32_t* in1, const int32_t* in2,
                                        const int16_t* coef_ptr, int16_t* out1,
                                        int16_t* out2)
{
    int32_t tmp1 = 16384;
    int32_t tmp2 = 16384;
    int16_t coef;

    coef = coef_ptr[0];
    tmp1 += coef * in1[0];
    tmp2 += coef * in2[-0];

    coef = coef_ptr[1];
    tmp1 += coef * in1[1];
    tmp2 += coef * in2[-1];

    coef = coef_ptr[2];
    tmp1 += coef * in1[2];
    tmp2 += coef * in2[-2];

    coef = coef_ptr[3];
    tmp1 += coef * in1[3];
    tmp2 += coef * in2[-3];

    coef = coef_ptr[4];
    tmp1 += coef * in1[4];
    tmp2 += coef * in2[-4];

    coef = coef_ptr[5];
    tmp1 += coef * in1[5];
    tmp2 += coef * in2[-5];

    coef = coef_ptr[6];
    tmp1 += coef * in1[6];
    tmp2 += coef * in2[-6];

    coef = coef_ptr[7];
    tmp1 += coef * in1[7];
    tmp2 += coef * in2[-7];

    coef = coef_ptr[8];
    tmp1 += coef * in1[8];
    tmp2 += coef * in2[-8];

    // scale down, round and saturate
    tmp1 >>= 15;
    if (tmp1 > (int32_t)0x00007FFF)
        tmp1 = 0x00007FFF;
    if (tmp1 < (int32_t)0xFFFF8000)
        tmp1 = 0xFFFF8000;
    tmp2 >>= 15;
    if (tmp2 > (int32_t)0x00007FFF)
        tmp2 = 0x00007FFF;
    if (tmp2 < (int32_t)0xFFFF8000)
        tmp2 = 0xFFFF8000;
    *out1 = (int16_t)tmp1;
    *out2 = (int16_t)tmp2;
}

// Resampling ratio: 11/16
// input:  int32_t (normalized, not saturated) :: size 16 * K
// output: int32_t (shifted 15 positions to the left, + offset 16384) :: size 11 * K
// K: Number of blocks
void WebRtcSpl_32khzTo22khzIntToInt(const int32_t* In,
                                    int32_t* Out,
                                    int32_t K)
{
    /////////////////////////////////////////////////////////////
    // Filter operation:
    //
    // Perform resampling (16 input samples -> 11 output samples);
    // process in sub blocks of size 16 samples.
    int32_t m;

    for (m = 0; m < K; m++)
    {
        // first output sample: pass-through of In[3], converted to the
        // Q15-plus-rounding-offset output format
        Out[0] = ((int32_t)In[3] << 15) + (1 << 14);

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToInt(&In[0], &In[22], kCoefficients32To22[0], &Out[1], &Out[10]);

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToInt(&In[2], &In[20], kCoefficients32To22[1], &Out[2], &Out[9]);

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToInt(&In[3], &In[19], kCoefficients32To22[2], &Out[3], &Out[8]);

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToInt(&In[5], &In[17], kCoefficients32To22[3], &Out[4], &Out[7]);

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToInt(&In[6], &In[16], kCoefficients32To22[4], &Out[5], &Out[6]);

        // update pointers
        In += 16;
        Out += 11;
    }
}

// Resampling ratio: 11/16
// input:  int32_t (normalized, not saturated) :: size 16 * K
// output: int16_t (saturated) :: size 11 * K
// K: Number of blocks
void WebRtcSpl_32khzTo22khzIntToShort(const int32_t *In,
                                      int16_t *Out,
                                      int32_t K)
{
    /////////////////////////////////////////////////////////////
    // Filter operation:
    //
    // Perform resampling (16 input samples -> 11 output samples);
    // process in sub blocks of size 16 samples.
    int32_t tmp;
    int32_t m;

    for (m = 0; m < K; m++)
    {
        // first output sample: pass-through of In[3], saturated to int16_t
        tmp = In[3];
        if (tmp > (int32_t)0x00007FFF)
            tmp = 0x00007FFF;
        if (tmp < (int32_t)0xFFFF8000)
            tmp = 0xFFFF8000;
        Out[0] = (int16_t)tmp;

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToShort(&In[0], &In[22], kCoefficients32To22[0], &Out[1], &Out[10]);

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToShort(&In[2], &In[20], kCoefficients32To22[1], &Out[2], &Out[9]);

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToShort(&In[3], &In[19], kCoefficients32To22[2], &Out[3], &Out[8]);

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToShort(&In[5], &In[17], kCoefficients32To22[3], &Out[4], &Out[7]);

        // sum and accumulate filter coefficients and input samples
        WebRtcSpl_DotProdIntToShort(&In[6], &In[16], kCoefficients32To22[4], &Out[5], &Out[6]);

        // update pointers
        In += 16;
        Out += 11;
    }
}
// diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_48khz.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_48khz.c
// new file mode 100644
// index 000000000..2220cc333
// --- /dev/null
// +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_48khz.c
// @@ -0,0 +1,186 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */


/*
 * This file contains resampling functions between 48 kHz and nb/wb.
 * The description header can be found in signal_processing_library.h
 *
 */

// NOTE(review): the include target below was lost in extraction; restored to
// <string.h>, which the memcpy/memset calls in this file require — confirm
// against upstream webrtc resample_48khz.c.
#include <string.h>
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/common_audio/signal_processing/resample_by_2_internal.h"

////////////////////////////
///// 48 kHz -> 16 kHz /////
////////////////////////////

// 48 -> 16 resampler
// in:     480 samples @ 48 kHz;  out: 160 samples @ 16 kHz.
// tmpmem: caller-provided scratch; this function touches indices 0..495,
//         so it must hold at least 496 int32_t.
// Pipeline: low-pass at 48 kHz -> fractional 48->32 -> halve 32->16.
void WebRtcSpl_Resample48khzTo16khz(const int16_t* in, int16_t* out,
                                    WebRtcSpl_State48khzTo16khz* state, int32_t* tmpmem)
{
    ///// 48 --> 48(LP) /////
    // int16_t  in[480]
    // int32_t out[480]
    /////
    WebRtcSpl_LPBy2ShortToInt(in, 480, tmpmem + 16, state->S_48_48);

    ///// 48 --> 32 /////
    // int32_t  in[480]
    // int32_t out[320]
    /////
    // copy state to and from input array
    memcpy(tmpmem + 8, state->S_48_32, 8 * sizeof(int32_t));
    memcpy(state->S_48_32, tmpmem + 488, 8 * sizeof(int32_t));
    WebRtcSpl_Resample48khzTo32khz(tmpmem + 8, tmpmem, 160);

    ///// 32 --> 16 /////
    // int32_t  in[320]
    // int16_t out[160]
    /////
    WebRtcSpl_DownBy2IntToShort(tmpmem, 320, out, state->S_32_16);
}

// initialize state of 48 -> 16 resampler
// Note: S_48_48 is the 16-entry low-pass state; the other two are 8 entries.
void WebRtcSpl_ResetResample48khzTo16khz(WebRtcSpl_State48khzTo16khz* state)
{
    memset(state->S_48_48, 0, 16 * sizeof(int32_t));
    memset(state->S_48_32, 0, 8 * sizeof(int32_t));
    memset(state->S_32_16, 0, 8 * sizeof(int32_t));
}

////////////////////////////
///// 16 kHz -> 48 kHz /////
////////////////////////////

// 16 -> 48 resampler
// in:     160 samples @ 16 kHz;  out: 480 samples @ 48 kHz.
// tmpmem: caller-provided scratch; indices 0..335 are touched.
// Pipeline: double 16->32 -> fractional 32->24 -> double 24->48.
void WebRtcSpl_Resample16khzTo48khz(const int16_t* in, int16_t* out,
                                    WebRtcSpl_State16khzTo48khz* state, int32_t* tmpmem)
{
    ///// 16 --> 32 /////
    // int16_t  in[160]
    // int32_t out[320]
    /////
    WebRtcSpl_UpBy2ShortToInt(in, 160, tmpmem + 16, state->S_16_32);

    ///// 32 --> 24 /////
    // int32_t  in[320]
    // int32_t out[240]
    // copy state to and from input array
    /////
    memcpy(tmpmem + 8, state->S_32_24, 8 * sizeof(int32_t));
    memcpy(state->S_32_24, tmpmem + 328, 8 * sizeof(int32_t));
    WebRtcSpl_Resample32khzTo24khz(tmpmem + 8, tmpmem, 80);

    ///// 24 --> 48 /////
    // int32_t  in[240]
    // int16_t out[480]
    /////
    WebRtcSpl_UpBy2IntToShort(tmpmem, 240, out, state->S_24_48);
}

// initialize state of 16 -> 48 resampler
void WebRtcSpl_ResetResample16khzTo48khz(WebRtcSpl_State16khzTo48khz* state)
{
    memset(state->S_16_32, 0, 8 * sizeof(int32_t));
    memset(state->S_32_24, 0, 8 * sizeof(int32_t));
    memset(state->S_24_48, 0, 8 * sizeof(int32_t));
}

////////////////////////////
///// 48 kHz ->  8 kHz /////
////////////////////////////

// 48 -> 8 resampler
// in:     480 samples @ 48 kHz;  out: 80 samples @ 8 kHz.
// tmpmem: caller-provided scratch; indices 0..495 are touched (>= 496 int32_t).
// Pipeline: halve 48->24 -> low-pass at 24 -> fractional 24->16 -> halve 16->8.
void WebRtcSpl_Resample48khzTo8khz(const int16_t* in, int16_t* out,
                                   WebRtcSpl_State48khzTo8khz* state, int32_t* tmpmem)
{
    ///// 48 --> 24 /////
    // int16_t  in[480]
    // int32_t out[240]
    /////
    WebRtcSpl_DownBy2ShortToInt(in, 480, tmpmem + 256, state->S_48_24);

    ///// 24 --> 24(LP) /////
    // int32_t  in[240]
    // int32_t out[240]
    /////
    WebRtcSpl_LPBy2IntToInt(tmpmem + 256, 240, tmpmem + 16, state->S_24_24);

    ///// 24 --> 16 /////
    // int32_t  in[240]
    // int32_t out[160]
    /////
    // copy state to and from input array
    memcpy(tmpmem + 8, state->S_24_16, 8 * sizeof(int32_t));
    memcpy(state->S_24_16, tmpmem + 248, 8 * sizeof(int32_t));
    WebRtcSpl_Resample48khzTo32khz(tmpmem + 8, tmpmem, 80);

    ///// 16 --> 8 /////
    // int32_t  in[160]
    // int16_t out[80]
    /////
    WebRtcSpl_DownBy2IntToShort(tmpmem, 160, out, state->S_16_8);
}

// initialize state of 48 -> 8 resampler
// Note: S_24_24 is the 16-entry low-pass state; the others are 8 entries.
void WebRtcSpl_ResetResample48khzTo8khz(WebRtcSpl_State48khzTo8khz* state)
{
    memset(state->S_48_24, 0, 8 * sizeof(int32_t));
    memset(state->S_24_24, 0, 16 * sizeof(int32_t));
    memset(state->S_24_16, 0, 8 * sizeof(int32_t));
    memset(state->S_16_8, 0, 8 * sizeof(int32_t));
}

////////////////////////////
/////  8 kHz -> 48 kHz /////
////////////////////////////

// 8 -> 48 resampler
// in:     80 samples @ 8 kHz;  out: 480 samples @ 48 kHz.
// tmpmem: caller-provided scratch; indices 0..423 are touched.
// Pipeline: double 8->16 -> fractional 16->12 -> double 12->24 -> double 24->48.
void WebRtcSpl_Resample8khzTo48khz(const int16_t* in, int16_t* out,
                                   WebRtcSpl_State8khzTo48khz* state, int32_t* tmpmem)
{
    ///// 8 --> 16 /////
    // int16_t  in[80]
    // int32_t out[160]
    /////
    WebRtcSpl_UpBy2ShortToInt(in, 80, tmpmem + 264, state->S_8_16);

    ///// 16 --> 12 /////
    // int32_t  in[160]
    // int32_t out[120]
    /////
    // copy state to and from input array
    memcpy(tmpmem + 256, state->S_16_12, 8 * sizeof(int32_t));
    memcpy(state->S_16_12, tmpmem + 416, 8 * sizeof(int32_t));
    WebRtcSpl_Resample32khzTo24khz(tmpmem + 256, tmpmem + 240, 40);

    ///// 12 --> 24 /////
    // int32_t  in[120]
    // int32_t out[240]   (fixed: previous comment wrongly said int16_t)
    /////
    WebRtcSpl_UpBy2IntToInt(tmpmem + 240, 120, tmpmem, state->S_12_24);

    ///// 24 --> 48 /////
    // int32_t  in[240]
    // int16_t out[480]
    /////
    WebRtcSpl_UpBy2IntToShort(tmpmem, 240, out, state->S_24_48);
}

// initialize state of 8 -> 48 resampler
void WebRtcSpl_ResetResample8khzTo48khz(WebRtcSpl_State8khzTo48khz* state)
{
    memset(state->S_8_16, 0, 8 * sizeof(int32_t));
    memset(state->S_16_12, 0, 8 * sizeof(int32_t));
    memset(state->S_12_24, 0, 8 * sizeof(int32_t));
    memset(state->S_24_48, 0, 8 * sizeof(int32_t));
}
// diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2.c
// new file mode 100644
// index 000000000..dcba82e35
// --- /dev/null
// +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2.c
// @@ -0,0 +1,183 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */


/*
 * This file contains the resampling by two functions.
 * The description header can be found in signal_processing_library.h
 *
 */

#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"

#ifdef WEBRTC_ARCH_ARM_V7

// allpass filter coefficients.
// Entries fed to MUL_ACCUM_1 (SMLAWB) are plain Q15 values; entries fed to
// MUL_ACCUM_2 (SMMLA) are pre-shifted << 15 — the split matches the per-tap
// usage in the filters below.
static const uint32_t kResampleAllpass1[3] = {3284, 24441, 49528 << 15};
static const uint32_t kResampleAllpass2[3] =
    {12199, 37471 << 15, 60255 << 15};

// Multiply two 32-bit values and accumulate to another input value.
// Return: state + ((diff * tbl_value) >> 16)
static __inline int32_t MUL_ACCUM_1(int32_t tbl_value,
                                    int32_t diff,
                                    int32_t state) {
  int32_t result;
  __asm __volatile ("smlawb %0, %1, %2, %3": "=r"(result): "r"(diff),
                                             "r"(tbl_value), "r"(state));
  return result;
}

// Multiply two 32-bit values and accumulate to another input value.
// Return: Return: state + (((diff << 1) * tbl_value) >> 32)
//
// The reason to introduce this function is that, in case we can't use smlawb
// instruction (in MUL_ACCUM_1) due to input value range, we can still use
// smmla to save some cycles.
static __inline int32_t MUL_ACCUM_2(int32_t tbl_value,
                                    int32_t diff,
                                    int32_t state) {
  int32_t result;
  __asm __volatile ("smmla %0, %1, %2, %3": "=r"(result): "r"(diff << 1),
                                            "r"(tbl_value), "r"(state));
  return result;
}

#else

// allpass filter coefficients.
static const uint16_t kResampleAllpass1[3] = {3284, 24441, 49528};
static const uint16_t kResampleAllpass2[3] = {12199, 37471, 60255};

// Multiply a 32-bit value with a 16-bit value and accumulate to another input:
#define MUL_ACCUM_1(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)
#define MUL_ACCUM_2(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)

#endif  // WEBRTC_ARCH_ARM_V7


// decimator
// Halves the sample rate: len input samples -> len/2 output samples, using
// two 3-tap cascaded allpass branches (even/odd polyphase) whose outputs are
// averaged. filtState must hold 8 int32_t and carries over between calls.
// (A MIPS assembly version exists elsewhere, hence the guard.)
#if !defined(MIPS32_LE)
void WebRtcSpl_DownsampleBy2(const int16_t* in, size_t len,
                             int16_t* out, int32_t* filtState) {
  int32_t tmp1, tmp2, diff, in32, out32;
  size_t i;

  register int32_t state0 = filtState[0];
  register int32_t state1 = filtState[1];
  register int32_t state2 = filtState[2];
  register int32_t state3 = filtState[3];
  register int32_t state4 = filtState[4];
  register int32_t state5 = filtState[5];
  register int32_t state6 = filtState[6];
  register int32_t state7 = filtState[7];

  for (i = (len >> 1); i > 0; i--) {
    // lower allpass filter
    in32 = (int32_t)(*in++) << 10;
    diff = in32 - state1;
    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
    state0 = in32;
    diff = tmp1 - state2;
    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);
    state1 = tmp1;
    diff = tmp2 - state3;
    state3 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state2);
    state2 = tmp2;

    // upper allpass filter
    in32 = (int32_t)(*in++) << 10;
    diff = in32 - state5;
    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
    state4 = in32;
    diff = tmp1 - state6;
    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state5);
    state5 = tmp1;
    diff = tmp2 - state7;
    state7 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state6);
    state6 = tmp2;

    // add two allpass outputs, divide by two and round
    out32 = (state3 + state7 + 1024) >> 11;

    // limit amplitude to prevent wrap-around, and write to output array
    *out++ = WebRtcSpl_SatW32ToW16(out32);
  }

  filtState[0] = state0;
  filtState[1] = state1;
  filtState[2] = state2;
  filtState[3] = state3;
  filtState[4] = state4;
  filtState[5] = state5;
  filtState[6] = state6;
  filtState[7] = state7;
}
#endif  // !defined(MIPS32_LE)


// interpolator
// Doubles the sample rate: len input samples -> 2*len output samples. Each
// input sample produces one output from the lower and one from the upper
// allpass branch. filtState must hold 8 int32_t and carries over between calls.
void WebRtcSpl_UpsampleBy2(const int16_t* in, size_t len,
                           int16_t* out, int32_t* filtState) {
  int32_t tmp1, tmp2, diff, in32, out32;
  size_t i;

  register int32_t state0 = filtState[0];
  register int32_t state1 = filtState[1];
  register int32_t state2 = filtState[2];
  register int32_t state3 = filtState[3];
  register int32_t state4 = filtState[4];
  register int32_t state5 = filtState[5];
  register int32_t state6 = filtState[6];
  register int32_t state7 = filtState[7];

  for (i = len; i > 0; i--) {
    // lower allpass filter
    in32 = (int32_t)(*in++) << 10;
    diff = in32 - state1;
    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state0);
    state0 = in32;
    diff = tmp1 - state2;
    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state1);
    state1 = tmp1;
    diff = tmp2 - state3;
    state3 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state2);
    state2 = tmp2;

    // round; limit amplitude to prevent wrap-around; write to output array
    out32 = (state3 + 512) >> 10;
    *out++ = WebRtcSpl_SatW32ToW16(out32);

    // upper allpass filter
    diff = in32 - state5;
    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state4);
    state4 = in32;
    diff = tmp1 - state6;
    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state5);
    state5 = tmp1;
    diff = tmp2 - state7;
    state7 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state6);
    state6 = tmp2;

    // round; limit amplitude to prevent wrap-around; write to output array
    out32 = (state7 + 512) >> 10;
    *out++ = WebRtcSpl_SatW32ToW16(out32);
  }

  filtState[0] = state0;
  filtState[1] = state1;
  filtState[2] = state2;
  filtState[3] = state3;
  filtState[4] = state4;
  filtState[5] = state5;
  filtState[6] = state6;
  filtState[7] = state7;
}
// diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.c
// b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.c
// new file mode 100644
// index 000000000..085069c83
// --- /dev/null
// +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.c
// @@ -0,0 +1,679 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */


/*
 * This file contains some internal resampling functions.
 *
 */

#include "webrtc/common_audio/signal_processing/resample_by_2_internal.h"

// allpass filter coefficients.
// Row [0] feeds the "upper" branch, row [1] the "lower" branch of each
// polyphase pair; values are Q14 (scaled back by the >> 14 at each tap).
static const int16_t kResampleAllpass[2][3] = {
        {821, 6110, 12382},
        {3050, 9368, 15063}
};

//
// decimator
// input:  int32_t (shifted 15 positions to the left, + offset 16384) OVERWRITTEN!
// output: int16_t (saturated) (of length len/2)
// state:  filter state array; length = 8
// The input buffer is reused as scratch for the two allpass branch outputs
// before they are combined, which is why it is overwritten.
void WebRtcSpl_DownBy2IntToShort(int32_t *in, int32_t len, int16_t *out,
                                 int32_t *state)
{
    int32_t tmp0, tmp1, diff;
    int32_t i;

    len >>= 1;

    // lower allpass filter (operates on even input samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = in[i << 1];
        diff = tmp0 - state[1];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[0] + diff * kResampleAllpass[1][0];
        state[0] = tmp0;
        diff = tmp1 - state[2];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[1] + diff * kResampleAllpass[1][1];
        state[1] = tmp1;
        diff = tmp0 - state[3];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[3] = state[2] + diff * kResampleAllpass[1][2];
        state[2] = tmp0;

        // divide by two and store temporarily
        in[i << 1] = (state[3] >> 1);
    }

    in++;

    // upper allpass filter (operates on odd input samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = in[i << 1];
        diff = tmp0 - state[5];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[4] + diff * kResampleAllpass[0][0];
        state[4] = tmp0;
        diff = tmp1 - state[6];
        // scale down and round
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[5] + diff * kResampleAllpass[0][1];
        state[5] = tmp1;
        diff = tmp0 - state[7];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[7] = state[6] + diff * kResampleAllpass[0][2];
        state[6] = tmp0;

        // divide by two and store temporarily
        in[i << 1] = (state[7] >> 1);
    }

    in--;

    // combine allpass outputs
    for (i = 0; i < len; i += 2)
    {
        // divide by two, add both allpass outputs and round
        tmp0 = (in[i << 1] + in[(i << 1) + 1]) >> 15;
        tmp1 = (in[(i << 1) + 2] + in[(i << 1) + 3]) >> 15;
        if (tmp0 > (int32_t)0x00007FFF)
            tmp0 = 0x00007FFF;
        if (tmp0 < (int32_t)0xFFFF8000)
            tmp0 = 0xFFFF8000;
        out[i] = (int16_t)tmp0;
        if (tmp1 > (int32_t)0x00007FFF)
            tmp1 = 0x00007FFF;
        if (tmp1 < (int32_t)0xFFFF8000)
            tmp1 = 0xFFFF8000;
        out[i + 1] = (int16_t)tmp1;
    }
}

//
// decimator
// input:  int16_t
// output: int32_t (shifted 15 positions to the left, + offset 16384) (of length len/2)
// state:  filter state array; length = 8
// The even-sample branch writes out[i] and the odd-sample branch adds its
// half into the same slot (+=), combining both allpass outputs in place.
void WebRtcSpl_DownBy2ShortToInt(const int16_t *in,
                                 int32_t len,
                                 int32_t *out,
                                 int32_t *state)
{
    int32_t tmp0, tmp1, diff;
    int32_t i;

    len >>= 1;

    // lower allpass filter (operates on even input samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
        diff = tmp0 - state[1];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[0] + diff * kResampleAllpass[1][0];
        state[0] = tmp0;
        diff = tmp1 - state[2];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[1] + diff * kResampleAllpass[1][1];
        state[1] = tmp1;
        diff = tmp0 - state[3];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[3] = state[2] + diff * kResampleAllpass[1][2];
        state[2] = tmp0;

        // divide by two and store temporarily
        out[i] = (state[3] >> 1);
    }

    in++;

    // upper allpass filter (operates on odd input samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
        diff = tmp0 - state[5];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[4] + diff * kResampleAllpass[0][0];
        state[4] = tmp0;
        diff = tmp1 - state[6];
        // scale down and round
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[5] + diff * kResampleAllpass[0][1];
        state[5] = tmp1;
        diff = tmp0 - state[7];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[7] = state[6] + diff * kResampleAllpass[0][2];
        state[6] = tmp0;

        // divide by two and store temporarily
        out[i] += (state[7] >> 1);
    }

    in--;
}

//
// interpolator
// input:  int16_t
// output: int32_t (normalized, not saturated) (of length len*2)
// state:  filter state array; length = 8
// The upper branch fills the odd output slots first, then `out` is advanced
// and the lower branch fills the even slots (interleaved via i << 1).
void WebRtcSpl_UpBy2ShortToInt(const int16_t *in, int32_t len, int32_t *out,
                               int32_t *state)
{
    int32_t tmp0, tmp1, diff;
    int32_t i;

    // upper allpass filter (generates odd output samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = ((int32_t)in[i] << 15) + (1 << 14);
        diff = tmp0 - state[5];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[4] + diff * kResampleAllpass[0][0];
        state[4] = tmp0;
        diff = tmp1 - state[6];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[5] + diff * kResampleAllpass[0][1];
        state[5] = tmp1;
        diff = tmp0 - state[7];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[7] = state[6] + diff * kResampleAllpass[0][2];
        state[6] = tmp0;

        // scale down, round and store
        out[i << 1] = state[7] >> 15;
    }

    out++;

    // lower allpass filter (generates even output samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = ((int32_t)in[i] << 15) + (1 << 14);
        diff = tmp0 - state[1];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[0] + diff * kResampleAllpass[1][0];
        state[0] = tmp0;
        diff = tmp1 - state[2];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[1] + diff * kResampleAllpass[1][1];
        state[1] = tmp1;
        diff = tmp0 - state[3];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[3] = state[2] + diff * kResampleAllpass[1][2];
        state[2] = tmp0;

        // scale down, round and store
        out[i << 1] = state[3] >> 15;
    }
}

//
// interpolator
// input:  int32_t (shifted 15 positions to the left, + offset 16384)
// output: int32_t (shifted 15 positions to the left, + offset 16384) (of length len*2)
// state:  filter state array; length = 8
void WebRtcSpl_UpBy2IntToInt(const int32_t *in, int32_t len, int32_t *out,
                             int32_t *state)
{
    int32_t tmp0, tmp1, diff;
    int32_t i;

    // upper allpass filter (generates odd output samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = in[i];
        diff = tmp0 - state[5];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[4] + diff * kResampleAllpass[0][0];
        state[4] = tmp0;
        diff = tmp1 - state[6];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[5] + diff * kResampleAllpass[0][1];
        state[5] = tmp1;
        diff = tmp0 - state[7];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[7] = state[6] + diff * kResampleAllpass[0][2];
        state[6] = tmp0;

        // scale down, round and store
        out[i << 1] = state[7];
    }

    out++;

    // lower allpass filter (generates even output samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = in[i];
        diff = tmp0 - state[1];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[0] + diff * kResampleAllpass[1][0];
        state[0] = tmp0;
        diff = tmp1 - state[2];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[1] + diff * kResampleAllpass[1][1];
        state[1] = tmp1;
        diff = tmp0 - state[3];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[3] = state[2] + diff * kResampleAllpass[1][2];
        state[2] = tmp0;

        // scale down, round and store
        out[i << 1] = state[3];
    }
}

//
// interpolator
// input:  int32_t (shifted 15 positions to the left, + offset 16384)
// output: int16_t (saturated) (of length len*2)
// state:  filter state array; length = 8
void WebRtcSpl_UpBy2IntToShort(const int32_t *in, int32_t len, int16_t *out,
                               int32_t *state)
{
    int32_t tmp0, tmp1, diff;
    int32_t i;

    // upper allpass filter (generates odd output samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = in[i];
        diff = tmp0 - state[5];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[4] + diff * kResampleAllpass[0][0];
        state[4] = tmp0;
        diff = tmp1 - state[6];
        // scale down and round
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[5] + diff * kResampleAllpass[0][1];
        state[5] = tmp1;
        diff = tmp0 - state[7];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[7] = state[6] + diff * kResampleAllpass[0][2];
        state[6] = tmp0;

        // scale down, saturate and store
        tmp1 = state[7] >> 15;
        if (tmp1 > (int32_t)0x00007FFF)
            tmp1 = 0x00007FFF;
        if (tmp1 < (int32_t)0xFFFF8000)
            tmp1 = 0xFFFF8000;
        out[i << 1] = (int16_t)tmp1;
    }

    out++;

    // lower allpass filter (generates even output samples)
    for (i = 0; i < len; i++)
    {
        tmp0 = in[i];
        diff = tmp0 - state[1];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[0] + diff * kResampleAllpass[1][0];
        state[0] = tmp0;
        diff = tmp1 - state[2];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[1] + diff * kResampleAllpass[1][1];
        state[1] = tmp1;
        diff = tmp0 - state[3];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[3] = state[2] + diff * kResampleAllpass[1][2];
        state[2] = tmp0;

        // scale down, saturate and store
        tmp1 = state[3] >> 15;
        if (tmp1 > (int32_t)0x00007FFF)
            tmp1 = 0x00007FFF;
        if (tmp1 < (int32_t)0xFFFF8000)
            tmp1 = 0xFFFF8000;
        out[i << 1] = (int16_t)tmp1;
    }
}

// lowpass filter
// input:  int16_t
// output: int32_t (normalized, not saturated)
// state:  filter state array; length = 16
//         (fixed: previous comment said 8, but state[0..15] are used below and
//         the matching reset functions memset 16 entries)
void WebRtcSpl_LPBy2ShortToInt(const int16_t* in, int32_t len, int32_t* out,
                               int32_t* state)
{
    int32_t tmp0, tmp1, diff;
    int32_t i;

    len >>= 1;

    // lower allpass filter: odd input -> even output samples
    in++;
    // initial state of polyphase delay element
    tmp0 = state[12];
    for (i = 0; i < len; i++)
    {
        diff = tmp0 - state[1];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[0] + diff * kResampleAllpass[1][0];
        state[0] = tmp0;
        diff = tmp1 - state[2];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[1] + diff * kResampleAllpass[1][1];
        state[1] = tmp1;
        diff = tmp0 - state[3];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[3] = state[2] + diff * kResampleAllpass[1][2];
        state[2] = tmp0;

        // scale down, round and store
        out[i << 1] = state[3] >> 1;
        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
    }
    in--;

    // upper allpass filter: even input -> even output samples
    for (i = 0; i < len; i++)
    {
        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
        diff = tmp0 - state[5];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[4] + diff * kResampleAllpass[0][0];
        state[4] = tmp0;
        diff = tmp1 - state[6];
        // scale down and round
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[5] + diff * kResampleAllpass[0][1];
        state[5] = tmp1;
        diff = tmp0 - state[7];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[7] = state[6] + diff * kResampleAllpass[0][2];
        state[6] = tmp0;

        // average the two allpass outputs, scale down and store
        out[i << 1] = (out[i << 1] + (state[7] >> 1)) >> 15;
    }

    // switch to odd output samples
    out++;

    // lower allpass filter: even input -> odd output samples
    for (i = 0; i < len; i++)
    {
        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
        diff = tmp0 - state[9];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[8] + diff * kResampleAllpass[1][0];
        state[8] = tmp0;
        diff = tmp1 - state[10];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[9] + diff * kResampleAllpass[1][1];
        state[9] = tmp1;
        diff = tmp0 - state[11];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[11] = state[10] + diff * kResampleAllpass[1][2];
        state[10] = tmp0;

        // scale down, round and store
        out[i << 1] = state[11] >> 1;
    }

    // upper allpass filter: odd input -> odd output samples
    in++;
    for (i = 0; i < len; i++)
    {
        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
        diff = tmp0 - state[13];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[12] + diff * kResampleAllpass[0][0];
        state[12] = tmp0;
        diff = tmp1 - state[14];
        // scale down and round
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[13] + diff * kResampleAllpass[0][1];
        state[13] = tmp1;
        diff = tmp0 - state[15];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[15] = state[14] + diff * kResampleAllpass[0][2];
        state[14] = tmp0;

        // average the two allpass outputs, scale down and store
        out[i << 1] = (out[i << 1] + (state[15] >> 1)) >> 15;
    }
}

// lowpass filter
// input:  int32_t (shifted 15 positions to the left, + offset 16384)
// output: int32_t (normalized, not saturated)
// state:  filter state array; length = 16
//         (fixed: previous comment said 8, but state[0..15] are used below and
//         the matching reset functions memset 16 entries)
void WebRtcSpl_LPBy2IntToInt(const int32_t* in, int32_t len, int32_t* out,
                             int32_t* state)
{
    int32_t tmp0, tmp1, diff;
    int32_t i;

    len >>= 1;

    // lower allpass filter: odd input -> even output samples
    in++;
    // initial state of polyphase delay element
    tmp0 = state[12];
    for (i = 0; i < len; i++)
    {
        diff = tmp0 - state[1];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[0] + diff * kResampleAllpass[1][0];
        state[0] = tmp0;
        diff = tmp1 - state[2];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[1] + diff * kResampleAllpass[1][1];
        state[1] = tmp1;
        diff = tmp0 - state[3];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[3] = state[2] + diff * kResampleAllpass[1][2];
        state[2] = tmp0;

        // scale down, round and store
        out[i << 1] = state[3] >> 1;
        tmp0 = in[i << 1];
    }
    in--;

    // upper allpass filter: even input -> even output samples
    for (i = 0; i < len; i++)
    {
        tmp0 = in[i << 1];
        diff = tmp0 - state[5];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[4] + diff * kResampleAllpass[0][0];
        state[4] = tmp0;
        diff = tmp1 - state[6];
        // scale down and round
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[5] + diff * kResampleAllpass[0][1];
        state[5] = tmp1;
        diff = tmp0 - state[7];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[7] = state[6] + diff * kResampleAllpass[0][2];
        state[6] = tmp0;

        // average the two allpass outputs, scale down and store
        out[i << 1] = (out[i << 1] + (state[7] >> 1)) >> 15;
    }

    // switch to odd output samples
    out++;

    // lower allpass filter: even input -> odd output samples
    for (i = 0; i < len; i++)
    {
        tmp0 = in[i << 1];
        diff = tmp0 - state[9];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[8] + diff * kResampleAllpass[1][0];
        state[8] = tmp0;
        diff = tmp1 - state[10];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[9] + diff * kResampleAllpass[1][1];
        state[9] = tmp1;
        diff = tmp0 - state[11];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[11] = state[10] + diff * kResampleAllpass[1][2];
        state[10] = tmp0;

        // scale down, round and store
        out[i << 1] = state[11] >> 1;
    }

    // upper allpass filter: odd input -> odd output samples
    in++;
    for (i = 0; i < len; i++)
    {
        tmp0 = in[i << 1];
        diff = tmp0 - state[13];
        // scale down and round
        diff = (diff + (1 << 13)) >> 14;
        tmp1 = state[12] + diff * kResampleAllpass[0][0];
        state[12] = tmp0;
        diff = tmp1 - state[14];
        // scale down and round
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        tmp0 = state[13] + diff * kResampleAllpass[0][1];
        state[13] = tmp1;
        diff = tmp0 - state[15];
        // scale down and truncate
        diff = diff >> 14;
        if (diff < 0)
            diff += 1;
        state[15] = state[14] + diff * kResampleAllpass[0][2];
        state[14] = tmp0;

        // average the two allpass outputs, scale down and store
        out[i << 1] = (out[i << 1] + (state[15] >> 1)) >> 15;
    }
}
// diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.h
// new file mode 100644
// index 000000000..5c9533eef
// --- /dev/null
// +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_by_2_internal.h
// @@ -0,0 +1,47 @@
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */


/*
 * This header file contains some internal resampling functions.
+ * + */ + +#ifndef WEBRTC_SPL_RESAMPLE_BY_2_INTERNAL_H_ +#define WEBRTC_SPL_RESAMPLE_BY_2_INTERNAL_H_ + +#include "webrtc/typedefs.h" + +/******************************************************************* + * resample_by_2_fast.c + * Functions for internal use in the other resample functions + ******************************************************************/ +void WebRtcSpl_DownBy2IntToShort(int32_t *in, int32_t len, int16_t *out, + int32_t *state); + +void WebRtcSpl_DownBy2ShortToInt(const int16_t *in, int32_t len, + int32_t *out, int32_t *state); + +void WebRtcSpl_UpBy2ShortToInt(const int16_t *in, int32_t len, + int32_t *out, int32_t *state); + +void WebRtcSpl_UpBy2IntToInt(const int32_t *in, int32_t len, int32_t *out, + int32_t *state); + +void WebRtcSpl_UpBy2IntToShort(const int32_t *in, int32_t len, + int16_t *out, int32_t *state); + +void WebRtcSpl_LPBy2ShortToInt(const int16_t* in, int32_t len, + int32_t* out, int32_t* state); + +void WebRtcSpl_LPBy2IntToInt(const int32_t* in, int32_t len, int32_t* out, + int32_t* state); + +#endif // WEBRTC_SPL_RESAMPLE_BY_2_INTERNAL_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_fractional.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_fractional.c new file mode 100644 index 000000000..6409fbac4 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/resample_fractional.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + + +/* + * This file contains the resampling functions between 48, 44, 32 and 24 kHz. + * The description headers can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +// interpolation coefficients +static const int16_t kCoefficients48To32[2][8] = { + {778, -2050, 1087, 23285, 12903, -3783, 441, 222}, + {222, 441, -3783, 12903, 23285, 1087, -2050, 778} +}; + +static const int16_t kCoefficients32To24[3][8] = { + {767, -2362, 2434, 24406, 10620, -3838, 721, 90}, + {386, -381, -2646, 19062, 19062, -2646, -381, 386}, + {90, 721, -3838, 10620, 24406, 2434, -2362, 767} +}; + +static const int16_t kCoefficients44To32[4][9] = { + {117, -669, 2245, -6183, 26267, 13529, -3245, 845, -138}, + {-101, 612, -2283, 8532, 29790, -5138, 1789, -524, 91}, + {50, -292, 1016, -3064, 32010, 3933, -1147, 315, -53}, + {-156, 974, -3863, 18603, 21691, -6246, 2353, -712, 126} +}; + +// Resampling ratio: 2/3 +// input: int32_t (normalized, not saturated) :: size 3 * K +// output: int32_t (shifted 15 positions to the left, + offset 16384) :: size 2 * K +// K: number of blocks + +void WebRtcSpl_Resample48khzTo32khz(const int32_t *In, int32_t *Out, size_t K) +{ + ///////////////////////////////////////////////////////////// + // Filter operation: + // + // Perform resampling (3 input samples -> 2 output samples); + // process in sub blocks of size 3 samples. 
+ int32_t tmp; + size_t m; + + for (m = 0; m < K; m++) + { + tmp = 1 << 14; + tmp += kCoefficients48To32[0][0] * In[0]; + tmp += kCoefficients48To32[0][1] * In[1]; + tmp += kCoefficients48To32[0][2] * In[2]; + tmp += kCoefficients48To32[0][3] * In[3]; + tmp += kCoefficients48To32[0][4] * In[4]; + tmp += kCoefficients48To32[0][5] * In[5]; + tmp += kCoefficients48To32[0][6] * In[6]; + tmp += kCoefficients48To32[0][7] * In[7]; + Out[0] = tmp; + + tmp = 1 << 14; + tmp += kCoefficients48To32[1][0] * In[1]; + tmp += kCoefficients48To32[1][1] * In[2]; + tmp += kCoefficients48To32[1][2] * In[3]; + tmp += kCoefficients48To32[1][3] * In[4]; + tmp += kCoefficients48To32[1][4] * In[5]; + tmp += kCoefficients48To32[1][5] * In[6]; + tmp += kCoefficients48To32[1][6] * In[7]; + tmp += kCoefficients48To32[1][7] * In[8]; + Out[1] = tmp; + + // update pointers + In += 3; + Out += 2; + } +} + +// Resampling ratio: 3/4 +// input: int32_t (normalized, not saturated) :: size 4 * K +// output: int32_t (shifted 15 positions to the left, + offset 16384) :: size 3 * K +// K: number of blocks + +void WebRtcSpl_Resample32khzTo24khz(const int32_t *In, int32_t *Out, size_t K) +{ + ///////////////////////////////////////////////////////////// + // Filter operation: + // + // Perform resampling (4 input samples -> 3 output samples); + // process in sub blocks of size 4 samples. 
+ size_t m; + int32_t tmp; + + for (m = 0; m < K; m++) + { + tmp = 1 << 14; + tmp += kCoefficients32To24[0][0] * In[0]; + tmp += kCoefficients32To24[0][1] * In[1]; + tmp += kCoefficients32To24[0][2] * In[2]; + tmp += kCoefficients32To24[0][3] * In[3]; + tmp += kCoefficients32To24[0][4] * In[4]; + tmp += kCoefficients32To24[0][5] * In[5]; + tmp += kCoefficients32To24[0][6] * In[6]; + tmp += kCoefficients32To24[0][7] * In[7]; + Out[0] = tmp; + + tmp = 1 << 14; + tmp += kCoefficients32To24[1][0] * In[1]; + tmp += kCoefficients32To24[1][1] * In[2]; + tmp += kCoefficients32To24[1][2] * In[3]; + tmp += kCoefficients32To24[1][3] * In[4]; + tmp += kCoefficients32To24[1][4] * In[5]; + tmp += kCoefficients32To24[1][5] * In[6]; + tmp += kCoefficients32To24[1][6] * In[7]; + tmp += kCoefficients32To24[1][7] * In[8]; + Out[1] = tmp; + + tmp = 1 << 14; + tmp += kCoefficients32To24[2][0] * In[2]; + tmp += kCoefficients32To24[2][1] * In[3]; + tmp += kCoefficients32To24[2][2] * In[4]; + tmp += kCoefficients32To24[2][3] * In[5]; + tmp += kCoefficients32To24[2][4] * In[6]; + tmp += kCoefficients32To24[2][5] * In[7]; + tmp += kCoefficients32To24[2][6] * In[8]; + tmp += kCoefficients32To24[2][7] * In[9]; + Out[2] = tmp; + + // update pointers + In += 4; + Out += 3; + } +} + +// +// fractional resampling filters +// Fout = 11/16 * Fin +// Fout = 8/11 * Fin +// + +// compute two inner-products and store them to output array +static void WebRtcSpl_ResampDotProduct(const int32_t *in1, const int32_t *in2, + const int16_t *coef_ptr, int32_t *out1, + int32_t *out2) +{ + int32_t tmp1 = 16384; + int32_t tmp2 = 16384; + int16_t coef; + + coef = coef_ptr[0]; + tmp1 += coef * in1[0]; + tmp2 += coef * in2[-0]; + + coef = coef_ptr[1]; + tmp1 += coef * in1[1]; + tmp2 += coef * in2[-1]; + + coef = coef_ptr[2]; + tmp1 += coef * in1[2]; + tmp2 += coef * in2[-2]; + + coef = coef_ptr[3]; + tmp1 += coef * in1[3]; + tmp2 += coef * in2[-3]; + + coef = coef_ptr[4]; + tmp1 += coef * in1[4]; + tmp2 += coef * 
in2[-4]; + + coef = coef_ptr[5]; + tmp1 += coef * in1[5]; + tmp2 += coef * in2[-5]; + + coef = coef_ptr[6]; + tmp1 += coef * in1[6]; + tmp2 += coef * in2[-6]; + + coef = coef_ptr[7]; + tmp1 += coef * in1[7]; + tmp2 += coef * in2[-7]; + + coef = coef_ptr[8]; + *out1 = tmp1 + coef * in1[8]; + *out2 = tmp2 + coef * in2[-8]; +} + +// Resampling ratio: 8/11 +// input: int32_t (normalized, not saturated) :: size 11 * K +// output: int32_t (shifted 15 positions to the left, + offset 16384) :: size 8 * K +// K: number of blocks + +void WebRtcSpl_Resample44khzTo32khz(const int32_t *In, int32_t *Out, size_t K) +{ + ///////////////////////////////////////////////////////////// + // Filter operation: + // + // Perform resampling (11 input samples -> 8 output samples); + // process in sub blocks of size 11 samples. + int32_t tmp; + size_t m; + + for (m = 0; m < K; m++) + { + tmp = 1 << 14; + + // first output sample + Out[0] = ((int32_t)In[3] << 15) + tmp; + + // sum and accumulate filter coefficients and input samples + tmp += kCoefficients44To32[3][0] * In[5]; + tmp += kCoefficients44To32[3][1] * In[6]; + tmp += kCoefficients44To32[3][2] * In[7]; + tmp += kCoefficients44To32[3][3] * In[8]; + tmp += kCoefficients44To32[3][4] * In[9]; + tmp += kCoefficients44To32[3][5] * In[10]; + tmp += kCoefficients44To32[3][6] * In[11]; + tmp += kCoefficients44To32[3][7] * In[12]; + tmp += kCoefficients44To32[3][8] * In[13]; + Out[4] = tmp; + + // sum and accumulate filter coefficients and input samples + WebRtcSpl_ResampDotProduct(&In[0], &In[17], kCoefficients44To32[0], &Out[1], &Out[7]); + + // sum and accumulate filter coefficients and input samples + WebRtcSpl_ResampDotProduct(&In[2], &In[15], kCoefficients44To32[1], &Out[2], &Out[6]); + + // sum and accumulate filter coefficients and input samples + WebRtcSpl_ResampDotProduct(&In[3], &In[14], kCoefficients44To32[2], &Out[3], &Out[5]); + + // update pointers + In += 11; + Out += 8; + } +} diff --git 
a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_init.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_init.c new file mode 100644 index 000000000..c9c4e659c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_init.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* The global function contained in this file initializes SPL function + * pointers, currently only for ARM platforms. + * + * Some code came from common/rtcd.c in the WebM project. + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +#include "webrtc/system_wrappers/include/cpu_features_wrapper.h" + +/* Declare function pointers. */ +MaxAbsValueW16 WebRtcSpl_MaxAbsValueW16; +MaxAbsValueW32 WebRtcSpl_MaxAbsValueW32; +MaxValueW16 WebRtcSpl_MaxValueW16; +MaxValueW32 WebRtcSpl_MaxValueW32; +MinValueW16 WebRtcSpl_MinValueW16; +MinValueW32 WebRtcSpl_MinValueW32; +CrossCorrelation WebRtcSpl_CrossCorrelation; +DownsampleFast WebRtcSpl_DownsampleFast; +ScaleAndAddVectorsWithRound WebRtcSpl_ScaleAndAddVectorsWithRound; + +#if (!defined(WEBRTC_HAS_NEON)) && !defined(MIPS32_LE) +/* Initialize function pointers to the generic C version. 
*/ +static void InitPointersToC() { + WebRtcSpl_MaxAbsValueW16 = WebRtcSpl_MaxAbsValueW16C; + WebRtcSpl_MaxAbsValueW32 = WebRtcSpl_MaxAbsValueW32C; + WebRtcSpl_MaxValueW16 = WebRtcSpl_MaxValueW16C; + WebRtcSpl_MaxValueW32 = WebRtcSpl_MaxValueW32C; + WebRtcSpl_MinValueW16 = WebRtcSpl_MinValueW16C; + WebRtcSpl_MinValueW32 = WebRtcSpl_MinValueW32C; + WebRtcSpl_CrossCorrelation = WebRtcSpl_CrossCorrelationC; + WebRtcSpl_DownsampleFast = WebRtcSpl_DownsampleFastC; + WebRtcSpl_ScaleAndAddVectorsWithRound = + WebRtcSpl_ScaleAndAddVectorsWithRoundC; +} +#endif + +#if defined(WEBRTC_HAS_NEON) +/* Initialize function pointers to the Neon version. */ +static void InitPointersToNeon() { + WebRtcSpl_MaxAbsValueW16 = WebRtcSpl_MaxAbsValueW16Neon; + WebRtcSpl_MaxAbsValueW32 = WebRtcSpl_MaxAbsValueW32Neon; + WebRtcSpl_MaxValueW16 = WebRtcSpl_MaxValueW16Neon; + WebRtcSpl_MaxValueW32 = WebRtcSpl_MaxValueW32Neon; + WebRtcSpl_MinValueW16 = WebRtcSpl_MinValueW16Neon; + WebRtcSpl_MinValueW32 = WebRtcSpl_MinValueW32Neon; + WebRtcSpl_CrossCorrelation = WebRtcSpl_CrossCorrelationNeon; + WebRtcSpl_DownsampleFast = WebRtcSpl_DownsampleFastNeon; + WebRtcSpl_ScaleAndAddVectorsWithRound = + WebRtcSpl_ScaleAndAddVectorsWithRoundC; +} +#endif + +#if defined(MIPS32_LE) +/* Initialize function pointers to the MIPS version. 
*/ +static void InitPointersToMIPS() { + WebRtcSpl_MaxAbsValueW16 = WebRtcSpl_MaxAbsValueW16_mips; + WebRtcSpl_MaxValueW16 = WebRtcSpl_MaxValueW16_mips; + WebRtcSpl_MaxValueW32 = WebRtcSpl_MaxValueW32_mips; + WebRtcSpl_MinValueW16 = WebRtcSpl_MinValueW16_mips; + WebRtcSpl_MinValueW32 = WebRtcSpl_MinValueW32_mips; + WebRtcSpl_CrossCorrelation = WebRtcSpl_CrossCorrelation_mips; + WebRtcSpl_DownsampleFast = WebRtcSpl_DownsampleFast_mips; +#if defined(MIPS_DSP_R1_LE) + WebRtcSpl_MaxAbsValueW32 = WebRtcSpl_MaxAbsValueW32_mips; + WebRtcSpl_ScaleAndAddVectorsWithRound = + WebRtcSpl_ScaleAndAddVectorsWithRound_mips; +#else + WebRtcSpl_MaxAbsValueW32 = WebRtcSpl_MaxAbsValueW32C; + WebRtcSpl_ScaleAndAddVectorsWithRound = + WebRtcSpl_ScaleAndAddVectorsWithRoundC; +#endif +} +#endif + +static void InitFunctionPointers(void) { +#if defined(WEBRTC_HAS_NEON) + InitPointersToNeon(); +#elif defined(MIPS32_LE) + InitPointersToMIPS(); +#else + InitPointersToC(); +#endif /* WEBRTC_HAS_NEON */ +} + +#if defined(WEBRTC_POSIX) +#include + +static void once(void (*func)(void)) { + static pthread_once_t lock = PTHREAD_ONCE_INIT; + pthread_once(&lock, func); +} + +#elif defined(_WIN32) +#include + +static void once(void (*func)(void)) { + /* Didn't use InitializeCriticalSection() since there's no race-free context + * in which to execute it. + * + * TODO(kma): Change to different implementation (e.g. + * InterlockedCompareExchangePointer) to avoid issues similar to + * http://code.google.com/p/webm/issues/detail?id=467. + */ + static CRITICAL_SECTION lock = {(void *)((size_t)-1), -1, 0, 0, 0, 0}; + static int done = 0; + + EnterCriticalSection(&lock); + if (!done) { + func(); + done = 1; + } + LeaveCriticalSection(&lock); +} + +/* There's no fallback version as an #else block here to ensure thread safety. + * In case of neither pthread for WEBRTC_POSIX nor _WIN32 is present, build + * system should pick it up. 
+ */ +#endif /* WEBRTC_POSIX */ + +void WebRtcSpl_Init() { + once(InitFunctionPointers); +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_inl.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_inl.c new file mode 100644 index 000000000..efa6a65f0 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_inl.c @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include "webrtc/common_audio/signal_processing/include/spl_inl.h" + +// Table used by WebRtcSpl_CountLeadingZeros32_NotBuiltin. For each uint32_t n +// that's a sequence of 0 bits followed by a sequence of 1 bits, the entry at +// index (n * 0x8c0b2891) >> 26 in this table gives the number of zero bits in +// n. +const int8_t kWebRtcSpl_CountLeadingZeros32_Table[64] = { + 32, 8, 17, -1, -1, 14, -1, -1, -1, 20, -1, -1, -1, 28, -1, 18, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 26, 25, 24, + 4, 11, 23, 31, 3, 7, 10, 16, 22, 30, -1, -1, 2, 6, 13, 9, + -1, 15, -1, 21, -1, 29, 19, -1, -1, -1, -1, -1, 1, 27, 5, 12, +}; diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt.c new file mode 100644 index 000000000..511039b65 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt.c @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_Sqrt(). + * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/base/checks.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +int32_t WebRtcSpl_SqrtLocal(int32_t in); + +int32_t WebRtcSpl_SqrtLocal(int32_t in) +{ + + int16_t x_half, t16; + int32_t A, B, x2; + + /* The following block performs: + y=in/2 + x=y-2^30 + x_half=x/2^31 + t = 1 + (x_half) - 0.5*((x_half)^2) + 0.5*((x_half)^3) - 0.625*((x_half)^4) + + 0.875*((x_half)^5) + */ + + B = in / 2; + + B = B - ((int32_t)0x40000000); // B = in/2 - 1/2 + x_half = (int16_t)(B >> 16); // x_half = x/2 = (in-1)/2 + B = B + ((int32_t)0x40000000); // B = 1 + x/2 + B = B + ((int32_t)0x40000000); // Add 0.5 twice (since 1.0 does not exist in Q31) + + x2 = ((int32_t)x_half) * ((int32_t)x_half) * 2; // A = (x/2)^2 + A = -x2; // A = -(x/2)^2 + B = B + (A >> 1); // B = 1 + x/2 - 0.5*(x/2)^2 + + A >>= 16; + A = A * A * 2; // A = (x/2)^4 + t16 = (int16_t)(A >> 16); + B += -20480 * t16 * 2; // B = B - 0.625*A + // After this, B = 1 + x/2 - 0.5*(x/2)^2 - 0.625*(x/2)^4 + + A = x_half * t16 * 2; // A = (x/2)^5 + t16 = (int16_t)(A >> 16); + B += 28672 * t16 * 2; // B = B + 0.875*A + // After this, B = 1 + x/2 - 0.5*(x/2)^2 - 0.625*(x/2)^4 + 0.875*(x/2)^5 + + t16 = (int16_t)(x2 >> 16); + A = x_half * t16 * 2; // A = x/2^3 + + B = B + (A >> 1); // B = B + 0.5*A + // After this, B = 1 + x/2 - 0.5*(x/2)^2 + 0.5*(x/2)^3 - 0.625*(x/2)^4 + 0.875*(x/2)^5 + + B = B + ((int32_t)32768); // Round off bit + + return B; +} + +int32_t WebRtcSpl_Sqrt(int32_t value) +{ + /* + 
Algorithm: + + Six term Taylor Series is used here to compute the square root of a number + y^0.5 = (1+x)^0.5 where x = y-1 + = 1+(x/2)-0.5*((x/2)^2+0.5*((x/2)^3-0.625*((x/2)^4+0.875*((x/2)^5) + 0.5 <= x < 1 + + Example of how the algorithm works, with ut=sqrt(in), and + with in=73632 and ut=271 (even shift value case): + + in=73632 + y= in/131072 + x=y-1 + t = 1 + (x/2) - 0.5*((x/2)^2) + 0.5*((x/2)^3) - 0.625*((x/2)^4) + 0.875*((x/2)^5) + ut=t*(1/sqrt(2))*512 + + or: + + in=73632 + in2=73632*2^14 + y= in2/2^31 + x=y-1 + t = 1 + (x/2) - 0.5*((x/2)^2) + 0.5*((x/2)^3) - 0.625*((x/2)^4) + 0.875*((x/2)^5) + ut=t*(1/sqrt(2)) + ut2=ut*2^9 + + which gives: + + in = 73632 + in2 = 1206386688 + y = 0.56176757812500 + x = -0.43823242187500 + t = 0.74973506527313 + ut = 0.53014274874797 + ut2 = 2.714330873589594e+002 + + or: + + in=73632 + in2=73632*2^14 + y=in2/2 + x=y-2^30 + x_half=x/2^31 + t = 1 + (x_half) - 0.5*((x_half)^2) + 0.5*((x_half)^3) - 0.625*((x_half)^4) + + 0.875*((x_half)^5) + ut=t*(1/sqrt(2)) + ut2=ut*2^9 + + which gives: + + in = 73632 + in2 = 1206386688 + y = 603193344 + x = -470548480 + x_half = -0.21911621093750 + t = 0.74973506527313 + ut = 0.53014274874797 + ut2 = 2.714330873589594e+002 + + */ + + int16_t x_norm, nshift, t16, sh; + int32_t A; + + int16_t k_sqrt_2 = 23170; // 1/sqrt2 (==5a82) + + A = value; + + // The convention in this function is to calculate sqrt(abs(A)). Negate the + // input if it is negative. + if (A < 0) { + if (A == WEBRTC_SPL_WORD32_MIN) { + // This number cannot be held in an int32_t after negating. + // Map it to the maximum positive value. 
+ A = WEBRTC_SPL_WORD32_MAX; + } else { + A = -A; + } + } else if (A == 0) { + return 0; // sqrt(0) = 0 + } + + sh = WebRtcSpl_NormW32(A); // # shifts to normalize A + A = WEBRTC_SPL_LSHIFT_W32(A, sh); // Normalize A + if (A < (WEBRTC_SPL_WORD32_MAX - 32767)) + { + A = A + ((int32_t)32768); // Round off bit + } else + { + A = WEBRTC_SPL_WORD32_MAX; + } + + x_norm = (int16_t)(A >> 16); // x_norm = AH + + nshift = (sh / 2); + RTC_DCHECK_GE(nshift, 0); + + A = (int32_t)WEBRTC_SPL_LSHIFT_W32((int32_t)x_norm, 16); + A = WEBRTC_SPL_ABS_W32(A); // A = abs(x_norm<<16) + A = WebRtcSpl_SqrtLocal(A); // A = sqrt(A) + + if (2 * nshift == sh) { + // Even shift value case + + t16 = (int16_t)(A >> 16); // t16 = AH + + A = k_sqrt_2 * t16 * 2; // A = 1/sqrt(2)*t16 + A = A + ((int32_t)32768); // Round off + A = A & ((int32_t)0x7fff0000); // Round off + + A >>= 15; // A = A>>16 + + } else + { + A >>= 16; // A = A>>16 + } + + A = A & ((int32_t)0x0000ffff); + A >>= nshift; // De-normalize the result. + + return A; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor.c new file mode 100644 index 000000000..370307a08 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor.c @@ -0,0 +1,77 @@ +/* + * Written by Wilco Dijkstra, 1996. The following email exchange establishes the + * license. + * + * From: Wilco Dijkstra + * Date: Fri, Jun 24, 2011 at 3:20 AM + * Subject: Re: sqrt routine + * To: Kevin Ma + * Hi Kevin, + * Thanks for asking. Those routines are public domain (originally posted to + * comp.sys.arm a long time ago), so you can use them freely for any purpose. 
+ * Cheers, + * Wilco + * + * ----- Original Message ----- + * From: "Kevin Ma" + * To: + * Sent: Thursday, June 23, 2011 11:44 PM + * Subject: Fwd: sqrt routine + * Hi Wilco, + * I saw your sqrt routine from several web sites, including + * http://www.finesse.demon.co.uk/steven/sqrt.html. + * Just wonder if there's any copyright information with your Successive + * approximation routines, or if I can freely use it for any purpose. + * Thanks. + * Kevin + */ + +// Minor modifications in code style for WebRTC, 2012. + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +/* + * Algorithm: + * Successive approximation of the equation (root + delta) ^ 2 = N + * until delta < 1. If delta < 1 we have the integer part of SQRT (N). + * Use delta = 2^i for i = 15 .. 0. + * + * Output precision is 16 bits. Note for large input values (close to + * 0x7FFFFFFF), bit 15 (the highest bit of the low 16-bit half word) + * contains the MSB information (a non-sign value). Do with caution + * if you need to cast the output to int16_t type. + * + * If the input value is negative, it returns 0. 
+ */ + +#define WEBRTC_SPL_SQRT_ITER(N) \ + try1 = root + (1 << (N)); \ + if (value >= try1 << (N)) \ + { \ + value -= try1 << (N); \ + root |= 2 << (N); \ + } + +int32_t WebRtcSpl_SqrtFloor(int32_t value) +{ + int32_t root = 0, try1; + + WEBRTC_SPL_SQRT_ITER (15); + WEBRTC_SPL_SQRT_ITER (14); + WEBRTC_SPL_SQRT_ITER (13); + WEBRTC_SPL_SQRT_ITER (12); + WEBRTC_SPL_SQRT_ITER (11); + WEBRTC_SPL_SQRT_ITER (10); + WEBRTC_SPL_SQRT_ITER ( 9); + WEBRTC_SPL_SQRT_ITER ( 8); + WEBRTC_SPL_SQRT_ITER ( 7); + WEBRTC_SPL_SQRT_ITER ( 6); + WEBRTC_SPL_SQRT_ITER ( 5); + WEBRTC_SPL_SQRT_ITER ( 4); + WEBRTC_SPL_SQRT_ITER ( 3); + WEBRTC_SPL_SQRT_ITER ( 2); + WEBRTC_SPL_SQRT_ITER ( 1); + WEBRTC_SPL_SQRT_ITER ( 0); + + return root >> 1; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor_arm.S b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor_arm.S new file mode 100644 index 000000000..72cd2d9a0 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/spl_sqrt_floor_arm.S @@ -0,0 +1,110 @@ +@ +@ Written by Wilco Dijkstra, 1996. The following email exchange establishes the +@ license. +@ +@ From: Wilco Dijkstra +@ Date: Fri, Jun 24, 2011 at 3:20 AM +@ Subject: Re: sqrt routine +@ To: Kevin Ma +@ Hi Kevin, +@ Thanks for asking. Those routines are public domain (originally posted to +@ comp.sys.arm a long time ago), so you can use them freely for any purpose. +@ Cheers, +@ Wilco +@ +@ ----- Original Message ----- +@ From: "Kevin Ma" +@ To: +@ Sent: Thursday, June 23, 2011 11:44 PM +@ Subject: Fwd: sqrt routine +@ Hi Wilco, +@ I saw your sqrt routine from several web sites, including +@ http://www.finesse.demon.co.uk/steven/sqrt.html. +@ Just wonder if there's any copyright information with your Successive +@ approximation routines, or if I can freely use it for any purpose. +@ Thanks. 
+@ Kevin + +@ Minor modifications in code style for WebRTC, 2012. +@ Output is bit-exact with the reference C code in spl_sqrt_floor.c. + +@ Input : r0 32 bit unsigned integer +@ Output: r0 = INT (SQRT (r0)), precision is 16 bits +@ Registers touched: r1, r2 + +#include "webrtc/system_wrappers/include/asm_defines.h" + +GLOBAL_FUNCTION WebRtcSpl_SqrtFloor +.align 2 +DEFINE_FUNCTION WebRtcSpl_SqrtFloor + mov r1, #3 << 30 + mov r2, #1 << 30 + + @ unroll for i = 0 .. 15 + + cmp r0, r2, ror #2 * 0 + subhs r0, r0, r2, ror #2 * 0 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 1 + subhs r0, r0, r2, ror #2 * 1 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 2 + subhs r0, r0, r2, ror #2 * 2 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 3 + subhs r0, r0, r2, ror #2 * 3 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 4 + subhs r0, r0, r2, ror #2 * 4 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 5 + subhs r0, r0, r2, ror #2 * 5 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 6 + subhs r0, r0, r2, ror #2 * 6 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 7 + subhs r0, r0, r2, ror #2 * 7 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 8 + subhs r0, r0, r2, ror #2 * 8 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 9 + subhs r0, r0, r2, ror #2 * 9 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 10 + subhs r0, r0, r2, ror #2 * 10 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 11 + subhs r0, r0, r2, ror #2 * 11 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 12 + subhs r0, r0, r2, ror #2 * 12 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 13 + subhs r0, r0, r2, ror #2 * 13 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 14 + subhs r0, r0, r2, ror #2 * 14 + adc r2, r1, r2, lsl #1 + + cmp r0, r2, ror #2 * 15 + subhs r0, r0, r2, ror #2 * 15 + adc r2, r1, r2, lsl #1 + + bic r0, r2, #3 << 30 @ for rounding add: cmp r0, r2 adc r2, #1 + bx lr diff --git 
a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/splitting_filter_impl.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/splitting_filter_impl.c new file mode 100644 index 000000000..1400623a7 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/splitting_filter_impl.c @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* + * This file contains the splitting filter functions. + * + */ + +#include "webrtc/base/checks.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +// Maximum number of samples in a low/high-band frame. +enum +{ + kMaxBandFrameLength = 320 // 10 ms at 64 kHz. +}; + +// QMF filter coefficients in Q16. +static const uint16_t WebRtcSpl_kAllPassFilter1[3] = {6418, 36982, 57261}; +static const uint16_t WebRtcSpl_kAllPassFilter2[3] = {21333, 49062, 63010}; + +/////////////////////////////////////////////////////////////////////////////////////////////// +// WebRtcSpl_AllPassQMF(...) +// +// Allpass filter used by the analysis and synthesis parts of the QMF filter. +// +// Input: +// - in_data : Input data sequence (Q10) +// - data_length : Length of data sequence (>2) +// - filter_coefficients : Filter coefficients (length 3, Q16) +// +// Input & Output: +// - filter_state : Filter state (length 6, Q10). 
+// +// Output: +// - out_data : Output data sequence (Q10), length equal to +// |data_length| +// + +void WebRtcSpl_AllPassQMF(int32_t* in_data, size_t data_length, + int32_t* out_data, const uint16_t* filter_coefficients, + int32_t* filter_state) +{ + // The procedure is to filter the input with three first order all pass filters + // (cascade operations). + // + // a_3 + q^-1 a_2 + q^-1 a_1 + q^-1 + // y[n] = ----------- ----------- ----------- x[n] + // 1 + a_3q^-1 1 + a_2q^-1 1 + a_1q^-1 + // + // The input vector |filter_coefficients| includes these three filter coefficients. + // The filter state contains the in_data state, in_data[-1], followed by + // the out_data state, out_data[-1]. This is repeated for each cascade. + // The first cascade filter will filter the |in_data| and store the output in + // |out_data|. The second will the take the |out_data| as input and make an + // intermediate storage in |in_data|, to save memory. The third, and final, cascade + // filter operation takes the |in_data| (which is the output from the previous cascade + // filter) and store the output in |out_data|. + // Note that the input vector values are changed during the process. + size_t k; + int32_t diff; + // First all-pass cascade; filter from in_data to out_data. + + // Let y_i[n] indicate the output of cascade filter i (with filter coefficient a_i) at + // vector position n. Then the final output will be y[n] = y_3[n] + + // First loop, use the states stored in memory. + // "diff" should be safe from wrap around since max values are 2^25 + // diff = (x[0] - y_1[-1]) + diff = WebRtcSpl_SubSatW32(in_data[0], filter_state[1]); + // y_1[0] = x[-1] + a_1 * (x[0] - y_1[-1]) + out_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[0], diff, filter_state[0]); + + // For the remaining loops, use previous values. 
+ for (k = 1; k < data_length; k++) + { + // diff = (x[n] - y_1[n-1]) + diff = WebRtcSpl_SubSatW32(in_data[k], out_data[k - 1]); + // y_1[n] = x[n-1] + a_1 * (x[n] - y_1[n-1]) + out_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[0], diff, in_data[k - 1]); + } + + // Update states. + filter_state[0] = in_data[data_length - 1]; // x[N-1], becomes x[-1] next time + filter_state[1] = out_data[data_length - 1]; // y_1[N-1], becomes y_1[-1] next time + + // Second all-pass cascade; filter from out_data to in_data. + // diff = (y_1[0] - y_2[-1]) + diff = WebRtcSpl_SubSatW32(out_data[0], filter_state[3]); + // y_2[0] = y_1[-1] + a_2 * (y_1[0] - y_2[-1]) + in_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[1], diff, filter_state[2]); + for (k = 1; k < data_length; k++) + { + // diff = (y_1[n] - y_2[n-1]) + diff = WebRtcSpl_SubSatW32(out_data[k], in_data[k - 1]); + // y_2[0] = y_1[-1] + a_2 * (y_1[0] - y_2[-1]) + in_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[1], diff, out_data[k-1]); + } + + filter_state[2] = out_data[data_length - 1]; // y_1[N-1], becomes y_1[-1] next time + filter_state[3] = in_data[data_length - 1]; // y_2[N-1], becomes y_2[-1] next time + + // Third all-pass cascade; filter from in_data to out_data. 
+ // diff = (y_2[0] - y[-1]) + diff = WebRtcSpl_SubSatW32(in_data[0], filter_state[5]); + // y[0] = y_2[-1] + a_3 * (y_2[0] - y[-1]) + out_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[2], diff, filter_state[4]); + for (k = 1; k < data_length; k++) + { + // diff = (y_2[n] - y[n-1]) + diff = WebRtcSpl_SubSatW32(in_data[k], out_data[k - 1]); + // y[n] = y_2[n-1] + a_3 * (y_2[n] - y[n-1]) + out_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[2], diff, in_data[k-1]); + } + filter_state[4] = in_data[data_length - 1]; // y_2[N-1], becomes y_2[-1] next time + filter_state[5] = out_data[data_length - 1]; // y[N-1], becomes y[-1] next time +} + +void WebRtcSpl_AnalysisQMF(const int16_t* in_data, size_t in_data_length, + int16_t* low_band, int16_t* high_band, + int32_t* filter_state1, int32_t* filter_state2) +{ + size_t i; + int16_t k; + int32_t tmp; + int32_t half_in1[kMaxBandFrameLength]; + int32_t half_in2[kMaxBandFrameLength]; + int32_t filter1[kMaxBandFrameLength]; + int32_t filter2[kMaxBandFrameLength]; + const size_t band_length = in_data_length / 2; + RTC_DCHECK_EQ(0, in_data_length % 2); + RTC_DCHECK_LE(band_length, kMaxBandFrameLength); + + // Split even and odd samples. Also shift them to Q10. + for (i = 0, k = 0; i < band_length; i++, k += 2) + { + half_in2[i] = WEBRTC_SPL_LSHIFT_W32((int32_t)in_data[k], 10); + half_in1[i] = WEBRTC_SPL_LSHIFT_W32((int32_t)in_data[k + 1], 10); + } + + // All pass filter even and odd samples, independently. + WebRtcSpl_AllPassQMF(half_in1, band_length, filter1, + WebRtcSpl_kAllPassFilter1, filter_state1); + WebRtcSpl_AllPassQMF(half_in2, band_length, filter2, + WebRtcSpl_kAllPassFilter2, filter_state2); + + // Take the sum and difference of filtered version of odd and even + // branches to get upper & lower band. 
+ for (i = 0; i < band_length; i++) + { + tmp = (filter1[i] + filter2[i] + 1024) >> 11; + low_band[i] = WebRtcSpl_SatW32ToW16(tmp); + + tmp = (filter1[i] - filter2[i] + 1024) >> 11; + high_band[i] = WebRtcSpl_SatW32ToW16(tmp); + } +} + +void WebRtcSpl_SynthesisQMF(const int16_t* low_band, const int16_t* high_band, + size_t band_length, int16_t* out_data, + int32_t* filter_state1, int32_t* filter_state2) +{ + int32_t tmp; + int32_t half_in1[kMaxBandFrameLength]; + int32_t half_in2[kMaxBandFrameLength]; + int32_t filter1[kMaxBandFrameLength]; + int32_t filter2[kMaxBandFrameLength]; + size_t i; + int16_t k; + RTC_DCHECK_LE(band_length, kMaxBandFrameLength); + + // Obtain the sum and difference channels out of upper and lower-band channels. + // Also shift to Q10 domain. + for (i = 0; i < band_length; i++) + { + tmp = (int32_t)low_band[i] + (int32_t)high_band[i]; + half_in1[i] = tmp * (1 << 10); + tmp = (int32_t)low_band[i] - (int32_t)high_band[i]; + half_in2[i] = tmp * (1 << 10); + } + + // all-pass filter the sum and difference channels + WebRtcSpl_AllPassQMF(half_in1, band_length, filter1, + WebRtcSpl_kAllPassFilter2, filter_state1); + WebRtcSpl_AllPassQMF(half_in2, band_length, filter2, + WebRtcSpl_kAllPassFilter1, filter_state2); + + // The filtered signals are even and odd samples of the output. Combine + // them. The signals are Q10 should shift them back to Q0 and take care of + // saturation. 
+ for (i = 0, k = 0; i < band_length; i++) + { + tmp = (filter2[i] + 512) >> 10; + out_data[k++] = WebRtcSpl_SatW32ToW16(tmp); + + tmp = (filter1[i] + 512) >> 10; + out_data[k++] = WebRtcSpl_SatW32ToW16(tmp); + } + +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c new file mode 100644 index 000000000..ff78b5228 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +/* + * This file contains the function WebRtcSpl_SqrtOfOneMinusXSquared(). 
+ * The description header can be found in signal_processing_library.h + * + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +void WebRtcSpl_SqrtOfOneMinusXSquared(int16_t *xQ15, size_t vector_length, + int16_t *yQ15) +{ + int32_t sq; + size_t m; + int16_t tmp; + + for (m = 0; m < vector_length; m++) + { + tmp = xQ15[m]; + sq = tmp * tmp; // x^2 in Q30 + sq = 1073741823 - sq; // 1-x^2, where 1 ~= 0.99999999906 is 1073741823 in Q30 + sq = WebRtcSpl_Sqrt(sq); // sqrt(1-x^2) in Q15 + yQ15[m] = (int16_t)sq; + } +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/vector_scaling_operations.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/vector_scaling_operations.c new file mode 100644 index 000000000..fdefd0676 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/signal_processing/vector_scaling_operations.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + + +/* + * This file contains implementations of the functions + * WebRtcSpl_VectorBitShiftW16() + * WebRtcSpl_VectorBitShiftW32() + * WebRtcSpl_VectorBitShiftW32ToW16() + * WebRtcSpl_ScaleVector() + * WebRtcSpl_ScaleVectorWithSat() + * WebRtcSpl_ScaleAndAddVectors() + * WebRtcSpl_ScaleAndAddVectorsWithRoundC() + */ + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" + +void WebRtcSpl_VectorBitShiftW16(int16_t *res, size_t length, + const int16_t *in, int16_t right_shifts) +{ + size_t i; + + if (right_shifts > 0) + { + for (i = length; i > 0; i--) + { + (*res++) = ((*in++) >> right_shifts); + } + } else + { + for (i = length; i > 0; i--) + { + (*res++) = ((*in++) << (-right_shifts)); + } + } +} + +void WebRtcSpl_VectorBitShiftW32(int32_t *out_vector, + size_t vector_length, + const int32_t *in_vector, + int16_t right_shifts) +{ + size_t i; + + if (right_shifts > 0) + { + for (i = vector_length; i > 0; i--) + { + (*out_vector++) = ((*in_vector++) >> right_shifts); + } + } else + { + for (i = vector_length; i > 0; i--) + { + (*out_vector++) = ((*in_vector++) << (-right_shifts)); + } + } +} + +void WebRtcSpl_VectorBitShiftW32ToW16(int16_t* out, size_t length, + const int32_t* in, int right_shifts) { + size_t i; + int32_t tmp_w32; + + if (right_shifts >= 0) { + for (i = length; i > 0; i--) { + tmp_w32 = (*in++) >> right_shifts; + (*out++) = WebRtcSpl_SatW32ToW16(tmp_w32); + } + } else { + int left_shifts = -right_shifts; + for (i = length; i > 0; i--) { + tmp_w32 = (*in++) << left_shifts; + (*out++) = WebRtcSpl_SatW32ToW16(tmp_w32); + } + } +} + +void WebRtcSpl_ScaleVector(const int16_t *in_vector, int16_t *out_vector, + int16_t gain, size_t in_vector_length, + int16_t right_shifts) +{ + // Performs vector operation: out_vector = (gain*in_vector)>>right_shifts + size_t i; + const int16_t *inptr; + int16_t *outptr; + + inptr = in_vector; + outptr = out_vector; + + for (i = 0; i < in_vector_length; i++) + { + *outptr++ = 
(int16_t)((*inptr++ * gain) >> right_shifts); + } +} + +void WebRtcSpl_ScaleVectorWithSat(const int16_t *in_vector, int16_t *out_vector, + int16_t gain, size_t in_vector_length, + int16_t right_shifts) +{ + // Performs vector operation: out_vector = (gain*in_vector)>>right_shifts + size_t i; + const int16_t *inptr; + int16_t *outptr; + + inptr = in_vector; + outptr = out_vector; + + for (i = 0; i < in_vector_length; i++) { + *outptr++ = WebRtcSpl_SatW32ToW16((*inptr++ * gain) >> right_shifts); + } +} + +void WebRtcSpl_ScaleAndAddVectors(const int16_t *in1, int16_t gain1, int shift1, + const int16_t *in2, int16_t gain2, int shift2, + int16_t *out, size_t vector_length) +{ + // Performs vector operation: out = (gain1*in1)>>shift1 + (gain2*in2)>>shift2 + size_t i; + const int16_t *in1ptr; + const int16_t *in2ptr; + int16_t *outptr; + + in1ptr = in1; + in2ptr = in2; + outptr = out; + + for (i = 0; i < vector_length; i++) + { + *outptr++ = (int16_t)((gain1 * *in1ptr++) >> shift1) + + (int16_t)((gain2 * *in2ptr++) >> shift2); + } +} + +// C version of WebRtcSpl_ScaleAndAddVectorsWithRound() for generic platforms. 
+int WebRtcSpl_ScaleAndAddVectorsWithRoundC(const int16_t* in_vector1, + int16_t in_vector1_scale, + const int16_t* in_vector2, + int16_t in_vector2_scale, + int right_shifts, + int16_t* out_vector, + size_t length) { + size_t i = 0; + int round_value = (1 << right_shifts) >> 1; + + if (in_vector1 == NULL || in_vector2 == NULL || out_vector == NULL || + length == 0 || right_shifts < 0) { + return -1; + } + + for (i = 0; i < length; i++) { + out_vector[i] = (int16_t)(( + in_vector1[i] * in_vector1_scale + in_vector2[i] * in_vector2_scale + + round_value) >> right_shifts); + } + + return 0; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.cc new file mode 100644 index 000000000..2928004a0 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.cc @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/common_audio/sparse_fir_filter.h" + +#include "webrtc/base/checks.h" + +namespace webrtc { + +SparseFIRFilter::SparseFIRFilter(const float* nonzero_coeffs, + size_t num_nonzero_coeffs, + size_t sparsity, + size_t offset) + : sparsity_(sparsity), + offset_(offset), + nonzero_coeffs_(nonzero_coeffs, nonzero_coeffs + num_nonzero_coeffs), + state_(sparsity_ * (num_nonzero_coeffs - 1) + offset_, 0.f) { + RTC_CHECK_GE(num_nonzero_coeffs, 1); + RTC_CHECK_GE(sparsity, 1); +} + +SparseFIRFilter::~SparseFIRFilter() = default; + +void SparseFIRFilter::Filter(const float* in, size_t length, float* out) { + // Convolves the input signal |in| with the filter kernel |nonzero_coeffs_| + // taking into account the previous state. + for (size_t i = 0; i < length; ++i) { + out[i] = 0.f; + size_t j; + for (j = 0; i >= j * sparsity_ + offset_ && + j < nonzero_coeffs_.size(); ++j) { + out[i] += in[i - j * sparsity_ - offset_] * nonzero_coeffs_[j]; + } + for (; j < nonzero_coeffs_.size(); ++j) { + out[i] += state_[i + (nonzero_coeffs_.size() - j - 1) * sparsity_] * + nonzero_coeffs_[j]; + } + } + + // Update current state. + if (state_.size() > 0u) { + if (length >= state_.size()) { + std::memcpy(&state_[0], + &in[length - state_.size()], + state_.size() * sizeof(*in)); + } else { + std::memmove(&state_[0], + &state_[length], + (state_.size() - length) * sizeof(state_[0])); + std::memcpy(&state_[state_.size() - length], in, length * sizeof(*in)); + } + } +} + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.h new file mode 100644 index 000000000..2d406a0f1 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/sparse_fir_filter.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_COMMON_AUDIO_SPARSE_FIR_FILTER_H_ +#define WEBRTC_COMMON_AUDIO_SPARSE_FIR_FILTER_H_ + +#include +#include + +#include "webrtc/base/constructormagic.h" + +namespace webrtc { + +// A Finite Impulse Response filter implementation which takes advantage of a +// sparse structure with uniformly distributed non-zero coefficients. +class SparseFIRFilter final { + public: + // |num_nonzero_coeffs| is the number of non-zero coefficients, + // |nonzero_coeffs|. They are assumed to be uniformly distributed every + // |sparsity| samples and with an initial |offset|. The rest of the filter + // coefficients will be assumed zeros. For example, with sparsity = 3, and + // offset = 1 the filter coefficients will be: + // B = [0 coeffs[0] 0 0 coeffs[1] 0 0 coeffs[2] ... ] + // All initial state values will be zeros. + SparseFIRFilter(const float* nonzero_coeffs, + size_t num_nonzero_coeffs, + size_t sparsity, + size_t offset); + ~SparseFIRFilter(); + + // Filters the |in| data supplied. + // |out| must be previously allocated and it must be at least of |length|. 
+ void Filter(const float* in, size_t length, float* out); + + private: + const size_t sparsity_; + const size_t offset_; + const std::vector nonzero_coeffs_; + std::vector state_; + + RTC_DISALLOW_COPY_AND_ASSIGN(SparseFIRFilter); +}; + +} // namespace webrtc + +#endif // WEBRTC_COMMON_AUDIO_SPARSE_FIR_FILTER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_file.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_file.cc new file mode 100644 index 000000000..2b9098a6c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_file.cc @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/common_audio/wav_file.h" + +#include +#include +#include +#include + +#include "webrtc/base/checks.h" +#include "webrtc/base/safe_conversions.h" +#include "webrtc/common_audio/include/audio_util.h" +#include "webrtc/common_audio/wav_header.h" + +namespace webrtc { + +// We write 16-bit PCM WAV files. +static const WavFormat kWavFormat = kWavFormatPcm; +static const size_t kBytesPerSample = 2; + +// Doesn't take ownership of the file handle and won't close it. 
+class ReadableWavFile : public ReadableWav { + public: + explicit ReadableWavFile(FILE* file) : file_(file) {} + virtual size_t Read(void* buf, size_t num_bytes) { + return fread(buf, 1, num_bytes, file_); + } + + private: + FILE* file_; +}; + +std::string WavFile::FormatAsString() const { + std::ostringstream s; + s << "Sample rate: " << sample_rate() << " Hz, Channels: " << num_channels() + << ", Duration: " + << (1.f * num_samples()) / (num_channels() * sample_rate()) << " s"; + return s.str(); +} + +WavReader::WavReader(const std::string& filename) + : file_handle_(fopen(filename.c_str(), "rb")) { + RTC_CHECK(file_handle_) << "Could not open wav file for reading."; + + ReadableWavFile readable(file_handle_); + WavFormat format; + size_t bytes_per_sample; + RTC_CHECK(ReadWavHeader(&readable, &num_channels_, &sample_rate_, &format, + &bytes_per_sample, &num_samples_)); + num_samples_remaining_ = num_samples_; + RTC_CHECK_EQ(kWavFormat, format); + RTC_CHECK_EQ(kBytesPerSample, bytes_per_sample); +} + +WavReader::~WavReader() { + Close(); +} + +int WavReader::sample_rate() const { + return sample_rate_; +} + +size_t WavReader::num_channels() const { + return num_channels_; +} + +size_t WavReader::num_samples() const { + return num_samples_; +} + +size_t WavReader::ReadSamples(size_t num_samples, int16_t* samples) { +#ifndef WEBRTC_ARCH_LITTLE_ENDIAN +#error "Need to convert samples to big-endian when reading from WAV file" +#endif + // There could be metadata after the audio; ensure we don't read it. + num_samples = std::min(num_samples, num_samples_remaining_); + const size_t read = + fread(samples, sizeof(*samples), num_samples, file_handle_); + // If we didn't read what was requested, ensure we've reached the EOF. 
+ RTC_CHECK(read == num_samples || feof(file_handle_)); + RTC_CHECK_LE(read, num_samples_remaining_); + num_samples_remaining_ -= read; + return read; +} + +size_t WavReader::ReadSamples(size_t num_samples, float* samples) { + static const size_t kChunksize = 4096 / sizeof(uint16_t); + size_t read = 0; + for (size_t i = 0; i < num_samples; i += kChunksize) { + int16_t isamples[kChunksize]; + size_t chunk = std::min(kChunksize, num_samples - i); + chunk = ReadSamples(chunk, isamples); + for (size_t j = 0; j < chunk; ++j) + samples[i + j] = isamples[j]; + read += chunk; + } + return read; +} + +void WavReader::Close() { + RTC_CHECK_EQ(0, fclose(file_handle_)); + file_handle_ = NULL; +} + +WavWriter::WavWriter(const std::string& filename, int sample_rate, + size_t num_channels) + : sample_rate_(sample_rate), + num_channels_(num_channels), + num_samples_(0), + file_handle_(fopen(filename.c_str(), "wb")) { + RTC_CHECK(file_handle_) << "Could not open wav file for writing."; + RTC_CHECK(CheckWavParameters(num_channels_, sample_rate_, kWavFormat, + kBytesPerSample, num_samples_)); + + // Write a blank placeholder header, since we need to know the total number + // of samples before we can fill in the real data. 
+ static const uint8_t blank_header[kWavHeaderSize] = {0}; + RTC_CHECK_EQ(1, fwrite(blank_header, kWavHeaderSize, 1, file_handle_)); +} + +WavWriter::~WavWriter() { + Close(); +} + +int WavWriter::sample_rate() const { + return sample_rate_; +} + +size_t WavWriter::num_channels() const { + return num_channels_; +} + +size_t WavWriter::num_samples() const { + return num_samples_; +} + +void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) { +#ifndef WEBRTC_ARCH_LITTLE_ENDIAN +#error "Need to convert samples to little-endian when writing to WAV file" +#endif + const size_t written = + fwrite(samples, sizeof(*samples), num_samples, file_handle_); + RTC_CHECK_EQ(num_samples, written); + num_samples_ += written; + RTC_CHECK(num_samples_ >= written); // detect size_t overflow +} + +void WavWriter::WriteSamples(const float* samples, size_t num_samples) { + static const size_t kChunksize = 4096 / sizeof(uint16_t); + for (size_t i = 0; i < num_samples; i += kChunksize) { + int16_t isamples[kChunksize]; + const size_t chunk = std::min(kChunksize, num_samples - i); + FloatS16ToS16(samples + i, chunk, isamples); + WriteSamples(isamples, chunk); + } +} + +void WavWriter::Close() { + RTC_CHECK_EQ(0, fseek(file_handle_, 0, SEEK_SET)); + uint8_t header[kWavHeaderSize]; + WriteWavHeader(header, num_channels_, sample_rate_, kWavFormat, + kBytesPerSample, num_samples_); + RTC_CHECK_EQ(1, fwrite(header, kWavHeaderSize, 1, file_handle_)); + RTC_CHECK_EQ(0, fclose(file_handle_)); + file_handle_ = NULL; +} + +} // namespace webrtc + +rtc_WavWriter* rtc_WavOpen(const char* filename, + int sample_rate, + size_t num_channels) { + return reinterpret_cast( + new webrtc::WavWriter(filename, sample_rate, num_channels)); +} + +void rtc_WavClose(rtc_WavWriter* wf) { + delete reinterpret_cast(wf); +} + +void rtc_WavWriteSamples(rtc_WavWriter* wf, + const float* samples, + size_t num_samples) { + reinterpret_cast(wf)->WriteSamples(samples, num_samples); +} + +int 
rtc_WavSampleRate(const rtc_WavWriter* wf) { + return reinterpret_cast(wf)->sample_rate(); +} + +size_t rtc_WavNumChannels(const rtc_WavWriter* wf) { + return reinterpret_cast(wf)->num_channels(); +} + +size_t rtc_WavNumSamples(const rtc_WavWriter* wf) { + return reinterpret_cast(wf)->num_samples(); +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_file.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_file.h new file mode 100644 index 000000000..812c21daf --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_file.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_COMMON_AUDIO_WAV_FILE_H_ +#define WEBRTC_COMMON_AUDIO_WAV_FILE_H_ + +#ifdef __cplusplus + +#include +#include +#include + +#include "webrtc/base/constructormagic.h" + +namespace webrtc { + +// Interface to provide access to WAV file parameters. +class WavFile { + public: + virtual ~WavFile() {} + + virtual int sample_rate() const = 0; + virtual size_t num_channels() const = 0; + virtual size_t num_samples() const = 0; + + // Returns a human-readable string containing the audio format. + std::string FormatAsString() const; +}; + +// Simple C++ class for writing 16-bit PCM WAV files. All error handling is +// by calls to RTC_CHECK(), making it unsuitable for anything but debug code. +class WavWriter final : public WavFile { + public: + // Open a new WAV file for writing. + WavWriter(const std::string& filename, int sample_rate, size_t num_channels); + + // Close the WAV file, after writing its header. 
+ ~WavWriter() override; + + // Write additional samples to the file. Each sample is in the range + // [-32768,32767], and there must be the previously specified number of + // interleaved channels. + void WriteSamples(const float* samples, size_t num_samples); + void WriteSamples(const int16_t* samples, size_t num_samples); + + int sample_rate() const override; + size_t num_channels() const override; + size_t num_samples() const override; + + private: + void Close(); + const int sample_rate_; + const size_t num_channels_; + size_t num_samples_; // Total number of samples written to file. + FILE* file_handle_; // Output file, owned by this class + + RTC_DISALLOW_COPY_AND_ASSIGN(WavWriter); +}; + +// Follows the conventions of WavWriter. +class WavReader final : public WavFile { + public: + // Opens an existing WAV file for reading. + explicit WavReader(const std::string& filename); + + // Close the WAV file. + ~WavReader() override; + + // Returns the number of samples read. If this is less than requested, + // verifies that the end of the file was reached. + size_t ReadSamples(size_t num_samples, float* samples); + size_t ReadSamples(size_t num_samples, int16_t* samples); + + int sample_rate() const override; + size_t num_channels() const override; + size_t num_samples() const override; + + private: + void Close(); + int sample_rate_; + size_t num_channels_; + size_t num_samples_; // Total number of samples in the file. + size_t num_samples_remaining_; + FILE* file_handle_; // Input file, owned by this class. + + RTC_DISALLOW_COPY_AND_ASSIGN(WavReader); +}; + +} // namespace webrtc + +extern "C" { +#endif // __cplusplus + +// C wrappers for the WavWriter class. 
+typedef struct rtc_WavWriter rtc_WavWriter; +rtc_WavWriter* rtc_WavOpen(const char* filename, + int sample_rate, + size_t num_channels); +void rtc_WavClose(rtc_WavWriter* wf); +void rtc_WavWriteSamples(rtc_WavWriter* wf, + const float* samples, + size_t num_samples); +int rtc_WavSampleRate(const rtc_WavWriter* wf); +size_t rtc_WavNumChannels(const rtc_WavWriter* wf); +size_t rtc_WavNumSamples(const rtc_WavWriter* wf); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBRTC_COMMON_AUDIO_WAV_FILE_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_header.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_header.cc new file mode 100644 index 000000000..402ea1791 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_header.cc @@ -0,0 +1,243 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Based on the WAV file format documentation at +// https://ccrma.stanford.edu/courses/422/projects/WaveFormat/ and +// http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html + +#include "webrtc/common_audio/wav_header.h" + +#include +#include +#include +#include + +#include "webrtc/base/checks.h" +#include "webrtc/common_audio/include/audio_util.h" + +namespace webrtc { +namespace { + +struct ChunkHeader { + uint32_t ID; + uint32_t Size; +}; +static_assert(sizeof(ChunkHeader) == 8, "ChunkHeader size"); + +// We can't nest this definition in WavHeader, because VS2013 gives an error +// on sizeof(WavHeader::fmt): "error C2070: 'unknown': illegal sizeof operand". 
+struct FmtSubchunk { + ChunkHeader header; + uint16_t AudioFormat; + uint16_t NumChannels; + uint32_t SampleRate; + uint32_t ByteRate; + uint16_t BlockAlign; + uint16_t BitsPerSample; +}; +static_assert(sizeof(FmtSubchunk) == 24, "FmtSubchunk size"); +const uint32_t kFmtSubchunkSize = sizeof(FmtSubchunk) - sizeof(ChunkHeader); + +struct WavHeader { + struct { + ChunkHeader header; + uint32_t Format; + } riff; + FmtSubchunk fmt; + struct { + ChunkHeader header; + } data; +}; +static_assert(sizeof(WavHeader) == kWavHeaderSize, "no padding in header"); + +} // namespace + +bool CheckWavParameters(size_t num_channels, + int sample_rate, + WavFormat format, + size_t bytes_per_sample, + size_t num_samples) { + // num_channels, sample_rate, and bytes_per_sample must be positive, must fit + // in their respective fields, and their product must fit in the 32-bit + // ByteRate field. + if (num_channels == 0 || sample_rate <= 0 || bytes_per_sample == 0) + return false; + if (static_cast(sample_rate) > std::numeric_limits::max()) + return false; + if (num_channels > std::numeric_limits::max()) + return false; + if (static_cast(bytes_per_sample) * 8 > + std::numeric_limits::max()) + return false; + if (static_cast(sample_rate) * num_channels * bytes_per_sample > + std::numeric_limits::max()) + return false; + + // format and bytes_per_sample must agree. + switch (format) { + case kWavFormatPcm: + // Other values may be OK, but for now we're conservative: + if (bytes_per_sample != 1 && bytes_per_sample != 2) + return false; + break; + case kWavFormatALaw: + case kWavFormatMuLaw: + if (bytes_per_sample != 1) + return false; + break; + default: + return false; + } + + // The number of bytes in the file, not counting the first ChunkHeader, must + // be less than 2^32; otherwise, the ChunkSize field overflows. 
+ const size_t header_size = kWavHeaderSize - sizeof(ChunkHeader); + const size_t max_samples = + (std::numeric_limits::max() - header_size) / bytes_per_sample; + if (num_samples > max_samples) + return false; + + // Each channel must have the same number of samples. + if (num_samples % num_channels != 0) + return false; + + return true; +} + +#ifdef WEBRTC_ARCH_LITTLE_ENDIAN +static inline void WriteLE16(uint16_t* f, uint16_t x) { *f = x; } +static inline void WriteLE32(uint32_t* f, uint32_t x) { *f = x; } +static inline void WriteFourCC(uint32_t* f, char a, char b, char c, char d) { + *f = static_cast(a) + | static_cast(b) << 8 + | static_cast(c) << 16 + | static_cast(d) << 24; +} + +static inline uint16_t ReadLE16(uint16_t x) { return x; } +static inline uint32_t ReadLE32(uint32_t x) { return x; } +static inline std::string ReadFourCC(uint32_t x) { + return std::string(reinterpret_cast(&x), 4); +} +#else +#error "Write be-to-le conversion functions" +#endif + +static inline uint32_t RiffChunkSize(size_t bytes_in_payload) { + return static_cast( + bytes_in_payload + kWavHeaderSize - sizeof(ChunkHeader)); +} + +static inline uint32_t ByteRate(size_t num_channels, int sample_rate, + size_t bytes_per_sample) { + return static_cast(num_channels * sample_rate * bytes_per_sample); +} + +static inline uint16_t BlockAlign(size_t num_channels, + size_t bytes_per_sample) { + return static_cast(num_channels * bytes_per_sample); +} + +void WriteWavHeader(uint8_t* buf, + size_t num_channels, + int sample_rate, + WavFormat format, + size_t bytes_per_sample, + size_t num_samples) { + RTC_CHECK(CheckWavParameters(num_channels, sample_rate, format, + bytes_per_sample, num_samples)); + + WavHeader header; + const size_t bytes_in_payload = bytes_per_sample * num_samples; + + WriteFourCC(&header.riff.header.ID, 'R', 'I', 'F', 'F'); + WriteLE32(&header.riff.header.Size, RiffChunkSize(bytes_in_payload)); + WriteFourCC(&header.riff.Format, 'W', 'A', 'V', 'E'); + + 
WriteFourCC(&header.fmt.header.ID, 'f', 'm', 't', ' '); + WriteLE32(&header.fmt.header.Size, kFmtSubchunkSize); + WriteLE16(&header.fmt.AudioFormat, format); + WriteLE16(&header.fmt.NumChannels, static_cast(num_channels)); + WriteLE32(&header.fmt.SampleRate, sample_rate); + WriteLE32(&header.fmt.ByteRate, ByteRate(num_channels, sample_rate, + bytes_per_sample)); + WriteLE16(&header.fmt.BlockAlign, BlockAlign(num_channels, bytes_per_sample)); + WriteLE16(&header.fmt.BitsPerSample, + static_cast(8 * bytes_per_sample)); + + WriteFourCC(&header.data.header.ID, 'd', 'a', 't', 'a'); + WriteLE32(&header.data.header.Size, static_cast(bytes_in_payload)); + + // Do an extra copy rather than writing everything to buf directly, since buf + // might not be correctly aligned. + memcpy(buf, &header, kWavHeaderSize); +} + +bool ReadWavHeader(ReadableWav* readable, + size_t* num_channels, + int* sample_rate, + WavFormat* format, + size_t* bytes_per_sample, + size_t* num_samples) { + WavHeader header; + if (readable->Read(&header, kWavHeaderSize - sizeof(header.data)) != + kWavHeaderSize - sizeof(header.data)) + return false; + + const uint32_t fmt_size = ReadLE32(header.fmt.header.Size); + if (fmt_size != kFmtSubchunkSize) { + // There is an optional two-byte extension field permitted to be present + // with PCM, but which must be zero. + int16_t ext_size; + if (kFmtSubchunkSize + sizeof(ext_size) != fmt_size) + return false; + if (readable->Read(&ext_size, sizeof(ext_size)) != sizeof(ext_size)) + return false; + if (ext_size != 0) + return false; + } + if (readable->Read(&header.data, sizeof(header.data)) != sizeof(header.data)) + return false; + + // Parse needed fields. 
+ *format = static_cast(ReadLE16(header.fmt.AudioFormat)); + *num_channels = ReadLE16(header.fmt.NumChannels); + *sample_rate = ReadLE32(header.fmt.SampleRate); + *bytes_per_sample = ReadLE16(header.fmt.BitsPerSample) / 8; + const size_t bytes_in_payload = ReadLE32(header.data.header.Size); + if (*bytes_per_sample == 0) + return false; + *num_samples = bytes_in_payload / *bytes_per_sample; + + // Sanity check remaining fields. + if (ReadFourCC(header.riff.header.ID) != "RIFF") + return false; + if (ReadFourCC(header.riff.Format) != "WAVE") + return false; + if (ReadFourCC(header.fmt.header.ID) != "fmt ") + return false; + if (ReadFourCC(header.data.header.ID) != "data") + return false; + + if (ReadLE32(header.riff.header.Size) < RiffChunkSize(bytes_in_payload)) + return false; + if (ReadLE32(header.fmt.ByteRate) != + ByteRate(*num_channels, *sample_rate, *bytes_per_sample)) + return false; + if (ReadLE16(header.fmt.BlockAlign) != + BlockAlign(*num_channels, *bytes_per_sample)) + return false; + + return CheckWavParameters(*num_channels, *sample_rate, *format, + *bytes_per_sample, *num_samples); +} + + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_header.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_header.h new file mode 100644 index 000000000..684430694 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/common_audio/wav_header.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_COMMON_AUDIO_WAV_HEADER_H_ +#define WEBRTC_COMMON_AUDIO_WAV_HEADER_H_ + +#include +#include + +namespace webrtc { + +static const size_t kWavHeaderSize = 44; + +class ReadableWav { + public: + // Returns the number of bytes read. + size_t virtual Read(void* buf, size_t num_bytes) = 0; + virtual ~ReadableWav() {} +}; + +enum WavFormat { + kWavFormatPcm = 1, // PCM, each sample of size bytes_per_sample + kWavFormatALaw = 6, // 8-bit ITU-T G.711 A-law + kWavFormatMuLaw = 7, // 8-bit ITU-T G.711 mu-law +}; + +// Return true if the given parameters will make a well-formed WAV header. +bool CheckWavParameters(size_t num_channels, + int sample_rate, + WavFormat format, + size_t bytes_per_sample, + size_t num_samples); + +// Write a kWavHeaderSize bytes long WAV header to buf. The payload that +// follows the header is supposed to have the specified number of interleaved +// channels and contain the specified total number of samples of the specified +// type. CHECKs the input parameters for validity. +void WriteWavHeader(uint8_t* buf, + size_t num_channels, + int sample_rate, + WavFormat format, + size_t bytes_per_sample, + size_t num_samples); + +// Read a WAV header from an implemented ReadableWav and parse the values into +// the provided output parameters. ReadableWav is used because the header can +// be variably sized. Returns false if the header is invalid. 
+bool ReadWavHeader(ReadableWav* readable, + size_t* num_channels, + int* sample_rate, + WavFormat* format, + size_t* bytes_per_sample, + size_t* num_samples); + +} // namespace webrtc + +#endif // WEBRTC_COMMON_AUDIO_WAV_HEADER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_common.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_common.h new file mode 100644 index 000000000..0e3cddeb4 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_common.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_COMMON_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_COMMON_H_ + +#include "webrtc/typedefs.h" + +#ifdef _MSC_VER /* visual c++ */ +#define ALIGN16_BEG __declspec(align(16)) +#define ALIGN16_END +#else /* gcc or icc */ +#define ALIGN16_BEG +#define ALIGN16_END __attribute__((aligned(16))) +#endif + +#ifdef __cplusplus +namespace webrtc { +#endif + +extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_sqrtHanning[65]; +extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_weightCurve[65]; +extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_overDriveCurve[65]; +extern const float WebRtcAec_kExtendedSmoothingCoefficients[2][2]; +extern const float WebRtcAec_kNormalSmoothingCoefficients[2][2]; +extern const float WebRtcAec_kMinFarendPSD; + +#ifdef __cplusplus +} // namespace webrtc +#endif + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_COMMON_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.cc new file mode 100644 index 000000000..eb4ddc75e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.cc @@ -0,0 +1,2047 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* + * The core AEC algorithm, which is presented with time-aligned signals. 
+ */ + +#include "webrtc/modules/audio_processing/aec/aec_core.h" + +#include +#include +#include // size_t +#include +#include + +#include "webrtc/base/checks.h" +extern "C" { +#include "webrtc/common_audio/ring_buffer.h" +} +#include "webrtc/base/checks.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +#include "webrtc/modules/audio_processing/aec/aec_common.h" +#include "webrtc/modules/audio_processing/aec/aec_core_optimized_methods.h" +#include "webrtc/modules/audio_processing/logging/apm_data_dumper.h" +#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h" +#include "webrtc/system_wrappers/include/cpu_features_wrapper.h" +//#include "webrtc/system_wrappers/include/metrics.h" +#include "webrtc/typedefs.h" + +namespace webrtc { +namespace { +enum class DelaySource { + kSystemDelay, // The delay values come from the OS. + kDelayAgnostic, // The delay values come from the DA-AEC. +}; + +constexpr int kMinDelayLogValue = -200; +constexpr int kMaxDelayLogValue = 200; +constexpr int kNumDelayLogBuckets = 100; + +void MaybeLogDelayAdjustment(int moved_ms, DelaySource source) { + if (moved_ms == 0) + return; + /*switch (source) { + case DelaySource::kSystemDelay: + RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecDelayAdjustmentMsSystemValue", + moved_ms, kMinDelayLogValue, kMaxDelayLogValue, + kNumDelayLogBuckets); + return; + case DelaySource::kDelayAgnostic: + RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecDelayAdjustmentMsAgnosticValue", + moved_ms, kMinDelayLogValue, kMaxDelayLogValue, + kNumDelayLogBuckets); + return; + }*/ +} +} // namespace + +// Buffer size (samples) +static const size_t kBufferSizeBlocks = 250; // 1 second of audio in 16 kHz. + +// Metrics +static const size_t kSubCountLen = 4; +static const size_t kCountLen = 50; +static const int kDelayMetricsAggregationWindow = 1250; // 5 seconds at 16 kHz. 
+ +// Divergence metric is based on audio level, which gets updated every +// |kSubCountLen + 1| * PART_LEN samples. Divergence metric takes the statistics +// of |kDivergentFilterFractionAggregationWindowSize| audio levels. The +// following value corresponds to 1 second at 16 kHz. +static const int kDivergentFilterFractionAggregationWindowSize = 50; + +// Quantities to control H band scaling for SWB input +static const float cnScaleHband = 0.4f; // scale for comfort noise in H band. +// Initial bin for averaging nlp gain in low band +static const int freqAvgIc = PART_LEN / 2; + +// Matlab code to produce table: +// win = sqrt(hanning(63)); win = [0 ; win(1:32)]; +// fprintf(1, '\t%.14f, %.14f, %.14f,\n', win); +ALIGN16_BEG const float ALIGN16_END WebRtcAec_sqrtHanning[65] = { + 0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f, + 0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f, + 0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f, + 0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f, + 0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f, + 0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f, + 0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f, + 0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f, + 0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f, + 0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f, + 0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f, + 0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f, + 0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f, + 0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f, + 0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f, + 0.99518472667220f, 0.99729045667869f, 
0.99879545620517f, 0.99969881869620f, + 1.00000000000000f}; + +// Matlab code to produce table: +// weightCurve = [0 ; 0.3 * sqrt(linspace(0,1,64))' + 0.1]; +// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', weightCurve); +ALIGN16_BEG const float ALIGN16_END WebRtcAec_weightCurve[65] = { + 0.0000f, 0.1000f, 0.1378f, 0.1535f, 0.1655f, 0.1756f, 0.1845f, 0.1926f, + 0.2000f, 0.2069f, 0.2134f, 0.2195f, 0.2254f, 0.2309f, 0.2363f, 0.2414f, + 0.2464f, 0.2512f, 0.2558f, 0.2604f, 0.2648f, 0.2690f, 0.2732f, 0.2773f, + 0.2813f, 0.2852f, 0.2890f, 0.2927f, 0.2964f, 0.3000f, 0.3035f, 0.3070f, + 0.3104f, 0.3138f, 0.3171f, 0.3204f, 0.3236f, 0.3268f, 0.3299f, 0.3330f, + 0.3360f, 0.3390f, 0.3420f, 0.3449f, 0.3478f, 0.3507f, 0.3535f, 0.3563f, + 0.3591f, 0.3619f, 0.3646f, 0.3673f, 0.3699f, 0.3726f, 0.3752f, 0.3777f, + 0.3803f, 0.3828f, 0.3854f, 0.3878f, 0.3903f, 0.3928f, 0.3952f, 0.3976f, + 0.4000f}; + +// Matlab code to produce table: +// overDriveCurve = [sqrt(linspace(0,1,65))' + 1]; +// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', overDriveCurve); +ALIGN16_BEG const float ALIGN16_END WebRtcAec_overDriveCurve[65] = { + 1.0000f, 1.1250f, 1.1768f, 1.2165f, 1.2500f, 1.2795f, 1.3062f, 1.3307f, + 1.3536f, 1.3750f, 1.3953f, 1.4146f, 1.4330f, 1.4507f, 1.4677f, 1.4841f, + 1.5000f, 1.5154f, 1.5303f, 1.5449f, 1.5590f, 1.5728f, 1.5863f, 1.5995f, + 1.6124f, 1.6250f, 1.6374f, 1.6495f, 1.6614f, 1.6731f, 1.6847f, 1.6960f, + 1.7071f, 1.7181f, 1.7289f, 1.7395f, 1.7500f, 1.7603f, 1.7706f, 1.7806f, + 1.7906f, 1.8004f, 1.8101f, 1.8197f, 1.8292f, 1.8385f, 1.8478f, 1.8570f, + 1.8660f, 1.8750f, 1.8839f, 1.8927f, 1.9014f, 1.9100f, 1.9186f, 1.9270f, + 1.9354f, 1.9437f, 1.9520f, 1.9601f, 1.9682f, 1.9763f, 1.9843f, 1.9922f, + 2.0000f}; + +// Delay Agnostic AEC parameters, still under development and may change. 
+static const float kDelayQualityThresholdMax = 0.07f; +static const float kDelayQualityThresholdMin = 0.01f; +static const int kInitialShiftOffset = 5; +#if !defined(WEBRTC_ANDROID) +static const int kDelayCorrectionStart = 1500; // 10 ms chunks +#endif + +// Target suppression levels for nlp modes. +// log{0.001, 0.00001, 0.00000001} +static const float kTargetSupp[3] = {-6.9f, -11.5f, -18.4f}; + +// Two sets of parameters, one for the extended filter mode. +static const float kExtendedMinOverDrive[3] = {3.0f, 6.0f, 15.0f}; +static const float kNormalMinOverDrive[3] = {1.0f, 2.0f, 5.0f}; +const float WebRtcAec_kExtendedSmoothingCoefficients[2][2] = {{0.9f, 0.1f}, + {0.92f, 0.08f}}; +const float WebRtcAec_kNormalSmoothingCoefficients[2][2] = {{0.9f, 0.1f}, + {0.93f, 0.07f}}; + +// Number of partitions forming the NLP's "preferred" bands. +enum { kPrefBandSize = 24 }; + +WebRtcAecFilterFar WebRtcAec_FilterFar; +WebRtcAecScaleErrorSignal WebRtcAec_ScaleErrorSignal; +WebRtcAecFilterAdaptation WebRtcAec_FilterAdaptation; +WebRtcAecOverdrive WebRtcAec_Overdrive; +WebRtcAecSuppress WebRtcAec_Suppress; +WebRtcAecComputeCoherence WebRtcAec_ComputeCoherence; +WebRtcAecUpdateCoherenceSpectra WebRtcAec_UpdateCoherenceSpectra; +WebRtcAecStoreAsComplex WebRtcAec_StoreAsComplex; +WebRtcAecPartitionDelay WebRtcAec_PartitionDelay; +WebRtcAecWindowData WebRtcAec_WindowData; + +__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) { + return aRe * bRe - aIm * bIm; +} + +__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) { + return aRe * bIm + aIm * bRe; +} + +// TODO(minyue): Due to a legacy bug, |framelevel| and |averagelevel| use a +// window, of which the length is 1 unit longer than indicated. Remove "+1" when +// the code is refactored. 
+PowerLevel::PowerLevel() + : framelevel(kSubCountLen + 1), + averagelevel(kCountLen + 1) { +} + +BlockBuffer::BlockBuffer() { + buffer_ = WebRtc_CreateBuffer(kBufferSizeBlocks, sizeof(float) * PART_LEN); + RTC_CHECK(buffer_); + ReInit(); +} + +BlockBuffer::~BlockBuffer() { + WebRtc_FreeBuffer(buffer_); +} + +void BlockBuffer::ReInit() { + WebRtc_InitBuffer(buffer_); +} + +void BlockBuffer::Insert(const float block[PART_LEN]) { + WebRtc_WriteBuffer(buffer_, block, 1); +} + +void BlockBuffer::ExtractExtendedBlock(float extended_block[PART_LEN2]) { + float* block_ptr = NULL; + RTC_DCHECK_LT(0, AvaliableSpace()); + + // Extract the previous block. + WebRtc_MoveReadPtr(buffer_, -1); + WebRtc_ReadBuffer(buffer_, reinterpret_cast(&block_ptr), + &extended_block[0], 1); + if (block_ptr != &extended_block[0]) { + memcpy(&extended_block[0], block_ptr, PART_LEN * sizeof(float)); + } + + // Extract the current block. + WebRtc_ReadBuffer(buffer_, reinterpret_cast(&block_ptr), + &extended_block[PART_LEN], 1); + if (block_ptr != &extended_block[PART_LEN]) { + memcpy(&extended_block[PART_LEN], block_ptr, PART_LEN * sizeof(float)); + } +} + +int BlockBuffer::AdjustSize(int buffer_size_decrease) { + return WebRtc_MoveReadPtr(buffer_, buffer_size_decrease); +} + +size_t BlockBuffer::Size() { + return static_cast(WebRtc_available_read(buffer_)); +} + +size_t BlockBuffer::AvaliableSpace() { + return WebRtc_available_write(buffer_); +} + +DivergentFilterFraction::DivergentFilterFraction() + : count_(0), + occurrence_(0), + fraction_(-1.0) { +} + +void DivergentFilterFraction::Reset() { + Clear(); + fraction_ = -1.0; +} + +void DivergentFilterFraction::AddObservation(const PowerLevel& nearlevel, + const PowerLevel& linoutlevel, + const PowerLevel& nlpoutlevel) { + const float near_level = nearlevel.framelevel.GetLatestMean(); + const float level_increase = + linoutlevel.framelevel.GetLatestMean() - near_level; + const bool output_signal_active = nlpoutlevel.framelevel.GetLatestMean() > + 
40.0 * nlpoutlevel.minlevel; + // Level increase should be, in principle, negative, when the filter + // does not diverge. Here we allow some margin (0.01 * near end level) and + // numerical error (1.0). We count divergence only when the AEC output + // signal is active. + if (output_signal_active && + level_increase > std::max(0.01 * near_level, 1.0)) + occurrence_++; + ++count_; + if (count_ == kDivergentFilterFractionAggregationWindowSize) { + fraction_ = static_cast(occurrence_) / + kDivergentFilterFractionAggregationWindowSize; + Clear(); + } +} + +float DivergentFilterFraction::GetLatestFraction() const { + return fraction_; +} + +void DivergentFilterFraction::Clear() { + count_ = 0; + occurrence_ = 0; +} + +// TODO(minyue): Moving some initialization from WebRtcAec_CreateAec() to ctor. +AecCore::AecCore(int instance_index) + : data_dumper(new ApmDataDumper(instance_index)) {} + +AecCore::~AecCore() {} + +static int CmpFloat(const void* a, const void* b) { + const float* da = (const float*)a; + const float* db = (const float*)b; + + return (*da > *db) - (*da < *db); +} + +static void FilterFar(int num_partitions, + int x_fft_buf_block_pos, + float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], + float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1], + float y_fft[2][PART_LEN1]) { + int i; + for (i = 0; i < num_partitions; i++) { + int j; + int xPos = (i + x_fft_buf_block_pos) * PART_LEN1; + int pos = i * PART_LEN1; + // Check for wrap + if (i + x_fft_buf_block_pos >= num_partitions) { + xPos -= num_partitions * (PART_LEN1); + } + + for (j = 0; j < PART_LEN1; j++) { + y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j], + h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]); + y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j], + h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]); + } + } +} + +static void ScaleErrorSignal(float mu, + float error_threshold, + float x_pow[PART_LEN1], + float ef[2][PART_LEN1]) { + int i; + float abs_ef; 
+ for (i = 0; i < (PART_LEN1); i++) { + ef[0][i] /= (x_pow[i] + 1e-10f); + ef[1][i] /= (x_pow[i] + 1e-10f); + abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]); + + if (abs_ef > error_threshold) { + abs_ef = error_threshold / (abs_ef + 1e-10f); + ef[0][i] *= abs_ef; + ef[1][i] *= abs_ef; + } + + // Stepsize factor + ef[0][i] *= mu; + ef[1][i] *= mu; + } +} + +static void FilterAdaptation( + const OouraFft& ooura_fft, + int num_partitions, + int x_fft_buf_block_pos, + float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], + float e_fft[2][PART_LEN1], + float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) { + int i, j; + float fft[PART_LEN2]; + for (i = 0; i < num_partitions; i++) { + int xPos = (i + x_fft_buf_block_pos) * (PART_LEN1); + int pos; + // Check for wrap + if (i + x_fft_buf_block_pos >= num_partitions) { + xPos -= num_partitions * PART_LEN1; + } + + pos = i * PART_LEN1; + + for (j = 0; j < PART_LEN; j++) { + fft[2 * j] = MulRe(x_fft_buf[0][xPos + j], -x_fft_buf[1][xPos + j], + e_fft[0][j], e_fft[1][j]); + fft[2 * j + 1] = MulIm(x_fft_buf[0][xPos + j], -x_fft_buf[1][xPos + j], + e_fft[0][j], e_fft[1][j]); + } + fft[1] = + MulRe(x_fft_buf[0][xPos + PART_LEN], -x_fft_buf[1][xPos + PART_LEN], + e_fft[0][PART_LEN], e_fft[1][PART_LEN]); + + ooura_fft.InverseFft(fft); + memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); + + // fft scaling + { + float scale = 2.0f / PART_LEN2; + for (j = 0; j < PART_LEN; j++) { + fft[j] *= scale; + } + } + ooura_fft.Fft(fft); + + h_fft_buf[0][pos] += fft[0]; + h_fft_buf[0][pos + PART_LEN] += fft[1]; + + for (j = 1; j < PART_LEN; j++) { + h_fft_buf[0][pos + j] += fft[2 * j]; + h_fft_buf[1][pos + j] += fft[2 * j + 1]; + } + } +} + +static void Overdrive(float overdrive_scaling, + const float hNlFb, + float hNl[PART_LEN1]) { + for (int i = 0; i < PART_LEN1; ++i) { + // Weight subbands + if (hNl[i] > hNlFb) { + hNl[i] = WebRtcAec_weightCurve[i] * hNlFb + + (1 - WebRtcAec_weightCurve[i]) * hNl[i]; + } + hNl[i] = 
powf(hNl[i], overdrive_scaling * WebRtcAec_overDriveCurve[i]); + } +} + +static void Suppress(const float hNl[PART_LEN1], float efw[2][PART_LEN1]) { + for (int i = 0; i < PART_LEN1; ++i) { + // Suppress error signal + efw[0][i] *= hNl[i]; + efw[1][i] *= hNl[i]; + + // Ooura fft returns incorrect sign on imaginary component. It matters here + // because we are making an additive change with comfort noise. + efw[1][i] *= -1; + } +} + +static int PartitionDelay(int num_partitions, + float h_fft_buf[2] + [kExtendedNumPartitions * PART_LEN1]) { + // Measures the energy in each filter partition and returns the partition with + // highest energy. + // TODO(bjornv): Spread computational cost by computing one partition per + // block? + float wfEnMax = 0; + int i; + int delay = 0; + + for (i = 0; i < num_partitions; i++) { + int j; + int pos = i * PART_LEN1; + float wfEn = 0; + for (j = 0; j < PART_LEN1; j++) { + wfEn += h_fft_buf[0][pos + j] * h_fft_buf[0][pos + j] + + h_fft_buf[1][pos + j] * h_fft_buf[1][pos + j]; + } + + if (wfEn > wfEnMax) { + wfEnMax = wfEn; + delay = i; + } + } + return delay; +} + +// Update metric with 10 * log10(numerator / denominator). +static void UpdateLogRatioMetric(Stats* metric, float numerator, + float denominator) { + RTC_DCHECK(metric); + RTC_CHECK(numerator >= 0); + RTC_CHECK(denominator >= 0); + + const float log_numerator = log10(numerator + 1e-10f); + const float log_denominator = log10(denominator + 1e-10f); + metric->instant = 10.0f * (log_numerator - log_denominator); + + // Max. + if (metric->instant > metric->max) + metric->max = metric->instant; + + // Min. + if (metric->instant < metric->min) + metric->min = metric->instant; + + // Average. + metric->counter++; + // This is to protect overflow, which should almost never happen. + RTC_CHECK_NE(0, metric->counter); + metric->sum += metric->instant; + metric->average = metric->sum / metric->counter; + + // Upper mean. 
+ if (metric->instant > metric->average) { + metric->hicounter++; + // This is to protect overflow, which should almost never happen. + RTC_CHECK_NE(0, metric->hicounter); + metric->hisum += metric->instant; + metric->himean = metric->hisum / metric->hicounter; + } +} + +// Threshold to protect against the ill-effects of a zero far-end. +const float WebRtcAec_kMinFarendPSD = 15; + +// Updates the following smoothed Power Spectral Densities (PSD): +// - sd : near-end +// - se : residual echo +// - sx : far-end +// - sde : cross-PSD of near-end and residual echo +// - sxd : cross-PSD of near-end and far-end +// +// In addition to updating the PSDs, also the filter diverge state is +// determined. +static void UpdateCoherenceSpectra(int mult, + bool extended_filter_enabled, + float efw[2][PART_LEN1], + float dfw[2][PART_LEN1], + float xfw[2][PART_LEN1], + CoherenceState* coherence_state, + short* filter_divergence_state, + int* extreme_filter_divergence) { + // Power estimate smoothing coefficients. + const float* ptrGCoh = + extended_filter_enabled + ? WebRtcAec_kExtendedSmoothingCoefficients[mult - 1] + : WebRtcAec_kNormalSmoothingCoefficients[mult - 1]; + int i; + float sdSum = 0, seSum = 0; + + for (i = 0; i < PART_LEN1; i++) { + coherence_state->sd[i] = + ptrGCoh[0] * coherence_state->sd[i] + + ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]); + coherence_state->se[i] = + ptrGCoh[0] * coherence_state->se[i] + + ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]); + // We threshold here to protect against the ill-effects of a zero farend. + // The threshold is not arbitrarily chosen, but balances protection and + // adverse interaction with the algorithm's tuning. + // TODO(bjornv): investigate further why this is so sensitive. 
+ coherence_state->sx[i] = + ptrGCoh[0] * coherence_state->sx[i] + + ptrGCoh[1] * + WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], + WebRtcAec_kMinFarendPSD); + + coherence_state->sde[i][0] = + ptrGCoh[0] * coherence_state->sde[i][0] + + ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]); + coherence_state->sde[i][1] = + ptrGCoh[0] * coherence_state->sde[i][1] + + ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]); + + coherence_state->sxd[i][0] = + ptrGCoh[0] * coherence_state->sxd[i][0] + + ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]); + coherence_state->sxd[i][1] = + ptrGCoh[0] * coherence_state->sxd[i][1] + + ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]); + + sdSum += coherence_state->sd[i]; + seSum += coherence_state->se[i]; + } + + // Divergent filter safeguard update. + *filter_divergence_state = + (*filter_divergence_state ? 1.05f : 1.0f) * seSum > sdSum; + + // Signal extreme filter divergence if the error is significantly larger + // than the nearend (13 dB). + *extreme_filter_divergence = (seSum > (19.95f * sdSum)); +} + +// Window time domain data to be used by the fft. +__inline static void WindowData(float* x_windowed, const float* x) { + int i; + for (i = 0; i < PART_LEN; i++) { + x_windowed[i] = x[i] * WebRtcAec_sqrtHanning[i]; + x_windowed[PART_LEN + i] = + x[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i]; + } +} + +// Puts fft output data into a complex valued array. 
+__inline static void StoreAsComplex(const float* data, + float data_complex[2][PART_LEN1]) { + int i; + data_complex[0][0] = data[0]; + data_complex[1][0] = 0; + for (i = 1; i < PART_LEN; i++) { + data_complex[0][i] = data[2 * i]; + data_complex[1][i] = data[2 * i + 1]; + } + data_complex[0][PART_LEN] = data[1]; + data_complex[1][PART_LEN] = 0; +} + +static void ComputeCoherence(const CoherenceState* coherence_state, + float* cohde, + float* cohxd) { + // Subband coherence + for (int i = 0; i < PART_LEN1; i++) { + cohde[i] = (coherence_state->sde[i][0] * coherence_state->sde[i][0] + + coherence_state->sde[i][1] * coherence_state->sde[i][1]) / + (coherence_state->sd[i] * coherence_state->se[i] + 1e-10f); + cohxd[i] = (coherence_state->sxd[i][0] * coherence_state->sxd[i][0] + + coherence_state->sxd[i][1] * coherence_state->sxd[i][1]) / + (coherence_state->sx[i] * coherence_state->sd[i] + 1e-10f); + } +} + +static void GetHighbandGain(const float* lambda, float* nlpGainHband) { + int i; + + *nlpGainHband = 0.0f; + for (i = freqAvgIc; i < PART_LEN1 - 1; i++) { + *nlpGainHband += lambda[i]; + } + *nlpGainHband /= static_cast(PART_LEN1 - 1 - freqAvgIc); +} + +static void GenerateComplexNoise(uint32_t* seed, float noise[2][PART_LEN1]) { + const float kPi2 = 6.28318530717959f; + int16_t randW16[PART_LEN]; + WebRtcSpl_RandUArray(randW16, PART_LEN, seed); + + noise[0][0] = 0; + noise[1][0] = 0; + for (size_t i = 1; i < PART_LEN1; i++) { + float tmp = kPi2 * randW16[i - 1] / 32768.f; + noise[0][i] = cosf(tmp); + noise[1][i] = -sinf(tmp); + } + noise[1][PART_LEN] = 0; +} + +static void ComfortNoise(bool generate_high_frequency_noise, + uint32_t* seed, + float e_fft[2][PART_LEN1], + float high_frequency_comfort_noise[2][PART_LEN1], + const float* noise_spectrum, + const float* suppressor_gain) { + float complex_noise[2][PART_LEN1]; + + GenerateComplexNoise(seed, complex_noise); + + // Shape, scale and add comfort noise. 
+ for (int i = 1; i < PART_LEN1; ++i) { + float noise_scaling = + sqrtf(WEBRTC_SPL_MAX(1 - suppressor_gain[i] * suppressor_gain[i], 0)) * + sqrtf(noise_spectrum[i]); + e_fft[0][i] += noise_scaling * complex_noise[0][i]; + e_fft[1][i] += noise_scaling * complex_noise[1][i]; + } + + // Form comfort noise for higher frequencies. + if (generate_high_frequency_noise) { + // Compute average noise power and nlp gain over the second half of freq + // spectrum (i.e., 4->8khz). + int start_avg_band = PART_LEN1 / 2; + float upper_bands_noise_power = 0.f; + float upper_bands_suppressor_gain = 0.f; + for (int i = start_avg_band; i < PART_LEN1; ++i) { + upper_bands_noise_power += sqrtf(noise_spectrum[i]); + upper_bands_suppressor_gain += + sqrtf(WEBRTC_SPL_MAX(1 - suppressor_gain[i] * suppressor_gain[i], 0)); + } + upper_bands_noise_power /= (PART_LEN1 - start_avg_band); + upper_bands_suppressor_gain /= (PART_LEN1 - start_avg_band); + + // Shape, scale and add comfort noise. + float noise_scaling = upper_bands_suppressor_gain * upper_bands_noise_power; + high_frequency_comfort_noise[0][0] = 0; + high_frequency_comfort_noise[1][0] = 0; + for (int i = 1; i < PART_LEN1; ++i) { + high_frequency_comfort_noise[0][i] = noise_scaling * complex_noise[0][i]; + high_frequency_comfort_noise[1][i] = noise_scaling * complex_noise[1][i]; + } + high_frequency_comfort_noise[1][PART_LEN] = 0; + } else { + memset(high_frequency_comfort_noise, 0, + 2 * PART_LEN1 * sizeof(high_frequency_comfort_noise[0][0])); + } +} + +static void InitLevel(PowerLevel* level) { + const float kBigFloat = 1E17f; + level->averagelevel.Reset(); + level->framelevel.Reset(); + level->minlevel = kBigFloat; +} + +static void InitStats(Stats* stats) { + stats->instant = kOffsetLevel; + stats->average = kOffsetLevel; + stats->max = kOffsetLevel; + stats->min = kOffsetLevel * (-1); + stats->sum = 0; + stats->hisum = 0; + stats->himean = kOffsetLevel; + stats->counter = 0; + stats->hicounter = 0; +} + +static void 
InitMetrics(AecCore* self) { + self->stateCounter = 0; + InitLevel(&self->farlevel); + InitLevel(&self->nearlevel); + InitLevel(&self->linoutlevel); + InitLevel(&self->nlpoutlevel); + + InitStats(&self->erl); + InitStats(&self->erle); + InitStats(&self->aNlp); + InitStats(&self->rerl); + + self->divergent_filter_fraction.Reset(); +} + +static float CalculatePower(const float* in, size_t num_samples) { + size_t k; + float energy = 0.0f; + + for (k = 0; k < num_samples; ++k) { + energy += in[k] * in[k]; + } + return energy / num_samples; +} + +static void UpdateLevel(PowerLevel* level, float power) { + level->framelevel.AddValue(power); + if (level->framelevel.EndOfBlock()) { + const float new_frame_level = level->framelevel.GetLatestMean(); + if (new_frame_level > 0) { + if (new_frame_level < level->minlevel) { + level->minlevel = new_frame_level; // New minimum. + } else { + level->minlevel *= (1 + 0.001f); // Small increase. + } + } + level->averagelevel.AddValue(new_frame_level); + } +} + +static void UpdateMetrics(AecCore* aec) { + const float actThresholdNoisy = 8.0f; + const float actThresholdClean = 40.0f; + + const float noisyPower = 300000.0f; + + float actThreshold; + + if (aec->echoState) { // Check if echo is likely present + aec->stateCounter++; + } + + if (aec->linoutlevel.framelevel.EndOfBlock()) { + aec->divergent_filter_fraction.AddObservation(aec->nearlevel, + aec->linoutlevel, + aec->nlpoutlevel); + } + + if (aec->farlevel.averagelevel.EndOfBlock()) { + if (aec->farlevel.minlevel < noisyPower) { + actThreshold = actThresholdClean; + } else { + actThreshold = actThresholdNoisy; + } + + const float far_average_level = aec->farlevel.averagelevel.GetLatestMean(); + + // The last condition is to let estimation be made in active far-end + // segments only. 
+ if ((aec->stateCounter > (0.5f * kCountLen * kSubCountLen)) && + (aec->farlevel.framelevel.EndOfBlock()) && + (far_average_level > (actThreshold * aec->farlevel.minlevel))) { + + // ERL: error return loss. + const float near_average_level = + aec->nearlevel.averagelevel.GetLatestMean(); + UpdateLogRatioMetric(&aec->erl, far_average_level, near_average_level); + + // A_NLP: error return loss enhanced before the nonlinear suppression. + const float linout_average_level = + aec->linoutlevel.averagelevel.GetLatestMean(); + UpdateLogRatioMetric(&aec->aNlp, near_average_level, + linout_average_level); + + // ERLE: error return loss enhanced. + const float nlpout_average_level = + aec->nlpoutlevel.averagelevel.GetLatestMean(); + UpdateLogRatioMetric(&aec->erle, near_average_level, + nlpout_average_level); + } + + aec->stateCounter = 0; + } +} + +static void UpdateDelayMetrics(AecCore* self) { + int i = 0; + int delay_values = 0; + int median = 0; + int lookahead = WebRtc_lookahead(self->delay_estimator); + const int kMsPerBlock = PART_LEN / (self->mult * 8); + int64_t l1_norm = 0; + + if (self->num_delay_values == 0) { + // We have no new delay value data. Even though -1 is a valid |median| in + // the sense that we allow negative values, it will practically never be + // used since multiples of |kMsPerBlock| will always be returned. + // We therefore use -1 to indicate in the logs that the delay estimator was + // not able to estimate the delay. + self->delay_median = -1; + self->delay_std = -1; + self->fraction_poor_delays = -1; + return; + } + + // Start value for median count down. + delay_values = self->num_delay_values >> 1; + // Get median of delay values since last update. + for (i = 0; i < kHistorySizeBlocks; i++) { + delay_values -= self->delay_histogram[i]; + if (delay_values < 0) { + median = i; + break; + } + } + // Account for lookahead. 
+ self->delay_median = (median - lookahead) * kMsPerBlock; + + // Calculate the L1 norm, with median value as central moment. + for (i = 0; i < kHistorySizeBlocks; i++) { + l1_norm += abs(i - median) * self->delay_histogram[i]; + } + self->delay_std = + static_cast((l1_norm + self->num_delay_values / 2) / + self->num_delay_values) * kMsPerBlock; + + // Determine fraction of delays that are out of bounds, that is, either + // negative (anti-causal system) or larger than the AEC filter length. + { + int num_delays_out_of_bounds = self->num_delay_values; + const int histogram_length = + sizeof(self->delay_histogram) / sizeof(self->delay_histogram[0]); + for (i = lookahead; i < lookahead + self->num_partitions; ++i) { + if (i < histogram_length) + num_delays_out_of_bounds -= self->delay_histogram[i]; + } + self->fraction_poor_delays = + static_cast(num_delays_out_of_bounds) / self->num_delay_values; + } + + // Reset histogram. + memset(self->delay_histogram, 0, sizeof(self->delay_histogram)); + self->num_delay_values = 0; + + return; +} + +static void ScaledInverseFft(const OouraFft& ooura_fft, + float freq_data[2][PART_LEN1], + float time_data[PART_LEN2], + float scale, + int conjugate) { + int i; + const float normalization = scale / static_cast(PART_LEN2); + const float sign = (conjugate ? -1 : 1); + time_data[0] = freq_data[0][0] * normalization; + time_data[1] = freq_data[0][PART_LEN] * normalization; + for (i = 1; i < PART_LEN; i++) { + time_data[2 * i] = freq_data[0][i] * normalization; + time_data[2 * i + 1] = sign * freq_data[1][i] * normalization; + } + ooura_fft.InverseFft(time_data); +} + +static void Fft(const OouraFft& ooura_fft, + float time_data[PART_LEN2], + float freq_data[2][PART_LEN1]) { + int i; + ooura_fft.Fft(time_data); + + // Reorder fft output data. 
// (Tail of the preceding time->frequency FFT helper: reorders the in-place
// FFT output of |time_data| into split real/imaginary arrays |freq_data|.)
  freq_data[1][0] = 0;
  freq_data[1][PART_LEN] = 0;
  freq_data[0][0] = time_data[0];
  freq_data[0][PART_LEN] = time_data[1];
  for (i = 1; i < PART_LEN; i++) {
    freq_data[0][i] = time_data[2 * i];
    freq_data[1][i] = time_data[2 * i + 1];
  }
}

// Computes a far-end buffer shift (in blocks) from the signal-based delay
// estimator, used when reported system delays are poor. Returns 0 when no
// correction should be applied. Updates |previous_delay|, |shift_offset|,
// |delay_correction_count| and |delay_quality_threshold| on |self|.
static int SignalBasedDelayCorrection(AecCore* self) {
  int delay_correction = 0;
  int last_delay = -2;
  RTC_DCHECK(self);
#if !defined(WEBRTC_ANDROID)
  // On desktops, turn on correction after |kDelayCorrectionStart| frames. This
  // is to let the delay estimation get a chance to converge. Also, if the
  // playout audio volume is low (or even muted) the delay estimation can return
  // a very large delay, which will break the AEC if it is applied.
  if (self->frame_count < kDelayCorrectionStart) {
    self->data_dumper->DumpRaw("aec_da_reported_delay", 1, &last_delay);
    return 0;
  }
#endif

  // 1. Check for non-negative delay estimate. Note that the estimates we get
  //    from the delay estimation are not compensated for lookahead. Hence, a
  //    negative |last_delay| is an invalid one.
  // 2. Verify that there is a delay change. In addition, only allow a change
  //    if the delay is outside a certain region taking the AEC filter length
  //    into account.
  // TODO(bjornv): Investigate if we can remove the non-zero delay change check.
  // 3. Only allow delay correction if the delay estimation quality exceeds
  //    |delay_quality_threshold|.
  // 4. Finally, verify that the proposed |delay_correction| is feasible by
  //    comparing with the size of the far-end buffer.
  last_delay = WebRtc_last_delay(self->delay_estimator);
  self->data_dumper->DumpRaw("aec_da_reported_delay", 1, &last_delay);
  if ((last_delay >= 0) && (last_delay != self->previous_delay) &&
      (WebRtc_last_delay_quality(self->delay_estimator) >
       self->delay_quality_threshold)) {
    int delay = last_delay - WebRtc_lookahead(self->delay_estimator);
    // Allow for a slack in the actual delay, defined by a |lower_bound| and an
    // |upper_bound|. The adaptive echo cancellation filter is currently
    // |num_partitions| (of 64 samples) long. If the delay estimate is negative
    // or at least 3/4 of the filter length we open up for correction.
    const int lower_bound = 0;
    const int upper_bound = self->num_partitions * 3 / 4;
    const int do_correction = delay <= lower_bound || delay > upper_bound;
    if (do_correction == 1) {
      int available_read = self->farend_block_buffer_.Size();
      // With |shift_offset| we gradually rely on the delay estimates. For
      // positive delays we reduce the correction by |shift_offset| to lower the
      // risk of pushing the AEC into a non causal state. For negative delays
      // we rely on the values up to a rounding error, hence compensate by 1
      // element to make sure to push the delay into the causal region.
      delay_correction = -delay;
      delay_correction += delay > self->shift_offset ? self->shift_offset : 1;
      self->shift_offset--;
      self->shift_offset = (self->shift_offset <= 1 ? 1 : self->shift_offset);
      if (delay_correction > available_read - self->mult - 1) {
        // There is not enough data in the buffer to perform this shift. Hence,
        // we do not rely on the delay estimate and do nothing.
        delay_correction = 0;
      } else {
        self->previous_delay = last_delay;
        ++self->delay_correction_count;
      }
    }
  }
  // Update the |delay_quality_threshold| once we have our first delay
  // correction.
  if (self->delay_correction_count > 0) {
    float delay_quality = WebRtc_last_delay_quality(self->delay_estimator);
    delay_quality =
        (delay_quality > kDelayQualityThresholdMax ? kDelayQualityThresholdMax
                                                   : delay_quality);
    self->delay_quality_threshold =
        (delay_quality > self->delay_quality_threshold
             ? delay_quality
             : self->delay_quality_threshold);
  }
  self->data_dumper->DumpRaw("aec_da_delay_correction", 1, &delay_correction);

  return delay_correction;
}

// Accumulates into |x_pow| the per-bin power (re^2 + im^2) of the far-end FFT
// buffer summed over all |num_partitions| partitions, walking circularly from
// |latest_added_partition|.
static void RegressorPower(int num_partitions,
                           int latest_added_partition,
                           float x_fft_buf[2]
                                          [kExtendedNumPartitions * PART_LEN1],
                           float x_pow[PART_LEN1]) {
  RTC_DCHECK_LT(latest_added_partition, num_partitions);
  memset(x_pow, 0, PART_LEN1 * sizeof(x_pow[0]));

  int partition = latest_added_partition;
  int x_fft_buf_position = partition * PART_LEN1;
  for (int i = 0; i < num_partitions; ++i) {
    for (int bin = 0; bin < PART_LEN1; ++bin) {
      float re = x_fft_buf[0][x_fft_buf_position];
      float im = x_fft_buf[1][x_fft_buf_position];
      x_pow[bin] += re * re + im * im;
      ++x_fft_buf_position;
    }

    ++partition;
    if (partition == num_partitions) {
      // Wrap around to the first partition of the circular buffer.
      partition = 0;
      RTC_DCHECK_EQ(num_partitions * PART_LEN1, x_fft_buf_position);
      x_fft_buf_position = 0;
    }
  }
}

// Linear part of the AEC: produces an echo estimate from the adaptive filter,
// subtracts it from the nearend |y| and adapts the filter |h_fft_buf| from the
// resulting error. The time-domain error is written to
// |echo_subtractor_output|.
static void EchoSubtraction(const OouraFft& ooura_fft,
                            int num_partitions,
                            int extended_filter_enabled,
                            int* extreme_filter_divergence,
                            float filter_step_size,
                            float error_threshold,
                            float* x_fft,
                            int* x_fft_buf_block_pos,
                            float x_fft_buf[2]
                                           [kExtendedNumPartitions * PART_LEN1],
                            float* const y,
                            float x_pow[PART_LEN1],
                            float h_fft_buf[2]
                                           [kExtendedNumPartitions * PART_LEN1],
                            float echo_subtractor_output[PART_LEN]) {
  float s_fft[2][PART_LEN1];
  float e_extended[PART_LEN2];
  float s_extended[PART_LEN2];
  float* s;
  float e[PART_LEN];
  float e_fft[2][PART_LEN1];
  int i;

  // Update the x_fft_buf block position (circular buffer, newest block at
  // decreasing index).
  (*x_fft_buf_block_pos)--;
  if ((*x_fft_buf_block_pos) == -1) {
    *x_fft_buf_block_pos = num_partitions - 1;
  }

  // Buffer x_fft.
+ memcpy(x_fft_buf[0] + (*x_fft_buf_block_pos) * PART_LEN1, x_fft, + sizeof(float) * PART_LEN1); + memcpy(x_fft_buf[1] + (*x_fft_buf_block_pos) * PART_LEN1, &x_fft[PART_LEN1], + sizeof(float) * PART_LEN1); + + memset(s_fft, 0, sizeof(s_fft)); + + // Conditionally reset the echo subtraction filter if the filter has diverged + // significantly. + if (!extended_filter_enabled && *extreme_filter_divergence) { + memset(h_fft_buf, 0, + 2 * kExtendedNumPartitions * PART_LEN1 * sizeof(h_fft_buf[0][0])); + *extreme_filter_divergence = 0; + } + + // Produce echo estimate s_fft. + WebRtcAec_FilterFar(num_partitions, *x_fft_buf_block_pos, x_fft_buf, + h_fft_buf, s_fft); + + // Compute the time-domain echo estimate s. + ScaledInverseFft(ooura_fft, s_fft, s_extended, 2.0f, 0); + s = &s_extended[PART_LEN]; + + // Compute the time-domain echo prediction error. + for (i = 0; i < PART_LEN; ++i) { + e[i] = y[i] - s[i]; + } + + // Compute the frequency domain echo prediction error. + memset(e_extended, 0, sizeof(float) * PART_LEN); + memcpy(e_extended + PART_LEN, e, sizeof(float) * PART_LEN); + Fft(ooura_fft, e_extended, e_fft); + + // Scale error signal inversely with far power. + WebRtcAec_ScaleErrorSignal(filter_step_size, error_threshold, x_pow, e_fft); + WebRtcAec_FilterAdaptation(ooura_fft, num_partitions, *x_fft_buf_block_pos, + x_fft_buf, e_fft, h_fft_buf); + memcpy(echo_subtractor_output, e, sizeof(float) * PART_LEN); +} + +static void FormSuppressionGain(AecCore* aec, + float cohde[PART_LEN1], + float cohxd[PART_LEN1], + float hNl[PART_LEN1]) { + float hNlDeAvg, hNlXdAvg; + float hNlPref[kPrefBandSize]; + float hNlFb = 0, hNlFbLow = 0; + const int prefBandSize = kPrefBandSize / aec->mult; + const float prefBandQuant = 0.75f, prefBandQuantLow = 0.5f; + const int minPrefBand = 4 / aec->mult; + // Power estimate smoothing coefficients. + const float* min_overdrive = aec->extended_filter_enabled + ? 
kExtendedMinOverDrive + : kNormalMinOverDrive; + + hNlXdAvg = 0; + for (int i = minPrefBand; i < prefBandSize + minPrefBand; ++i) { + hNlXdAvg += cohxd[i]; + } + hNlXdAvg /= prefBandSize; + hNlXdAvg = 1 - hNlXdAvg; + + hNlDeAvg = 0; + for (int i = minPrefBand; i < prefBandSize + minPrefBand; ++i) { + hNlDeAvg += cohde[i]; + } + hNlDeAvg /= prefBandSize; + + if (hNlXdAvg < 0.75f && hNlXdAvg < aec->hNlXdAvgMin) { + aec->hNlXdAvgMin = hNlXdAvg; + } + + if (hNlDeAvg > 0.98f && hNlXdAvg > 0.9f) { + aec->stNearState = 1; + } else if (hNlDeAvg < 0.95f || hNlXdAvg < 0.8f) { + aec->stNearState = 0; + } + + if (aec->hNlXdAvgMin == 1) { + aec->echoState = 0; + aec->overDrive = min_overdrive[aec->nlp_mode]; + + if (aec->stNearState == 1) { + memcpy(hNl, cohde, sizeof(hNl[0]) * PART_LEN1); + hNlFb = hNlDeAvg; + hNlFbLow = hNlDeAvg; + } else { + for (int i = 0; i < PART_LEN1; ++i) { + hNl[i] = 1 - cohxd[i]; + } + hNlFb = hNlXdAvg; + hNlFbLow = hNlXdAvg; + } + } else { + if (aec->stNearState == 1) { + aec->echoState = 0; + memcpy(hNl, cohde, sizeof(hNl[0]) * PART_LEN1); + hNlFb = hNlDeAvg; + hNlFbLow = hNlDeAvg; + } else { + aec->echoState = 1; + for (int i = 0; i < PART_LEN1; ++i) { + hNl[i] = WEBRTC_SPL_MIN(cohde[i], 1 - cohxd[i]); + } + + // Select an order statistic from the preferred bands. + // TODO(peah): Using quicksort now, but a selection algorithm may be + // preferred. + memcpy(hNlPref, &hNl[minPrefBand], sizeof(float) * prefBandSize); + qsort(hNlPref, prefBandSize, sizeof(float), CmpFloat); + hNlFb = hNlPref[static_cast(floor(prefBandQuant * + (prefBandSize - 1)))]; + hNlFbLow = hNlPref[static_cast(floor(prefBandQuantLow * + (prefBandSize - 1)))]; + } + } + + // Track the local filter minimum to determine suppression overdrive. 
+ if (hNlFbLow < 0.6f && hNlFbLow < aec->hNlFbLocalMin) { + aec->hNlFbLocalMin = hNlFbLow; + aec->hNlFbMin = hNlFbLow; + aec->hNlNewMin = 1; + aec->hNlMinCtr = 0; + } + aec->hNlFbLocalMin = + WEBRTC_SPL_MIN(aec->hNlFbLocalMin + 0.0008f / aec->mult, 1); + aec->hNlXdAvgMin = WEBRTC_SPL_MIN(aec->hNlXdAvgMin + 0.0006f / aec->mult, 1); + + if (aec->hNlNewMin == 1) { + aec->hNlMinCtr++; + } + if (aec->hNlMinCtr == 2) { + aec->hNlNewMin = 0; + aec->hNlMinCtr = 0; + aec->overDrive = + WEBRTC_SPL_MAX(kTargetSupp[aec->nlp_mode] / + static_cast(log(aec->hNlFbMin + 1e-10f) + 1e-10f), + min_overdrive[aec->nlp_mode]); + } + + // Smooth the overdrive. + if (aec->overDrive < aec->overdrive_scaling) { + aec->overdrive_scaling = + 0.99f * aec->overdrive_scaling + 0.01f * aec->overDrive; + } else { + aec->overdrive_scaling = + 0.9f * aec->overdrive_scaling + 0.1f * aec->overDrive; + } + + // Apply the overdrive. + WebRtcAec_Overdrive(aec->overdrive_scaling, hNlFb, hNl); +} + +static void EchoSuppression(const OouraFft& ooura_fft, + AecCore* aec, + float* nearend_extended_block_lowest_band, + float farend_extended_block[PART_LEN2], + float* echo_subtractor_output, + float output[NUM_HIGH_BANDS_MAX + 1][PART_LEN]) { + float efw[2][PART_LEN1]; + float xfw[2][PART_LEN1]; + float dfw[2][PART_LEN1]; + float comfortNoiseHband[2][PART_LEN1]; + float fft[PART_LEN2]; + float nlpGainHband; + int i; + size_t j; + + // Coherence and non-linear filter + float cohde[PART_LEN1], cohxd[PART_LEN1]; + float hNl[PART_LEN1]; + + // Filter energy + const int delayEstInterval = 10 * aec->mult; + + float* xfw_ptr = NULL; + + // Update eBuf with echo subtractor output. + memcpy(aec->eBuf + PART_LEN, echo_subtractor_output, + sizeof(float) * PART_LEN); + + // Analysis filter banks for the echo suppressor. + // Windowed near-end ffts. + WindowData(fft, nearend_extended_block_lowest_band); + ooura_fft.Fft(fft); + StoreAsComplex(fft, dfw); + + // Windowed echo suppressor output ffts. 
  // Windowed FFT of the echo subtractor output (continuation of
  // EchoSuppression).
  WindowData(fft, aec->eBuf);
  ooura_fft.Fft(fft);
  StoreAsComplex(fft, efw);

  // NLP

  // Convert far-end partition to the frequency domain with windowing.
  WindowData(fft, farend_extended_block);
  Fft(ooura_fft, fft, xfw);
  xfw_ptr = &xfw[0][0];

  // Buffer far.
  memcpy(aec->xfwBuf, xfw_ptr, sizeof(float) * 2 * PART_LEN1);

  // Re-estimate the NLP alignment delay every |delayEstInterval| blocks from
  // the partition with the strongest filter energy.
  aec->delayEstCtr++;
  if (aec->delayEstCtr == delayEstInterval) {
    aec->delayEstCtr = 0;
    aec->delayIdx = WebRtcAec_PartitionDelay(aec->num_partitions, aec->wfBuf);
  }

  aec->data_dumper->DumpRaw("aec_nlp_delay", 1, &aec->delayIdx);

  // Use delayed far.
  memcpy(xfw, aec->xfwBuf + aec->delayIdx * PART_LEN1,
         sizeof(xfw[0][0]) * 2 * PART_LEN1);

  WebRtcAec_UpdateCoherenceSpectra(aec->mult, aec->extended_filter_enabled == 1,
                                   efw, dfw, xfw, &aec->coherence_state,
                                   &aec->divergeState,
                                   &aec->extreme_filter_divergence);

  WebRtcAec_ComputeCoherence(&aec->coherence_state, cohde, cohxd);

  // Select the microphone signal as output if the filter is deemed to have
  // diverged.
  if (aec->divergeState) {
    memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);
  }

  FormSuppressionGain(aec, cohde, cohxd, hNl);

  aec->data_dumper->DumpRaw("aec_nlp_gain", PART_LEN1, hNl);

  WebRtcAec_Suppress(hNl, efw);

  // Add comfort noise.
  ComfortNoise(aec->num_bands > 1, &aec->seed, efw, comfortNoiseHband,
               aec->noisePow, hNl);

  // Inverse error fft.
  ScaledInverseFft(ooura_fft, efw, fft, 2.0f, 1);

  // Overlap and add to obtain output.
  for (i = 0; i < PART_LEN; i++) {
    output[0][i] = (fft[i] * WebRtcAec_sqrtHanning[i] +
                    aec->outBuf[i] * WebRtcAec_sqrtHanning[PART_LEN - i]);

    // Saturate output to keep it in the allowed range.
    output[0][i] = WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, output[0][i],
                                  WEBRTC_SPL_WORD16_MIN);
  }
  memcpy(aec->outBuf, &fft[PART_LEN], PART_LEN * sizeof(aec->outBuf[0]));

  // For H band
  if (aec->num_bands > 1) {
    // H band gain
    // average nlp over low band: average over second half of freq spectrum
    // (4->8khz)
    GetHighbandGain(hNl, &nlpGainHband);

    // Inverse comfort_noise
    ScaledInverseFft(ooura_fft, comfortNoiseHband, fft, 2.0f, 0);

    // compute gain factor
    for (j = 1; j < aec->num_bands; ++j) {
      for (i = 0; i < PART_LEN; i++) {
        output[j][i] = aec->previous_nearend_block[j][i] * nlpGainHband;
      }
    }

    // Add some comfort noise where Hband is attenuated.
    for (i = 0; i < PART_LEN; i++) {
      output[1][i] += cnScaleHband * fft[i];
    }

    // Saturate output to keep it in the allowed range.
    for (j = 1; j < aec->num_bands; ++j) {
      for (i = 0; i < PART_LEN; i++) {
        output[j][i] = WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, output[j][i],
                                      WEBRTC_SPL_WORD16_MIN);
      }
    }
  }

  // Copy the current block to the old position.
  memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN);

  // Shift the delayed far-end spectra buffer by one partition.
  memmove(aec->xfwBuf + PART_LEN1, aec->xfwBuf,
          sizeof(aec->xfwBuf) - sizeof(complex_t) * PART_LEN1);
}

// Processes one 64-sample (PART_LEN) block: delay logging, noise estimation,
// linear echo subtraction and non-linear echo suppression. |output_block|
// receives the processed nearend bands. (Continues past this chunk.)
static void ProcessNearendBlock(
    AecCore* aec,
    float farend_extended_block_lowest_band[PART_LEN2],
    float nearend_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN],
    float output_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN]) {
  size_t i;

  float fft[PART_LEN2];
  float nearend_extended_block_lowest_band[PART_LEN2];
  float farend_fft[2][PART_LEN1];
  float nearend_fft[2][PART_LEN1];
  float far_spectrum = 0.0f;
  float near_spectrum = 0.0f;
  float abs_far_spectrum[PART_LEN1];
  float abs_near_spectrum[PART_LEN1];

  // Power-smoothing coefficients (old, new).
  const float gPow[2] = {0.9f, 0.1f};

  // Noise estimate constants.
  // Noise-estimate ramp-in constants (continuation of ProcessNearendBlock).
  const int noiseInitBlocks = 500 * aec->mult;
  const float step = 0.1f;
  const float ramp = 1.0002f;
  const float gInitNoise[2] = {0.999f, 0.001f};

  float echo_subtractor_output[PART_LEN];

  aec->data_dumper->DumpWav("aec_far", PART_LEN,
                            &farend_extended_block_lowest_band[PART_LEN],
                            std::min(aec->sampFreq, 16000), 1);
  aec->data_dumper->DumpWav("aec_near", PART_LEN, &nearend_block[0][0],
                            std::min(aec->sampFreq, 16000), 1);

  if (aec->metricsMode == 1) {
    // Update power levels
    UpdateLevel(
        &aec->farlevel,
        CalculatePower(&farend_extended_block_lowest_band[PART_LEN], PART_LEN));
    UpdateLevel(&aec->nearlevel,
                CalculatePower(&nearend_block[0][0], PART_LEN));
  }

  // Convert far-end signal to the frequency domain.
  memcpy(fft, farend_extended_block_lowest_band, sizeof(float) * PART_LEN2);
  Fft(aec->ooura_fft, fft, farend_fft);

  // Form extended nearend frame.
  memcpy(&nearend_extended_block_lowest_band[0],
         &aec->previous_nearend_block[0][0], sizeof(float) * PART_LEN);
  memcpy(&nearend_extended_block_lowest_band[PART_LEN], &nearend_block[0][0],
         sizeof(float) * PART_LEN);

  // Convert near-end signal to the frequency domain.
  memcpy(fft, nearend_extended_block_lowest_band, sizeof(float) * PART_LEN2);
  Fft(aec->ooura_fft, fft, nearend_fft);

  // Power smoothing.
  if (aec->refined_adaptive_filter_enabled) {
    for (i = 0; i < PART_LEN1; ++i) {
      far_spectrum = farend_fft[0][i] * farend_fft[0][i] +
                     farend_fft[1][i] * farend_fft[1][i];
      // Calculate the magnitude spectrum.
      abs_far_spectrum[i] = sqrtf(far_spectrum);
    }
    // Exact regressor power over all partitions instead of the smoothed
    // estimate below.
    RegressorPower(aec->num_partitions, aec->xfBufBlockPos, aec->xfBuf,
                   aec->xPow);
  } else {
    for (i = 0; i < PART_LEN1; ++i) {
      far_spectrum = farend_fft[0][i] * farend_fft[0][i] +
                     farend_fft[1][i] * farend_fft[1][i];
      aec->xPow[i] =
          gPow[0] * aec->xPow[i] + gPow[1] * aec->num_partitions * far_spectrum;
      // Calculate the magnitude spectrum.
      abs_far_spectrum[i] = sqrtf(far_spectrum);
    }
  }

  for (i = 0; i < PART_LEN1; ++i) {
    near_spectrum = nearend_fft[0][i] * nearend_fft[0][i] +
                    nearend_fft[1][i] * nearend_fft[1][i];
    aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum;
    // Calculate the magnitude spectrum.
    abs_near_spectrum[i] = sqrtf(near_spectrum);
  }

  // Estimate noise power. Wait until dPow is more stable.
  if (aec->noiseEstCtr > 50) {
    for (i = 0; i < PART_LEN1; i++) {
      if (aec->dPow[i] < aec->dMinPow[i]) {
        aec->dMinPow[i] =
            (aec->dPow[i] + step * (aec->dMinPow[i] - aec->dPow[i])) * ramp;
      } else {
        aec->dMinPow[i] *= ramp;
      }
    }
  }

  // Smooth increasing noise power from zero at the start,
  // to avoid a sudden burst of comfort noise.
  if (aec->noiseEstCtr < noiseInitBlocks) {
    aec->noiseEstCtr++;
    for (i = 0; i < PART_LEN1; i++) {
      if (aec->dMinPow[i] > aec->dInitMinPow[i]) {
        aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] +
                              gInitNoise[1] * aec->dMinPow[i];
      } else {
        aec->dInitMinPow[i] = aec->dMinPow[i];
      }
    }
    aec->noisePow = aec->dInitMinPow;
  } else {
    aec->noisePow = aec->dMinPow;
  }

  // Block wise delay estimation used for logging
  if (aec->delay_logging_enabled) {
    if (WebRtc_AddFarSpectrumFloat(aec->delay_estimator_farend,
                                   abs_far_spectrum, PART_LEN1) == 0) {
      int delay_estimate = WebRtc_DelayEstimatorProcessFloat(
          aec->delay_estimator, abs_near_spectrum, PART_LEN1);
      if (delay_estimate >= 0) {
        // Update delay estimate buffer.
        aec->delay_histogram[delay_estimate]++;
        aec->num_delay_values++;
      }
      if (aec->delay_metrics_delivered == 1 &&
          aec->num_delay_values >= kDelayMetricsAggregationWindow) {
        UpdateDelayMetrics(aec);
      }
    }
  }

  // Perform echo subtraction.
  EchoSubtraction(
      aec->ooura_fft, aec->num_partitions, aec->extended_filter_enabled,
      &aec->extreme_filter_divergence, aec->filter_step_size,
      aec->error_threshold, &farend_fft[0][0], &aec->xfBufBlockPos, aec->xfBuf,
      &nearend_block[0][0], aec->xPow, aec->wfBuf, echo_subtractor_output);
  aec->data_dumper->DumpRaw("aec_h_fft", PART_LEN1 * aec->num_partitions,
                            &aec->wfBuf[0][0]);
  aec->data_dumper->DumpRaw("aec_h_fft", PART_LEN1 * aec->num_partitions,
                            &aec->wfBuf[1][0]);

  aec->data_dumper->DumpWav("aec_out_linear", PART_LEN, echo_subtractor_output,
                            std::min(aec->sampFreq, 16000), 1);

  if (aec->metricsMode == 1) {
    UpdateLevel(&aec->linoutlevel,
                CalculatePower(echo_subtractor_output, PART_LEN));
  }

  // Perform echo suppression.
  EchoSuppression(aec->ooura_fft, aec, nearend_extended_block_lowest_band,
                  farend_extended_block_lowest_band, echo_subtractor_output,
                  output_block);

  if (aec->metricsMode == 1) {
    UpdateLevel(&aec->nlpoutlevel,
                CalculatePower(&output_block[0][0], PART_LEN));
    UpdateMetrics(aec);
  }

  // Store the nearend signal until the next frame.
  for (i = 0; i < aec->num_bands; ++i) {
    memcpy(&aec->previous_nearend_block[i][0], &nearend_block[i][0],
           sizeof(float) * PART_LEN);
  }

  aec->data_dumper->DumpWav("aec_out", PART_LEN, &output_block[0][0],
                            std::min(aec->sampFreq, 16000), 1);
}

// Allocates and partially initializes an AecCore instance; full signal-state
// initialization happens in WebRtcAec_InitAec. Returns NULL on failure.
// (Continues past this chunk.)
AecCore* WebRtcAec_CreateAec(int instance_count) {
  AecCore* aec = new AecCore(instance_count);

  if (!aec) {
    return NULL;
  }
  aec->nearend_buffer_size = 0;
  memset(&aec->nearend_buffer[0], 0, sizeof(aec->nearend_buffer));
  // Start the output buffer with zeros to be able to produce
  // a full output frame in the first frame.
  // (Continuation of WebRtcAec_CreateAec.)
  aec->output_buffer_size = PART_LEN - (FRAME_LEN - PART_LEN);
  memset(&aec->output_buffer[0], 0, sizeof(aec->output_buffer));

  aec->delay_estimator_farend =
      WebRtc_CreateDelayEstimatorFarend(PART_LEN1, kHistorySizeBlocks);
  if (aec->delay_estimator_farend == NULL) {
    WebRtcAec_FreeAec(aec);
    return NULL;
  }
  // We create the delay_estimator with the same amount of maximum lookahead as
  // the delay history size (kHistorySizeBlocks) for symmetry reasons.
  aec->delay_estimator = WebRtc_CreateDelayEstimator(
      aec->delay_estimator_farend, kHistorySizeBlocks);
  if (aec->delay_estimator == NULL) {
    WebRtcAec_FreeAec(aec);
    return NULL;
  }
#ifdef WEBRTC_ANDROID
  aec->delay_agnostic_enabled = 1;  // DA-AEC enabled by default.
  // DA-AEC assumes the system is causal from the beginning and will self adjust
  // the lookahead when shifting is required.
  WebRtc_set_lookahead(aec->delay_estimator, 0);
#else
  aec->delay_agnostic_enabled = 0;
  WebRtc_set_lookahead(aec->delay_estimator, kLookaheadBlocks);
#endif
  aec->extended_filter_enabled = 0;
  aec->refined_adaptive_filter_enabled = false;

  // Assembly optimization: install the generic C implementations first; the
  // platform-specific init calls below may override them.
  WebRtcAec_FilterFar = FilterFar;
  WebRtcAec_ScaleErrorSignal = ScaleErrorSignal;
  WebRtcAec_FilterAdaptation = FilterAdaptation;
  WebRtcAec_Overdrive = Overdrive;
  WebRtcAec_Suppress = Suppress;
  WebRtcAec_ComputeCoherence = ComputeCoherence;
  WebRtcAec_UpdateCoherenceSpectra = UpdateCoherenceSpectra;
  WebRtcAec_StoreAsComplex = StoreAsComplex;
  WebRtcAec_PartitionDelay = PartitionDelay;
  WebRtcAec_WindowData = WindowData;

#if defined(WEBRTC_ARCH_X86_FAMILY)
  if (WebRtc_GetCPUInfo(kSSE2)) {
    WebRtcAec_InitAec_SSE2();
  }
#endif

#if defined(MIPS_FPU_LE)
  WebRtcAec_InitAec_mips();
#endif

#if defined(WEBRTC_HAS_NEON)
  WebRtcAec_InitAec_neon();
#endif

  return aec;
}

// Frees an AecCore and its delay estimators. Safe to call with NULL.
void WebRtcAec_FreeAec(AecCore* aec) {
  if (aec == NULL) {
    return;
  }

  WebRtc_FreeDelayEstimator(aec->delay_estimator);
  WebRtc_FreeDelayEstimatorFarend(aec->delay_estimator_farend);

  delete aec;
}

// Selects the NLMS step size depending on filter mode and sample rate.
static void SetAdaptiveFilterStepSize(AecCore* aec) {
  // Extended filter adaptation parameter.
  // TODO(ajm): No narrowband tuning yet.
  const float kExtendedMu = 0.4f;

  if (aec->refined_adaptive_filter_enabled) {
    aec->filter_step_size = 0.05f;
  } else {
    if (aec->extended_filter_enabled) {
      aec->filter_step_size = kExtendedMu;
    } else {
      if (aec->sampFreq == 8000) {
        aec->filter_step_size = 0.6f;
      } else {
        aec->filter_step_size = 0.5f;
      }
    }
  }
}

// Selects the error threshold used to scale the error signal during filter
// adaptation, depending on filter mode and sample rate.
static void SetErrorThreshold(AecCore* aec) {
  // Extended filter adaptation parameter.
  // TODO(ajm): No narrowband tuning yet.
  static const float kExtendedErrorThreshold = 1.0e-6f;

  if (aec->extended_filter_enabled) {
    aec->error_threshold = kExtendedErrorThreshold;
  } else {
    if (aec->sampFreq == 8000) {
      aec->error_threshold = 2e-6f;
    } else {
      aec->error_threshold = 1.5e-6f;
    }
  }
}

// (Re)initializes all signal-processing state of |aec| for the given sample
// rate. Returns 0 on success, -1 on delay-estimator init failure.
// (Continues past this chunk.)
int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
  int i;
  aec->data_dumper->InitiateNewSetOfRecordings();

  aec->sampFreq = sampFreq;

  SetAdaptiveFilterStepSize(aec);
  SetErrorThreshold(aec);

  if (sampFreq == 8000) {
    aec->num_bands = 1;
  } else {
    aec->num_bands = (size_t)(sampFreq / 16000);
  }

  // Start the output buffer with zeros to be able to produce
  // a full output frame in the first frame.
  aec->output_buffer_size = PART_LEN - (FRAME_LEN - PART_LEN);
  memset(&aec->output_buffer[0], 0, sizeof(aec->output_buffer));
  aec->nearend_buffer_size = 0;
  memset(&aec->nearend_buffer[0], 0, sizeof(aec->nearend_buffer));

  // Initialize far-end buffer.
  aec->farend_block_buffer_.ReInit();

  aec->system_delay = 0;

  if (WebRtc_InitDelayEstimatorFarend(aec->delay_estimator_farend) != 0) {
    return -1;
  }
  if (WebRtc_InitDelayEstimator(aec->delay_estimator) != 0) {
    return -1;
  }
  aec->delay_logging_enabled = 0;
  aec->delay_metrics_delivered = 0;
  memset(aec->delay_histogram, 0, sizeof(aec->delay_histogram));
  aec->num_delay_values = 0;
  aec->delay_median = -1;
  aec->delay_std = -1;
  aec->fraction_poor_delays = -1.0f;

  aec->previous_delay = -2;  // (-2): Uninitialized.
  aec->delay_correction_count = 0;
  aec->shift_offset = kInitialShiftOffset;
  aec->delay_quality_threshold = kDelayQualityThresholdMin;

  aec->num_partitions = kNormalNumPartitions;

  // Update the delay estimator with filter length.  We use half the
  // |num_partitions| to take the echo path into account.  In practice we say
  // that the echo has a duration of maximum half |num_partitions|, which is not
  // true, but serves as a crude measure.
  WebRtc_set_allowed_offset(aec->delay_estimator, aec->num_partitions / 2);
  // TODO(bjornv): I currently hard coded the enable.  Once we've established
  // that AECM has no performance regression, robust_validation will be enabled
  // all the time and the APIs to turn it on/off will be removed.  Hence, remove
  // this line then.
  WebRtc_enable_robust_validation(aec->delay_estimator, 1);
  aec->frame_count = 0;

  // Default target suppression mode.
  aec->nlp_mode = 1;

  // Sampling frequency multiplier w.r.t. 8 kHz.
  // In case of multiple bands we process the lower band in 16 kHz, hence the
  // multiplier is always 2.
+ if (aec->num_bands > 1) { + aec->mult = 2; + } else { + aec->mult = static_cast(aec->sampFreq) / 8000; + } + + aec->farBufWritePos = 0; + aec->farBufReadPos = 0; + + aec->inSamples = 0; + aec->outSamples = 0; + aec->knownDelay = 0; + + // Initialize buffers + memset(aec->previous_nearend_block, 0, sizeof(aec->previous_nearend_block)); + memset(aec->eBuf, 0, sizeof(aec->eBuf)); + + memset(aec->xPow, 0, sizeof(aec->xPow)); + memset(aec->dPow, 0, sizeof(aec->dPow)); + memset(aec->dInitMinPow, 0, sizeof(aec->dInitMinPow)); + aec->noisePow = aec->dInitMinPow; + aec->noiseEstCtr = 0; + + // Initial comfort noise power + for (i = 0; i < PART_LEN1; i++) { + aec->dMinPow[i] = 1.0e6f; + } + + // Holds the last block written to + aec->xfBufBlockPos = 0; + // TODO(peah): Investigate need for these initializations. Deleting them + // doesn't change the output at all and yields 0.4% overall speedup. + memset(aec->xfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1); + memset(aec->wfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1); + memset(aec->coherence_state.sde, 0, sizeof(complex_t) * PART_LEN1); + memset(aec->coherence_state.sxd, 0, sizeof(complex_t) * PART_LEN1); + memset(aec->xfwBuf, 0, + sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1); + memset(aec->coherence_state.se, 0, sizeof(float) * PART_LEN1); + + // To prevent numerical instability in the first block. 
+ for (i = 0; i < PART_LEN1; i++) { + aec->coherence_state.sd[i] = 1; + } + for (i = 0; i < PART_LEN1; i++) { + aec->coherence_state.sx[i] = 1; + } + + memset(aec->hNs, 0, sizeof(aec->hNs)); + memset(aec->outBuf, 0, sizeof(float) * PART_LEN); + + aec->hNlFbMin = 1; + aec->hNlFbLocalMin = 1; + aec->hNlXdAvgMin = 1; + aec->hNlNewMin = 0; + aec->hNlMinCtr = 0; + aec->overDrive = 2; + aec->overdrive_scaling = 2; + aec->delayIdx = 0; + aec->stNearState = 0; + aec->echoState = 0; + aec->divergeState = 0; + + aec->seed = 777; + aec->delayEstCtr = 0; + + aec->extreme_filter_divergence = 0; + + // Metrics disabled by default + aec->metricsMode = 0; + InitMetrics(aec); + + return 0; +} + +void WebRtcAec_BufferFarendBlock(AecCore* aec, const float* farend) { + // Check if the buffer is full, and in that case flush the oldest data. + if (aec->farend_block_buffer_.AvaliableSpace() < 1) { + aec->farend_block_buffer_.AdjustSize(1); + } + aec->farend_block_buffer_.Insert(farend); +} + +int WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(AecCore* aec, + int buffer_size_decrease) { + int achieved_buffer_size_decrease = + aec->farend_block_buffer_.AdjustSize(buffer_size_decrease); + aec->system_delay -= achieved_buffer_size_decrease * PART_LEN; + return achieved_buffer_size_decrease; +} + +void FormNearendBlock( + size_t nearend_start_index, + size_t num_bands, + const float* const* nearend_frame, + size_t num_samples_from_nearend_frame, + const float nearend_buffer[NUM_HIGH_BANDS_MAX + 1] + [PART_LEN - (FRAME_LEN - PART_LEN)], + float nearend_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN]) { + RTC_DCHECK_LE(num_samples_from_nearend_frame, PART_LEN); + const int num_samples_from_buffer = PART_LEN - num_samples_from_nearend_frame; + + if (num_samples_from_buffer > 0) { + for (size_t i = 0; i < num_bands; ++i) { + memcpy(&nearend_block[i][0], &nearend_buffer[i][0], + num_samples_from_buffer * sizeof(float)); + } + } + + for (size_t i = 0; i < num_bands; ++i) { + 
memcpy(&nearend_block[i][num_samples_from_buffer], + &nearend_frame[i][nearend_start_index], + num_samples_from_nearend_frame * sizeof(float)); + } +} + +void BufferNearendFrame( + size_t nearend_start_index, + size_t num_bands, + const float* const* nearend_frame, + size_t num_samples_to_buffer, + float nearend_buffer[NUM_HIGH_BANDS_MAX + 1] + [PART_LEN - (FRAME_LEN - PART_LEN)]) { + for (size_t i = 0; i < num_bands; ++i) { + memcpy( + &nearend_buffer[i][0], + &nearend_frame[i] + [nearend_start_index + FRAME_LEN - num_samples_to_buffer], + num_samples_to_buffer * sizeof(float)); + } +} + +void BufferOutputBlock(size_t num_bands, + const float output_block[NUM_HIGH_BANDS_MAX + 1] + [PART_LEN], + size_t* output_buffer_size, + float output_buffer[NUM_HIGH_BANDS_MAX + 1] + [2 * PART_LEN]) { + for (size_t i = 0; i < num_bands; ++i) { + memcpy(&output_buffer[i][*output_buffer_size], &output_block[i][0], + PART_LEN * sizeof(float)); + } + (*output_buffer_size) += PART_LEN; +} + +void FormOutputFrame(size_t output_start_index, + size_t num_bands, + size_t* output_buffer_size, + float output_buffer[NUM_HIGH_BANDS_MAX + 1][2 * PART_LEN], + float* const* output_frame) { + RTC_DCHECK_LE(FRAME_LEN, *output_buffer_size); + for (size_t i = 0; i < num_bands; ++i) { + memcpy(&output_frame[i][output_start_index], &output_buffer[i][0], + FRAME_LEN * sizeof(float)); + } + (*output_buffer_size) -= FRAME_LEN; + if (*output_buffer_size > 0) { + RTC_DCHECK_GE(2 * PART_LEN - FRAME_LEN, (*output_buffer_size)); + for (size_t i = 0; i < num_bands; ++i) { + memcpy(&output_buffer[i][0], &output_buffer[i][FRAME_LEN], + (*output_buffer_size) * sizeof(float)); + } + } +} + +void WebRtcAec_ProcessFrames(AecCore* aec, + const float* const* nearend, + size_t num_bands, + size_t num_samples, + int knownDelay, + float* const* out) { + RTC_DCHECK(num_samples == 80 || num_samples == 160); + + aec->frame_count++; + // For each frame the process is as follows: + // 1) If the system_delay indicates on 
  //    being too small for processing a
  //    frame we stuff the buffer with enough data for 10 ms.
  // 2 a) Adjust the buffer to the system delay, by moving the read pointer.
  //   b) Apply signal based delay correction, if we have detected poor AEC
  //      performance.
  // 3) TODO(bjornv): Investigate if we need to add this:
  //    If we can't move read pointer due to buffer size limitations we
  //    flush/stuff the buffer.
  // 4) Process as many partitions as possible.
  // 5) Update the |system_delay| with respect to a full frame of FRAME_LEN
  //    samples. Even though we will have data left to process (we work with
  //    partitions) we consider updating a whole frame, since that's the
  //    amount of data we input and output in audio_processing.
  // 6) Update the outputs.

  // The AEC has two different delay estimation algorithms built in. The
  // first relies on delay input values from the user and the amount of
  // shifted buffer elements is controlled by |knownDelay|. This delay will
  // give a guess on how much we need to shift far-end buffers to align with
  // the near-end signal. The other delay estimation algorithm uses the
  // far- and near-end signals to find the offset between them. This one
  // (called "signal delay") is then used to fine tune the alignment, or
  // simply compensate for errors in the system based one.
  // Note that the two algorithms operate independently. Currently, we only
  // allow one algorithm to be turned on.

  RTC_DCHECK_EQ(aec->num_bands, num_bands);

  for (size_t j = 0; j < num_samples; j += FRAME_LEN) {
    // 1) At most we process |aec->mult|+1 partitions in 10 ms. Make sure we
    // have enough far-end data for that by stuffing the buffer if the
    // |system_delay| indicates others.
    if (aec->system_delay < FRAME_LEN) {
      // We don't have enough data so we rewind 10 ms.
      WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(aec, -(aec->mult + 1));
    }

    if (!aec->delay_agnostic_enabled) {
      // 2 a) Compensate for a possible change in the system delay.

      // TODO(bjornv): Investigate how we should round the delay difference;
      // right now we know that incoming |knownDelay| is underestimated when
      // it's less than |aec->knownDelay|. We therefore, round (-32) in that
      // direction. In the other direction, we don't have this situation, but
      // might flush one partition too little. This can cause non-causality,
      // which should be investigated. Maybe, allow for a non-symmetric
      // rounding, like -16.
      int move_elements = (aec->knownDelay - knownDelay - 32) / PART_LEN;
      int moved_elements = aec->farend_block_buffer_.AdjustSize(move_elements);
      MaybeLogDelayAdjustment(moved_elements * (aec->sampFreq == 8000 ? 8 : 4),
                              DelaySource::kSystemDelay);
      aec->knownDelay -= moved_elements * PART_LEN;
    } else {
      // 2 b) Apply signal based delay correction.
      int move_elements = SignalBasedDelayCorrection(aec);
      int moved_elements = aec->farend_block_buffer_.AdjustSize(move_elements);
      MaybeLogDelayAdjustment(moved_elements * (aec->sampFreq == 8000 ? 8 : 4),
                              DelaySource::kDelayAgnostic);
      int far_near_buffer_diff =
          aec->farend_block_buffer_.Size() -
          (aec->nearend_buffer_size + FRAME_LEN) / PART_LEN;
      WebRtc_SoftResetDelayEstimator(aec->delay_estimator, moved_elements);
      WebRtc_SoftResetDelayEstimatorFarend(aec->delay_estimator_farend,
                                           moved_elements);
      // If we rely on reported system delay values only, a buffer underrun here
      // can never occur since we've taken care of that in 1) above. Here, we
      // apply signal based delay correction and can therefore end up with
      // buffer underruns since the delay estimation can be wrong. We therefore
      // stuff the buffer with enough elements if needed.
      if (far_near_buffer_diff < 0) {
        WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(aec,
                                                       far_near_buffer_diff);
      }
    }

    static_assert(
        16 == (FRAME_LEN - PART_LEN),
        "These constants need to be properly related for this code to work");
    float output_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN];
    float nearend_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN];
    float farend_extended_block_lowest_band[PART_LEN2];

    // Form and process a block of nearend samples, buffer the output block of
    // samples.
    aec->farend_block_buffer_.ExtractExtendedBlock(
        farend_extended_block_lowest_band);
    FormNearendBlock(j, num_bands, nearend, PART_LEN - aec->nearend_buffer_size,
                     aec->nearend_buffer, nearend_block);
    ProcessNearendBlock(aec, farend_extended_block_lowest_band, nearend_block,
                        output_block);
    BufferOutputBlock(num_bands, output_block, &aec->output_buffer_size,
                      aec->output_buffer);

    if ((FRAME_LEN - PART_LEN + aec->nearend_buffer_size) == PART_LEN) {
      // When possible (every fourth frame) form and process a second block of
      // nearend samples, buffer the output block of samples.
      aec->farend_block_buffer_.ExtractExtendedBlock(
          farend_extended_block_lowest_band);
      FormNearendBlock(j + FRAME_LEN - PART_LEN, num_bands, nearend, PART_LEN,
                       aec->nearend_buffer, nearend_block);
      ProcessNearendBlock(aec, farend_extended_block_lowest_band, nearend_block,
                          output_block);
      BufferOutputBlock(num_bands, output_block, &aec->output_buffer_size,
                        aec->output_buffer);

      // Reset the buffer size as there are no samples left in the nearend input
      // to buffer.
      aec->nearend_buffer_size = 0;
    } else {
      // Buffer the remaining samples in the nearend input.
      aec->nearend_buffer_size += FRAME_LEN - PART_LEN;
      BufferNearendFrame(j, num_bands, nearend, aec->nearend_buffer_size,
                         aec->nearend_buffer);
    }

    // 5) Update system delay with respect to the entire frame.
    aec->system_delay -= FRAME_LEN;

    // 6) Form the output frame.
+ FormOutputFrame(j, num_bands, &aec->output_buffer_size, aec->output_buffer, + out); + } +} + +int WebRtcAec_GetDelayMetricsCore(AecCore* self, + int* median, + int* std, + float* fraction_poor_delays) { + RTC_DCHECK(self); + RTC_DCHECK(median); + RTC_DCHECK(std); + + if (self->delay_logging_enabled == 0) { + // Logging disabled. + return -1; + } + + if (self->delay_metrics_delivered == 0) { + UpdateDelayMetrics(self); + self->delay_metrics_delivered = 1; + } + *median = self->delay_median; + *std = self->delay_std; + *fraction_poor_delays = self->fraction_poor_delays; + + return 0; +} + +int WebRtcAec_echo_state(AecCore* self) { + return self->echoState; +} + +void WebRtcAec_GetEchoStats(AecCore* self, + Stats* erl, + Stats* erle, + Stats* a_nlp, + float* divergent_filter_fraction) { + RTC_DCHECK(erl); + RTC_DCHECK(erle); + RTC_DCHECK(a_nlp); + *erl = self->erl; + *erle = self->erle; + *a_nlp = self->aNlp; + *divergent_filter_fraction = + self->divergent_filter_fraction.GetLatestFraction(); +} + +void WebRtcAec_SetConfigCore(AecCore* self, + int nlp_mode, + int metrics_mode, + int delay_logging) { + RTC_DCHECK_GE(nlp_mode, 0); + RTC_DCHECK_LT(nlp_mode, 3); + self->nlp_mode = nlp_mode; + self->metricsMode = metrics_mode; + if (self->metricsMode) { + InitMetrics(self); + } + // Turn on delay logging if it is either set explicitly or if delay agnostic + // AEC is enabled (which requires delay estimates). 
+ self->delay_logging_enabled = delay_logging || self->delay_agnostic_enabled; + if (self->delay_logging_enabled) { + memset(self->delay_histogram, 0, sizeof(self->delay_histogram)); + } +} + +void WebRtcAec_enable_delay_agnostic(AecCore* self, int enable) { + self->delay_agnostic_enabled = enable; +} + +int WebRtcAec_delay_agnostic_enabled(AecCore* self) { + return self->delay_agnostic_enabled; +} + +void WebRtcAec_enable_refined_adaptive_filter(AecCore* self, bool enable) { + self->refined_adaptive_filter_enabled = enable; + SetAdaptiveFilterStepSize(self); + SetErrorThreshold(self); +} + +bool WebRtcAec_refined_adaptive_filter_enabled(const AecCore* self) { + return self->refined_adaptive_filter_enabled; +} + +void WebRtcAec_enable_extended_filter(AecCore* self, int enable) { + self->extended_filter_enabled = enable; + SetAdaptiveFilterStepSize(self); + SetErrorThreshold(self); + self->num_partitions = enable ? kExtendedNumPartitions : kNormalNumPartitions; + // Update the delay estimator with filter length. See InitAEC() for details. + WebRtc_set_allowed_offset(self->delay_estimator, self->num_partitions / 2); +} + +int WebRtcAec_extended_filter_enabled(AecCore* self) { + return self->extended_filter_enabled; +} + +int WebRtcAec_system_delay(AecCore* self) { + return self->system_delay; +} + +void WebRtcAec_SetSystemDelay(AecCore* self, int delay) { + RTC_DCHECK_GE(delay, 0); + self->system_delay = delay; +} +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.h new file mode 100644 index 000000000..78cb787fa --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core.h @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Specifies the interface for the AEC core.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+extern "C" {
+#include "webrtc/common_audio/ring_buffer.h"
+}
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/common_audio/wav_file.h"
+#include "webrtc/modules/audio_processing/aec/aec_common.h"
+#include "webrtc/modules/audio_processing/utility/block_mean_calculator.h"
+#include "webrtc/modules/audio_processing/utility/ooura_fft.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+#define FRAME_LEN 80
+#define PART_LEN 64               // Length of partition
+#define PART_LEN1 (PART_LEN + 1)  // Unique fft coefficients
+#define PART_LEN2 (PART_LEN * 2)  // Length of partition * 2
+#define NUM_HIGH_BANDS_MAX 2      // Max number of high bands
+
+class ApmDataDumper;
+
+typedef float complex_t[2];
+// For performance reasons, some arrays of complex numbers are replaced by twice
+// as long arrays of float, all the real parts followed by all the imaginary
+// ones (complex_t[SIZE] -> float[2][SIZE]). This allows SIMD optimizations and
+// is better than two arrays (one for the real parts and one for the imaginary
+// parts) as this other way would require two pointers instead of one and cause
+// extra register spilling. This also allows the offsets to be calculated at
+// compile time.
+
+// Metrics
+enum { kOffsetLevel = -100 };
+
+typedef struct Stats {
+  float instant;
+  float average;
+  float min;
+  float max;
+  float sum;
+  float hisum;
+  float himean;
+  size_t counter;
+  size_t hicounter;
+} Stats;
+
+// Number of partitions for the extended filter mode. The first one is an enum
+// to be used in array declarations, as it represents the maximum filter length.
+enum { kExtendedNumPartitions = 32 };
+static const int kNormalNumPartitions = 12;
+
+// Delay estimator constants, used for logging and delay compensation if
+// if reported delays are disabled.
+enum { kLookaheadBlocks = 15 };
+enum {
+  // 500 ms for 16 kHz which is equivalent with the limit of reported delays.
+  kHistorySizeBlocks = 125
+};
+
+typedef struct PowerLevel {
+  PowerLevel();
+
+  BlockMeanCalculator framelevel;
+  BlockMeanCalculator averagelevel;
+  float minlevel;
+} PowerLevel;
+
+class BlockBuffer {
+ public:
+  BlockBuffer();
+  ~BlockBuffer();
+  void ReInit();
+  void Insert(const float block[PART_LEN]);
+  void ExtractExtendedBlock(float extended_block[PART_LEN2]);
+  int AdjustSize(int buffer_size_decrease);
+  size_t Size();
+  size_t AvaliableSpace();
+
+ private:
+  RingBuffer* buffer_;
+};
+
+class DivergentFilterFraction {
+ public:
+  DivergentFilterFraction();
+
+  // Reset.
+  void Reset();
+
+  void AddObservation(const PowerLevel& nearlevel,
+                      const PowerLevel& linoutlevel,
+                      const PowerLevel& nlpoutlevel);
+
+  // Return the latest fraction.
+  float GetLatestFraction() const;
+
+ private:
+  // Clear all values added.
+  void Clear();
+
+  size_t count_;
+  size_t occurrence_;
+  float fraction_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DivergentFilterFraction);
+};
+
+typedef struct CoherenceState {
+  complex_t sde[PART_LEN1];  // cross-psd of nearend and error
+  complex_t sxd[PART_LEN1];  // cross-psd of farend and nearend
+  float sx[PART_LEN1], sd[PART_LEN1], se[PART_LEN1];  // far, near, error psd
+} CoherenceState;
+
+struct AecCore {
+  explicit AecCore(int instance_index);
+  ~AecCore();
+
+  std::unique_ptr<ApmDataDumper> data_dumper;
+  const OouraFft ooura_fft;
+
+  CoherenceState coherence_state;
+
+  int farBufWritePos, farBufReadPos;
+
+  int knownDelay;
+  int inSamples, outSamples;
+  int delayEstCtr;
+
+  // Nearend buffer used for changing from FRAME_LEN to PART_LEN sample block
+  // sizes. The buffer stores all the incoming bands and for each band a maximum
+  // of PART_LEN - (FRAME_LEN - PART_LEN) values need to be buffered in order to
+  // change the block size from FRAME_LEN to PART_LEN.
+  float nearend_buffer[NUM_HIGH_BANDS_MAX + 1]
+                      [PART_LEN - (FRAME_LEN - PART_LEN)];
+  size_t nearend_buffer_size;
+  float output_buffer[NUM_HIGH_BANDS_MAX + 1][2 * PART_LEN];
+  size_t output_buffer_size;
+
+  float eBuf[PART_LEN2];  // error
+
+  float previous_nearend_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN];
+
+  float xPow[PART_LEN1];
+  float dPow[PART_LEN1];
+  float dMinPow[PART_LEN1];
+  float dInitMinPow[PART_LEN1];
+  float* noisePow;
+
+  float xfBuf[2][kExtendedNumPartitions * PART_LEN1];  // farend fft buffer
+  float wfBuf[2][kExtendedNumPartitions * PART_LEN1];  // filter fft
+  // Farend windowed fft buffer.
+ complex_t xfwBuf[kExtendedNumPartitions * PART_LEN1]; + + float hNs[PART_LEN1]; + float hNlFbMin, hNlFbLocalMin; + float hNlXdAvgMin; + int hNlNewMin, hNlMinCtr; + float overDrive; + float overdrive_scaling; + int nlp_mode; + float outBuf[PART_LEN]; + int delayIdx; + + short stNearState, echoState; + short divergeState; + + int xfBufBlockPos; + + BlockBuffer farend_block_buffer_; + + int system_delay; // Current system delay buffered in AEC. + + int mult; // sampling frequency multiple + int sampFreq = 16000; + size_t num_bands; + uint32_t seed; + + float filter_step_size; // stepsize + float error_threshold; // error threshold + + int noiseEstCtr; + + PowerLevel farlevel; + PowerLevel nearlevel; + PowerLevel linoutlevel; + PowerLevel nlpoutlevel; + + int metricsMode; + int stateCounter; + Stats erl; + Stats erle; + Stats aNlp; + Stats rerl; + DivergentFilterFraction divergent_filter_fraction; + + // Quantities to control H band scaling for SWB input + int freq_avg_ic; // initial bin for averaging nlp gain + int flag_Hband_cn; // for comfort noise + float cn_scale_Hband; // scale for comfort noise in H band + + int delay_metrics_delivered; + int delay_histogram[kHistorySizeBlocks]; + int num_delay_values; + int delay_median; + int delay_std; + float fraction_poor_delays; + int delay_logging_enabled; + void* delay_estimator_farend; + void* delay_estimator; + // Variables associated with delay correction through signal based delay + // estimation feedback. + int previous_delay; + int delay_correction_count; + int shift_offset; + float delay_quality_threshold; + int frame_count; + + // 0 = delay agnostic mode (signal based delay correction) disabled. + // Otherwise enabled. + int delay_agnostic_enabled; + // 1 = extended filter mode enabled, 0 = disabled. + int extended_filter_enabled; + // 1 = refined filter adaptation aec mode enabled, 0 = disabled. + bool refined_adaptive_filter_enabled; + + // Runtime selection of number of filter partitions. 
+ int num_partitions; + + // Flag that extreme filter divergence has been detected by the Echo + // Suppressor. + int extreme_filter_divergence; +}; + +AecCore* WebRtcAec_CreateAec(int instance_count); // Returns NULL on error. +void WebRtcAec_FreeAec(AecCore* aec); +int WebRtcAec_InitAec(AecCore* aec, int sampFreq); +void WebRtcAec_InitAec_SSE2(void); +#if defined(MIPS_FPU_LE) +void WebRtcAec_InitAec_mips(void); +#endif +#if defined(WEBRTC_HAS_NEON) +void WebRtcAec_InitAec_neon(void); +#endif + +void WebRtcAec_BufferFarendBlock(AecCore* aec, const float* farend); +void WebRtcAec_ProcessFrames(AecCore* aec, + const float* const* nearend, + size_t num_bands, + size_t num_samples, + int knownDelay, + float* const* out); + +// A helper function to call adjust the farend buffer size. +// Returns the number of elements the size was decreased with, and adjusts +// |system_delay| by the corresponding amount in ms. +int WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(AecCore* aec, + int size_decrease); + +// Calculates the median, standard deviation and amount of poor values among the +// delay estimates aggregated up to the first call to the function. After that +// first call the metrics are aggregated and updated every second. With poor +// values we mean values that most likely will cause the AEC to perform poorly. +// TODO(bjornv): Consider changing tests and tools to handle constant +// constant aggregation window throughout the session instead. +int WebRtcAec_GetDelayMetricsCore(AecCore* self, + int* median, + int* std, + float* fraction_poor_delays); + +// Returns the echo state (1: echo, 0: no echo). +int WebRtcAec_echo_state(AecCore* self); + +// Gets statistics of the echo metrics ERL, ERLE, A_NLP. +void WebRtcAec_GetEchoStats(AecCore* self, + Stats* erl, + Stats* erle, + Stats* a_nlp, + float* divergent_filter_fraction); + +// Sets local configuration modes. 
+void WebRtcAec_SetConfigCore(AecCore* self,
+                             int nlp_mode,
+                             int metrics_mode,
+                             int delay_logging);
+
+// Non-zero enables, zero disables.
+void WebRtcAec_enable_delay_agnostic(AecCore* self, int enable);
+
+// Returns non-zero if delay agnostic (i.e., signal based delay estimation) is
+// enabled and zero if disabled.
+int WebRtcAec_delay_agnostic_enabled(AecCore* self);
+
+// Turns on/off the refined adaptive filter feature.
+void WebRtcAec_enable_refined_adaptive_filter(AecCore* self, bool enable);
+
+// Returns whether the refined adaptive filter is enabled.
+bool WebRtcAec_refined_adaptive_filter_enabled(const AecCore* self);
+
+// Enables or disables extended filter mode. Non-zero enables, zero disables.
+void WebRtcAec_enable_extended_filter(AecCore* self, int enable);
+
+// Returns non-zero if extended filter mode is enabled and zero if disabled.
+int WebRtcAec_extended_filter_enabled(AecCore* self);
+
+// Returns the current |system_delay|, i.e., the buffered difference between
+// far-end and near-end.
+int WebRtcAec_system_delay(AecCore* self);
+
+// Sets the |system_delay| to |value|. Note that if the value is changed
+// improperly, there can be a performance regression. So it should be used with
+// care.
+void WebRtcAec_SetSystemDelay(AecCore* self, int delay);
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_neon.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_neon.cc
new file mode 100644
index 000000000..b7f332c06
--- /dev/null
+++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_neon.cc
@@ -0,0 +1,741 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree.
An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The core AEC algorithm, neon version of speed-critical functions.
+ *
+ * Based on aec_core_sse2.c.
+ */
+
+#ifdef WEBRTC_ARCH_ARM_FAMILY
+
+#include <arm_neon.h>
+#include <math.h>
+#include <string.h>  // memset
+
+extern "C" {
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+}
+#include "webrtc/modules/audio_processing/aec/aec_common.h"
+#include "webrtc/modules/audio_processing/aec/aec_core_optimized_methods.h"
+#include "webrtc/modules/audio_processing/utility/ooura_fft.h"
+
+namespace webrtc {
+
+enum { kShiftExponentIntoTopMantissa = 8 };
+enum { kFloatExponentShift = 23 };
+
+__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
+  return aRe * bRe - aIm * bIm;
+}
+
+__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
+  return aRe * bIm + aIm * bRe;
+}
+
+static void FilterFarNEON(int num_partitions,
+                          int x_fft_buf_block_pos,
+                          float x_fft_buf[2]
+                                         [kExtendedNumPartitions * PART_LEN1],
+                          float h_fft_buf[2]
+                                         [kExtendedNumPartitions * PART_LEN1],
+                          float y_fft[2][PART_LEN1]) {
+  int i;
+  for (i = 0; i < num_partitions; i++) {
+    int j;
+    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
+    int pos = i * PART_LEN1;
+    // Check for wrap
+    if (i + x_fft_buf_block_pos >= num_partitions) {
+      xPos -= num_partitions * PART_LEN1;
+    }
+
+    // vectorized code (four at once)
+    for (j = 0; j + 3 < PART_LEN1; j += 4) {
+      const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]);
+      const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]);
+      const float32x4_t h_fft_buf_re = vld1q_f32(&h_fft_buf[0][pos + j]);
+      const float32x4_t h_fft_buf_im = vld1q_f32(&h_fft_buf[1][pos + j]);
+      const float32x4_t y_fft_re = vld1q_f32(&y_fft[0][j]);
+      const float32x4_t y_fft_im = vld1q_f32(&y_fft[1][j]);
+      const float32x4_t a =
vmulq_f32(x_fft_buf_re, h_fft_buf_re); + const float32x4_t e = vmlsq_f32(a, x_fft_buf_im, h_fft_buf_im); + const float32x4_t c = vmulq_f32(x_fft_buf_re, h_fft_buf_im); + const float32x4_t f = vmlaq_f32(c, x_fft_buf_im, h_fft_buf_re); + const float32x4_t g = vaddq_f32(y_fft_re, e); + const float32x4_t h = vaddq_f32(y_fft_im, f); + vst1q_f32(&y_fft[0][j], g); + vst1q_f32(&y_fft[1][j], h); + } + // scalar code for the remaining items. + for (; j < PART_LEN1; j++) { + y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j], + h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]); + y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j], + h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]); + } + } +} + +// ARM64's arm_neon.h has already defined vdivq_f32 vsqrtq_f32. +#if !defined(WEBRTC_ARCH_ARM64) +static float32x4_t vdivq_f32(float32x4_t a, float32x4_t b) { + int i; + float32x4_t x = vrecpeq_f32(b); + // from arm documentation + // The Newton-Raphson iteration: + // x[n+1] = x[n] * (2 - d * x[n]) + // converges to (1/d) if x0 is the result of VRECPE applied to d. + // + // Note: The precision did not improve after 2 iterations. + for (i = 0; i < 2; i++) { + x = vmulq_f32(vrecpsq_f32(b, x), x); + } + // a/b = a*(1/b) + return vmulq_f32(a, x); +} + +static float32x4_t vsqrtq_f32(float32x4_t s) { + int i; + float32x4_t x = vrsqrteq_f32(s); + + // Code to handle sqrt(0). + // If the input to sqrtf() is zero, a zero will be returned. + // If the input to vrsqrteq_f32() is zero, positive infinity is returned. 
+ const uint32x4_t vec_p_inf = vdupq_n_u32(0x7F800000); + // check for divide by zero + const uint32x4_t div_by_zero = vceqq_u32(vec_p_inf, vreinterpretq_u32_f32(x)); + // zero out the positive infinity results + x = vreinterpretq_f32_u32( + vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(x))); + // from arm documentation + // The Newton-Raphson iteration: + // x[n+1] = x[n] * (3 - d * (x[n] * x[n])) / 2) + // converges to (1/√d) if x0 is the result of VRSQRTE applied to d. + // + // Note: The precision did not improve after 2 iterations. + for (i = 0; i < 2; i++) { + x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, x), s), x); + } + // sqrt(s) = s * 1/sqrt(s) + return vmulq_f32(s, x); +} +#endif // WEBRTC_ARCH_ARM64 + +static void ScaleErrorSignalNEON(float mu, + float error_threshold, + float x_pow[PART_LEN1], + float ef[2][PART_LEN1]) { + const float32x4_t k1e_10f = vdupq_n_f32(1e-10f); + const float32x4_t kMu = vmovq_n_f32(mu); + const float32x4_t kThresh = vmovq_n_f32(error_threshold); + int i; + // vectorized code (four at once) + for (i = 0; i + 3 < PART_LEN1; i += 4) { + const float32x4_t x_pow_local = vld1q_f32(&x_pow[i]); + const float32x4_t ef_re_base = vld1q_f32(&ef[0][i]); + const float32x4_t ef_im_base = vld1q_f32(&ef[1][i]); + const float32x4_t xPowPlus = vaddq_f32(x_pow_local, k1e_10f); + float32x4_t ef_re = vdivq_f32(ef_re_base, xPowPlus); + float32x4_t ef_im = vdivq_f32(ef_im_base, xPowPlus); + const float32x4_t ef_re2 = vmulq_f32(ef_re, ef_re); + const float32x4_t ef_sum2 = vmlaq_f32(ef_re2, ef_im, ef_im); + const float32x4_t absEf = vsqrtq_f32(ef_sum2); + const uint32x4_t bigger = vcgtq_f32(absEf, kThresh); + const float32x4_t absEfPlus = vaddq_f32(absEf, k1e_10f); + const float32x4_t absEfInv = vdivq_f32(kThresh, absEfPlus); + uint32x4_t ef_re_if = vreinterpretq_u32_f32(vmulq_f32(ef_re, absEfInv)); + uint32x4_t ef_im_if = vreinterpretq_u32_f32(vmulq_f32(ef_im, absEfInv)); + uint32x4_t ef_re_u32 = + vandq_u32(vmvnq_u32(bigger), 
vreinterpretq_u32_f32(ef_re)); + uint32x4_t ef_im_u32 = + vandq_u32(vmvnq_u32(bigger), vreinterpretq_u32_f32(ef_im)); + ef_re_if = vandq_u32(bigger, ef_re_if); + ef_im_if = vandq_u32(bigger, ef_im_if); + ef_re_u32 = vorrq_u32(ef_re_u32, ef_re_if); + ef_im_u32 = vorrq_u32(ef_im_u32, ef_im_if); + ef_re = vmulq_f32(vreinterpretq_f32_u32(ef_re_u32), kMu); + ef_im = vmulq_f32(vreinterpretq_f32_u32(ef_im_u32), kMu); + vst1q_f32(&ef[0][i], ef_re); + vst1q_f32(&ef[1][i], ef_im); + } + // scalar code for the remaining items. + for (; i < PART_LEN1; i++) { + float abs_ef; + ef[0][i] /= (x_pow[i] + 1e-10f); + ef[1][i] /= (x_pow[i] + 1e-10f); + abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]); + + if (abs_ef > error_threshold) { + abs_ef = error_threshold / (abs_ef + 1e-10f); + ef[0][i] *= abs_ef; + ef[1][i] *= abs_ef; + } + + // Stepsize factor + ef[0][i] *= mu; + ef[1][i] *= mu; + } +} + +static void FilterAdaptationNEON( + const OouraFft& ooura_fft, + int num_partitions, + int x_fft_buf_block_pos, + float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], + float e_fft[2][PART_LEN1], + float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) { + float fft[PART_LEN2]; + int i; + for (i = 0; i < num_partitions; i++) { + int xPos = (i + x_fft_buf_block_pos) * PART_LEN1; + int pos = i * PART_LEN1; + int j; + // Check for wrap + if (i + x_fft_buf_block_pos >= num_partitions) { + xPos -= num_partitions * PART_LEN1; + } + + // Process the whole array... + for (j = 0; j < PART_LEN; j += 4) { + // Load x_fft_buf and e_fft. + const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]); + const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]); + const float32x4_t e_fft_re = vld1q_f32(&e_fft[0][j]); + const float32x4_t e_fft_im = vld1q_f32(&e_fft[1][j]); + // Calculate the product of conjugate(x_fft_buf) by e_fft. 
+ // re(conjugate(a) * b) = aRe * bRe + aIm * bIm + // im(conjugate(a) * b)= aRe * bIm - aIm * bRe + const float32x4_t a = vmulq_f32(x_fft_buf_re, e_fft_re); + const float32x4_t e = vmlaq_f32(a, x_fft_buf_im, e_fft_im); + const float32x4_t c = vmulq_f32(x_fft_buf_re, e_fft_im); + const float32x4_t f = vmlsq_f32(c, x_fft_buf_im, e_fft_re); + // Interleave real and imaginary parts. + const float32x4x2_t g_n_h = vzipq_f32(e, f); + // Store + vst1q_f32(&fft[2 * j + 0], g_n_h.val[0]); + vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]); + } + // ... and fixup the first imaginary entry. + fft[1] = + MulRe(x_fft_buf[0][xPos + PART_LEN], -x_fft_buf[1][xPos + PART_LEN], + e_fft[0][PART_LEN], e_fft[1][PART_LEN]); + + ooura_fft.InverseFft(fft); + memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); + + // fft scaling + { + const float scale = 2.0f / PART_LEN2; + const float32x4_t scale_ps = vmovq_n_f32(scale); + for (j = 0; j < PART_LEN; j += 4) { + const float32x4_t fft_ps = vld1q_f32(&fft[j]); + const float32x4_t fft_scale = vmulq_f32(fft_ps, scale_ps); + vst1q_f32(&fft[j], fft_scale); + } + } + ooura_fft.Fft(fft); + + { + const float wt1 = h_fft_buf[1][pos]; + h_fft_buf[0][pos + PART_LEN] += fft[1]; + for (j = 0; j < PART_LEN; j += 4) { + float32x4_t wtBuf_re = vld1q_f32(&h_fft_buf[0][pos + j]); + float32x4_t wtBuf_im = vld1q_f32(&h_fft_buf[1][pos + j]); + const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]); + const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]); + const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4); + wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]); + wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]); + + vst1q_f32(&h_fft_buf[0][pos + j], wtBuf_re); + vst1q_f32(&h_fft_buf[1][pos + j], wtBuf_im); + } + h_fft_buf[1][pos] = wt1; + } + } +} + +static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) { + // a^b = exp2(b * log2(a)) + // exp2(x) and log2(x) are calculated using polynomial approximations. 
+ float32x4_t log2_a, b_log2_a, a_exp_b; + + // Calculate log2(x), x = a. + { + // To calculate log2(x), we decompose x like this: + // x = y * 2^n + // n is an integer + // y is in the [1.0, 2.0) range + // + // log2(x) = log2(y) + n + // n can be evaluated by playing with float representation. + // log2(y) in a small range can be approximated, this code uses an order + // five polynomial approximation. The coefficients have been + // estimated with the Remez algorithm and the resulting + // polynomial has a maximum relative error of 0.00086%. + + // Compute n. + // This is done by masking the exponent, shifting it into the top bit of + // the mantissa, putting eight into the biased exponent (to shift/ + // compensate the fact that the exponent has been shifted in the top/ + // fractional part and finally getting rid of the implicit leading one + // from the mantissa by substracting it out. + const uint32x4_t vec_float_exponent_mask = vdupq_n_u32(0x7F800000); + const uint32x4_t vec_eight_biased_exponent = vdupq_n_u32(0x43800000); + const uint32x4_t vec_implicit_leading_one = vdupq_n_u32(0x43BF8000); + const uint32x4_t two_n = + vandq_u32(vreinterpretq_u32_f32(a), vec_float_exponent_mask); + const uint32x4_t n_1 = vshrq_n_u32(two_n, kShiftExponentIntoTopMantissa); + const uint32x4_t n_0 = vorrq_u32(n_1, vec_eight_biased_exponent); + const float32x4_t n = + vsubq_f32(vreinterpretq_f32_u32(n_0), + vreinterpretq_f32_u32(vec_implicit_leading_one)); + // Compute y. + const uint32x4_t vec_mantissa_mask = vdupq_n_u32(0x007FFFFF); + const uint32x4_t vec_zero_biased_exponent_is_one = vdupq_n_u32(0x3F800000); + const uint32x4_t mantissa = + vandq_u32(vreinterpretq_u32_f32(a), vec_mantissa_mask); + const float32x4_t y = vreinterpretq_f32_u32( + vorrq_u32(mantissa, vec_zero_biased_exponent_is_one)); + // Approximate log2(y) ~= (y - 1) * pol5(y). 
+ // pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0 + const float32x4_t C5 = vdupq_n_f32(-3.4436006e-2f); + const float32x4_t C4 = vdupq_n_f32(3.1821337e-1f); + const float32x4_t C3 = vdupq_n_f32(-1.2315303f); + const float32x4_t C2 = vdupq_n_f32(2.5988452f); + const float32x4_t C1 = vdupq_n_f32(-3.3241990f); + const float32x4_t C0 = vdupq_n_f32(3.1157899f); + float32x4_t pol5_y = C5; + pol5_y = vmlaq_f32(C4, y, pol5_y); + pol5_y = vmlaq_f32(C3, y, pol5_y); + pol5_y = vmlaq_f32(C2, y, pol5_y); + pol5_y = vmlaq_f32(C1, y, pol5_y); + pol5_y = vmlaq_f32(C0, y, pol5_y); + const float32x4_t y_minus_one = + vsubq_f32(y, vreinterpretq_f32_u32(vec_zero_biased_exponent_is_one)); + const float32x4_t log2_y = vmulq_f32(y_minus_one, pol5_y); + + // Combine parts. + log2_a = vaddq_f32(n, log2_y); + } + + // b * log2(a) + b_log2_a = vmulq_f32(b, log2_a); + + // Calculate exp2(x), x = b * log2(a). + { + // To calculate 2^x, we decompose x like this: + // x = n + y + // n is an integer, the value of x - 0.5 rounded down, therefore + // y is in the [0.5, 1.5) range + // + // 2^x = 2^n * 2^y + // 2^n can be evaluated by playing with float representation. + // 2^y in a small range can be approximated, this code uses an order two + // polynomial approximation. The coefficients have been estimated + // with the Remez algorithm and the resulting polynomial has a + // maximum relative error of 0.17%. + // To avoid over/underflow, we reduce the range of input to ]-127, 129]. + const float32x4_t max_input = vdupq_n_f32(129.f); + const float32x4_t min_input = vdupq_n_f32(-126.99999f); + const float32x4_t x_min = vminq_f32(b_log2_a, max_input); + const float32x4_t x_max = vmaxq_f32(x_min, min_input); + // Compute n. + const float32x4_t half = vdupq_n_f32(0.5f); + const float32x4_t x_minus_half = vsubq_f32(x_max, half); + const int32x4_t x_minus_half_floor = vcvtq_s32_f32(x_minus_half); + + // Compute 2^n. 
+ const int32x4_t float_exponent_bias = vdupq_n_s32(127); + const int32x4_t two_n_exponent = + vaddq_s32(x_minus_half_floor, float_exponent_bias); + const float32x4_t two_n = + vreinterpretq_f32_s32(vshlq_n_s32(two_n_exponent, kFloatExponentShift)); + // Compute y. + const float32x4_t y = vsubq_f32(x_max, vcvtq_f32_s32(x_minus_half_floor)); + + // Approximate 2^y ~= C2 * y^2 + C1 * y + C0. + const float32x4_t C2 = vdupq_n_f32(3.3718944e-1f); + const float32x4_t C1 = vdupq_n_f32(6.5763628e-1f); + const float32x4_t C0 = vdupq_n_f32(1.0017247f); + float32x4_t exp2_y = C2; + exp2_y = vmlaq_f32(C1, y, exp2_y); + exp2_y = vmlaq_f32(C0, y, exp2_y); + + // Combine parts. + a_exp_b = vmulq_f32(exp2_y, two_n); + } + + return a_exp_b; +} + +static void OverdriveNEON(float overdrive_scaling, + float hNlFb, + float hNl[PART_LEN1]) { + int i; + const float32x4_t vec_hNlFb = vmovq_n_f32(hNlFb); + const float32x4_t vec_one = vdupq_n_f32(1.0f); + const float32x4_t vec_overdrive_scaling = vmovq_n_f32(overdrive_scaling); + + // vectorized code (four at once) + for (i = 0; i + 3 < PART_LEN1; i += 4) { + // Weight subbands + float32x4_t vec_hNl = vld1q_f32(&hNl[i]); + const float32x4_t vec_weightCurve = vld1q_f32(&WebRtcAec_weightCurve[i]); + const uint32x4_t bigger = vcgtq_f32(vec_hNl, vec_hNlFb); + const float32x4_t vec_weightCurve_hNlFb = + vmulq_f32(vec_weightCurve, vec_hNlFb); + const float32x4_t vec_one_weightCurve = vsubq_f32(vec_one, vec_weightCurve); + const float32x4_t vec_one_weightCurve_hNl = + vmulq_f32(vec_one_weightCurve, vec_hNl); + const uint32x4_t vec_if0 = + vandq_u32(vmvnq_u32(bigger), vreinterpretq_u32_f32(vec_hNl)); + const float32x4_t vec_one_weightCurve_add = + vaddq_f32(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl); + const uint32x4_t vec_if1 = + vandq_u32(bigger, vreinterpretq_u32_f32(vec_one_weightCurve_add)); + + vec_hNl = vreinterpretq_f32_u32(vorrq_u32(vec_if0, vec_if1)); + + const float32x4_t vec_overDriveCurve = + 
vld1q_f32(&WebRtcAec_overDriveCurve[i]); + const float32x4_t vec_overDriveSm_overDriveCurve = + vmulq_f32(vec_overdrive_scaling, vec_overDriveCurve); + vec_hNl = vpowq_f32(vec_hNl, vec_overDriveSm_overDriveCurve); + vst1q_f32(&hNl[i], vec_hNl); + } + + // scalar code for the remaining items. + for (; i < PART_LEN1; i++) { + // Weight subbands + if (hNl[i] > hNlFb) { + hNl[i] = WebRtcAec_weightCurve[i] * hNlFb + + (1 - WebRtcAec_weightCurve[i]) * hNl[i]; + } + + hNl[i] = powf(hNl[i], overdrive_scaling * WebRtcAec_overDriveCurve[i]); + } +} + +static void SuppressNEON(const float hNl[PART_LEN1], float efw[2][PART_LEN1]) { + int i; + const float32x4_t vec_minus_one = vdupq_n_f32(-1.0f); + // vectorized code (four at once) + for (i = 0; i + 3 < PART_LEN1; i += 4) { + float32x4_t vec_hNl = vld1q_f32(&hNl[i]); + float32x4_t vec_efw_re = vld1q_f32(&efw[0][i]); + float32x4_t vec_efw_im = vld1q_f32(&efw[1][i]); + vec_efw_re = vmulq_f32(vec_efw_re, vec_hNl); + vec_efw_im = vmulq_f32(vec_efw_im, vec_hNl); + + // Ooura fft returns incorrect sign on imaginary component. It matters + // here because we are making an additive change with comfort noise. + vec_efw_im = vmulq_f32(vec_efw_im, vec_minus_one); + vst1q_f32(&efw[0][i], vec_efw_re); + vst1q_f32(&efw[1][i], vec_efw_im); + } + + // scalar code for the remaining items. + for (; i < PART_LEN1; i++) { + efw[0][i] *= hNl[i]; + efw[1][i] *= hNl[i]; + + // Ooura fft returns incorrect sign on imaginary component. It matters + // here because we are making an additive change with comfort noise. + efw[1][i] *= -1; + } +} + +static int PartitionDelayNEON( + int num_partitions, + float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) { + // Measures the energy in each filter partition and returns the partition with + // highest energy. + // TODO(bjornv): Spread computational cost by computing one partition per + // block? 
+ float wfEnMax = 0; + int i; + int delay = 0; + + for (i = 0; i < num_partitions; i++) { + int j; + int pos = i * PART_LEN1; + float wfEn = 0; + float32x4_t vec_wfEn = vdupq_n_f32(0.0f); + // vectorized code (four at once) + for (j = 0; j + 3 < PART_LEN1; j += 4) { + const float32x4_t vec_wfBuf0 = vld1q_f32(&h_fft_buf[0][pos + j]); + const float32x4_t vec_wfBuf1 = vld1q_f32(&h_fft_buf[1][pos + j]); + vec_wfEn = vmlaq_f32(vec_wfEn, vec_wfBuf0, vec_wfBuf0); + vec_wfEn = vmlaq_f32(vec_wfEn, vec_wfBuf1, vec_wfBuf1); + } + { + float32x2_t vec_total; + // A B C D + vec_total = vpadd_f32(vget_low_f32(vec_wfEn), vget_high_f32(vec_wfEn)); + // A+B C+D + vec_total = vpadd_f32(vec_total, vec_total); + // A+B+C+D A+B+C+D + wfEn = vget_lane_f32(vec_total, 0); + } + + // scalar code for the remaining items. + for (; j < PART_LEN1; j++) { + wfEn += h_fft_buf[0][pos + j] * h_fft_buf[0][pos + j] + + h_fft_buf[1][pos + j] * h_fft_buf[1][pos + j]; + } + + if (wfEn > wfEnMax) { + wfEnMax = wfEn; + delay = i; + } + } + return delay; +} + +// Updates the following smoothed Power Spectral Densities (PSD): +// - sd : near-end +// - se : residual echo +// - sx : far-end +// - sde : cross-PSD of near-end and residual echo +// - sxd : cross-PSD of near-end and far-end +// +// In addition to updating the PSDs, also the filter diverge state is determined +// upon actions are taken. +static void UpdateCoherenceSpectraNEON(int mult, + bool extended_filter_enabled, + float efw[2][PART_LEN1], + float dfw[2][PART_LEN1], + float xfw[2][PART_LEN1], + CoherenceState* coherence_state, + short* filter_divergence_state, + int* extreme_filter_divergence) { + // Power estimate smoothing coefficients. + const float* ptrGCoh = + extended_filter_enabled + ? 
WebRtcAec_kExtendedSmoothingCoefficients[mult - 1] + : WebRtcAec_kNormalSmoothingCoefficients[mult - 1]; + int i; + float sdSum = 0, seSum = 0; + const float32x4_t vec_15 = vdupq_n_f32(WebRtcAec_kMinFarendPSD); + float32x4_t vec_sdSum = vdupq_n_f32(0.0f); + float32x4_t vec_seSum = vdupq_n_f32(0.0f); + + for (i = 0; i + 3 < PART_LEN1; i += 4) { + const float32x4_t vec_dfw0 = vld1q_f32(&dfw[0][i]); + const float32x4_t vec_dfw1 = vld1q_f32(&dfw[1][i]); + const float32x4_t vec_efw0 = vld1q_f32(&efw[0][i]); + const float32x4_t vec_efw1 = vld1q_f32(&efw[1][i]); + const float32x4_t vec_xfw0 = vld1q_f32(&xfw[0][i]); + const float32x4_t vec_xfw1 = vld1q_f32(&xfw[1][i]); + float32x4_t vec_sd = + vmulq_n_f32(vld1q_f32(&coherence_state->sd[i]), ptrGCoh[0]); + float32x4_t vec_se = + vmulq_n_f32(vld1q_f32(&coherence_state->se[i]), ptrGCoh[0]); + float32x4_t vec_sx = + vmulq_n_f32(vld1q_f32(&coherence_state->sx[i]), ptrGCoh[0]); + float32x4_t vec_dfw_sumsq = vmulq_f32(vec_dfw0, vec_dfw0); + float32x4_t vec_efw_sumsq = vmulq_f32(vec_efw0, vec_efw0); + float32x4_t vec_xfw_sumsq = vmulq_f32(vec_xfw0, vec_xfw0); + + vec_dfw_sumsq = vmlaq_f32(vec_dfw_sumsq, vec_dfw1, vec_dfw1); + vec_efw_sumsq = vmlaq_f32(vec_efw_sumsq, vec_efw1, vec_efw1); + vec_xfw_sumsq = vmlaq_f32(vec_xfw_sumsq, vec_xfw1, vec_xfw1); + vec_xfw_sumsq = vmaxq_f32(vec_xfw_sumsq, vec_15); + vec_sd = vmlaq_n_f32(vec_sd, vec_dfw_sumsq, ptrGCoh[1]); + vec_se = vmlaq_n_f32(vec_se, vec_efw_sumsq, ptrGCoh[1]); + vec_sx = vmlaq_n_f32(vec_sx, vec_xfw_sumsq, ptrGCoh[1]); + + vst1q_f32(&coherence_state->sd[i], vec_sd); + vst1q_f32(&coherence_state->se[i], vec_se); + vst1q_f32(&coherence_state->sx[i], vec_sx); + + { + float32x4x2_t vec_sde = vld2q_f32(&coherence_state->sde[i][0]); + float32x4_t vec_dfwefw0011 = vmulq_f32(vec_dfw0, vec_efw0); + float32x4_t vec_dfwefw0110 = vmulq_f32(vec_dfw0, vec_efw1); + vec_sde.val[0] = vmulq_n_f32(vec_sde.val[0], ptrGCoh[0]); + vec_sde.val[1] = vmulq_n_f32(vec_sde.val[1], ptrGCoh[0]); + 
vec_dfwefw0011 = vmlaq_f32(vec_dfwefw0011, vec_dfw1, vec_efw1); + vec_dfwefw0110 = vmlsq_f32(vec_dfwefw0110, vec_dfw1, vec_efw0); + vec_sde.val[0] = vmlaq_n_f32(vec_sde.val[0], vec_dfwefw0011, ptrGCoh[1]); + vec_sde.val[1] = vmlaq_n_f32(vec_sde.val[1], vec_dfwefw0110, ptrGCoh[1]); + vst2q_f32(&coherence_state->sde[i][0], vec_sde); + } + + { + float32x4x2_t vec_sxd = vld2q_f32(&coherence_state->sxd[i][0]); + float32x4_t vec_dfwxfw0011 = vmulq_f32(vec_dfw0, vec_xfw0); + float32x4_t vec_dfwxfw0110 = vmulq_f32(vec_dfw0, vec_xfw1); + vec_sxd.val[0] = vmulq_n_f32(vec_sxd.val[0], ptrGCoh[0]); + vec_sxd.val[1] = vmulq_n_f32(vec_sxd.val[1], ptrGCoh[0]); + vec_dfwxfw0011 = vmlaq_f32(vec_dfwxfw0011, vec_dfw1, vec_xfw1); + vec_dfwxfw0110 = vmlsq_f32(vec_dfwxfw0110, vec_dfw1, vec_xfw0); + vec_sxd.val[0] = vmlaq_n_f32(vec_sxd.val[0], vec_dfwxfw0011, ptrGCoh[1]); + vec_sxd.val[1] = vmlaq_n_f32(vec_sxd.val[1], vec_dfwxfw0110, ptrGCoh[1]); + vst2q_f32(&coherence_state->sxd[i][0], vec_sxd); + } + + vec_sdSum = vaddq_f32(vec_sdSum, vec_sd); + vec_seSum = vaddq_f32(vec_seSum, vec_se); + } + { + float32x2_t vec_sdSum_total; + float32x2_t vec_seSum_total; + // A B C D + vec_sdSum_total = + vpadd_f32(vget_low_f32(vec_sdSum), vget_high_f32(vec_sdSum)); + vec_seSum_total = + vpadd_f32(vget_low_f32(vec_seSum), vget_high_f32(vec_seSum)); + // A+B C+D + vec_sdSum_total = vpadd_f32(vec_sdSum_total, vec_sdSum_total); + vec_seSum_total = vpadd_f32(vec_seSum_total, vec_seSum_total); + // A+B+C+D A+B+C+D + sdSum = vget_lane_f32(vec_sdSum_total, 0); + seSum = vget_lane_f32(vec_seSum_total, 0); + } + + // scalar code for the remaining items. 
+ for (; i < PART_LEN1; i++) { + coherence_state->sd[i] = + ptrGCoh[0] * coherence_state->sd[i] + + ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]); + coherence_state->se[i] = + ptrGCoh[0] * coherence_state->se[i] + + ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]); + // We threshold here to protect against the ill-effects of a zero farend. + // The threshold is not arbitrarily chosen, but balances protection and + // adverse interaction with the algorithm's tuning. + // TODO(bjornv): investigate further why this is so sensitive. + coherence_state->sx[i] = + ptrGCoh[0] * coherence_state->sx[i] + + ptrGCoh[1] * + WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], + WebRtcAec_kMinFarendPSD); + + coherence_state->sde[i][0] = + ptrGCoh[0] * coherence_state->sde[i][0] + + ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]); + coherence_state->sde[i][1] = + ptrGCoh[0] * coherence_state->sde[i][1] + + ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]); + + coherence_state->sxd[i][0] = + ptrGCoh[0] * coherence_state->sxd[i][0] + + ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]); + coherence_state->sxd[i][1] = + ptrGCoh[0] * coherence_state->sxd[i][1] + + ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]); + + sdSum += coherence_state->sd[i]; + seSum += coherence_state->se[i]; + } + + // Divergent filter safeguard update. + *filter_divergence_state = + (*filter_divergence_state ? 1.05f : 1.0f) * seSum > sdSum; + + // Signal extreme filter divergence if the error is significantly larger + // than the nearend (13 dB). + *extreme_filter_divergence = (seSum > (19.95f * sdSum)); +} + +// Window time domain data to be used by the fft. 
+static void WindowDataNEON(float* x_windowed, const float* x) { + int i; + for (i = 0; i < PART_LEN; i += 4) { + const float32x4_t vec_Buf1 = vld1q_f32(&x[i]); + const float32x4_t vec_Buf2 = vld1q_f32(&x[PART_LEN + i]); + const float32x4_t vec_sqrtHanning = vld1q_f32(&WebRtcAec_sqrtHanning[i]); + // A B C D + float32x4_t vec_sqrtHanning_rev = + vld1q_f32(&WebRtcAec_sqrtHanning[PART_LEN - i - 3]); + // B A D C + vec_sqrtHanning_rev = vrev64q_f32(vec_sqrtHanning_rev); + // D C B A + vec_sqrtHanning_rev = vcombine_f32(vget_high_f32(vec_sqrtHanning_rev), + vget_low_f32(vec_sqrtHanning_rev)); + vst1q_f32(&x_windowed[i], vmulq_f32(vec_Buf1, vec_sqrtHanning)); + vst1q_f32(&x_windowed[PART_LEN + i], + vmulq_f32(vec_Buf2, vec_sqrtHanning_rev)); + } +} + +// Puts fft output data into a complex valued array. +static void StoreAsComplexNEON(const float* data, + float data_complex[2][PART_LEN1]) { + int i; + for (i = 0; i < PART_LEN; i += 4) { + const float32x4x2_t vec_data = vld2q_f32(&data[2 * i]); + vst1q_f32(&data_complex[0][i], vec_data.val[0]); + vst1q_f32(&data_complex[1][i], vec_data.val[1]); + } + // fix beginning/end values + data_complex[1][0] = 0; + data_complex[1][PART_LEN] = 0; + data_complex[0][0] = data[0]; + data_complex[0][PART_LEN] = data[1]; +} + +static void ComputeCoherenceNEON(const CoherenceState* coherence_state, + float* cohde, + float* cohxd) { + int i; + + { + const float32x4_t vec_1eminus10 = vdupq_n_f32(1e-10f); + + // Subband coherence + for (i = 0; i + 3 < PART_LEN1; i += 4) { + const float32x4_t vec_sd = vld1q_f32(&coherence_state->sd[i]); + const float32x4_t vec_se = vld1q_f32(&coherence_state->se[i]); + const float32x4_t vec_sx = vld1q_f32(&coherence_state->sx[i]); + const float32x4_t vec_sdse = vmlaq_f32(vec_1eminus10, vec_sd, vec_se); + const float32x4_t vec_sdsx = vmlaq_f32(vec_1eminus10, vec_sd, vec_sx); + float32x4x2_t vec_sde = vld2q_f32(&coherence_state->sde[i][0]); + float32x4x2_t vec_sxd = vld2q_f32(&coherence_state->sxd[i][0]); + 
float32x4_t vec_cohde = vmulq_f32(vec_sde.val[0], vec_sde.val[0]); + float32x4_t vec_cohxd = vmulq_f32(vec_sxd.val[0], vec_sxd.val[0]); + vec_cohde = vmlaq_f32(vec_cohde, vec_sde.val[1], vec_sde.val[1]); + vec_cohde = vdivq_f32(vec_cohde, vec_sdse); + vec_cohxd = vmlaq_f32(vec_cohxd, vec_sxd.val[1], vec_sxd.val[1]); + vec_cohxd = vdivq_f32(vec_cohxd, vec_sdsx); + + vst1q_f32(&cohde[i], vec_cohde); + vst1q_f32(&cohxd[i], vec_cohxd); + } + } + // scalar code for the remaining items. + for (; i < PART_LEN1; i++) { + cohde[i] = (coherence_state->sde[i][0] * coherence_state->sde[i][0] + + coherence_state->sde[i][1] * coherence_state->sde[i][1]) / + (coherence_state->sd[i] * coherence_state->se[i] + 1e-10f); + cohxd[i] = (coherence_state->sxd[i][0] * coherence_state->sxd[i][0] + + coherence_state->sxd[i][1] * coherence_state->sxd[i][1]) / + (coherence_state->sx[i] * coherence_state->sd[i] + 1e-10f); + } +} + +void WebRtcAec_InitAec_neon(void) { + WebRtcAec_FilterFar = FilterFarNEON; + WebRtcAec_ScaleErrorSignal = ScaleErrorSignalNEON; + WebRtcAec_FilterAdaptation = FilterAdaptationNEON; + WebRtcAec_Overdrive = OverdriveNEON; + WebRtcAec_Suppress = SuppressNEON; + WebRtcAec_ComputeCoherence = ComputeCoherenceNEON; + WebRtcAec_UpdateCoherenceSpectra = UpdateCoherenceSpectraNEON; + WebRtcAec_StoreAsComplex = StoreAsComplexNEON; + WebRtcAec_PartitionDelay = PartitionDelayNEON; + WebRtcAec_WindowData = WindowDataNEON; +} +} // namespace webrtc + +#endif diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_optimized_methods.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_optimized_methods.h new file mode 100644 index 000000000..5e873c8c8 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_optimized_methods.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_OPTIMIZED_METHODS_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_OPTIMIZED_METHODS_H_ + +#include + +#include "webrtc/modules/audio_processing/aec/aec_core.h" +#include "webrtc/typedefs.h" + +namespace webrtc { + +typedef void (*WebRtcAecFilterFar)( + int num_partitions, + int x_fft_buf_block_pos, + float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], + float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1], + float y_fft[2][PART_LEN1]); +extern WebRtcAecFilterFar WebRtcAec_FilterFar; +typedef void (*WebRtcAecScaleErrorSignal)(float mu, + float error_threshold, + float x_pow[PART_LEN1], + float ef[2][PART_LEN1]); +extern WebRtcAecScaleErrorSignal WebRtcAec_ScaleErrorSignal; +typedef void (*WebRtcAecFilterAdaptation)( + const OouraFft& ooura_fft, + int num_partitions, + int x_fft_buf_block_pos, + float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], + float e_fft[2][PART_LEN1], + float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]); +extern WebRtcAecFilterAdaptation WebRtcAec_FilterAdaptation; + +typedef void (*WebRtcAecOverdrive)(float overdrive_scaling, + const float hNlFb, + float hNl[PART_LEN1]); +extern WebRtcAecOverdrive WebRtcAec_Overdrive; + +typedef void (*WebRtcAecSuppress)(const float hNl[PART_LEN1], + float efw[2][PART_LEN1]); +extern WebRtcAecSuppress WebRtcAec_Suppress; + +typedef void (*WebRtcAecComputeCoherence)(const CoherenceState* coherence_state, + float* cohde, + float* cohxd); +extern WebRtcAecComputeCoherence WebRtcAec_ComputeCoherence; + +typedef void (*WebRtcAecUpdateCoherenceSpectra)(int mult, + bool extended_filter_enabled, + float 
efw[2][PART_LEN1], + float dfw[2][PART_LEN1], + float xfw[2][PART_LEN1], + CoherenceState* coherence_state, + short* filter_divergence_state, + int* extreme_filter_divergence); +extern WebRtcAecUpdateCoherenceSpectra WebRtcAec_UpdateCoherenceSpectra; + +typedef int (*WebRtcAecPartitionDelay)( + int num_partitions, + float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]); +extern WebRtcAecPartitionDelay WebRtcAec_PartitionDelay; + +typedef void (*WebRtcAecStoreAsComplex)(const float* data, + float data_complex[2][PART_LEN1]); +extern WebRtcAecStoreAsComplex WebRtcAec_StoreAsComplex; + +typedef void (*WebRtcAecWindowData)(float* x_windowed, const float* x); +extern WebRtcAecWindowData WebRtcAec_WindowData; + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_OPTIMIZED_METHODS_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_sse2.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_sse2.cc new file mode 100644 index 000000000..3863075ae --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_core_sse2.cc @@ -0,0 +1,754 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* + * The core AEC algorithm, SSE2 version of speed-critical functions. 
+ */ + +extern "C" { +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +} +#include "webrtc/modules/audio_processing/aec/aec_common.h" +#include "webrtc/modules/audio_processing/aec/aec_core_optimized_methods.h" +#include "webrtc/modules/audio_processing/utility/ooura_fft.h" + +#if defined(WEBRTC_ARCH_X86_FAMILY) + +#include +#include +#include // memset + +namespace webrtc { + +__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) { + return aRe * bRe - aIm * bIm; +} + +__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) { + return aRe * bIm + aIm * bRe; +} + +static void FilterFarSSE2(int num_partitions, + int x_fft_buf_block_pos, + float x_fft_buf[2] + [kExtendedNumPartitions * PART_LEN1], + float h_fft_buf[2] + [kExtendedNumPartitions * PART_LEN1], + float y_fft[2][PART_LEN1]) { + int i; + for (i = 0; i < num_partitions; i++) { + int j; + int xPos = (i + x_fft_buf_block_pos) * PART_LEN1; + int pos = i * PART_LEN1; + // Check for wrap + if (i + x_fft_buf_block_pos >= num_partitions) { + xPos -= num_partitions * (PART_LEN1); + } + + // vectorized code (four at once) + for (j = 0; j + 3 < PART_LEN1; j += 4) { + const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]); + const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]); + const __m128 h_fft_buf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]); + const __m128 h_fft_buf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]); + const __m128 y_fft_re = _mm_loadu_ps(&y_fft[0][j]); + const __m128 y_fft_im = _mm_loadu_ps(&y_fft[1][j]); + const __m128 a = _mm_mul_ps(x_fft_buf_re, h_fft_buf_re); + const __m128 b = _mm_mul_ps(x_fft_buf_im, h_fft_buf_im); + const __m128 c = _mm_mul_ps(x_fft_buf_re, h_fft_buf_im); + const __m128 d = _mm_mul_ps(x_fft_buf_im, h_fft_buf_re); + const __m128 e = _mm_sub_ps(a, b); + const __m128 f = _mm_add_ps(c, d); + const __m128 g = _mm_add_ps(y_fft_re, e); + const __m128 h = _mm_add_ps(y_fft_im, f); + 
_mm_storeu_ps(&y_fft[0][j], g); + _mm_storeu_ps(&y_fft[1][j], h); + } + // scalar code for the remaining items. + for (; j < PART_LEN1; j++) { + y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j], + h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]); + y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j], + h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]); + } + } +} + +static void ScaleErrorSignalSSE2(float mu, + float error_threshold, + float x_pow[PART_LEN1], + float ef[2][PART_LEN1]) { + const __m128 k1e_10f = _mm_set1_ps(1e-10f); + const __m128 kMu = _mm_set1_ps(mu); + const __m128 kThresh = _mm_set1_ps(error_threshold); + + int i; + // vectorized code (four at once) + for (i = 0; i + 3 < PART_LEN1; i += 4) { + const __m128 x_pow_local = _mm_loadu_ps(&x_pow[i]); + const __m128 ef_re_base = _mm_loadu_ps(&ef[0][i]); + const __m128 ef_im_base = _mm_loadu_ps(&ef[1][i]); + + const __m128 xPowPlus = _mm_add_ps(x_pow_local, k1e_10f); + __m128 ef_re = _mm_div_ps(ef_re_base, xPowPlus); + __m128 ef_im = _mm_div_ps(ef_im_base, xPowPlus); + const __m128 ef_re2 = _mm_mul_ps(ef_re, ef_re); + const __m128 ef_im2 = _mm_mul_ps(ef_im, ef_im); + const __m128 ef_sum2 = _mm_add_ps(ef_re2, ef_im2); + const __m128 absEf = _mm_sqrt_ps(ef_sum2); + const __m128 bigger = _mm_cmpgt_ps(absEf, kThresh); + __m128 absEfPlus = _mm_add_ps(absEf, k1e_10f); + const __m128 absEfInv = _mm_div_ps(kThresh, absEfPlus); + __m128 ef_re_if = _mm_mul_ps(ef_re, absEfInv); + __m128 ef_im_if = _mm_mul_ps(ef_im, absEfInv); + ef_re_if = _mm_and_ps(bigger, ef_re_if); + ef_im_if = _mm_and_ps(bigger, ef_im_if); + ef_re = _mm_andnot_ps(bigger, ef_re); + ef_im = _mm_andnot_ps(bigger, ef_im); + ef_re = _mm_or_ps(ef_re, ef_re_if); + ef_im = _mm_or_ps(ef_im, ef_im_if); + ef_re = _mm_mul_ps(ef_re, kMu); + ef_im = _mm_mul_ps(ef_im, kMu); + + _mm_storeu_ps(&ef[0][i], ef_re); + _mm_storeu_ps(&ef[1][i], ef_im); + } + // scalar code for the remaining items. 
+ { + for (; i < (PART_LEN1); i++) { + float abs_ef; + ef[0][i] /= (x_pow[i] + 1e-10f); + ef[1][i] /= (x_pow[i] + 1e-10f); + abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]); + + if (abs_ef > error_threshold) { + abs_ef = error_threshold / (abs_ef + 1e-10f); + ef[0][i] *= abs_ef; + ef[1][i] *= abs_ef; + } + + // Stepsize factor + ef[0][i] *= mu; + ef[1][i] *= mu; + } + } +} + +static void FilterAdaptationSSE2( + const OouraFft& ooura_fft, + int num_partitions, + int x_fft_buf_block_pos, + float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1], + float e_fft[2][PART_LEN1], + float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) { + float fft[PART_LEN2]; + int i, j; + for (i = 0; i < num_partitions; i++) { + int xPos = (i + x_fft_buf_block_pos) * (PART_LEN1); + int pos = i * PART_LEN1; + // Check for wrap + if (i + x_fft_buf_block_pos >= num_partitions) { + xPos -= num_partitions * PART_LEN1; + } + + // Process the whole array... + for (j = 0; j < PART_LEN; j += 4) { + // Load x_fft_buf and e_fft. + const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]); + const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]); + const __m128 e_fft_re = _mm_loadu_ps(&e_fft[0][j]); + const __m128 e_fft_im = _mm_loadu_ps(&e_fft[1][j]); + // Calculate the product of conjugate(x_fft_buf) by e_fft. + // re(conjugate(a) * b) = aRe * bRe + aIm * bIm + // im(conjugate(a) * b)= aRe * bIm - aIm * bRe + const __m128 a = _mm_mul_ps(x_fft_buf_re, e_fft_re); + const __m128 b = _mm_mul_ps(x_fft_buf_im, e_fft_im); + const __m128 c = _mm_mul_ps(x_fft_buf_re, e_fft_im); + const __m128 d = _mm_mul_ps(x_fft_buf_im, e_fft_re); + const __m128 e = _mm_add_ps(a, b); + const __m128 f = _mm_sub_ps(c, d); + // Interleave real and imaginary parts. + const __m128 g = _mm_unpacklo_ps(e, f); + const __m128 h = _mm_unpackhi_ps(e, f); + // Store + _mm_storeu_ps(&fft[2 * j + 0], g); + _mm_storeu_ps(&fft[2 * j + 4], h); + } + // ... and fixup the first imaginary entry. 
+ fft[1] = + MulRe(x_fft_buf[0][xPos + PART_LEN], -x_fft_buf[1][xPos + PART_LEN], + e_fft[0][PART_LEN], e_fft[1][PART_LEN]); + + ooura_fft.InverseFft(fft); + memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN); + + // fft scaling + { + float scale = 2.0f / PART_LEN2; + const __m128 scale_ps = _mm_load_ps1(&scale); + for (j = 0; j < PART_LEN; j += 4) { + const __m128 fft_ps = _mm_loadu_ps(&fft[j]); + const __m128 fft_scale = _mm_mul_ps(fft_ps, scale_ps); + _mm_storeu_ps(&fft[j], fft_scale); + } + } + ooura_fft.Fft(fft); + + { + float wt1 = h_fft_buf[1][pos]; + h_fft_buf[0][pos + PART_LEN] += fft[1]; + for (j = 0; j < PART_LEN; j += 4) { + __m128 wtBuf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]); + __m128 wtBuf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]); + const __m128 fft0 = _mm_loadu_ps(&fft[2 * j + 0]); + const __m128 fft4 = _mm_loadu_ps(&fft[2 * j + 4]); + const __m128 fft_re = + _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(2, 0, 2, 0)); + const __m128 fft_im = + _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(3, 1, 3, 1)); + wtBuf_re = _mm_add_ps(wtBuf_re, fft_re); + wtBuf_im = _mm_add_ps(wtBuf_im, fft_im); + _mm_storeu_ps(&h_fft_buf[0][pos + j], wtBuf_re); + _mm_storeu_ps(&h_fft_buf[1][pos + j], wtBuf_im); + } + h_fft_buf[1][pos] = wt1; + } + } +} + +static __m128 mm_pow_ps(__m128 a, __m128 b) { + // a^b = exp2(b * log2(a)) + // exp2(x) and log2(x) are calculated using polynomial approximations. + __m128 log2_a, b_log2_a, a_exp_b; + + // Calculate log2(x), x = a. + { + // To calculate log2(x), we decompose x like this: + // x = y * 2^n + // n is an integer + // y is in the [1.0, 2.0) range + // + // log2(x) = log2(y) + n + // n can be evaluated by playing with float representation. + // log2(y) in a small range can be approximated, this code uses an order + // five polynomial approximation. The coefficients have been + // estimated with the Remez algorithm and the resulting + // polynomial has a maximum relative error of 0.00086%. + + // Compute n. 
+ // This is done by masking the exponent, shifting it into the top bit of + // the mantissa, putting eight into the biased exponent (to shift/ + // compensate the fact that the exponent has been shifted in the top/ + // fractional part and finally getting rid of the implicit leading one + // from the mantissa by substracting it out. + static const ALIGN16_BEG int float_exponent_mask[4] ALIGN16_END = { + 0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000}; + static const ALIGN16_BEG int eight_biased_exponent[4] ALIGN16_END = { + 0x43800000, 0x43800000, 0x43800000, 0x43800000}; + static const ALIGN16_BEG int implicit_leading_one[4] ALIGN16_END = { + 0x43BF8000, 0x43BF8000, 0x43BF8000, 0x43BF8000}; + static const int shift_exponent_into_top_mantissa = 8; + const __m128 two_n = + _mm_and_ps(a, *(reinterpret_cast(float_exponent_mask))); + const __m128 n_1 = _mm_castsi128_ps(_mm_srli_epi32( + _mm_castps_si128(two_n), shift_exponent_into_top_mantissa)); + const __m128 n_0 = + _mm_or_ps(n_1, *(reinterpret_cast(eight_biased_exponent))); + const __m128 n = + _mm_sub_ps(n_0, *(reinterpret_cast(implicit_leading_one))); + + // Compute y. + static const ALIGN16_BEG int mantissa_mask[4] ALIGN16_END = { + 0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF}; + static const ALIGN16_BEG int zero_biased_exponent_is_one[4] ALIGN16_END = { + 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000}; + const __m128 mantissa = + _mm_and_ps(a, *(reinterpret_cast(mantissa_mask))); + const __m128 y = + _mm_or_ps(mantissa, + *(reinterpret_cast(zero_biased_exponent_is_one))); + + // Approximate log2(y) ~= (y - 1) * pol5(y). 
+ // pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0 + static const ALIGN16_BEG float ALIGN16_END C5[4] = { + -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f}; + static const ALIGN16_BEG float ALIGN16_END C4[4] = { + 3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f}; + static const ALIGN16_BEG float ALIGN16_END C3[4] = { + -1.2315303f, -1.2315303f, -1.2315303f, -1.2315303f}; + static const ALIGN16_BEG float ALIGN16_END C2[4] = {2.5988452f, 2.5988452f, + 2.5988452f, 2.5988452f}; + static const ALIGN16_BEG float ALIGN16_END C1[4] = { + -3.3241990f, -3.3241990f, -3.3241990f, -3.3241990f}; + static const ALIGN16_BEG float ALIGN16_END C0[4] = {3.1157899f, 3.1157899f, + 3.1157899f, 3.1157899f}; + const __m128 pol5_y_0 = + _mm_mul_ps(y, *(reinterpret_cast(C5))); + const __m128 pol5_y_1 = + _mm_add_ps(pol5_y_0, *(reinterpret_cast(C4))); + const __m128 pol5_y_2 = _mm_mul_ps(pol5_y_1, y); + const __m128 pol5_y_3 = + _mm_add_ps(pol5_y_2, *(reinterpret_cast(C3))); + const __m128 pol5_y_4 = _mm_mul_ps(pol5_y_3, y); + const __m128 pol5_y_5 = + _mm_add_ps(pol5_y_4, *(reinterpret_cast(C2))); + const __m128 pol5_y_6 = _mm_mul_ps(pol5_y_5, y); + const __m128 pol5_y_7 = + _mm_add_ps(pol5_y_6, *(reinterpret_cast(C1))); + const __m128 pol5_y_8 = _mm_mul_ps(pol5_y_7, y); + const __m128 pol5_y = + _mm_add_ps(pol5_y_8, *(reinterpret_cast(C0))); + const __m128 y_minus_one = + _mm_sub_ps(y, + *(reinterpret_cast(zero_biased_exponent_is_one))); + const __m128 log2_y = _mm_mul_ps(y_minus_one, pol5_y); + + // Combine parts. + log2_a = _mm_add_ps(n, log2_y); + } + + // b * log2(a) + b_log2_a = _mm_mul_ps(b, log2_a); + + // Calculate exp2(x), x = b * log2(a). + { + // To calculate 2^x, we decompose x like this: + // x = n + y + // n is an integer, the value of x - 0.5 rounded down, therefore + // y is in the [0.5, 1.5) range + // + // 2^x = 2^n * 2^y + // 2^n can be evaluated by playing with float representation. 
+ // 2^y in a small range can be approximated, this code uses an order two + // polynomial approximation. The coefficients have been estimated + // with the Remez algorithm and the resulting polynomial has a + // maximum relative error of 0.17%. + + // To avoid over/underflow, we reduce the range of input to ]-127, 129]. + static const ALIGN16_BEG float max_input[4] ALIGN16_END = {129.f, 129.f, + 129.f, 129.f}; + static const ALIGN16_BEG float min_input[4] ALIGN16_END = { + -126.99999f, -126.99999f, -126.99999f, -126.99999f}; + const __m128 x_min = + _mm_min_ps(b_log2_a, *(reinterpret_cast(max_input))); + const __m128 x_max = + _mm_max_ps(x_min, *(reinterpret_cast(min_input))); + // Compute n. + static const ALIGN16_BEG float half[4] ALIGN16_END = {0.5f, 0.5f, 0.5f, + 0.5f}; + const __m128 x_minus_half = + _mm_sub_ps(x_max, *(reinterpret_cast(half))); + const __m128i x_minus_half_floor = _mm_cvtps_epi32(x_minus_half); + // Compute 2^n. + static const ALIGN16_BEG int float_exponent_bias[4] ALIGN16_END = { + 127, 127, 127, 127}; + static const int float_exponent_shift = 23; + const __m128i two_n_exponent = + _mm_add_epi32(x_minus_half_floor, + *(reinterpret_cast(float_exponent_bias))); + const __m128 two_n = + _mm_castsi128_ps(_mm_slli_epi32(two_n_exponent, float_exponent_shift)); + // Compute y. + const __m128 y = _mm_sub_ps(x_max, _mm_cvtepi32_ps(x_minus_half_floor)); + // Approximate 2^y ~= C2 * y^2 + C1 * y + C0. 
+ static const ALIGN16_BEG float C2[4] ALIGN16_END = { + 3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f}; + static const ALIGN16_BEG float C1[4] ALIGN16_END = { + 6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f}; + static const ALIGN16_BEG float C0[4] ALIGN16_END = {1.0017247f, 1.0017247f, + 1.0017247f, 1.0017247f}; + const __m128 exp2_y_0 = + _mm_mul_ps(y, *(reinterpret_cast(C2))); + const __m128 exp2_y_1 = + _mm_add_ps(exp2_y_0, *(reinterpret_cast(C1))); + const __m128 exp2_y_2 = _mm_mul_ps(exp2_y_1, y); + const __m128 exp2_y = + _mm_add_ps(exp2_y_2, *(reinterpret_cast(C0))); + + // Combine parts. + a_exp_b = _mm_mul_ps(exp2_y, two_n); + } + return a_exp_b; +} + +static void OverdriveSSE2(float overdrive_scaling, + float hNlFb, + float hNl[PART_LEN1]) { + int i; + const __m128 vec_hNlFb = _mm_set1_ps(hNlFb); + const __m128 vec_one = _mm_set1_ps(1.0f); + const __m128 vec_overdrive_scaling = _mm_set1_ps(overdrive_scaling); + // vectorized code (four at once) + for (i = 0; i + 3 < PART_LEN1; i += 4) { + // Weight subbands + __m128 vec_hNl = _mm_loadu_ps(&hNl[i]); + const __m128 vec_weightCurve = _mm_loadu_ps(&WebRtcAec_weightCurve[i]); + const __m128 bigger = _mm_cmpgt_ps(vec_hNl, vec_hNlFb); + const __m128 vec_weightCurve_hNlFb = _mm_mul_ps(vec_weightCurve, vec_hNlFb); + const __m128 vec_one_weightCurve = _mm_sub_ps(vec_one, vec_weightCurve); + const __m128 vec_one_weightCurve_hNl = + _mm_mul_ps(vec_one_weightCurve, vec_hNl); + const __m128 vec_if0 = _mm_andnot_ps(bigger, vec_hNl); + const __m128 vec_if1 = _mm_and_ps( + bigger, _mm_add_ps(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl)); + vec_hNl = _mm_or_ps(vec_if0, vec_if1); + + const __m128 vec_overDriveCurve = + _mm_loadu_ps(&WebRtcAec_overDriveCurve[i]); + const __m128 vec_overDriveSm_overDriveCurve = + _mm_mul_ps(vec_overdrive_scaling, vec_overDriveCurve); + vec_hNl = mm_pow_ps(vec_hNl, vec_overDriveSm_overDriveCurve); + _mm_storeu_ps(&hNl[i], vec_hNl); + } + // scalar code for the 
remaining items. + for (; i < PART_LEN1; i++) { + // Weight subbands + if (hNl[i] > hNlFb) { + hNl[i] = WebRtcAec_weightCurve[i] * hNlFb + + (1 - WebRtcAec_weightCurve[i]) * hNl[i]; + } + hNl[i] = powf(hNl[i], overdrive_scaling * WebRtcAec_overDriveCurve[i]); + } +} + +static void SuppressSSE2(const float hNl[PART_LEN1], float efw[2][PART_LEN1]) { + int i; + const __m128 vec_minus_one = _mm_set1_ps(-1.0f); + // vectorized code (four at once) + for (i = 0; i + 3 < PART_LEN1; i += 4) { + // Suppress error signal + __m128 vec_hNl = _mm_loadu_ps(&hNl[i]); + __m128 vec_efw_re = _mm_loadu_ps(&efw[0][i]); + __m128 vec_efw_im = _mm_loadu_ps(&efw[1][i]); + vec_efw_re = _mm_mul_ps(vec_efw_re, vec_hNl); + vec_efw_im = _mm_mul_ps(vec_efw_im, vec_hNl); + + // Ooura fft returns incorrect sign on imaginary component. It matters + // here because we are making an additive change with comfort noise. + vec_efw_im = _mm_mul_ps(vec_efw_im, vec_minus_one); + _mm_storeu_ps(&efw[0][i], vec_efw_re); + _mm_storeu_ps(&efw[1][i], vec_efw_im); + } + // scalar code for the remaining items. + for (; i < PART_LEN1; i++) { + // Suppress error signal + efw[0][i] *= hNl[i]; + efw[1][i] *= hNl[i]; + + // Ooura fft returns incorrect sign on imaginary component. It matters + // here because we are making an additive change with comfort noise. + efw[1][i] *= -1; + } +} + +__inline static void _mm_add_ps_4x1(__m128 sum, float* dst) { + // A+B C+D + sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(0, 0, 3, 2))); + // A+B+C+D A+B+C+D + sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 1, 1, 1))); + _mm_store_ss(dst, sum); +} + +static int PartitionDelaySSE2( + int num_partitions, + float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) { + // Measures the energy in each filter partition and returns the partition with + // highest energy. + // TODO(bjornv): Spread computational cost by computing one partition per + // block? 
+ float wfEnMax = 0; + int i; + int delay = 0; + + for (i = 0; i < num_partitions; i++) { + int j; + int pos = i * PART_LEN1; + float wfEn = 0; + __m128 vec_wfEn = _mm_set1_ps(0.0f); + // vectorized code (four at once) + for (j = 0; j + 3 < PART_LEN1; j += 4) { + const __m128 vec_wfBuf0 = _mm_loadu_ps(&h_fft_buf[0][pos + j]); + const __m128 vec_wfBuf1 = _mm_loadu_ps(&h_fft_buf[1][pos + j]); + vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf0, vec_wfBuf0)); + vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf1, vec_wfBuf1)); + } + _mm_add_ps_4x1(vec_wfEn, &wfEn); + + // scalar code for the remaining items. + for (; j < PART_LEN1; j++) { + wfEn += h_fft_buf[0][pos + j] * h_fft_buf[0][pos + j] + + h_fft_buf[1][pos + j] * h_fft_buf[1][pos + j]; + } + + if (wfEn > wfEnMax) { + wfEnMax = wfEn; + delay = i; + } + } + return delay; +} + +// Updates the following smoothed Power Spectral Densities (PSD): +// - sd : near-end +// - se : residual echo +// - sx : far-end +// - sde : cross-PSD of near-end and residual echo +// - sxd : cross-PSD of near-end and far-end +// +// In addition to updating the PSDs, also the filter diverge state is determined +// upon actions are taken. +static void UpdateCoherenceSpectraSSE2(int mult, + bool extended_filter_enabled, + float efw[2][PART_LEN1], + float dfw[2][PART_LEN1], + float xfw[2][PART_LEN1], + CoherenceState* coherence_state, + short* filter_divergence_state, + int* extreme_filter_divergence) { + // Power estimate smoothing coefficients. + const float* ptrGCoh = + extended_filter_enabled + ? 
WebRtcAec_kExtendedSmoothingCoefficients[mult - 1] + : WebRtcAec_kNormalSmoothingCoefficients[mult - 1]; + int i; + float sdSum = 0, seSum = 0; + const __m128 vec_15 = _mm_set1_ps(WebRtcAec_kMinFarendPSD); + const __m128 vec_GCoh0 = _mm_set1_ps(ptrGCoh[0]); + const __m128 vec_GCoh1 = _mm_set1_ps(ptrGCoh[1]); + __m128 vec_sdSum = _mm_set1_ps(0.0f); + __m128 vec_seSum = _mm_set1_ps(0.0f); + + for (i = 0; i + 3 < PART_LEN1; i += 4) { + const __m128 vec_dfw0 = _mm_loadu_ps(&dfw[0][i]); + const __m128 vec_dfw1 = _mm_loadu_ps(&dfw[1][i]); + const __m128 vec_efw0 = _mm_loadu_ps(&efw[0][i]); + const __m128 vec_efw1 = _mm_loadu_ps(&efw[1][i]); + const __m128 vec_xfw0 = _mm_loadu_ps(&xfw[0][i]); + const __m128 vec_xfw1 = _mm_loadu_ps(&xfw[1][i]); + __m128 vec_sd = + _mm_mul_ps(_mm_loadu_ps(&coherence_state->sd[i]), vec_GCoh0); + __m128 vec_se = + _mm_mul_ps(_mm_loadu_ps(&coherence_state->se[i]), vec_GCoh0); + __m128 vec_sx = + _mm_mul_ps(_mm_loadu_ps(&coherence_state->sx[i]), vec_GCoh0); + __m128 vec_dfw_sumsq = _mm_mul_ps(vec_dfw0, vec_dfw0); + __m128 vec_efw_sumsq = _mm_mul_ps(vec_efw0, vec_efw0); + __m128 vec_xfw_sumsq = _mm_mul_ps(vec_xfw0, vec_xfw0); + vec_dfw_sumsq = _mm_add_ps(vec_dfw_sumsq, _mm_mul_ps(vec_dfw1, vec_dfw1)); + vec_efw_sumsq = _mm_add_ps(vec_efw_sumsq, _mm_mul_ps(vec_efw1, vec_efw1)); + vec_xfw_sumsq = _mm_add_ps(vec_xfw_sumsq, _mm_mul_ps(vec_xfw1, vec_xfw1)); + vec_xfw_sumsq = _mm_max_ps(vec_xfw_sumsq, vec_15); + vec_sd = _mm_add_ps(vec_sd, _mm_mul_ps(vec_dfw_sumsq, vec_GCoh1)); + vec_se = _mm_add_ps(vec_se, _mm_mul_ps(vec_efw_sumsq, vec_GCoh1)); + vec_sx = _mm_add_ps(vec_sx, _mm_mul_ps(vec_xfw_sumsq, vec_GCoh1)); + _mm_storeu_ps(&coherence_state->sd[i], vec_sd); + _mm_storeu_ps(&coherence_state->se[i], vec_se); + _mm_storeu_ps(&coherence_state->sx[i], vec_sx); + + { + const __m128 vec_3210 = _mm_loadu_ps(&coherence_state->sde[i][0]); + const __m128 vec_7654 = _mm_loadu_ps(&coherence_state->sde[i + 2][0]); + __m128 vec_a = + _mm_shuffle_ps(vec_3210, 
vec_7654, _MM_SHUFFLE(2, 0, 2, 0)); + __m128 vec_b = + _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(3, 1, 3, 1)); + __m128 vec_dfwefw0011 = _mm_mul_ps(vec_dfw0, vec_efw0); + __m128 vec_dfwefw0110 = _mm_mul_ps(vec_dfw0, vec_efw1); + vec_a = _mm_mul_ps(vec_a, vec_GCoh0); + vec_b = _mm_mul_ps(vec_b, vec_GCoh0); + vec_dfwefw0011 = + _mm_add_ps(vec_dfwefw0011, _mm_mul_ps(vec_dfw1, vec_efw1)); + vec_dfwefw0110 = + _mm_sub_ps(vec_dfwefw0110, _mm_mul_ps(vec_dfw1, vec_efw0)); + vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwefw0011, vec_GCoh1)); + vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwefw0110, vec_GCoh1)); + _mm_storeu_ps(&coherence_state->sde[i][0], _mm_unpacklo_ps(vec_a, vec_b)); + _mm_storeu_ps(&coherence_state->sde[i + 2][0], + _mm_unpackhi_ps(vec_a, vec_b)); + } + + { + const __m128 vec_3210 = _mm_loadu_ps(&coherence_state->sxd[i][0]); + const __m128 vec_7654 = _mm_loadu_ps(&coherence_state->sxd[i + 2][0]); + __m128 vec_a = + _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(2, 0, 2, 0)); + __m128 vec_b = + _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(3, 1, 3, 1)); + __m128 vec_dfwxfw0011 = _mm_mul_ps(vec_dfw0, vec_xfw0); + __m128 vec_dfwxfw0110 = _mm_mul_ps(vec_dfw0, vec_xfw1); + vec_a = _mm_mul_ps(vec_a, vec_GCoh0); + vec_b = _mm_mul_ps(vec_b, vec_GCoh0); + vec_dfwxfw0011 = + _mm_add_ps(vec_dfwxfw0011, _mm_mul_ps(vec_dfw1, vec_xfw1)); + vec_dfwxfw0110 = + _mm_sub_ps(vec_dfwxfw0110, _mm_mul_ps(vec_dfw1, vec_xfw0)); + vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwxfw0011, vec_GCoh1)); + vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwxfw0110, vec_GCoh1)); + _mm_storeu_ps(&coherence_state->sxd[i][0], _mm_unpacklo_ps(vec_a, vec_b)); + _mm_storeu_ps(&coherence_state->sxd[i + 2][0], + _mm_unpackhi_ps(vec_a, vec_b)); + } + + vec_sdSum = _mm_add_ps(vec_sdSum, vec_sd); + vec_seSum = _mm_add_ps(vec_seSum, vec_se); + } + + _mm_add_ps_4x1(vec_sdSum, &sdSum); + _mm_add_ps_4x1(vec_seSum, &seSum); + + for (; i < PART_LEN1; i++) { + coherence_state->sd[i] = + ptrGCoh[0] * 
coherence_state->sd[i] + + ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]); + coherence_state->se[i] = + ptrGCoh[0] * coherence_state->se[i] + + ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]); + // We threshold here to protect against the ill-effects of a zero farend. + // The threshold is not arbitrarily chosen, but balances protection and + // adverse interaction with the algorithm's tuning. + // TODO(bjornv): investigate further why this is so sensitive. + coherence_state->sx[i] = + ptrGCoh[0] * coherence_state->sx[i] + + ptrGCoh[1] * + WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], + WebRtcAec_kMinFarendPSD); + + coherence_state->sde[i][0] = + ptrGCoh[0] * coherence_state->sde[i][0] + + ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]); + coherence_state->sde[i][1] = + ptrGCoh[0] * coherence_state->sde[i][1] + + ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]); + + coherence_state->sxd[i][0] = + ptrGCoh[0] * coherence_state->sxd[i][0] + + ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]); + coherence_state->sxd[i][1] = + ptrGCoh[0] * coherence_state->sxd[i][1] + + ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]); + + sdSum += coherence_state->sd[i]; + seSum += coherence_state->se[i]; + } + + // Divergent filter safeguard update. + *filter_divergence_state = + (*filter_divergence_state ? 1.05f : 1.0f) * seSum > sdSum; + + // Signal extreme filter divergence if the error is significantly larger + // than the nearend (13 dB). + *extreme_filter_divergence = (seSum > (19.95f * sdSum)); +} + +// Window time domain data to be used by the fft. 
+static void WindowDataSSE2(float* x_windowed, const float* x) { + int i; + for (i = 0; i < PART_LEN; i += 4) { + const __m128 vec_Buf1 = _mm_loadu_ps(&x[i]); + const __m128 vec_Buf2 = _mm_loadu_ps(&x[PART_LEN + i]); + const __m128 vec_sqrtHanning = _mm_load_ps(&WebRtcAec_sqrtHanning[i]); + // A B C D + __m128 vec_sqrtHanning_rev = + _mm_loadu_ps(&WebRtcAec_sqrtHanning[PART_LEN - i - 3]); + // D C B A + vec_sqrtHanning_rev = _mm_shuffle_ps( + vec_sqrtHanning_rev, vec_sqrtHanning_rev, _MM_SHUFFLE(0, 1, 2, 3)); + _mm_storeu_ps(&x_windowed[i], _mm_mul_ps(vec_Buf1, vec_sqrtHanning)); + _mm_storeu_ps(&x_windowed[PART_LEN + i], + _mm_mul_ps(vec_Buf2, vec_sqrtHanning_rev)); + } +} + +// Puts fft output data into a complex valued array. +static void StoreAsComplexSSE2(const float* data, + float data_complex[2][PART_LEN1]) { + int i; + for (i = 0; i < PART_LEN; i += 4) { + const __m128 vec_fft0 = _mm_loadu_ps(&data[2 * i]); + const __m128 vec_fft4 = _mm_loadu_ps(&data[2 * i + 4]); + const __m128 vec_a = + _mm_shuffle_ps(vec_fft0, vec_fft4, _MM_SHUFFLE(2, 0, 2, 0)); + const __m128 vec_b = + _mm_shuffle_ps(vec_fft0, vec_fft4, _MM_SHUFFLE(3, 1, 3, 1)); + _mm_storeu_ps(&data_complex[0][i], vec_a); + _mm_storeu_ps(&data_complex[1][i], vec_b); + } + // fix beginning/end values + data_complex[1][0] = 0; + data_complex[1][PART_LEN] = 0; + data_complex[0][0] = data[0]; + data_complex[0][PART_LEN] = data[1]; +} + +static void ComputeCoherenceSSE2(const CoherenceState* coherence_state, + float* cohde, + float* cohxd) { + int i; + + { + const __m128 vec_1eminus10 = _mm_set1_ps(1e-10f); + + // Subband coherence + for (i = 0; i + 3 < PART_LEN1; i += 4) { + const __m128 vec_sd = _mm_loadu_ps(&coherence_state->sd[i]); + const __m128 vec_se = _mm_loadu_ps(&coherence_state->se[i]); + const __m128 vec_sx = _mm_loadu_ps(&coherence_state->sx[i]); + const __m128 vec_sdse = + _mm_add_ps(vec_1eminus10, _mm_mul_ps(vec_sd, vec_se)); + const __m128 vec_sdsx = + _mm_add_ps(vec_1eminus10, 
_mm_mul_ps(vec_sd, vec_sx)); + const __m128 vec_sde_3210 = _mm_loadu_ps(&coherence_state->sde[i][0]); + const __m128 vec_sde_7654 = _mm_loadu_ps(&coherence_state->sde[i + 2][0]); + const __m128 vec_sxd_3210 = _mm_loadu_ps(&coherence_state->sxd[i][0]); + const __m128 vec_sxd_7654 = _mm_loadu_ps(&coherence_state->sxd[i + 2][0]); + const __m128 vec_sde_0 = + _mm_shuffle_ps(vec_sde_3210, vec_sde_7654, _MM_SHUFFLE(2, 0, 2, 0)); + const __m128 vec_sde_1 = + _mm_shuffle_ps(vec_sde_3210, vec_sde_7654, _MM_SHUFFLE(3, 1, 3, 1)); + const __m128 vec_sxd_0 = + _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654, _MM_SHUFFLE(2, 0, 2, 0)); + const __m128 vec_sxd_1 = + _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654, _MM_SHUFFLE(3, 1, 3, 1)); + __m128 vec_cohde = _mm_mul_ps(vec_sde_0, vec_sde_0); + __m128 vec_cohxd = _mm_mul_ps(vec_sxd_0, vec_sxd_0); + vec_cohde = _mm_add_ps(vec_cohde, _mm_mul_ps(vec_sde_1, vec_sde_1)); + vec_cohde = _mm_div_ps(vec_cohde, vec_sdse); + vec_cohxd = _mm_add_ps(vec_cohxd, _mm_mul_ps(vec_sxd_1, vec_sxd_1)); + vec_cohxd = _mm_div_ps(vec_cohxd, vec_sdsx); + _mm_storeu_ps(&cohde[i], vec_cohde); + _mm_storeu_ps(&cohxd[i], vec_cohxd); + } + + // scalar code for the remaining items. 
+ for (; i < PART_LEN1; i++) { + cohde[i] = (coherence_state->sde[i][0] * coherence_state->sde[i][0] + + coherence_state->sde[i][1] * coherence_state->sde[i][1]) / + (coherence_state->sd[i] * coherence_state->se[i] + 1e-10f); + cohxd[i] = (coherence_state->sxd[i][0] * coherence_state->sxd[i][0] + + coherence_state->sxd[i][1] * coherence_state->sxd[i][1]) / + (coherence_state->sx[i] * coherence_state->sd[i] + 1e-10f); + } + } +} + +void WebRtcAec_InitAec_SSE2(void) { + WebRtcAec_FilterFar = FilterFarSSE2; + WebRtcAec_ScaleErrorSignal = ScaleErrorSignalSSE2; + WebRtcAec_FilterAdaptation = FilterAdaptationSSE2; + WebRtcAec_Overdrive = OverdriveSSE2; + WebRtcAec_Suppress = SuppressSSE2; + WebRtcAec_ComputeCoherence = ComputeCoherenceSSE2; + WebRtcAec_UpdateCoherenceSpectra = UpdateCoherenceSpectraSSE2; + WebRtcAec_StoreAsComplex = StoreAsComplexSSE2; + WebRtcAec_PartitionDelay = PartitionDelaySSE2; + WebRtcAec_WindowData = WindowDataSSE2; +} +} // namespace webrtc +#endif diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.cc new file mode 100644 index 000000000..2630841d4 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.cc @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* Resamples a signal to an arbitrary rate. Used by the AEC to compensate for + * clock skew by resampling the farend signal. 
+ */ + +#include "webrtc/modules/audio_processing/aec/aec_resampler.h" + +#include +#include +#include + +#include "webrtc/base/checks.h" +#include "webrtc/modules/audio_processing/aec/aec_core.h" + +namespace webrtc { + +enum { kEstimateLengthFrames = 400 }; + +typedef struct { + float buffer[kResamplerBufferSize]; + float position; + + int deviceSampleRateHz; + int skewData[kEstimateLengthFrames]; + int skewDataIndex; + float skewEstimate; +} AecResampler; + +static int EstimateSkew(const int* rawSkew, + int size, + int absLimit, + float* skewEst); + +void* WebRtcAec_CreateResampler() { + return malloc(sizeof(AecResampler)); +} + +int WebRtcAec_InitResampler(void* resampInst, int deviceSampleRateHz) { + AecResampler* obj = static_cast(resampInst); + memset(obj->buffer, 0, sizeof(obj->buffer)); + obj->position = 0.0; + + obj->deviceSampleRateHz = deviceSampleRateHz; + memset(obj->skewData, 0, sizeof(obj->skewData)); + obj->skewDataIndex = 0; + obj->skewEstimate = 0.0; + + return 0; +} + +void WebRtcAec_FreeResampler(void* resampInst) { + AecResampler* obj = static_cast(resampInst); + free(obj); +} + +void WebRtcAec_ResampleLinear(void* resampInst, + const float* inspeech, + size_t size, + float skew, + float* outspeech, + size_t* size_out) { + AecResampler* obj = static_cast(resampInst); + + float* y; + float be, tnew; + size_t tn, mm; + + RTC_DCHECK_LE(size, 2 * FRAME_LEN); + RTC_DCHECK(resampInst); + RTC_DCHECK(inspeech); + RTC_DCHECK(outspeech); + RTC_DCHECK(size_out); + + // Add new frame data in lookahead + memcpy(&obj->buffer[FRAME_LEN + kResamplingDelay], inspeech, + size * sizeof(inspeech[0])); + + // Sample rate ratio + be = 1 + skew; + + // Loop over input frame + mm = 0; + y = &obj->buffer[FRAME_LEN]; // Point at current frame + + tnew = be * mm + obj->position; + tn = (size_t)tnew; + + while (tn < size) { + // Interpolation + outspeech[mm] = y[tn] + (tnew - tn) * (y[tn + 1] - y[tn]); + mm++; + + tnew = be * mm + obj->position; + tn = static_cast(tnew); 
+ } + + *size_out = mm; + obj->position += (*size_out) * be - size; + + // Shift buffer + memmove(obj->buffer, &obj->buffer[size], + (kResamplerBufferSize - size) * sizeof(obj->buffer[0])); +} + +int WebRtcAec_GetSkew(void* resampInst, int rawSkew, float* skewEst) { + AecResampler* obj = static_cast(resampInst); + int err = 0; + + if (obj->skewDataIndex < kEstimateLengthFrames) { + obj->skewData[obj->skewDataIndex] = rawSkew; + obj->skewDataIndex++; + } else if (obj->skewDataIndex == kEstimateLengthFrames) { + err = EstimateSkew(obj->skewData, kEstimateLengthFrames, + obj->deviceSampleRateHz, skewEst); + obj->skewEstimate = *skewEst; + obj->skewDataIndex++; + } else { + *skewEst = obj->skewEstimate; + } + + return err; +} + +int EstimateSkew(const int* rawSkew, + int size, + int deviceSampleRateHz, + float* skewEst) { + const int absLimitOuter = static_cast(0.04f * deviceSampleRateHz); + const int absLimitInner = static_cast(0.0025f * deviceSampleRateHz); + int i = 0; + int n = 0; + float rawAvg = 0; + float err = 0; + float rawAbsDev = 0; + int upperLimit = 0; + int lowerLimit = 0; + float cumSum = 0; + float x = 0; + float x2 = 0; + float y = 0; + float xy = 0; + float xAvg = 0; + float denom = 0; + float skew = 0; + + *skewEst = 0; // Set in case of error below. + for (i = 0; i < size; i++) { + if ((rawSkew[i] < absLimitOuter && rawSkew[i] > -absLimitOuter)) { + n++; + rawAvg += rawSkew[i]; + } + } + + if (n == 0) { + return -1; + } + RTC_DCHECK_GT(n, 0); + rawAvg /= n; + + for (i = 0; i < size; i++) { + if ((rawSkew[i] < absLimitOuter && rawSkew[i] > -absLimitOuter)) { + err = rawSkew[i] - rawAvg; + rawAbsDev += err >= 0 ? err : -err; + } + } + RTC_DCHECK_GT(n, 0); + rawAbsDev /= n; + upperLimit = static_cast(rawAvg + 5 * rawAbsDev + 1); // +1 for ceiling. + lowerLimit = static_cast(rawAvg - 5 * rawAbsDev - 1); // -1 for floor. 
+ + n = 0; + for (i = 0; i < size; i++) { + if ((rawSkew[i] < absLimitInner && rawSkew[i] > -absLimitInner) || + (rawSkew[i] < upperLimit && rawSkew[i] > lowerLimit)) { + n++; + cumSum += rawSkew[i]; + x += n; + x2 += n * n; + y += cumSum; + xy += n * cumSum; + } + } + + if (n == 0) { + return -1; + } + RTC_DCHECK_GT(n, 0); + xAvg = x / n; + denom = x2 - xAvg * x; + + if (denom != 0) { + skew = (xy - xAvg * y) / denom; + } + + *skewEst = skew; + return 0; +} +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.h new file mode 100644 index 000000000..3a7400b01 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/aec_resampler.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_ + +#include "webrtc/modules/audio_processing/aec/aec_core.h" + +namespace webrtc { + +enum { kResamplingDelay = 1 }; +enum { kResamplerBufferSize = FRAME_LEN * 4 }; + +// Unless otherwise specified, functions return 0 on success and -1 on error. +void* WebRtcAec_CreateResampler(); // Returns NULL on error. +int WebRtcAec_InitResampler(void* resampInst, int deviceSampleRateHz); +void WebRtcAec_FreeResampler(void* resampInst); + +// Estimates skew from raw measurement. +int WebRtcAec_GetSkew(void* resampInst, int rawSkew, float* skewEst); + +// Resamples input using linear interpolation. 
+void WebRtcAec_ResampleLinear(void* resampInst, + const float* inspeech, + size_t size, + float skew, + float* outspeech, + size_t* size_out); + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.cc new file mode 100644 index 000000000..9261632c1 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.cc @@ -0,0 +1,863 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* + * Contains the API functions for the AEC. + */ +#include "webrtc/modules/audio_processing/aec/echo_cancellation.h" + +#include +#include +#include + +extern "C" { +#include "webrtc/common_audio/ring_buffer.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +} +#include "webrtc/modules/audio_processing/aec/aec_core.h" +#include "webrtc/modules/audio_processing/aec/aec_resampler.h" +#include "webrtc/modules/audio_processing/logging/apm_data_dumper.h" +#include "webrtc/typedefs.h" + +namespace webrtc { + +Aec::Aec() = default; +Aec::~Aec() = default; + +// Measured delays [ms] +// Device Chrome GTP +// MacBook Air 10 +// MacBook Retina 10 100 +// MacPro 30? +// +// Win7 Desktop 70 80? +// Win7 T430s 110 +// Win8 T420s 70 +// +// Daisy 50 +// Pixel (w/ preproc?) 240 +// Pixel (w/o preproc?) 110 110 + +// The extended filter mode gives us the flexibility to ignore the system's +// reported delays. 
We do this for platforms which we believe provide results +// which are incompatible with the AEC's expectations. Based on measurements +// (some provided above) we set a conservative (i.e. lower than measured) +// fixed delay. +// +// WEBRTC_UNTRUSTED_DELAY will only have an impact when |extended_filter_mode| +// is enabled. See the note along with |DelayCorrection| in +// echo_cancellation_impl.h for more details on the mode. +// +// Justification: +// Chromium/Mac: Here, the true latency is so low (~10-20 ms), that it plays +// havoc with the AEC's buffering. To avoid this, we set a fixed delay of 20 ms +// and then compensate by rewinding by 10 ms (in wideband) through +// kDelayDiffOffsetSamples. This trick does not seem to work for larger rewind +// values, but fortunately this is sufficient. +// +// Chromium/Linux(ChromeOS): The values we get on this platform don't correspond +// well to reality. The variance doesn't match the AEC's buffer changes, and the +// bulk values tend to be too low. However, the range across different hardware +// appears to be too large to choose a single value. +// +// GTP/Linux(ChromeOS): TBD, but for the moment we will trust the values. +#if defined(WEBRTC_CHROMIUM_BUILD) && defined(WEBRTC_MAC) +#define WEBRTC_UNTRUSTED_DELAY +#endif + +#if defined(WEBRTC_UNTRUSTED_DELAY) && defined(WEBRTC_MAC) +static const int kDelayDiffOffsetSamples = -160; +#else +// Not enabled for now. +static const int kDelayDiffOffsetSamples = 0; +#endif + +#if defined(WEBRTC_MAC) +static const int kFixedDelayMs = 20; +#else +static const int kFixedDelayMs = 50; +#endif +#if !defined(WEBRTC_UNTRUSTED_DELAY) +static const int kMinTrustedDelayMs = 20; +#endif +static const int kMaxTrustedDelayMs = 500; + +// Maximum length of resampled signal. 
Must be an integer multiple of frames +// (ceil(1/(1 + MIN_SKEW)*2) + 1)*FRAME_LEN +// The factor of 2 handles wb, and the + 1 is as a safety margin +// TODO(bjornv): Replace with kResamplerBufferSize +#define MAX_RESAMP_LEN (5 * FRAME_LEN) + +static const int kMaxBufSizeStart = 62; // In partitions +static const int sampMsNb = 8; // samples per ms in nb +static const int initCheck = 42; + +int Aec::instance_count = 0; + +// Estimates delay to set the position of the far-end buffer read pointer +// (controlled by knownDelay) +static void EstBufDelayNormal(Aec* aecInst); +static void EstBufDelayExtended(Aec* aecInst); +static int ProcessNormal(Aec* self, + const float* const* near, + size_t num_bands, + float* const* out, + size_t num_samples, + int16_t reported_delay_ms, + int32_t skew); +static void ProcessExtended(Aec* self, + const float* const* near, + size_t num_bands, + float* const* out, + size_t num_samples, + int16_t reported_delay_ms, + int32_t skew); + +void* WebRtcAec_Create() { + Aec* aecpc = new Aec(); + + if (!aecpc) { + return NULL; + } + aecpc->data_dumper.reset(new ApmDataDumper(aecpc->instance_count)); + + aecpc->aec = WebRtcAec_CreateAec(aecpc->instance_count); + if (!aecpc->aec) { + WebRtcAec_Free(aecpc); + return NULL; + } + aecpc->resampler = WebRtcAec_CreateResampler(); + if (!aecpc->resampler) { + WebRtcAec_Free(aecpc); + return NULL; + } + // Create far-end pre-buffer. The buffer size has to be large enough for + // largest possible drift compensation (kResamplerBufferSize) + "almost" an + // FFT buffer (PART_LEN2 - 1). 
+ aecpc->far_pre_buf = + WebRtc_CreateBuffer(PART_LEN2 + kResamplerBufferSize, sizeof(float)); + if (!aecpc->far_pre_buf) { + WebRtcAec_Free(aecpc); + return NULL; + } + + aecpc->initFlag = 0; + + aecpc->instance_count++; + return aecpc; +} + +void WebRtcAec_Free(void* aecInst) { + Aec* aecpc = reinterpret_cast(aecInst); + + if (aecpc == NULL) { + return; + } + + WebRtc_FreeBuffer(aecpc->far_pre_buf); + + WebRtcAec_FreeAec(aecpc->aec); + WebRtcAec_FreeResampler(aecpc->resampler); + delete aecpc; +} + +int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq) { + Aec* aecpc = reinterpret_cast(aecInst); + aecpc->data_dumper->InitiateNewSetOfRecordings(); + AecConfig aecConfig; + + if (sampFreq != 8000 && sampFreq != 16000 && sampFreq != 32000 && + sampFreq != 48000) { + return AEC_BAD_PARAMETER_ERROR; + } + aecpc->sampFreq = sampFreq; + + if (scSampFreq < 1 || scSampFreq > 96000) { + return AEC_BAD_PARAMETER_ERROR; + } + aecpc->scSampFreq = scSampFreq; + + // Initialize echo canceller core + if (WebRtcAec_InitAec(aecpc->aec, aecpc->sampFreq) == -1) { + return AEC_UNSPECIFIED_ERROR; + } + + if (WebRtcAec_InitResampler(aecpc->resampler, aecpc->scSampFreq) == -1) { + return AEC_UNSPECIFIED_ERROR; + } + + WebRtc_InitBuffer(aecpc->far_pre_buf); + WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN); // Start overlap. + + aecpc->initFlag = initCheck; // indicates that initialization has been done + + if (aecpc->sampFreq == 32000 || aecpc->sampFreq == 48000) { + aecpc->splitSampFreq = 16000; + } else { + aecpc->splitSampFreq = sampFreq; + } + + aecpc->delayCtr = 0; + aecpc->sampFactor = (aecpc->scSampFreq * 1.0f) / aecpc->splitSampFreq; + // Sampling frequency multiplier (SWB is processed as 160 frame size). 
+ aecpc->rate_factor = aecpc->splitSampFreq / 8000; + + aecpc->sum = 0; + aecpc->counter = 0; + aecpc->checkBuffSize = 1; + aecpc->firstVal = 0; + + // We skip the startup_phase completely (setting to 0) if DA-AEC is enabled, + // but not extended_filter mode. + aecpc->startup_phase = WebRtcAec_extended_filter_enabled(aecpc->aec) || + !WebRtcAec_delay_agnostic_enabled(aecpc->aec); + aecpc->bufSizeStart = 0; + aecpc->checkBufSizeCtr = 0; + aecpc->msInSndCardBuf = 0; + aecpc->filtDelay = -1; // -1 indicates an initialized state. + aecpc->timeForDelayChange = 0; + aecpc->knownDelay = 0; + aecpc->lastDelayDiff = 0; + + aecpc->skewFrCtr = 0; + aecpc->resample = kAecFalse; + aecpc->highSkewCtr = 0; + aecpc->skew = 0; + + aecpc->farend_started = 0; + + // Default settings. + aecConfig.nlpMode = kAecNlpModerate; + aecConfig.skewMode = kAecFalse; + aecConfig.metricsMode = kAecFalse; + aecConfig.delay_logging = kAecFalse; + + if (WebRtcAec_set_config(aecpc, aecConfig) == -1) { + return AEC_UNSPECIFIED_ERROR; + } + + return 0; +} + +// Returns any error that is caused when buffering the +// far-end signal. +int32_t WebRtcAec_GetBufferFarendError(void* aecInst, + const float* farend, + size_t nrOfSamples) { + Aec* aecpc = reinterpret_cast(aecInst); + + if (!farend) + return AEC_NULL_POINTER_ERROR; + + if (aecpc->initFlag != initCheck) + return AEC_UNINITIALIZED_ERROR; + + // number of samples == 160 for SWB input + if (nrOfSamples != 80 && nrOfSamples != 160) + return AEC_BAD_PARAMETER_ERROR; + + return 0; +} + +// only buffer L band for farend +int32_t WebRtcAec_BufferFarend(void* aecInst, + const float* farend, + size_t nrOfSamples) { + Aec* aecpc = reinterpret_cast(aecInst); + size_t newNrOfSamples = nrOfSamples; + float new_farend[MAX_RESAMP_LEN]; + const float* farend_ptr = farend; + + // Get any error caused by buffering the farend signal. 
+ int32_t error_code = + WebRtcAec_GetBufferFarendError(aecInst, farend, nrOfSamples); + + if (error_code != 0) + return error_code; + + if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) { + // Resample and get a new number of samples + WebRtcAec_ResampleLinear(aecpc->resampler, farend, nrOfSamples, aecpc->skew, + new_farend, &newNrOfSamples); + farend_ptr = new_farend; + } + + aecpc->farend_started = 1; + WebRtcAec_SetSystemDelay(aecpc->aec, WebRtcAec_system_delay(aecpc->aec) + + static_cast(newNrOfSamples)); + + // Write the time-domain data to |far_pre_buf|. + WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, newNrOfSamples); + + // TODO(minyue): reduce to |PART_LEN| samples for each buffering. + while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) { + // We have enough data to pass to the FFT, hence read PART_LEN2 samples. + { + float* ptmp = NULL; + float tmp[PART_LEN2]; + WebRtc_ReadBuffer(aecpc->far_pre_buf, + reinterpret_cast(&ptmp), tmp, PART_LEN2); + WebRtcAec_BufferFarendBlock(aecpc->aec, &ptmp[PART_LEN]); + } + + // Rewind |far_pre_buf| PART_LEN samples for overlap before continuing. + WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN); + } + + return 0; +} + +int32_t WebRtcAec_Process(void* aecInst, + const float* const* nearend, + size_t num_bands, + float* const* out, + size_t nrOfSamples, + int16_t msInSndCardBuf, + int32_t skew) { + Aec* aecpc = reinterpret_cast(aecInst); + int32_t retVal = 0; + + if (out == NULL) { + return AEC_NULL_POINTER_ERROR; + } + + if (aecpc->initFlag != initCheck) { + return AEC_UNINITIALIZED_ERROR; + } + + // number of samples == 160 for SWB input + if (nrOfSamples != 80 && nrOfSamples != 160) { + return AEC_BAD_PARAMETER_ERROR; + } + + if (msInSndCardBuf < 0) { + msInSndCardBuf = 0; + retVal = AEC_BAD_PARAMETER_WARNING; + } else if (msInSndCardBuf > kMaxTrustedDelayMs) { + // The clamping is now done in ProcessExtended/Normal(). 
+ retVal = AEC_BAD_PARAMETER_WARNING; + } + + // This returns the value of aec->extended_filter_enabled. + if (WebRtcAec_extended_filter_enabled(aecpc->aec)) { + ProcessExtended(aecpc, nearend, num_bands, out, nrOfSamples, msInSndCardBuf, + skew); + } else { + retVal = ProcessNormal(aecpc, nearend, num_bands, out, nrOfSamples, + msInSndCardBuf, skew); + } + + int far_buf_size_samples = WebRtcAec_system_delay(aecpc->aec); + aecpc->data_dumper->DumpRaw("aec_system_delay", 1, &far_buf_size_samples); + aecpc->data_dumper->DumpRaw("aec_known_delay", 1, &aecpc->knownDelay); + + return retVal; +} + +int WebRtcAec_set_config(void* handle, AecConfig config) { + Aec* self = reinterpret_cast(handle); + if (self->initFlag != initCheck) { + return AEC_UNINITIALIZED_ERROR; + } + + if (config.skewMode != kAecFalse && config.skewMode != kAecTrue) { + return AEC_BAD_PARAMETER_ERROR; + } + self->skewMode = config.skewMode; + + if (config.nlpMode != kAecNlpConservative && + config.nlpMode != kAecNlpModerate && + config.nlpMode != kAecNlpAggressive) { + return AEC_BAD_PARAMETER_ERROR; + } + + if (config.metricsMode != kAecFalse && config.metricsMode != kAecTrue) { + return AEC_BAD_PARAMETER_ERROR; + } + + if (config.delay_logging != kAecFalse && config.delay_logging != kAecTrue) { + return AEC_BAD_PARAMETER_ERROR; + } + + WebRtcAec_SetConfigCore(self->aec, config.nlpMode, config.metricsMode, + config.delay_logging); + return 0; +} + +int WebRtcAec_get_echo_status(void* handle, int* status) { + Aec* self = reinterpret_cast(handle); + if (status == NULL) { + return AEC_NULL_POINTER_ERROR; + } + if (self->initFlag != initCheck) { + return AEC_UNINITIALIZED_ERROR; + } + + *status = WebRtcAec_echo_state(self->aec); + + return 0; +} + +int WebRtcAec_GetMetrics(void* handle, AecMetrics* metrics) { + const float kUpWeight = 0.7f; + float dtmp; + int stmp; + Aec* self = reinterpret_cast(handle); + Stats erl; + Stats erle; + Stats a_nlp; + + if (handle == NULL) { + return -1; + } + if (metrics 
== NULL) { + return AEC_NULL_POINTER_ERROR; + } + if (self->initFlag != initCheck) { + return AEC_UNINITIALIZED_ERROR; + } + + WebRtcAec_GetEchoStats(self->aec, &erl, &erle, &a_nlp, + &metrics->divergent_filter_fraction); + + // ERL + metrics->erl.instant = static_cast(erl.instant); + + if ((erl.himean > kOffsetLevel) && (erl.average > kOffsetLevel)) { + // Use a mix between regular average and upper part average. + dtmp = kUpWeight * erl.himean + (1 - kUpWeight) * erl.average; + metrics->erl.average = static_cast(dtmp); + } else { + metrics->erl.average = kOffsetLevel; + } + + metrics->erl.max = static_cast(erl.max); + + if (erl.min < (kOffsetLevel * (-1))) { + metrics->erl.min = static_cast(erl.min); + } else { + metrics->erl.min = kOffsetLevel; + } + + // ERLE + metrics->erle.instant = static_cast(erle.instant); + + if ((erle.himean > kOffsetLevel) && (erle.average > kOffsetLevel)) { + // Use a mix between regular average and upper part average. + dtmp = kUpWeight * erle.himean + (1 - kUpWeight) * erle.average; + metrics->erle.average = static_cast(dtmp); + } else { + metrics->erle.average = kOffsetLevel; + } + + metrics->erle.max = static_cast(erle.max); + + if (erle.min < (kOffsetLevel * (-1))) { + metrics->erle.min = static_cast(erle.min); + } else { + metrics->erle.min = kOffsetLevel; + } + + // RERL + if ((metrics->erl.average > kOffsetLevel) && + (metrics->erle.average > kOffsetLevel)) { + stmp = metrics->erl.average + metrics->erle.average; + } else { + stmp = kOffsetLevel; + } + metrics->rerl.average = stmp; + + // No other statistics needed, but returned for completeness. + metrics->rerl.instant = stmp; + metrics->rerl.max = stmp; + metrics->rerl.min = stmp; + + // A_NLP + metrics->aNlp.instant = static_cast(a_nlp.instant); + + if ((a_nlp.himean > kOffsetLevel) && (a_nlp.average > kOffsetLevel)) { + // Use a mix between regular average and upper part average. 
+ dtmp = kUpWeight * a_nlp.himean + (1 - kUpWeight) * a_nlp.average; + metrics->aNlp.average = static_cast(dtmp); + } else { + metrics->aNlp.average = kOffsetLevel; + } + + metrics->aNlp.max = static_cast(a_nlp.max); + + if (a_nlp.min < (kOffsetLevel * (-1))) { + metrics->aNlp.min = static_cast(a_nlp.min); + } else { + metrics->aNlp.min = kOffsetLevel; + } + + return 0; +} + +int WebRtcAec_GetDelayMetrics(void* handle, + int* median, + int* std, + float* fraction_poor_delays) { + Aec* self = reinterpret_cast(handle); + if (median == NULL) { + return AEC_NULL_POINTER_ERROR; + } + if (std == NULL) { + return AEC_NULL_POINTER_ERROR; + } + if (self->initFlag != initCheck) { + return AEC_UNINITIALIZED_ERROR; + } + if (WebRtcAec_GetDelayMetricsCore(self->aec, median, std, + fraction_poor_delays) == -1) { + // Logging disabled. + return AEC_UNSUPPORTED_FUNCTION_ERROR; + } + + return 0; +} + +AecCore* WebRtcAec_aec_core(void* handle) { + if (!handle) { + return NULL; + } + return reinterpret_cast(handle)->aec; +} + +static int ProcessNormal(Aec* aecpc, + const float* const* nearend, + size_t num_bands, + float* const* out, + size_t nrOfSamples, + int16_t msInSndCardBuf, + int32_t skew) { + int retVal = 0; + size_t i; + size_t nBlocks10ms; + // Limit resampling to doubling/halving of signal + const float minSkewEst = -0.5f; + const float maxSkewEst = 1.0f; + + msInSndCardBuf = + msInSndCardBuf > kMaxTrustedDelayMs ? kMaxTrustedDelayMs : msInSndCardBuf; + // TODO(andrew): we need to investigate if this +10 is really wanted. 
+ msInSndCardBuf += 10; + aecpc->msInSndCardBuf = msInSndCardBuf; + + if (aecpc->skewMode == kAecTrue) { + if (aecpc->skewFrCtr < 25) { + aecpc->skewFrCtr++; + } else { + retVal = WebRtcAec_GetSkew(aecpc->resampler, skew, &aecpc->skew); + if (retVal == -1) { + aecpc->skew = 0; + retVal = AEC_BAD_PARAMETER_WARNING; + } + + aecpc->skew /= aecpc->sampFactor * nrOfSamples; + + if (aecpc->skew < 1.0e-3 && aecpc->skew > -1.0e-3) { + aecpc->resample = kAecFalse; + } else { + aecpc->resample = kAecTrue; + } + + if (aecpc->skew < minSkewEst) { + aecpc->skew = minSkewEst; + } else if (aecpc->skew > maxSkewEst) { + aecpc->skew = maxSkewEst; + } + + aecpc->data_dumper->DumpRaw("aec_skew", 1, &aecpc->skew); + } + } + + nBlocks10ms = nrOfSamples / (FRAME_LEN * aecpc->rate_factor); + + if (aecpc->startup_phase) { + for (i = 0; i < num_bands; ++i) { + // Only needed if they don't already point to the same place. + if (nearend[i] != out[i]) { + memcpy(out[i], nearend[i], sizeof(nearend[i][0]) * nrOfSamples); + } + } + + // The AEC is in the start up mode + // AEC is disabled until the system delay is OK + + // Mechanism to ensure that the system delay is reasonably stable. + if (aecpc->checkBuffSize) { + aecpc->checkBufSizeCtr++; + // Before we fill up the far-end buffer we require the system delay + // to be stable (+/-8 ms) compared to the first value. This + // comparison is made during the following 6 consecutive 10 ms + // blocks. If it seems to be stable then we start to fill up the + // far-end buffer. + if (aecpc->counter == 0) { + aecpc->firstVal = aecpc->msInSndCardBuf; + aecpc->sum = 0; + } + + if (abs(aecpc->firstVal - aecpc->msInSndCardBuf) < + WEBRTC_SPL_MAX(0.2 * aecpc->msInSndCardBuf, sampMsNb)) { + aecpc->sum += aecpc->msInSndCardBuf; + aecpc->counter++; + } else { + aecpc->counter = 0; + } + + if (aecpc->counter * nBlocks10ms >= 6) { + // The far-end buffer size is determined in partitions of + // PART_LEN samples. 
Use 75% of the average value of the system + // delay as buffer size to start with. + aecpc->bufSizeStart = + WEBRTC_SPL_MIN((3 * aecpc->sum * aecpc->rate_factor * 8) / + (4 * aecpc->counter * PART_LEN), + kMaxBufSizeStart); + // Buffer size has now been determined. + aecpc->checkBuffSize = 0; + } + + if (aecpc->checkBufSizeCtr * nBlocks10ms > 50) { + // For really bad systems, don't disable the echo canceller for + // more than 0.5 sec. + aecpc->bufSizeStart = WEBRTC_SPL_MIN( + (aecpc->msInSndCardBuf * aecpc->rate_factor * 3) / 40, + kMaxBufSizeStart); + aecpc->checkBuffSize = 0; + } + } + + // If |checkBuffSize| changed in the if-statement above. + if (!aecpc->checkBuffSize) { + // The system delay is now reasonably stable (or has been unstable + // for too long). When the far-end buffer is filled with + // approximately the same amount of data as reported by the system + // we end the startup phase. + int overhead_elements = + WebRtcAec_system_delay(aecpc->aec) / PART_LEN - aecpc->bufSizeStart; + if (overhead_elements == 0) { + // Enable the AEC + aecpc->startup_phase = 0; + } else if (overhead_elements > 0) { + // TODO(bjornv): Do we need a check on how much we actually + // moved the read pointer? It should always be possible to move + // the pointer |overhead_elements| since we have only added data + // to the buffer and no delay compensation nor AEC processing + // has been done. + WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(aecpc->aec, + overhead_elements); + + // Enable the AEC + aecpc->startup_phase = 0; + } + } + } else { + // AEC is enabled. + EstBufDelayNormal(aecpc); + + // Call the AEC. + // TODO(bjornv): Re-structure such that we don't have to pass + // |aecpc->knownDelay| as input. Change name to something like + // |system_buffer_diff|. 
+ WebRtcAec_ProcessFrames(aecpc->aec, nearend, num_bands, nrOfSamples, + aecpc->knownDelay, out); + } + + return retVal; +} + +static void ProcessExtended(Aec* self, + const float* const* near, + size_t num_bands, + float* const* out, + size_t num_samples, + int16_t reported_delay_ms, + int32_t skew) { + size_t i; + const int delay_diff_offset = kDelayDiffOffsetSamples; + RTC_DCHECK(num_samples == 80 || num_samples == 160); +#if defined(WEBRTC_UNTRUSTED_DELAY) + reported_delay_ms = kFixedDelayMs; +#else + // This is the usual mode where we trust the reported system delay values. + // Due to the longer filter, we no longer add 10 ms to the reported delay + // to reduce chance of non-causality. Instead we apply a minimum here to avoid + // issues with the read pointer jumping around needlessly. + reported_delay_ms = reported_delay_ms < kMinTrustedDelayMs + ? kMinTrustedDelayMs + : reported_delay_ms; + // If the reported delay appears to be bogus, we attempt to recover by using + // the measured fixed delay values. We use >= here because higher layers + // may already clamp to this maximum value, and we would otherwise not + // detect it here. + reported_delay_ms = reported_delay_ms >= kMaxTrustedDelayMs + ? kFixedDelayMs + : reported_delay_ms; +#endif + self->msInSndCardBuf = reported_delay_ms; + + if (!self->farend_started) { + for (i = 0; i < num_bands; ++i) { + // Only needed if they don't already point to the same place. + if (near[i] != out[i]) { + memcpy(out[i], near[i], sizeof(near[i][0]) * num_samples); + } + } + return; + } + if (self->startup_phase) { + // In the extended mode, there isn't a startup "phase", just a special + // action on the first frame. In the trusted delay case, we'll take the + // current reported delay, unless it's less then our conservative + // measurement. + int startup_size_ms = + reported_delay_ms < kFixedDelayMs ? 
kFixedDelayMs : reported_delay_ms; +#if defined(WEBRTC_ANDROID) + int target_delay = startup_size_ms * self->rate_factor * 8; +#else + // To avoid putting the AEC in a non-causal state we're being slightly + // conservative and scale by 2. On Android we use a fixed delay and + // therefore there is no need to scale the target_delay. + int target_delay = startup_size_ms * self->rate_factor * 8 / 2; +#endif + int overhead_elements = + (WebRtcAec_system_delay(self->aec) - target_delay) / PART_LEN; + WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(self->aec, + overhead_elements); + self->startup_phase = 0; + } + + EstBufDelayExtended(self); + + { + // |delay_diff_offset| gives us the option to manually rewind the delay on + // very low delay platforms which can't be expressed purely through + // |reported_delay_ms|. + const int adjusted_known_delay = + WEBRTC_SPL_MAX(0, self->knownDelay + delay_diff_offset); + + WebRtcAec_ProcessFrames(self->aec, near, num_bands, num_samples, + adjusted_known_delay, out); + } +} + +static void EstBufDelayNormal(Aec* aecpc) { + int nSampSndCard = aecpc->msInSndCardBuf * sampMsNb * aecpc->rate_factor; + int current_delay = nSampSndCard - WebRtcAec_system_delay(aecpc->aec); + int delay_difference = 0; + + // Before we proceed with the delay estimate filtering we: + // 1) Compensate for the frame that will be read. + // 2) Compensate for drift resampling. + // 3) Compensate for non-causality if needed, since the estimated delay can't + // be negative. + + // 1) Compensating for the frame(s) that will be read/processed. + current_delay += FRAME_LEN * aecpc->rate_factor; + + // 2) Account for resampling frame delay. + if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) { + current_delay -= kResamplingDelay; + } + + // 3) Compensate for non-causality, if needed, by flushing one block. 
+ if (current_delay < PART_LEN) { + current_delay += + WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(aecpc->aec, 1) * + PART_LEN; + } + + // We use -1 to signal an initialized state in the "extended" implementation; + // compensate for that. + aecpc->filtDelay = aecpc->filtDelay < 0 ? 0 : aecpc->filtDelay; + aecpc->filtDelay = + WEBRTC_SPL_MAX(0, static_cast(0.8 * + aecpc->filtDelay + + 0.2 * current_delay)); + + delay_difference = aecpc->filtDelay - aecpc->knownDelay; + if (delay_difference > 224) { + if (aecpc->lastDelayDiff < 96) { + aecpc->timeForDelayChange = 0; + } else { + aecpc->timeForDelayChange++; + } + } else if (delay_difference < 96 && aecpc->knownDelay > 0) { + if (aecpc->lastDelayDiff > 224) { + aecpc->timeForDelayChange = 0; + } else { + aecpc->timeForDelayChange++; + } + } else { + aecpc->timeForDelayChange = 0; + } + aecpc->lastDelayDiff = delay_difference; + + if (aecpc->timeForDelayChange > 25) { + aecpc->knownDelay = WEBRTC_SPL_MAX((int)aecpc->filtDelay - 160, 0); + } +} + +static void EstBufDelayExtended(Aec* self) { + int reported_delay = self->msInSndCardBuf * sampMsNb * self->rate_factor; + int current_delay = reported_delay - WebRtcAec_system_delay(self->aec); + int delay_difference = 0; + + // Before we proceed with the delay estimate filtering we: + // 1) Compensate for the frame that will be read. + // 2) Compensate for drift resampling. + // 3) Compensate for non-causality if needed, since the estimated delay can't + // be negative. + + // 1) Compensating for the frame(s) that will be read/processed. + current_delay += FRAME_LEN * self->rate_factor; + + // 2) Account for resampling frame delay. + if (self->skewMode == kAecTrue && self->resample == kAecTrue) { + current_delay -= kResamplingDelay; + } + + // 3) Compensate for non-causality, if needed, by flushing two blocks. 
+ if (current_delay < PART_LEN) { + current_delay += + WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(self->aec, 2) * PART_LEN; + } + + if (self->filtDelay == -1) { + self->filtDelay = WEBRTC_SPL_MAX(0, 0.5 * current_delay); + } else { + self->filtDelay = WEBRTC_SPL_MAX( + 0, static_cast(0.95 * self->filtDelay + 0.05 * current_delay)); + } + + delay_difference = self->filtDelay - self->knownDelay; + if (delay_difference > 384) { + if (self->lastDelayDiff < 128) { + self->timeForDelayChange = 0; + } else { + self->timeForDelayChange++; + } + } else if (delay_difference < 128 && self->knownDelay > 0) { + if (self->lastDelayDiff > 384) { + self->timeForDelayChange = 0; + } else { + self->timeForDelayChange++; + } + } else { + self->timeForDelayChange = 0; + } + self->lastDelayDiff = delay_difference; + + if (self->timeForDelayChange > 25) { + self->knownDelay = WEBRTC_SPL_MAX((int)self->filtDelay - 256, 0); + } +} +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.h new file mode 100644 index 000000000..10471139d --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aec/echo_cancellation.h @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_H_ + +#include + +#include + +extern "C" { +#include "webrtc/common_audio/ring_buffer.h" +} +#include "webrtc/modules/audio_processing/aec/aec_core.h" +#include "webrtc/typedefs.h" + +namespace webrtc { + +// Errors +#define AEC_UNSPECIFIED_ERROR 12000 +#define AEC_UNSUPPORTED_FUNCTION_ERROR 12001 +#define AEC_UNINITIALIZED_ERROR 12002 +#define AEC_NULL_POINTER_ERROR 12003 +#define AEC_BAD_PARAMETER_ERROR 12004 + +// Warnings +#define AEC_BAD_PARAMETER_WARNING 12050 + +enum { kAecNlpConservative = 0, kAecNlpModerate, kAecNlpAggressive }; + +enum { kAecFalse = 0, kAecTrue }; + +typedef struct { + int16_t nlpMode; // default kAecNlpModerate + int16_t skewMode; // default kAecFalse + int16_t metricsMode; // default kAecFalse + int delay_logging; // default kAecFalse + // float realSkew; +} AecConfig; + +typedef struct { + int instant; + int average; + int max; + int min; +} AecLevel; + +typedef struct { + AecLevel rerl; + AecLevel erl; + AecLevel erle; + AecLevel aNlp; + float divergent_filter_fraction; +} AecMetrics; + +struct AecCore; + +class ApmDataDumper; + +typedef struct Aec { + Aec(); + ~Aec(); + + std::unique_ptr data_dumper; + + int delayCtr; + int sampFreq; + int splitSampFreq; + int scSampFreq; + float sampFactor; // scSampRate / sampFreq + short skewMode; + int bufSizeStart; + int knownDelay; + int rate_factor; + + short initFlag; // indicates if AEC has been initialized + + // Variables used for averaging far end buffer size + short counter; + int sum; + short firstVal; + short checkBufSizeCtr; + + // Variables used for delay shifts + short msInSndCardBuf; + short filtDelay; // Filtered delay estimate. 
+ int timeForDelayChange; + int startup_phase; + int checkBuffSize; + short lastDelayDiff; + + // Structures + void* resampler; + + int skewFrCtr; + int resample; // if the skew is small enough we don't resample + int highSkewCtr; + float skew; + + RingBuffer* far_pre_buf; // Time domain far-end pre-buffer. + + int farend_started; + + // Aec instance counter. + static int instance_count; + AecCore* aec; +} Aec; + +/* + * Allocates the memory needed by the AEC. The memory needs to be initialized + * separately using the WebRtcAec_Init() function. Returns a pointer to the + * object or NULL on error. + */ +void* WebRtcAec_Create(); + +/* + * This function releases the memory allocated by WebRtcAec_Create(). + * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecInst Pointer to the AEC instance + */ +void WebRtcAec_Free(void* aecInst); + +/* + * Initializes an AEC instance. + * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecInst Pointer to the AEC instance + * int32_t sampFreq Sampling frequency of data + * int32_t scSampFreq Soundcard sampling frequency + * + * Outputs Description + * ------------------------------------------------------------------- + * int32_t return 0: OK + * -1: error + */ +int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq); + +/* + * Inserts an 80 or 160 sample block of data into the farend buffer. 
+ * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecInst Pointer to the AEC instance + * const float* farend In buffer containing one frame of + * farend signal for L band + * int16_t nrOfSamples Number of samples in farend buffer + * + * Outputs Description + * ------------------------------------------------------------------- + * int32_t return 0: OK + * 12000-12050: error code + */ +int32_t WebRtcAec_BufferFarend(void* aecInst, + const float* farend, + size_t nrOfSamples); + +/* + * Reports any errors that would arise if buffering a farend buffer + * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecInst Pointer to the AEC instance + * const float* farend In buffer containing one frame of + * farend signal for L band + * int16_t nrOfSamples Number of samples in farend buffer + * + * Outputs Description + * ------------------------------------------------------------------- + * int32_t return 0: OK + * 12000-12050: error code + */ +int32_t WebRtcAec_GetBufferFarendError(void* aecInst, + const float* farend, + size_t nrOfSamples); + +/* + * Runs the echo canceller on an 80 or 160 sample blocks of data. 
+ * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecInst Pointer to the AEC instance + * float* const* nearend In buffer containing one frame of + * nearend+echo signal for each band + * int num_bands Number of bands in nearend buffer + * int16_t nrOfSamples Number of samples in nearend buffer + * int16_t msInSndCardBuf Delay estimate for sound card and + * system buffers + * int16_t skew Difference between number of samples played + * and recorded at the soundcard (for clock skew + * compensation) + * + * Outputs Description + * ------------------------------------------------------------------- + * float* const* out Out buffer, one frame of processed nearend + * for each band + * int32_t return 0: OK + * 12000-12050: error code + */ +int32_t WebRtcAec_Process(void* aecInst, + const float* const* nearend, + size_t num_bands, + float* const* out, + size_t nrOfSamples, + int16_t msInSndCardBuf, + int32_t skew); + +/* + * This function enables the user to set certain parameters on-the-fly. + * + * Inputs Description + * ------------------------------------------------------------------- + * void* handle Pointer to the AEC instance + * AecConfig config Config instance that contains all + * properties to be set + * + * Outputs Description + * ------------------------------------------------------------------- + * int return 0: OK + * 12000-12050: error code + */ +int WebRtcAec_set_config(void* handle, AecConfig config); + +/* + * Gets the current echo status of the nearend signal. 
+ * + * Inputs Description + * ------------------------------------------------------------------- + * void* handle Pointer to the AEC instance + * + * Outputs Description + * ------------------------------------------------------------------- + * int* status 0: Almost certainly nearend single-talk + * 1: Might not be neared single-talk + * int return 0: OK + * 12000-12050: error code + */ +int WebRtcAec_get_echo_status(void* handle, int* status); + +/* + * Gets the current echo metrics for the session. + * + * Inputs Description + * ------------------------------------------------------------------- + * void* handle Pointer to the AEC instance + * + * Outputs Description + * ------------------------------------------------------------------- + * AecMetrics* metrics Struct which will be filled out with the + * current echo metrics. + * int return 0: OK + * 12000-12050: error code + */ +int WebRtcAec_GetMetrics(void* handle, AecMetrics* metrics); + +/* + * Gets the current delay metrics for the session. + * + * Inputs Description + * ------------------------------------------------------------------- + * void* handle Pointer to the AEC instance + * + * Outputs Description + * ------------------------------------------------------------------- + * int* median Delay median value. + * int* std Delay standard deviation. + * float* fraction_poor_delays Fraction of the delay estimates that may + * cause the AEC to perform poorly. + * + * int return 0: OK + * 12000-12050: error code + */ +int WebRtcAec_GetDelayMetrics(void* handle, + int* median, + int* std, + float* fraction_poor_delays); + +// Returns a pointer to the low level AEC handle. +// +// Input: +// - handle : Pointer to the AEC instance. +// +// Return value: +// - AecCore pointer : NULL for error. 
+// +struct AecCore* WebRtcAec_aec_core(void* handle); + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.cc new file mode 100644 index 000000000..41debd21a --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.cc @@ -0,0 +1,1231 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/modules/audio_processing/aecm/aecm_core.h" + +#include +#include + +extern "C" { +#include "webrtc/common_audio/ring_buffer.h" +#include "webrtc/common_audio/signal_processing/include/real_fft.h" +} +#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h" +#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h" +extern "C" { +#include "webrtc/system_wrappers/include/cpu_features_wrapper.h" +} + +#include "webrtc/base/checks.h" +#include "webrtc/typedefs.h" + +#ifdef AEC_DEBUG +FILE *dfile; +FILE *testfile; +#endif + +const int16_t WebRtcAecm_kCosTable[] = { + 8192, 8190, 8187, 8180, 8172, 8160, 8147, 8130, 8112, + 8091, 8067, 8041, 8012, 7982, 7948, 7912, 7874, 7834, + 7791, 7745, 7697, 7647, 7595, 7540, 7483, 7424, 7362, + 7299, 7233, 7164, 7094, 7021, 6947, 6870, 6791, 6710, + 6627, 6542, 6455, 6366, 6275, 6182, 6087, 5991, 5892, + 5792, 5690, 5586, 5481, 5374, 5265, 5155, 5043, 4930, + 4815, 4698, 4580, 4461, 4341, 4219, 4096, 3971, 3845, + 3719, 3591, 3462, 3331, 3200, 3068, 2935, 
2801, 2667, + 2531, 2395, 2258, 2120, 1981, 1842, 1703, 1563, 1422, + 1281, 1140, 998, 856, 713, 571, 428, 285, 142, + 0, -142, -285, -428, -571, -713, -856, -998, -1140, + -1281, -1422, -1563, -1703, -1842, -1981, -2120, -2258, -2395, + -2531, -2667, -2801, -2935, -3068, -3200, -3331, -3462, -3591, + -3719, -3845, -3971, -4095, -4219, -4341, -4461, -4580, -4698, + -4815, -4930, -5043, -5155, -5265, -5374, -5481, -5586, -5690, + -5792, -5892, -5991, -6087, -6182, -6275, -6366, -6455, -6542, + -6627, -6710, -6791, -6870, -6947, -7021, -7094, -7164, -7233, + -7299, -7362, -7424, -7483, -7540, -7595, -7647, -7697, -7745, + -7791, -7834, -7874, -7912, -7948, -7982, -8012, -8041, -8067, + -8091, -8112, -8130, -8147, -8160, -8172, -8180, -8187, -8190, + -8191, -8190, -8187, -8180, -8172, -8160, -8147, -8130, -8112, + -8091, -8067, -8041, -8012, -7982, -7948, -7912, -7874, -7834, + -7791, -7745, -7697, -7647, -7595, -7540, -7483, -7424, -7362, + -7299, -7233, -7164, -7094, -7021, -6947, -6870, -6791, -6710, + -6627, -6542, -6455, -6366, -6275, -6182, -6087, -5991, -5892, + -5792, -5690, -5586, -5481, -5374, -5265, -5155, -5043, -4930, + -4815, -4698, -4580, -4461, -4341, -4219, -4096, -3971, -3845, + -3719, -3591, -3462, -3331, -3200, -3068, -2935, -2801, -2667, + -2531, -2395, -2258, -2120, -1981, -1842, -1703, -1563, -1422, + -1281, -1140, -998, -856, -713, -571, -428, -285, -142, + 0, 142, 285, 428, 571, 713, 856, 998, 1140, + 1281, 1422, 1563, 1703, 1842, 1981, 2120, 2258, 2395, + 2531, 2667, 2801, 2935, 3068, 3200, 3331, 3462, 3591, + 3719, 3845, 3971, 4095, 4219, 4341, 4461, 4580, 4698, + 4815, 4930, 5043, 5155, 5265, 5374, 5481, 5586, 5690, + 5792, 5892, 5991, 6087, 6182, 6275, 6366, 6455, 6542, + 6627, 6710, 6791, 6870, 6947, 7021, 7094, 7164, 7233, + 7299, 7362, 7424, 7483, 7540, 7595, 7647, 7697, 7745, + 7791, 7834, 7874, 7912, 7948, 7982, 8012, 8041, 8067, + 8091, 8112, 8130, 8147, 8160, 8172, 8180, 8187, 8190 +}; + +const int16_t WebRtcAecm_kSinTable[] = { + 
0, 142, 285, 428, 571, 713, 856, 998, + 1140, 1281, 1422, 1563, 1703, 1842, 1981, 2120, + 2258, 2395, 2531, 2667, 2801, 2935, 3068, 3200, + 3331, 3462, 3591, 3719, 3845, 3971, 4095, 4219, + 4341, 4461, 4580, 4698, 4815, 4930, 5043, 5155, + 5265, 5374, 5481, 5586, 5690, 5792, 5892, 5991, + 6087, 6182, 6275, 6366, 6455, 6542, 6627, 6710, + 6791, 6870, 6947, 7021, 7094, 7164, 7233, 7299, + 7362, 7424, 7483, 7540, 7595, 7647, 7697, 7745, + 7791, 7834, 7874, 7912, 7948, 7982, 8012, 8041, + 8067, 8091, 8112, 8130, 8147, 8160, 8172, 8180, + 8187, 8190, 8191, 8190, 8187, 8180, 8172, 8160, + 8147, 8130, 8112, 8091, 8067, 8041, 8012, 7982, + 7948, 7912, 7874, 7834, 7791, 7745, 7697, 7647, + 7595, 7540, 7483, 7424, 7362, 7299, 7233, 7164, + 7094, 7021, 6947, 6870, 6791, 6710, 6627, 6542, + 6455, 6366, 6275, 6182, 6087, 5991, 5892, 5792, + 5690, 5586, 5481, 5374, 5265, 5155, 5043, 4930, + 4815, 4698, 4580, 4461, 4341, 4219, 4096, 3971, + 3845, 3719, 3591, 3462, 3331, 3200, 3068, 2935, + 2801, 2667, 2531, 2395, 2258, 2120, 1981, 1842, + 1703, 1563, 1422, 1281, 1140, 998, 856, 713, + 571, 428, 285, 142, 0, -142, -285, -428, + -571, -713, -856, -998, -1140, -1281, -1422, -1563, + -1703, -1842, -1981, -2120, -2258, -2395, -2531, -2667, + -2801, -2935, -3068, -3200, -3331, -3462, -3591, -3719, + -3845, -3971, -4095, -4219, -4341, -4461, -4580, -4698, + -4815, -4930, -5043, -5155, -5265, -5374, -5481, -5586, + -5690, -5792, -5892, -5991, -6087, -6182, -6275, -6366, + -6455, -6542, -6627, -6710, -6791, -6870, -6947, -7021, + -7094, -7164, -7233, -7299, -7362, -7424, -7483, -7540, + -7595, -7647, -7697, -7745, -7791, -7834, -7874, -7912, + -7948, -7982, -8012, -8041, -8067, -8091, -8112, -8130, + -8147, -8160, -8172, -8180, -8187, -8190, -8191, -8190, + -8187, -8180, -8172, -8160, -8147, -8130, -8112, -8091, + -8067, -8041, -8012, -7982, -7948, -7912, -7874, -7834, + -7791, -7745, -7697, -7647, -7595, -7540, -7483, -7424, + -7362, -7299, -7233, -7164, -7094, -7021, -6947, -6870, + 
-6791, -6710, -6627, -6542, -6455, -6366, -6275, -6182, + -6087, -5991, -5892, -5792, -5690, -5586, -5481, -5374, + -5265, -5155, -5043, -4930, -4815, -4698, -4580, -4461, + -4341, -4219, -4096, -3971, -3845, -3719, -3591, -3462, + -3331, -3200, -3068, -2935, -2801, -2667, -2531, -2395, + -2258, -2120, -1981, -1842, -1703, -1563, -1422, -1281, + -1140, -998, -856, -713, -571, -428, -285, -142 +}; + +// Initialization table for echo channel in 8 kHz +static const int16_t kChannelStored8kHz[PART_LEN1] = { + 2040, 1815, 1590, 1498, 1405, 1395, 1385, 1418, + 1451, 1506, 1562, 1644, 1726, 1804, 1882, 1918, + 1953, 1982, 2010, 2025, 2040, 2034, 2027, 2021, + 2014, 1997, 1980, 1925, 1869, 1800, 1732, 1683, + 1635, 1604, 1572, 1545, 1517, 1481, 1444, 1405, + 1367, 1331, 1294, 1270, 1245, 1239, 1233, 1247, + 1260, 1282, 1303, 1338, 1373, 1407, 1441, 1470, + 1499, 1524, 1549, 1565, 1582, 1601, 1621, 1649, + 1676 +}; + +// Initialization table for echo channel in 16 kHz +static const int16_t kChannelStored16kHz[PART_LEN1] = { + 2040, 1590, 1405, 1385, 1451, 1562, 1726, 1882, + 1953, 2010, 2040, 2027, 2014, 1980, 1869, 1732, + 1635, 1572, 1517, 1444, 1367, 1294, 1245, 1233, + 1260, 1303, 1373, 1441, 1499, 1549, 1582, 1621, + 1676, 1741, 1802, 1861, 1921, 1983, 2040, 2102, + 2170, 2265, 2375, 2515, 2651, 2781, 2922, 3075, + 3253, 3471, 3738, 3976, 4151, 4258, 4308, 4288, + 4270, 4253, 4237, 4179, 4086, 3947, 3757, 3484, + 3153 +}; + +// Moves the pointer to the next entry and inserts |far_spectrum| and +// corresponding Q-domain in its buffer. 
+// +// Inputs: +// - self : Pointer to the delay estimation instance +// - far_spectrum : Pointer to the far end spectrum +// - far_q : Q-domain of far end spectrum +// +void WebRtcAecm_UpdateFarHistory(AecmCore* self, + uint16_t* far_spectrum, + int far_q) { + // Get new buffer position + self->far_history_pos++; + if (self->far_history_pos >= MAX_DELAY) { + self->far_history_pos = 0; + } + // Update Q-domain buffer + self->far_q_domains[self->far_history_pos] = far_q; + // Update far end spectrum buffer + memcpy(&(self->far_history[self->far_history_pos * PART_LEN1]), + far_spectrum, + sizeof(uint16_t) * PART_LEN1); +} + +// Returns a pointer to the far end spectrum aligned to current near end +// spectrum. The function WebRtc_DelayEstimatorProcessFix(...) should have been +// called before AlignedFarend(...). Otherwise, you get the pointer to the +// previous frame. The memory is only valid until the next call of +// WebRtc_DelayEstimatorProcessFix(...). +// +// Inputs: +// - self : Pointer to the AECM instance. +// - delay : Current delay estimate. +// +// Output: +// - far_q : The Q-domain of the aligned far end spectrum +// +// Return value: +// - far_spectrum : Pointer to the aligned far end spectrum +// NULL - Error +// +const uint16_t* WebRtcAecm_AlignedFarend(AecmCore* self, + int* far_q, + int delay) { + int buffer_position = 0; + RTC_DCHECK(self); + buffer_position = self->far_history_pos - delay; + + // Check buffer position + if (buffer_position < 0) { + buffer_position += MAX_DELAY; + } + // Get Q-domain + *far_q = self->far_q_domains[buffer_position]; + // Return far end spectrum + return &(self->far_history[buffer_position * PART_LEN1]); +} + +// Declare function pointers. 
+CalcLinearEnergies WebRtcAecm_CalcLinearEnergies; +StoreAdaptiveChannel WebRtcAecm_StoreAdaptiveChannel; +ResetAdaptiveChannel WebRtcAecm_ResetAdaptiveChannel; + +AecmCore* WebRtcAecm_CreateCore() { + AecmCore* aecm = static_cast(malloc(sizeof(AecmCore))); + + aecm->farFrameBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, + sizeof(int16_t)); + if (!aecm->farFrameBuf) + { + WebRtcAecm_FreeCore(aecm); + return NULL; + } + + aecm->nearNoisyFrameBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, + sizeof(int16_t)); + if (!aecm->nearNoisyFrameBuf) + { + WebRtcAecm_FreeCore(aecm); + return NULL; + } + + aecm->nearCleanFrameBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, + sizeof(int16_t)); + if (!aecm->nearCleanFrameBuf) + { + WebRtcAecm_FreeCore(aecm); + return NULL; + } + + aecm->outFrameBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, + sizeof(int16_t)); + if (!aecm->outFrameBuf) + { + WebRtcAecm_FreeCore(aecm); + return NULL; + } + + aecm->delay_estimator_farend = WebRtc_CreateDelayEstimatorFarend(PART_LEN1, + MAX_DELAY); + if (aecm->delay_estimator_farend == NULL) { + WebRtcAecm_FreeCore(aecm); + return NULL; + } + aecm->delay_estimator = + WebRtc_CreateDelayEstimator(aecm->delay_estimator_farend, 0); + if (aecm->delay_estimator == NULL) { + WebRtcAecm_FreeCore(aecm); + return NULL; + } + // TODO(bjornv): Explicitly disable robust delay validation until no + // performance regression has been established. Then remove the line. + WebRtc_enable_robust_validation(aecm->delay_estimator, 0); + + aecm->real_fft = WebRtcSpl_CreateRealFFT(PART_LEN_SHIFT); + if (aecm->real_fft == NULL) { + WebRtcAecm_FreeCore(aecm); + return NULL; + } + + // Init some aecm pointers. 16 and 32 byte alignment is only necessary + // for Neon code currently. 
+ aecm->xBuf = (int16_t*) (((uintptr_t)aecm->xBuf_buf + 31) & ~ 31); + aecm->dBufClean = (int16_t*) (((uintptr_t)aecm->dBufClean_buf + 31) & ~ 31); + aecm->dBufNoisy = (int16_t*) (((uintptr_t)aecm->dBufNoisy_buf + 31) & ~ 31); + aecm->outBuf = (int16_t*) (((uintptr_t)aecm->outBuf_buf + 15) & ~ 15); + aecm->channelStored = (int16_t*) (((uintptr_t) + aecm->channelStored_buf + 15) & ~ 15); + aecm->channelAdapt16 = (int16_t*) (((uintptr_t) + aecm->channelAdapt16_buf + 15) & ~ 15); + aecm->channelAdapt32 = (int32_t*) (((uintptr_t) + aecm->channelAdapt32_buf + 31) & ~ 31); + + return aecm; +} + +void WebRtcAecm_InitEchoPathCore(AecmCore* aecm, const int16_t* echo_path) { + int i = 0; + + // Reset the stored channel + memcpy(aecm->channelStored, echo_path, sizeof(int16_t) * PART_LEN1); + // Reset the adapted channels + memcpy(aecm->channelAdapt16, echo_path, sizeof(int16_t) * PART_LEN1); + for (i = 0; i < PART_LEN1; i++) + { + aecm->channelAdapt32[i] = (int32_t)aecm->channelAdapt16[i] << 16; + } + + // Reset channel storing variables + aecm->mseAdaptOld = 1000; + aecm->mseStoredOld = 1000; + aecm->mseThreshold = WEBRTC_SPL_WORD32_MAX; + aecm->mseChannelCount = 0; +} + +static void CalcLinearEnergiesC(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echo_est, + uint32_t* far_energy, + uint32_t* echo_energy_adapt, + uint32_t* echo_energy_stored) { + int i; + + // Get energy for the delayed far end signal and estimated + // echo using both stored and adapted channels. + for (i = 0; i < PART_LEN1; i++) + { + echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], + far_spectrum[i]); + (*far_energy) += (uint32_t)(far_spectrum[i]); + *echo_energy_adapt += aecm->channelAdapt16[i] * far_spectrum[i]; + (*echo_energy_stored) += (uint32_t)echo_est[i]; + } +} + +static void StoreAdaptiveChannelC(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echo_est) { + int i; + + // During startup we store the channel every block. 
+ memcpy(aecm->channelStored, aecm->channelAdapt16, sizeof(int16_t) * PART_LEN1); + // Recalculate echo estimate + for (i = 0; i < PART_LEN; i += 4) + { + echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], + far_spectrum[i]); + echo_est[i + 1] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 1], + far_spectrum[i + 1]); + echo_est[i + 2] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 2], + far_spectrum[i + 2]); + echo_est[i + 3] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 3], + far_spectrum[i + 3]); + } + echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], + far_spectrum[i]); +} + +static void ResetAdaptiveChannelC(AecmCore* aecm) { + int i; + + // The stored channel has a significantly lower MSE than the adaptive one for + // two consecutive calculations. Reset the adaptive channel. + memcpy(aecm->channelAdapt16, aecm->channelStored, + sizeof(int16_t) * PART_LEN1); + // Restore the W32 channel + for (i = 0; i < PART_LEN; i += 4) + { + aecm->channelAdapt32[i] = (int32_t)aecm->channelStored[i] << 16; + aecm->channelAdapt32[i + 1] = (int32_t)aecm->channelStored[i + 1] << 16; + aecm->channelAdapt32[i + 2] = (int32_t)aecm->channelStored[i + 2] << 16; + aecm->channelAdapt32[i + 3] = (int32_t)aecm->channelStored[i + 3] << 16; + } + aecm->channelAdapt32[i] = (int32_t)aecm->channelStored[i] << 16; +} + +// Initialize function pointers for ARM Neon platform. +#if defined(WEBRTC_HAS_NEON) +static void WebRtcAecm_InitNeon(void) +{ + WebRtcAecm_StoreAdaptiveChannel = WebRtcAecm_StoreAdaptiveChannelNeon; + WebRtcAecm_ResetAdaptiveChannel = WebRtcAecm_ResetAdaptiveChannelNeon; + WebRtcAecm_CalcLinearEnergies = WebRtcAecm_CalcLinearEnergiesNeon; +} +#endif + +// Initialize function pointers for MIPS platform. 
#if defined(MIPS32_LE)
static void WebRtcAecm_InitMips(void)
{
#if defined(MIPS_DSP_R1_LE)
    WebRtcAecm_StoreAdaptiveChannel = WebRtcAecm_StoreAdaptiveChannel_mips;
    WebRtcAecm_ResetAdaptiveChannel = WebRtcAecm_ResetAdaptiveChannel_mips;
#endif
    WebRtcAecm_CalcLinearEnergies = WebRtcAecm_CalcLinearEnergies_mips;
}
#endif

// WebRtcAecm_InitCore(...)
//
// This function initializes the AECM instance created with WebRtcAecm_CreateCore(...)
// Input:
//      - aecm          : Pointer to the Echo Suppression instance
//      - samplingFreq  : Sampling Frequency
//
// Output:
//      - aecm          : Initialized instance
//
// Return value         :  0 - Ok
//                        -1 - Error
//
int WebRtcAecm_InitCore(AecmCore* const aecm, int samplingFreq) {
    int i = 0;
    // tmp32/tmp16 seed the pink-noise shaping loop further down.
    int32_t tmp32 = PART_LEN1 * PART_LEN1;
    int16_t tmp16 = PART_LEN1;

    /*if (samplingFreq != 8000 && samplingFreq != 16000)
    {
        samplingFreq = 8000;
        return -1;
    }*/
    // sanity check of sampling frequency
    // NOTE(review): the check above is commented out upstream, so any
    // |samplingFreq| is accepted; |mult| is 1 for 8 kHz, 2 for 16 kHz.
    aecm->mult = (int16_t)samplingFreq / 8000;

    aecm->farBufWritePos = 0;
    aecm->farBufReadPos = 0;
    aecm->knownDelay = 0;
    aecm->lastKnownDelay = 0;

    WebRtc_InitBuffer(aecm->farFrameBuf);
    WebRtc_InitBuffer(aecm->nearNoisyFrameBuf);
    WebRtc_InitBuffer(aecm->nearCleanFrameBuf);
    WebRtc_InitBuffer(aecm->outFrameBuf);

    memset(aecm->xBuf_buf, 0, sizeof(aecm->xBuf_buf));
    memset(aecm->dBufClean_buf, 0, sizeof(aecm->dBufClean_buf));
    memset(aecm->dBufNoisy_buf, 0, sizeof(aecm->dBufNoisy_buf));
    memset(aecm->outBuf_buf, 0, sizeof(aecm->outBuf_buf));

    aecm->seed = 666;
    aecm->totCount = 0;

    if (WebRtc_InitDelayEstimatorFarend(aecm->delay_estimator_farend) != 0) {
        return -1;
    }
    if (WebRtc_InitDelayEstimator(aecm->delay_estimator) != 0) {
        return -1;
    }
    // Set far end histories to zero
    memset(aecm->far_history, 0, sizeof(uint16_t) * PART_LEN1 * MAX_DELAY);
    memset(aecm->far_q_domains, 0, sizeof(int) * MAX_DELAY);
    aecm->far_history_pos = MAX_DELAY;

    aecm->nlpFlag = 1;
    aecm->fixedDelay = -1;

    aecm->dfaCleanQDomain = 0;
    aecm->dfaCleanQDomainOld = 0;
    aecm->dfaNoisyQDomain = 0;
    aecm->dfaNoisyQDomainOld = 0;

    memset(aecm->nearLogEnergy, 0, sizeof(aecm->nearLogEnergy));
    aecm->farLogEnergy = 0;
    memset(aecm->echoAdaptLogEnergy, 0, sizeof(aecm->echoAdaptLogEnergy));
    memset(aecm->echoStoredLogEnergy, 0, sizeof(aecm->echoStoredLogEnergy));

    // Initialize the echo channels with a stored shape.
    if (samplingFreq == 8000)
    {
        WebRtcAecm_InitEchoPathCore(aecm, kChannelStored8kHz);
    }
    else
    {
        WebRtcAecm_InitEchoPathCore(aecm, kChannelStored16kHz);
    }

    memset(aecm->echoFilt, 0, sizeof(aecm->echoFilt));
    memset(aecm->nearFilt, 0, sizeof(aecm->nearFilt));
    aecm->noiseEstCtr = 0;

    aecm->cngMode = AecmTrue;

    memset(aecm->noiseEstTooLowCtr, 0, sizeof(aecm->noiseEstTooLowCtr));
    memset(aecm->noiseEstTooHighCtr, 0, sizeof(aecm->noiseEstTooHighCtr));
    // Shape the initial noise level to an approximate pink noise.
    // The first loop decreases tmp32 quadratically with the bin index; the
    // second loop flattens the tail at the last computed value.
    for (i = 0; i < (PART_LEN1 >> 1) - 1; i++)
    {
        aecm->noiseEst[i] = (tmp32 << 8);
        tmp16--;
        tmp32 -= (int32_t)((tmp16 << 1) + 1);
    }
    for (; i < PART_LEN1; i++)
    {
        aecm->noiseEst[i] = (tmp32 << 8);
    }

    // Extreme initial values so the asymmetric min/max trackers lock onto the
    // first real far-end energy measurements.
    aecm->farEnergyMin = WEBRTC_SPL_WORD16_MAX;
    aecm->farEnergyMax = WEBRTC_SPL_WORD16_MIN;
    aecm->farEnergyMaxMin = 0;
    aecm->farEnergyVAD = FAR_ENERGY_MIN; // This prevents false speech detection at the
                                         // beginning.
    aecm->farEnergyMSE = 0;
    aecm->currentVADValue = 0;
    aecm->vadUpdateCount = 0;
    aecm->firstVAD = 1;

    aecm->startupState = 0;
    aecm->supGain = SUPGAIN_DEFAULT;
    aecm->supGainOld = SUPGAIN_DEFAULT;

    aecm->supGainErrParamA = SUPGAIN_ERROR_PARAM_A;
    aecm->supGainErrParamD = SUPGAIN_ERROR_PARAM_D;
    aecm->supGainErrParamDiffAB = SUPGAIN_ERROR_PARAM_A - SUPGAIN_ERROR_PARAM_B;
    aecm->supGainErrParamDiffBD = SUPGAIN_ERROR_PARAM_B - SUPGAIN_ERROR_PARAM_D;

    // Assert a preprocessor definition at compile-time. It's an assumption
    // used in assembly code, so check the assembly files before any change.
    static_assert(PART_LEN % 16 == 0, "PART_LEN is not a multiple of 16");

    // Initialize function pointers.
    WebRtcAecm_CalcLinearEnergies = CalcLinearEnergiesC;
    WebRtcAecm_StoreAdaptiveChannel = StoreAdaptiveChannelC;
    WebRtcAecm_ResetAdaptiveChannel = ResetAdaptiveChannelC;

#if defined(WEBRTC_HAS_NEON)
    WebRtcAecm_InitNeon();
#endif

#if defined(MIPS32_LE)
    WebRtcAecm_InitMips();
#endif
    return 0;
}

// TODO(bjornv): This function is currently not used. Add support for these
// parameters from a higher level
int WebRtcAecm_Control(AecmCore* aecm, int delay, int nlpFlag) {
    aecm->nlpFlag = nlpFlag;
    aecm->fixedDelay = delay;

    return 0;
}

// Releases everything allocated by WebRtcAecm_CreateCore(), including the
// instance itself. Safe to call with NULL.
void WebRtcAecm_FreeCore(AecmCore* aecm) {
    if (aecm == NULL) {
        return;
    }

    WebRtc_FreeBuffer(aecm->farFrameBuf);
    WebRtc_FreeBuffer(aecm->nearNoisyFrameBuf);
    WebRtc_FreeBuffer(aecm->nearCleanFrameBuf);
    WebRtc_FreeBuffer(aecm->outFrameBuf);

    WebRtc_FreeDelayEstimator(aecm->delay_estimator);
    WebRtc_FreeDelayEstimatorFarend(aecm->delay_estimator_farend);
    WebRtcSpl_FreeRealFFT(aecm->real_fft);

    free(aecm);
}

// Splits one FRAME_LEN frame into PART_LEN blocks, runs each through
// WebRtcAecm_ProcessBlock(), and assembles one FRAME_LEN output frame.
// Returns 0 on success, -1 if block processing fails.
int WebRtcAecm_ProcessFrame(AecmCore* aecm,
                            const int16_t* farend,
                            const int16_t* nearendNoisy,
                            const int16_t* nearendClean,
                            int16_t* out) {
    int16_t outBlock_buf[PART_LEN + 8]; // Align buffer to 8-byte boundary.
    // NOTE(review): the mask below actually aligns to 16 bytes within the
    // 8-extra-element (16-byte) slack — consistent with the channel buffers.
    int16_t* outBlock = (int16_t*) (((uintptr_t) outBlock_buf + 15) & ~ 15);

    int16_t farFrame[FRAME_LEN];
    const int16_t* out_ptr = NULL;
    int size = 0;

    // Buffer the current frame.
    // Fetch an older one corresponding to the delay.
    WebRtcAecm_BufferFarFrame(aecm, farend, FRAME_LEN);
    WebRtcAecm_FetchFarFrame(aecm, farFrame, FRAME_LEN, aecm->knownDelay);

    // Buffer the synchronized far and near frames,
    // to pass the smaller blocks individually.
    WebRtc_WriteBuffer(aecm->farFrameBuf, farFrame, FRAME_LEN);
    WebRtc_WriteBuffer(aecm->nearNoisyFrameBuf, nearendNoisy, FRAME_LEN);
    if (nearendClean != NULL)
    {
        WebRtc_WriteBuffer(aecm->nearCleanFrameBuf, nearendClean, FRAME_LEN);
    }

    // Process as many blocks as possible.
    while (WebRtc_available_read(aecm->farFrameBuf) >= PART_LEN)
    {
        int16_t far_block[PART_LEN];
        const int16_t* far_block_ptr = NULL;
        int16_t near_noisy_block[PART_LEN];
        const int16_t* near_noisy_block_ptr = NULL;

        WebRtc_ReadBuffer(aecm->farFrameBuf, (void**) &far_block_ptr, far_block,
                          PART_LEN);
        WebRtc_ReadBuffer(aecm->nearNoisyFrameBuf,
                          (void**) &near_noisy_block_ptr,
                          near_noisy_block,
                          PART_LEN);
        if (nearendClean != NULL)
        {
            int16_t near_clean_block[PART_LEN];
            const int16_t* near_clean_block_ptr = NULL;

            WebRtc_ReadBuffer(aecm->nearCleanFrameBuf,
                              (void**) &near_clean_block_ptr,
                              near_clean_block,
                              PART_LEN);
            if (WebRtcAecm_ProcessBlock(aecm,
                                        far_block_ptr,
                                        near_noisy_block_ptr,
                                        near_clean_block_ptr,
                                        outBlock) == -1)
            {
                return -1;
            }
        } else
        {
            if (WebRtcAecm_ProcessBlock(aecm,
                                        far_block_ptr,
                                        near_noisy_block_ptr,
                                        NULL,
                                        outBlock) == -1)
            {
                return -1;
            }
        }

        WebRtc_WriteBuffer(aecm->outFrameBuf, outBlock, PART_LEN);
    }

    // Stuff the out buffer if we have less than a frame to output.
    // This should only happen for the first frame.
    size = (int) WebRtc_available_read(aecm->outFrameBuf);
    if (size < FRAME_LEN)
    {
        // Moving the read pointer backwards effectively zero-pads the output.
        WebRtc_MoveReadPtr(aecm->outFrameBuf, size - FRAME_LEN);
    }

    // Obtain an output frame.
    WebRtc_ReadBuffer(aecm->outFrameBuf, (void**) &out_ptr, out, FRAME_LEN);
    if (out_ptr != out) {
        // ReadBuffer() hasn't copied to |out| in this case.
        memcpy(out, out_ptr, FRAME_LEN * sizeof(int16_t));
    }

    return 0;
}

// WebRtcAecm_AsymFilt(...)
//
// Performs asymmetric filtering.
//
// Inputs:
//      - filtOld       : Previous filtered value.
//      - inVal         : New input value.
//      - stepSizePos   : Step size when we have a positive contribution.
//      - stepSizeNeg   : Step size when we have a negative contribution.
//
// Output:
//
// Return: - Filtered value.
//
int16_t WebRtcAecm_AsymFilt(const int16_t filtOld, const int16_t inVal,
                            const int16_t stepSizePos,
                            const int16_t stepSizeNeg)
{
    int16_t retVal;

    // Uninitialized tracker (still at the int16 extremes set by InitCore):
    // jump straight to the input value.
    // Note: bitwise | on the two comparisons is intentional upstream style.
    if ((filtOld == WEBRTC_SPL_WORD16_MAX) | (filtOld == WEBRTC_SPL_WORD16_MIN))
    {
        return inVal;
    }
    retVal = filtOld;
    // Step sizes are shift amounts, so larger step size => smaller update.
    if (filtOld > inVal)
    {
        retVal -= (filtOld - inVal) >> stepSizeNeg;
    } else
    {
        retVal += (inVal - filtOld) >> stepSizePos;
    }

    return retVal;
}

// ExtractFractionPart(a, zeros)
//
// returns the fraction part of |a|, with |zeros| number of leading zeros, as an
// int16_t scaled to Q8. There is no sanity check of |a| in the sense that the
// number of zeros match.
static int16_t ExtractFractionPart(uint32_t a, int zeros) {
    return (int16_t)(((a << zeros) & 0x7FFFFFFF) >> 23);
}

// Calculates and returns the log of |energy| in Q8. The input |energy| is
// supposed to be in Q(|q_domain|).
static int16_t LogOfEnergyInQ8(uint32_t energy, int q_domain) {
    static const int16_t kLogLowValue = PART_LEN_SHIFT << 7;
    int16_t log_energy_q8 = kLogLowValue;
    if (energy > 0) {
        int zeros = WebRtcSpl_NormU32(energy);
        int16_t frac = ExtractFractionPart(energy, zeros);
        // log2 of |energy| in Q8.
        log_energy_q8 += ((31 - zeros) << 8) + frac - (q_domain << 8);
    }
    return log_energy_q8;
}

// WebRtcAecm_CalcEnergies(...)
//
// This function calculates the log of energies for nearend, farend and estimated
// echoes. There is also an update of energy decision levels, i.e. internal VAD.
//
//
// @param  aecm          [i/o]  Handle of the AECM instance.
// @param  far_spectrum  [in]   Pointer to farend spectrum.
// @param  far_q         [in]   Q-domain of farend spectrum.
// @param  nearEner      [in]   Near end energy for current block in
//                              Q(aecm->dfaQDomain).
// @param  echoEst       [out]  Estimated echo in Q(xfa_q+RESOLUTION_CHANNEL16).
//
void WebRtcAecm_CalcEnergies(AecmCore* aecm,
                             const uint16_t* far_spectrum,
                             const int16_t far_q,
                             const uint32_t nearEner,
                             int32_t* echoEst) {
    // Local variables
    uint32_t tmpAdapt = 0;
    uint32_t tmpStored = 0;
    uint32_t tmpFar = 0;

    int i;

    int16_t tmp16;
    // Default asymmetric-filter step sizes (shift amounts); tightened below
    // during startup.
    int16_t increase_max_shifts = 4;
    int16_t decrease_max_shifts = 11;
    int16_t increase_min_shifts = 11;
    int16_t decrease_min_shifts = 3;

    // Get log of near end energy and store in buffer

    // Shift buffer
    memmove(aecm->nearLogEnergy + 1, aecm->nearLogEnergy,
            sizeof(int16_t) * (MAX_BUF_LEN - 1));

    // Logarithm of integrated magnitude spectrum (nearEner)
    aecm->nearLogEnergy[0] = LogOfEnergyInQ8(nearEner, aecm->dfaNoisyQDomain);

    // Platform-selected implementation (C/NEON/MIPS) via function pointer.
    WebRtcAecm_CalcLinearEnergies(aecm, far_spectrum, echoEst, &tmpFar, &tmpAdapt, &tmpStored);

    // Shift buffers
    memmove(aecm->echoAdaptLogEnergy + 1, aecm->echoAdaptLogEnergy,
            sizeof(int16_t) * (MAX_BUF_LEN - 1));
    memmove(aecm->echoStoredLogEnergy + 1, aecm->echoStoredLogEnergy,
            sizeof(int16_t) * (MAX_BUF_LEN - 1));

    // Logarithm of delayed far end energy
    aecm->farLogEnergy = LogOfEnergyInQ8(tmpFar, far_q);

    // Logarithm of estimated echo energy through adapted channel
    aecm->echoAdaptLogEnergy[0] = LogOfEnergyInQ8(tmpAdapt,
                                                  RESOLUTION_CHANNEL16 + far_q);

    // Logarithm of estimated echo energy through stored channel
    aecm->echoStoredLogEnergy[0] =
        LogOfEnergyInQ8(tmpStored, RESOLUTION_CHANNEL16 + far_q);

    // Update farend energy levels (min, max, vad, mse)
    if (aecm->farLogEnergy > FAR_ENERGY_MIN)
    {
        if (aecm->startupState == 0)
        {
            // Track faster while converging during startup.
            increase_max_shifts = 2;
            decrease_min_shifts = 2;
            increase_min_shifts = 8;
        }

        aecm->farEnergyMin = WebRtcAecm_AsymFilt(aecm->farEnergyMin, aecm->farLogEnergy,
                                                 increase_min_shifts, decrease_min_shifts);
        aecm->farEnergyMax = WebRtcAecm_AsymFilt(aecm->farEnergyMax, aecm->farLogEnergy,
                                                 increase_max_shifts, decrease_max_shifts);
        aecm->farEnergyMaxMin = (aecm->farEnergyMax - aecm->farEnergyMin);

        // Dynamic VAD region size
        tmp16 = 2560 - aecm->farEnergyMin;
        if (tmp16 > 0)
        {
            tmp16 = (int16_t)((tmp16 * FAR_ENERGY_VAD_REGION) >> 9);
        } else
        {
            tmp16 = 0;
        }
        tmp16 += FAR_ENERGY_VAD_REGION;

        if ((aecm->startupState == 0) | (aecm->vadUpdateCount > 1024))
        {
            // In startup phase or VAD update halted
            aecm->farEnergyVAD = aecm->farEnergyMin + tmp16;
        } else
        {
            if (aecm->farEnergyVAD > aecm->farLogEnergy)
            {
                aecm->farEnergyVAD +=
                    (aecm->farLogEnergy + tmp16 - aecm->farEnergyVAD) >> 6;
                aecm->vadUpdateCount = 0;
            } else
            {
                aecm->vadUpdateCount++;
            }
        }
        // Put MSE threshold higher than VAD
        aecm->farEnergyMSE = aecm->farEnergyVAD + (1 << 8);
    }

    // Update VAD variables
    if (aecm->farLogEnergy > aecm->farEnergyVAD)
    {
        if ((aecm->startupState == 0) | (aecm->farEnergyMaxMin > FAR_ENERGY_DIFF))
        {
            // We are in startup or have significant dynamics in input speech level
            aecm->currentVADValue = 1;
        }
    } else
    {
        aecm->currentVADValue = 0;
    }
    if ((aecm->currentVADValue) && (aecm->firstVAD))
    {
        aecm->firstVAD = 0;
        if (aecm->echoAdaptLogEnergy[0] > aecm->nearLogEnergy[0])
        {
            // The estimated echo has higher energy than the near end signal.
            // This means that the initialization was too aggressive. Scale
            // down by a factor 8
            for (i = 0; i < PART_LEN1; i++)
            {
                aecm->channelAdapt16[i] >>= 3;
            }
            // Compensate the adapted echo energy level accordingly.
            aecm->echoAdaptLogEnergy[0] -= (3 << 8);
            // Keep firstVAD set so the rescale can repeat on the next VAD hit
            // if the estimate is still too high.
            aecm->firstVAD = 1;
        }
    }
}

// WebRtcAecm_CalcStepSize(...)
//
// This function calculates the step size used in channel estimation
//
//
// @param  aecm  [in]   Handle of the AECM instance.
// @param  mu    [out]  (Return value) Stepsize in log2(), i.e. number of shifts.
//
//
int16_t WebRtcAecm_CalcStepSize(AecmCore* const aecm) {
    int32_t tmp32;
    int16_t tmp16;
    // |mu| is a right-shift amount: smaller value => larger NLMS step.
    int16_t mu = MU_MAX;

    // Here we calculate the step size mu used in the
    // following NLMS based Channel estimation algorithm
    if (!aecm->currentVADValue)
    {
        // Far end energy level too low, no channel update
        mu = 0;
    } else if (aecm->startupState > 0)
    {
        if (aecm->farEnergyMin >= aecm->farEnergyMax)
        {
            mu = MU_MIN;
        } else
        {
            // Interpolate the shift between MU_MIN and MU_MAX based on where
            // the current far-end energy sits in the [min, max] range.
            tmp16 = (aecm->farLogEnergy - aecm->farEnergyMin);
            tmp32 = tmp16 * MU_DIFF;
            tmp32 = WebRtcSpl_DivW32W16(tmp32, aecm->farEnergyMaxMin);
            mu = MU_MIN - 1 - (int16_t)(tmp32);
            // The -1 is an alternative to rounding. This way we get a larger
            // stepsize, so we in some sense compensate for truncation in NLMS
        }
        if (mu < MU_MAX)
        {
            mu = MU_MAX; // Equivalent with maximum step size of 2^-MU_MAX
        }
    }

    return mu;
}

// WebRtcAecm_UpdateChannel(...)
//
// This function performs channel estimation. NLMS and decision on channel storage.
//
//
// @param  aecm          [i/o]  Handle of the AECM instance.
// @param  far_spectrum  [in]   Absolute value of the farend signal in Q(far_q)
// @param  far_q         [in]   Q-domain of the farend signal
// @param  dfa           [in]   Absolute value of the nearend signal (Q[aecm->dfaQDomain])
// @param  mu            [in]   NLMS step size.
// @param  echoEst       [i/o]  Estimated echo in Q(far_q+RESOLUTION_CHANNEL16).
//
void WebRtcAecm_UpdateChannel(AecmCore* aecm,
                              const uint16_t* far_spectrum,
                              const int16_t far_q,
                              const uint16_t* const dfa,
                              const int16_t mu,
                              int32_t* echoEst) {
    uint32_t tmpU32no1, tmpU32no2;
    int32_t tmp32no1, tmp32no2;
    int32_t mseStored;
    int32_t mseAdapt;

    int i;

    int16_t zerosFar, zerosNum, zerosCh, zerosDfa;
    int16_t shiftChFar, shiftNum, shift2ResChan;
    int16_t tmp16no1;
    int16_t xfaQ, dfaQ;

    // This is the channel estimation algorithm. It is based on NLMS but has a variable step
    // length, which was calculated above.
    if (mu)
    {
        for (i = 0; i < PART_LEN1; i++)
        {
            // Determine norm of channel and farend to make sure we don't get overflow in
            // multiplication
            zerosCh = WebRtcSpl_NormU32(aecm->channelAdapt32[i]);
            zerosFar = WebRtcSpl_NormU32((uint32_t)far_spectrum[i]);
            if (zerosCh + zerosFar > 31)
            {
                // Multiplication is safe
                tmpU32no1 = WEBRTC_SPL_UMUL_32_16(aecm->channelAdapt32[i],
                                                  far_spectrum[i]);
                shiftChFar = 0;
            } else
            {
                // We need to shift down before multiplication
                shiftChFar = 32 - zerosCh - zerosFar;
                tmpU32no1 = (aecm->channelAdapt32[i] >> shiftChFar) *
                    far_spectrum[i];
            }
            // Determine Q-domain of numerator
            zerosNum = WebRtcSpl_NormU32(tmpU32no1);
            if (dfa[i])
            {
                zerosDfa = WebRtcSpl_NormU32((uint32_t)dfa[i]);
            } else
            {
                zerosDfa = 32;
            }
            tmp16no1 = zerosDfa - 2 + aecm->dfaNoisyQDomain -
                RESOLUTION_CHANNEL32 - far_q + shiftChFar;
            if (zerosNum > tmp16no1 + 1)
            {
                xfaQ = tmp16no1;
                dfaQ = zerosDfa - 2;
            } else
            {
                xfaQ = zerosNum - 2;
                dfaQ = RESOLUTION_CHANNEL32 + far_q - aecm->dfaNoisyQDomain -
                    shiftChFar + xfaQ;
            }
            // Add in the same Q-domain
            tmpU32no1 = WEBRTC_SPL_SHIFT_W32(tmpU32no1, xfaQ);
            tmpU32no2 = WEBRTC_SPL_SHIFT_W32((uint32_t)dfa[i], dfaQ);
            // Error term: nearend minus estimated echo, in the common Q-domain.
            tmp32no1 = (int32_t)tmpU32no2 - (int32_t)tmpU32no1;
            zerosNum = WebRtcSpl_NormW32(tmp32no1);
            if ((tmp32no1) && (far_spectrum[i] > (CHANNEL_VAD << far_q)))
            {
                //
                // Update is needed
                //
                // This is what we would like to compute
                //
                // tmp32no1 = dfa[i] - (aecm->channelAdapt[i] * far_spectrum[i])
                // tmp32norm = (i + 1)
                // aecm->channelAdapt[i] += (2^mu) * tmp32no1
                //                        / (tmp32norm * far_spectrum[i])
                //

                // Make sure we don't get overflow in multiplication.
                if (zerosNum + zerosFar > 31)
                {
                    if (tmp32no1 > 0)
                    {
                        tmp32no2 = (int32_t)WEBRTC_SPL_UMUL_32_16(tmp32no1,
                                                                  far_spectrum[i]);
                    } else
                    {
                        tmp32no2 = -(int32_t)WEBRTC_SPL_UMUL_32_16(-tmp32no1,
                                                                   far_spectrum[i]);
                    }
                    shiftNum = 0;
                } else
                {
                    shiftNum = 32 - (zerosNum + zerosFar);
                    if (tmp32no1 > 0)
                    {
                        tmp32no2 = (tmp32no1 >> shiftNum) * far_spectrum[i];
                    } else
                    {
                        tmp32no2 = -((-tmp32no1 >> shiftNum) * far_spectrum[i]);
                    }
                }
                // Normalize with respect to frequency bin
                tmp32no2 = WebRtcSpl_DivW32W16(tmp32no2, i + 1);
                // Make sure we are in the right Q-domain
                shift2ResChan = shiftNum + shiftChFar - xfaQ - mu - ((30 - zerosFar) << 1);
                if (WebRtcSpl_NormW32(tmp32no2) < shift2ResChan)
                {
                    // Shifting up would overflow: saturate the update term.
                    tmp32no2 = WEBRTC_SPL_WORD32_MAX;
                } else
                {
                    tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, shift2ResChan);
                }
                aecm->channelAdapt32[i] =
                    WebRtcSpl_AddSatW32(aecm->channelAdapt32[i], tmp32no2);
                if (aecm->channelAdapt32[i] < 0)
                {
                    // We can never have negative channel gain
                    aecm->channelAdapt32[i] = 0;
                }
                aecm->channelAdapt16[i] =
                    (int16_t)(aecm->channelAdapt32[i] >> 16);
            }
        }
    }
    // END: Adaptive channel update

    // Determine if we should store or restore the channel
    if ((aecm->startupState == 0) & (aecm->currentVADValue))
    {
        // During startup we store the channel every block,
        // and we recalculate echo estimate
        WebRtcAecm_StoreAdaptiveChannel(aecm, far_spectrum, echoEst);
    } else
    {
        if (aecm->farLogEnergy < aecm->farEnergyMSE)
        {
            aecm->mseChannelCount = 0;
        } else
        {
            aecm->mseChannelCount++;
        }
        // Enough data for validation. Store channel if we can.
        if (aecm->mseChannelCount >= (MIN_MSE_COUNT + 10))
        {
            // We have enough data.
            // Calculate MSE of "Adapt" and "Stored" versions.
            // It is actually not MSE, but average absolute error.
            mseStored = 0;
            mseAdapt = 0;
            for (i = 0; i < MIN_MSE_COUNT; i++)
            {
                tmp32no1 = ((int32_t)aecm->echoStoredLogEnergy[i]
                            - (int32_t)aecm->nearLogEnergy[i]);
                tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
                mseStored += tmp32no2;

                tmp32no1 = ((int32_t)aecm->echoAdaptLogEnergy[i]
                            - (int32_t)aecm->nearLogEnergy[i]);
                tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
                mseAdapt += tmp32no2;
            }
            if (((mseStored << MSE_RESOLUTION) < (MIN_MSE_DIFF * mseAdapt))
                & ((aecm->mseStoredOld << MSE_RESOLUTION) < (MIN_MSE_DIFF
                                                             * aecm->mseAdaptOld)))
            {
                // The stored channel has a significantly lower MSE than the adaptive one for
                // two consecutive calculations. Reset the adaptive channel.
                WebRtcAecm_ResetAdaptiveChannel(aecm);
            } else if (((MIN_MSE_DIFF * mseStored) > (mseAdapt << MSE_RESOLUTION)) & (mseAdapt
                       < aecm->mseThreshold) & (aecm->mseAdaptOld < aecm->mseThreshold))
            {
                // The adaptive channel has a significantly lower MSE than the stored one.
                // The MSE for the adaptive channel has also been low for two consecutive
                // calculations. Store the adaptive channel.
                WebRtcAecm_StoreAdaptiveChannel(aecm, far_spectrum, echoEst);

                // Update threshold
                if (aecm->mseThreshold == WEBRTC_SPL_WORD32_MAX)
                {
                    aecm->mseThreshold = (mseAdapt + aecm->mseAdaptOld);
                } else
                {
                    // Exponential smoothing: 205/256 ~= 0.8, 5/8 scaling of the
                    // previous threshold.
                    int scaled_threshold = aecm->mseThreshold * 5 / 8;
                    aecm->mseThreshold +=
                        ((mseAdapt - scaled_threshold) * 205) >> 8;
                }

            }

            // Reset counter
            aecm->mseChannelCount = 0;

            // Store the MSE values.
            aecm->mseStoredOld = mseStored;
            aecm->mseAdaptOld = mseAdapt;
        }
    }
    // END: Determine if we should store or reset channel estimate.
}

// CalcSuppressionGain(...)
//
// This function calculates the suppression gain that is used in the Wiener filter.
//
//
// @param  aecm     [i/n]  Handle of the AECM instance.
// @param  supGain  [out]  (Return value) Suppression gain with which to scale the noise
//                         level (Q14).
//
//
int16_t WebRtcAecm_CalcSuppressionGain(AecmCore* const aecm) {
    int32_t tmp32no1;

    int16_t supGain = SUPGAIN_DEFAULT;
    int16_t tmp16no1;
    int16_t dE = 0;

    // Determine suppression gain used in the Wiener filter. The gain is based on a mix of far
    // end energy and echo estimation error.
    // Adjust for the far end signal level. A low signal level indicates no far end signal,
    // hence we set the suppression gain to 0
    if (!aecm->currentVADValue)
    {
        supGain = 0;
    } else
    {
        // Adjust for possible double talk. If we have large variations in estimation error we
        // likely have double talk (or poor channel).
        tmp16no1 = (aecm->nearLogEnergy[0] - aecm->echoStoredLogEnergy[0] - ENERGY_DEV_OFFSET);
        dE = WEBRTC_SPL_ABS_W16(tmp16no1);

        if (dE < ENERGY_DEV_TOL)
        {
            // Likely no double talk. The better estimation, the more we can suppress signal.
            // Update counters
            // Piecewise-linear interpolation of the gain between the
            // A/B/D error parameters, depending on which side of
            // SUPGAIN_EPC_DT the deviation |dE| falls.
            if (dE < SUPGAIN_EPC_DT)
            {
                tmp32no1 = aecm->supGainErrParamDiffAB * dE;
                tmp32no1 += (SUPGAIN_EPC_DT >> 1);
                tmp16no1 = (int16_t)WebRtcSpl_DivW32W16(tmp32no1, SUPGAIN_EPC_DT);
                supGain = aecm->supGainErrParamA - tmp16no1;
            } else
            {
                tmp32no1 = aecm->supGainErrParamDiffBD * (ENERGY_DEV_TOL - dE);
                tmp32no1 += ((ENERGY_DEV_TOL - SUPGAIN_EPC_DT) >> 1);
                tmp16no1 = (int16_t)WebRtcSpl_DivW32W16(tmp32no1, (ENERGY_DEV_TOL
                                                                   - SUPGAIN_EPC_DT));
                supGain = aecm->supGainErrParamD + tmp16no1;
            }
        } else
        {
            // Likely in double talk. Use default value
            supGain = aecm->supGainErrParamD;
        }
    }

    // Smooth towards the larger of the new gain and the previous one.
    if (supGain > aecm->supGainOld)
    {
        tmp16no1 = supGain;
    } else
    {
        tmp16no1 = aecm->supGainOld;
    }
    aecm->supGainOld = supGain;
    // NOTE(review): both branches below are identical (upstream WebRTC quirk);
    // kept verbatim to stay byte-compatible with the vendored source.
    if (tmp16no1 < aecm->supGain)
    {
        aecm->supGain += (int16_t)((tmp16no1 - aecm->supGain) >> 4);
    } else
    {
        aecm->supGain += (int16_t)((tmp16no1 - aecm->supGain) >> 4);
    }

    // END: Update suppression gain

    return aecm->supGain;
}

// Appends |farLen| far-end samples to the circular far-end buffer, wrapping
// the write position at FAR_BUF_LEN.
void WebRtcAecm_BufferFarFrame(AecmCore* const aecm,
                               const int16_t* const farend,
                               const int farLen) {
    int writeLen = farLen, writePos = 0;

    // Check if the write position must be wrapped
    while (aecm->farBufWritePos + writeLen > FAR_BUF_LEN)
    {
        // Write to remaining buffer space before wrapping
        writeLen = FAR_BUF_LEN - aecm->farBufWritePos;
        memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
               sizeof(int16_t) * writeLen);
        aecm->farBufWritePos = 0;
        writePos = writeLen;
        writeLen = farLen - writeLen;
    }

    memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
           sizeof(int16_t) * writeLen);
    aecm->farBufWritePos += writeLen;
}

// Reads |farLen| samples from the circular far-end buffer into |farend|,
// first moving the read position by the change in |knownDelay| since the
// previous call (so the fetched frame is time-aligned with the near end).
void WebRtcAecm_FetchFarFrame(AecmCore* const aecm,
                              int16_t* const farend,
                              const int farLen,
                              const int knownDelay) {
    int readLen = farLen;
    int readPos = 0;
    int delayChange = knownDelay - aecm->lastKnownDelay;

    aecm->farBufReadPos -= delayChange;

    // Check if delay forces a read position wrap
    while (aecm->farBufReadPos < 0)
    {
        aecm->farBufReadPos += FAR_BUF_LEN;
    }
    while (aecm->farBufReadPos > FAR_BUF_LEN - 1)
    {
        aecm->farBufReadPos -= FAR_BUF_LEN;
    }

    aecm->lastKnownDelay = knownDelay;

    // Check if read position must be wrapped
    while (aecm->farBufReadPos + readLen > FAR_BUF_LEN)
    {

        // Read from remaining buffer space before wrapping
        readLen = FAR_BUF_LEN - aecm->farBufReadPos;
        memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
               sizeof(int16_t) * readLen);
        aecm->farBufReadPos = 0;
        readPos = readLen;
        readLen = farLen - readLen;
    }
    memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
           sizeof(int16_t) * readLen);
    aecm->farBufReadPos += readLen;
}
diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.h
new file mode 100644
index 000000000..33d80889a
--- /dev/null
+++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core.h
@@ -0,0 +1,436 @@
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

// Performs echo control (suppression) with fft routines in fixed-point.

#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AECM_AECM_CORE_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AECM_AECM_CORE_H_

extern "C" {
#include "webrtc/common_audio/ring_buffer.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
}
#include "webrtc/modules/audio_processing/aecm/aecm_defines.h"
#include "webrtc/typedefs.h"

#ifdef _MSC_VER  // visual c++
#define ALIGN8_BEG __declspec(align(8))
#define ALIGN8_END
#else  // gcc or icc
#define ALIGN8_BEG
#define ALIGN8_END __attribute__((aligned(8)))
#endif

// A complex spectral bin as a pair of 16-bit fixed-point values.
typedef struct {
    int16_t real;
    int16_t imag;
} ComplexInt16;

// Full state of one AECM (mobile echo control) instance. Created by
// WebRtcAecm_CreateCore(), initialized by WebRtcAecm_InitCore() and released
// by WebRtcAecm_FreeCore().
typedef struct {
    int farBufWritePos;
    int farBufReadPos;
    int knownDelay;
    int lastKnownDelay;
    int firstVAD;  // Parameter to control poorly initialized channels

    RingBuffer* farFrameBuf;
    RingBuffer* nearNoisyFrameBuf;
    RingBuffer* nearCleanFrameBuf;
    RingBuffer* outFrameBuf;

    int16_t farBuf[FAR_BUF_LEN];

    int16_t mult;  // Sampling-rate multiple relative to 8 kHz (1 or 2).
    uint32_t seed;

    // Delay estimation variables
    void* delay_estimator_farend;
    void* delay_estimator;
    uint16_t currentDelay;
    // Far end history variables
    // TODO(bjornv): Replace |far_history| with ring_buffer.
    uint16_t far_history[PART_LEN1 * MAX_DELAY];
    int far_history_pos;
    int far_q_domains[MAX_DELAY];

    int16_t nlpFlag;
    int16_t fixedDelay;

    uint32_t totCount;

    int16_t dfaCleanQDomain;
    int16_t dfaCleanQDomainOld;
    int16_t dfaNoisyQDomain;
    int16_t dfaNoisyQDomainOld;

    // Log-energy histories in Q8 (see LogOfEnergyInQ8() in aecm_core.cc).
    int16_t nearLogEnergy[MAX_BUF_LEN];
    int16_t farLogEnergy;
    int16_t echoAdaptLogEnergy[MAX_BUF_LEN];
    int16_t echoStoredLogEnergy[MAX_BUF_LEN];

    // The extra 16 or 32 bytes in the following buffers are for alignment based
    // Neon code.
    // It's designed this way since the current GCC compiler can't align a
    // buffer in 16 or 32 byte boundaries properly.
    int16_t channelStored_buf[PART_LEN1 + 8];
    int16_t channelAdapt16_buf[PART_LEN1 + 8];
    int32_t channelAdapt32_buf[PART_LEN1 + 8];
    int16_t xBuf_buf[PART_LEN2 + 16];  // farend
    int16_t dBufClean_buf[PART_LEN2 + 16];  // nearend
    int16_t dBufNoisy_buf[PART_LEN2 + 16];  // nearend
    int16_t outBuf_buf[PART_LEN + 8];

    // Pointers to the above buffers
    // (aligned into the *_buf arrays by WebRtcAecm_CreateCore()).
    int16_t *channelStored;
    int16_t *channelAdapt16;
    int32_t *channelAdapt32;
    int16_t *xBuf;
    int16_t *dBufClean;
    int16_t *dBufNoisy;
    int16_t *outBuf;

    int32_t echoFilt[PART_LEN1];
    int16_t nearFilt[PART_LEN1];
    int32_t noiseEst[PART_LEN1];
    int noiseEstTooLowCtr[PART_LEN1];
    int noiseEstTooHighCtr[PART_LEN1];
    int16_t noiseEstCtr;
    int16_t cngMode;  // Comfort-noise generation on/off (AecmTrue/AecmFalse).

    // Bookkeeping for the stored-vs-adaptive channel decision.
    int32_t mseAdaptOld;
    int32_t mseStoredOld;
    int32_t mseThreshold;

    int16_t farEnergyMin;
    int16_t farEnergyMax;
    int16_t farEnergyMaxMin;
    int16_t farEnergyVAD;
    int16_t farEnergyMSE;
    int currentVADValue;
    int16_t vadUpdateCount;

    int16_t startupState;
    int16_t mseChannelCount;
    int16_t supGain;
    int16_t supGainOld;

    int16_t supGainErrParamA;
    int16_t supGainErrParamD;
    int16_t supGainErrParamDiffAB;
    int16_t supGainErrParamDiffBD;

    struct RealFFT* real_fft;

#ifdef AEC_DEBUG
    FILE *farFile;
    FILE *nearFile;
    FILE *outFile;
#endif
} AecmCore;

////////////////////////////////////////////////////////////////////////////////
// WebRtcAecm_CreateCore()
//
// Allocates the memory needed by the AECM. The memory needs to be
// initialized separately using the WebRtcAecm_InitCore() function.
// Returns a pointer to the instance and a nullptr at failure.
AecmCore* WebRtcAecm_CreateCore();

////////////////////////////////////////////////////////////////////////////////
// WebRtcAecm_InitCore(...)
+// +// This function initializes the AECM instant created with +// WebRtcAecm_CreateCore() +// Input: +// - aecm : Pointer to the AECM instance +// - samplingFreq : Sampling Frequency +// +// Output: +// - aecm : Initialized instance +// +// Return value : 0 - Ok +// -1 - Error +// +int WebRtcAecm_InitCore(AecmCore* const aecm, int samplingFreq); + +//////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_FreeCore(...) +// +// This function releases the memory allocated by WebRtcAecm_CreateCore() +// Input: +// - aecm : Pointer to the AECM instance +// +void WebRtcAecm_FreeCore(AecmCore* aecm); + +int WebRtcAecm_Control(AecmCore* aecm, int delay, int nlpFlag); + +//////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_InitEchoPathCore(...) +// +// This function resets the echo channel adaptation with the specified channel. +// Input: +// - aecm : Pointer to the AECM instance +// - echo_path : Pointer to the data that should initialize the echo +// path +// +// Output: +// - aecm : Initialized instance +// +void WebRtcAecm_InitEchoPathCore(AecmCore* aecm, const int16_t* echo_path); + +//////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_ProcessFrame(...) +// +// This function processes frames and sends blocks to +// WebRtcAecm_ProcessBlock(...) 
+// +// Inputs: +// - aecm : Pointer to the AECM instance +// - farend : In buffer containing one frame of echo signal +// - nearendNoisy : In buffer containing one frame of nearend+echo signal +// without NS +// - nearendClean : In buffer containing one frame of nearend+echo signal +// with NS +// +// Output: +// - out : Out buffer, one frame of nearend signal : +// +// +int WebRtcAecm_ProcessFrame(AecmCore* aecm, + const int16_t* farend, + const int16_t* nearendNoisy, + const int16_t* nearendClean, + int16_t* out); + +//////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_ProcessBlock(...) +// +// This function is called for every block within one frame +// This function is called by WebRtcAecm_ProcessFrame(...) +// +// Inputs: +// - aecm : Pointer to the AECM instance +// - farend : In buffer containing one block of echo signal +// - nearendNoisy : In buffer containing one frame of nearend+echo signal +// without NS +// - nearendClean : In buffer containing one frame of nearend+echo signal +// with NS +// +// Output: +// - out : Out buffer, one block of nearend signal : +// +// +int WebRtcAecm_ProcessBlock(AecmCore* aecm, + const int16_t* farend, + const int16_t* nearendNoisy, + const int16_t* noisyClean, + int16_t* out); + +//////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_BufferFarFrame() +// +// Inserts a frame of data into farend buffer. 
+// +// Inputs: +// - aecm : Pointer to the AECM instance +// - farend : In buffer containing one frame of farend signal +// - farLen : Length of frame +// +void WebRtcAecm_BufferFarFrame(AecmCore* const aecm, + const int16_t* const farend, + const int farLen); + +//////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_FetchFarFrame() +// +// Read the farend buffer to account for known delay +// +// Inputs: +// - aecm : Pointer to the AECM instance +// - farend : In buffer containing one frame of farend signal +// - farLen : Length of frame +// - knownDelay : known delay +// +void WebRtcAecm_FetchFarFrame(AecmCore* const aecm, + int16_t* const farend, + const int farLen, + const int knownDelay); + +// All the functions below are intended to be private + +//////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_UpdateFarHistory() +// +// Moves the pointer to the next entry and inserts |far_spectrum| and +// corresponding Q-domain in its buffer. +// +// Inputs: +// - self : Pointer to the delay estimation instance +// - far_spectrum : Pointer to the far end spectrum +// - far_q : Q-domain of far end spectrum +// +void WebRtcAecm_UpdateFarHistory(AecmCore* self, + uint16_t* far_spectrum, + int far_q); + +//////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_AlignedFarend() +// +// Returns a pointer to the far end spectrum aligned to current near end +// spectrum. The function WebRtc_DelayEstimatorProcessFix(...) should have been +// called before AlignedFarend(...). Otherwise, you get the pointer to the +// previous frame. The memory is only valid until the next call of +// WebRtc_DelayEstimatorProcessFix(...). +// +// Inputs: +// - self : Pointer to the AECM instance. +// - delay : Current delay estimate. 
+// +// Output: +// - far_q : The Q-domain of the aligned far end spectrum +// +// Return value: +// - far_spectrum : Pointer to the aligned far end spectrum +// NULL - Error +// +const uint16_t* WebRtcAecm_AlignedFarend(AecmCore* self, int* far_q, int delay); + +/////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_CalcSuppressionGain() +// +// This function calculates the suppression gain that is used in the +// Wiener filter. +// +// Inputs: +// - aecm : Pointer to the AECM instance. +// +// Return value: +// - supGain : Suppression gain with which to scale the noise +// level (Q14). +// +int16_t WebRtcAecm_CalcSuppressionGain(AecmCore* const aecm); + +/////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_CalcEnergies() +// +// This function calculates the log of energies for nearend, farend and +// estimated echoes. There is also an update of energy decision levels, +// i.e. internal VAD. +// +// Inputs: +// - aecm : Pointer to the AECM instance. +// - far_spectrum : Pointer to farend spectrum. +// - far_q : Q-domain of farend spectrum. +// - nearEner : Near end energy for current block in +// Q(aecm->dfaQDomain). +// +// Output: +// - echoEst : Estimated echo in Q(xfa_q+RESOLUTION_CHANNEL16). +// +void WebRtcAecm_CalcEnergies(AecmCore* aecm, + const uint16_t* far_spectrum, + const int16_t far_q, + const uint32_t nearEner, + int32_t* echoEst); + +/////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_CalcStepSize() +// +// This function calculates the step size used in channel estimation +// +// Inputs: +// - aecm : Pointer to the AECM instance. +// +// Return value: +// - mu : Stepsize in log2(), i.e. number of shifts. +// +int16_t WebRtcAecm_CalcStepSize(AecmCore* const aecm); + +/////////////////////////////////////////////////////////////////////////////// +// WebRtcAecm_UpdateChannel(...) 
+// +// This function performs channel estimation. +// NLMS and decision on channel storage. +// +// Inputs: +// - aecm : Pointer to the AECM instance. +// - far_spectrum : Absolute value of the farend signal in Q(far_q) +// - far_q : Q-domain of the farend signal +// - dfa : Absolute value of the nearend signal +// (Q[aecm->dfaQDomain]) +// - mu : NLMS step size. +// Input/Output: +// - echoEst : Estimated echo in Q(far_q+RESOLUTION_CHANNEL16). +// +void WebRtcAecm_UpdateChannel(AecmCore* aecm, + const uint16_t* far_spectrum, + const int16_t far_q, + const uint16_t* const dfa, + const int16_t mu, + int32_t* echoEst); + +extern const int16_t WebRtcAecm_kCosTable[]; +extern const int16_t WebRtcAecm_kSinTable[]; + +/////////////////////////////////////////////////////////////////////////////// +// Some function pointers, for internal functions shared by ARM NEON and +// generic C code. +// +typedef void (*CalcLinearEnergies)(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echoEst, + uint32_t* far_energy, + uint32_t* echo_energy_adapt, + uint32_t* echo_energy_stored); +extern CalcLinearEnergies WebRtcAecm_CalcLinearEnergies; + +typedef void (*StoreAdaptiveChannel)(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echo_est); +extern StoreAdaptiveChannel WebRtcAecm_StoreAdaptiveChannel; + +typedef void (*ResetAdaptiveChannel)(AecmCore* aecm); +extern ResetAdaptiveChannel WebRtcAecm_ResetAdaptiveChannel; + +// For the above function pointers, functions for generic platforms are declared +// and defined as static in file aecm_core.c, while those for ARM Neon platforms +// are declared below and defined in file aecm_core_neon.c. 
+#if defined(WEBRTC_HAS_NEON) +void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echo_est, + uint32_t* far_energy, + uint32_t* echo_energy_adapt, + uint32_t* echo_energy_stored); + +void WebRtcAecm_StoreAdaptiveChannelNeon(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echo_est); + +void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore* aecm); +#endif + +#if defined(MIPS32_LE) +void WebRtcAecm_CalcLinearEnergies_mips(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echo_est, + uint32_t* far_energy, + uint32_t* echo_energy_adapt, + uint32_t* echo_energy_stored); +#if defined(MIPS_DSP_R1_LE) +void WebRtcAecm_StoreAdaptiveChannel_mips(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echo_est); + +void WebRtcAecm_ResetAdaptiveChannel_mips(AecmCore* aecm); +#endif +#endif + +#endif diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_c.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_c.cc new file mode 100644 index 000000000..d868d6a2a --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_c.cc @@ -0,0 +1,769 @@ +/* + * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "webrtc/modules/audio_processing/aecm/aecm_core.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+extern "C" {
+#include "webrtc/common_audio/ring_buffer.h"
+#include "webrtc/common_audio/signal_processing/include/real_fft.h"
+}
+#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
+#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
+extern "C" {
+#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
+}
+
+#include "webrtc/base/checks.h"
+#include "webrtc/typedefs.h"
+
+// Square root of Hanning window in Q14.
+static const ALIGN8_BEG int16_t WebRtcAecm_kSqrtHanning[] ALIGN8_END = {
+  0, 399, 798, 1196, 1594, 1990, 2386, 2780, 3172,
+  3562, 3951, 4337, 4720, 5101, 5478, 5853, 6224,
+  6591, 6954, 7313, 7668, 8019, 8364, 8705, 9040,
+  9370, 9695, 10013, 10326, 10633, 10933, 11227, 11514,
+  11795, 12068, 12335, 12594, 12845, 13089, 13325, 13553,
+  13773, 13985, 14189, 14384, 14571, 14749, 14918, 15079,
+  15231, 15373, 15506, 15631, 15746, 15851, 15947, 16034,
+  16111, 16179, 16237, 16286, 16325, 16354, 16373, 16384
+};
+
+#ifdef AECM_WITH_ABS_APPROX
+//Q15 alpha = 0.99439986968132 const Factor for magnitude approximation
+static const uint16_t kAlpha1 = 32584;
+//Q15 beta = 0.12967166976970 const Factor for magnitude approximation
+static const uint16_t kBeta1 = 4249;
+//Q15 alpha = 0.94234827210087 const Factor for magnitude approximation
+static const uint16_t kAlpha2 = 30879;
+//Q15 beta = 0.33787806009150 const Factor for magnitude approximation
+static const uint16_t kBeta2 = 11072;
+//Q15 alpha = 0.82247698684306 const Factor for magnitude approximation
+static const uint16_t kAlpha3 = 26951;
+//Q15 beta = 0.57762063060713 const Factor for magnitude approximation
+static const uint16_t kBeta3 = 18927;
+#endif
+
+static const int16_t kNoiseEstQDomain = 15;
+static const int16_t kNoiseEstIncCount = 5;
+
+static void ComfortNoise(AecmCore* aecm,
+                         const uint16_t* dfa,
+                         ComplexInt16* out,
+                         const int16_t* lambda);
+
+static void WindowAndFFT(AecmCore* aecm, + int16_t* fft, + const int16_t* time_signal, + ComplexInt16* freq_signal, + int time_signal_scaling) { + int i = 0; + + // FFT of signal + for (i = 0; i < PART_LEN; i++) { + // Window time domain signal and insert into real part of + // transformation array |fft| + int16_t scaled_time_signal = time_signal[i] << time_signal_scaling; + fft[i] = (int16_t)((scaled_time_signal * WebRtcAecm_kSqrtHanning[i]) >> 14); + scaled_time_signal = time_signal[i + PART_LEN] << time_signal_scaling; + fft[PART_LEN + i] = (int16_t)(( + scaled_time_signal * WebRtcAecm_kSqrtHanning[PART_LEN - i]) >> 14); + } + + // Do forward FFT, then take only the first PART_LEN complex samples, + // and change signs of the imaginary parts. + WebRtcSpl_RealForwardFFT(aecm->real_fft, fft, (int16_t*)freq_signal); + for (i = 0; i < PART_LEN; i++) { + freq_signal[i].imag = -freq_signal[i].imag; + } +} + +static void InverseFFTAndWindow(AecmCore* aecm, + int16_t* fft, + ComplexInt16* efw, + int16_t* output, + const int16_t* nearendClean) { + int i, j, outCFFT; + int32_t tmp32no1; + // Reuse |efw| for the inverse FFT output after transferring + // the contents to |fft|. + int16_t* ifft_out = (int16_t*)efw; + + // Synthesis + for (i = 1, j = 2; i < PART_LEN; i += 1, j += 2) { + fft[j] = efw[i].real; + fft[j + 1] = -efw[i].imag; + } + fft[0] = efw[0].real; + fft[1] = -efw[0].imag; + + fft[PART_LEN2] = efw[PART_LEN].real; + fft[PART_LEN2 + 1] = -efw[PART_LEN].imag; + + // Inverse FFT. Keep outCFFT to scale the samples in the next block. 
+ outCFFT = WebRtcSpl_RealInverseFFT(aecm->real_fft, fft, ifft_out); + for (i = 0; i < PART_LEN; i++) { + ifft_out[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND( + ifft_out[i], WebRtcAecm_kSqrtHanning[i], 14); + tmp32no1 = WEBRTC_SPL_SHIFT_W32((int32_t)ifft_out[i], + outCFFT - aecm->dfaCleanQDomain); + output[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, + tmp32no1 + aecm->outBuf[i], + WEBRTC_SPL_WORD16_MIN); + + tmp32no1 = (ifft_out[PART_LEN + i] * + WebRtcAecm_kSqrtHanning[PART_LEN - i]) >> 14; + tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, + outCFFT - aecm->dfaCleanQDomain); + aecm->outBuf[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, + tmp32no1, + WEBRTC_SPL_WORD16_MIN); + } + + // Copy the current block to the old position + // (aecm->outBuf is shifted elsewhere) + memcpy(aecm->xBuf, aecm->xBuf + PART_LEN, sizeof(int16_t) * PART_LEN); + memcpy(aecm->dBufNoisy, + aecm->dBufNoisy + PART_LEN, + sizeof(int16_t) * PART_LEN); + if (nearendClean != NULL) + { + memcpy(aecm->dBufClean, + aecm->dBufClean + PART_LEN, + sizeof(int16_t) * PART_LEN); + } +} + +// Transforms a time domain signal into the frequency domain, outputting the +// complex valued signal, absolute value and sum of absolute values. 
+// +// time_signal [in] Pointer to time domain signal +// freq_signal_real [out] Pointer to real part of frequency domain array +// freq_signal_imag [out] Pointer to imaginary part of frequency domain +// array +// freq_signal_abs [out] Pointer to absolute value of frequency domain +// array +// freq_signal_sum_abs [out] Pointer to the sum of all absolute values in +// the frequency domain array +// return value The Q-domain of current frequency values +// +static int TimeToFrequencyDomain(AecmCore* aecm, + const int16_t* time_signal, + ComplexInt16* freq_signal, + uint16_t* freq_signal_abs, + uint32_t* freq_signal_sum_abs) { + int i = 0; + int time_signal_scaling = 0; + + int32_t tmp32no1 = 0; + int32_t tmp32no2 = 0; + + // In fft_buf, +16 for 32-byte alignment. + int16_t fft_buf[PART_LEN4 + 16]; + int16_t *fft = (int16_t *) (((uintptr_t) fft_buf + 31) & ~31); + + int16_t tmp16no1; +#ifndef WEBRTC_ARCH_ARM_V7 + int16_t tmp16no2; +#endif +#ifdef AECM_WITH_ABS_APPROX + int16_t max_value = 0; + int16_t min_value = 0; + uint16_t alpha = 0; + uint16_t beta = 0; +#endif + +#ifdef AECM_DYNAMIC_Q + tmp16no1 = WebRtcSpl_MaxAbsValueW16(time_signal, PART_LEN2); + time_signal_scaling = WebRtcSpl_NormW16(tmp16no1); +#endif + + WindowAndFFT(aecm, fft, time_signal, freq_signal, time_signal_scaling); + + // Extract imaginary and real part, calculate the magnitude for + // all frequency bins + freq_signal[0].imag = 0; + freq_signal[PART_LEN].imag = 0; + freq_signal_abs[0] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[0].real); + freq_signal_abs[PART_LEN] = (uint16_t)WEBRTC_SPL_ABS_W16( + freq_signal[PART_LEN].real); + (*freq_signal_sum_abs) = (uint32_t)(freq_signal_abs[0]) + + (uint32_t)(freq_signal_abs[PART_LEN]); + + for (i = 1; i < PART_LEN; i++) + { + if (freq_signal[i].real == 0) + { + freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[i].imag); + } + else if (freq_signal[i].imag == 0) + { + freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[i].real); + } 
+ else + { + // Approximation for magnitude of complex fft output + // magn = sqrt(real^2 + imag^2) + // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|) + // + // The parameters alpha and beta are stored in Q15 + +#ifdef AECM_WITH_ABS_APPROX + tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real); + tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag); + + if(tmp16no1 > tmp16no2) + { + max_value = tmp16no1; + min_value = tmp16no2; + } else + { + max_value = tmp16no2; + min_value = tmp16no1; + } + + // Magnitude in Q(-6) + if ((max_value >> 2) > min_value) + { + alpha = kAlpha1; + beta = kBeta1; + } else if ((max_value >> 1) > min_value) + { + alpha = kAlpha2; + beta = kBeta2; + } else + { + alpha = kAlpha3; + beta = kBeta3; + } + tmp16no1 = (int16_t)((max_value * alpha) >> 15); + tmp16no2 = (int16_t)((min_value * beta) >> 15); + freq_signal_abs[i] = (uint16_t)tmp16no1 + (uint16_t)tmp16no2; +#else +#ifdef WEBRTC_ARCH_ARM_V7 + __asm __volatile( + "smulbb %[tmp32no1], %[real], %[real]\n\t" + "smlabb %[tmp32no2], %[imag], %[imag], %[tmp32no1]\n\t" + :[tmp32no1]"+&r"(tmp32no1), + [tmp32no2]"=r"(tmp32no2) + :[real]"r"(freq_signal[i].real), + [imag]"r"(freq_signal[i].imag) + ); +#else + tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real); + tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag); + tmp32no1 = tmp16no1 * tmp16no1; + tmp32no2 = tmp16no2 * tmp16no2; + tmp32no2 = WebRtcSpl_AddSatW32(tmp32no1, tmp32no2); +#endif // WEBRTC_ARCH_ARM_V7 + tmp32no1 = WebRtcSpl_SqrtFloor(tmp32no2); + + freq_signal_abs[i] = (uint16_t)tmp32no1; +#endif // AECM_WITH_ABS_APPROX + } + (*freq_signal_sum_abs) += (uint32_t)freq_signal_abs[i]; + } + + return time_signal_scaling; +} + +int WebRtcAecm_ProcessBlock(AecmCore* aecm, + const int16_t* farend, + const int16_t* nearendNoisy, + const int16_t* nearendClean, + int16_t* output) { + int i; + + uint32_t xfaSum; + uint32_t dfaNoisySum; + uint32_t dfaCleanSum; + uint32_t echoEst32Gained; + uint32_t tmpU32; + + int32_t tmp32no1; + + 
uint16_t xfa[PART_LEN1]; + uint16_t dfaNoisy[PART_LEN1]; + uint16_t dfaClean[PART_LEN1]; + uint16_t* ptrDfaClean = dfaClean; + const uint16_t* far_spectrum_ptr = NULL; + + // 32 byte aligned buffers (with +8 or +16). + // TODO(kma): define fft with ComplexInt16. + int16_t fft_buf[PART_LEN4 + 2 + 16]; // +2 to make a loop safe. + int32_t echoEst32_buf[PART_LEN1 + 8]; + int32_t dfw_buf[PART_LEN2 + 8]; + int32_t efw_buf[PART_LEN2 + 8]; + + int16_t* fft = (int16_t*) (((uintptr_t) fft_buf + 31) & ~ 31); + int32_t* echoEst32 = (int32_t*) (((uintptr_t) echoEst32_buf + 31) & ~ 31); + ComplexInt16* dfw = (ComplexInt16*)(((uintptr_t)dfw_buf + 31) & ~31); + ComplexInt16* efw = (ComplexInt16*)(((uintptr_t)efw_buf + 31) & ~31); + + int16_t hnl[PART_LEN1]; + int16_t numPosCoef = 0; + int16_t nlpGain = ONE_Q14; + int delay; + int16_t tmp16no1; + int16_t tmp16no2; + int16_t mu; + int16_t supGain; + int16_t zeros32, zeros16; + int16_t zerosDBufNoisy, zerosDBufClean, zerosXBuf; + int far_q; + int16_t resolutionDiff, qDomainDiff, dfa_clean_q_domain_diff; + + const int kMinPrefBand = 4; + const int kMaxPrefBand = 24; + int32_t avgHnl32 = 0; + + // Determine startup state. There are three states: + // (0) the first CONV_LEN blocks + // (1) another CONV_LEN blocks + // (2) the rest + + if (aecm->startupState < 2) + { + aecm->startupState = (aecm->totCount >= CONV_LEN) + + (aecm->totCount >= CONV_LEN2); + } + // END: Determine startup state + + // Buffer near and far end signals + memcpy(aecm->xBuf + PART_LEN, farend, sizeof(int16_t) * PART_LEN); + memcpy(aecm->dBufNoisy + PART_LEN, nearendNoisy, sizeof(int16_t) * PART_LEN); + if (nearendClean != NULL) + { + memcpy(aecm->dBufClean + PART_LEN, + nearendClean, + sizeof(int16_t) * PART_LEN); + } + + // Transform far end signal from time domain to frequency domain. + far_q = TimeToFrequencyDomain(aecm, + aecm->xBuf, + dfw, + xfa, + &xfaSum); + + // Transform noisy near end signal from time domain to frequency domain. 
+ zerosDBufNoisy = TimeToFrequencyDomain(aecm, + aecm->dBufNoisy, + dfw, + dfaNoisy, + &dfaNoisySum); + aecm->dfaNoisyQDomainOld = aecm->dfaNoisyQDomain; + aecm->dfaNoisyQDomain = (int16_t)zerosDBufNoisy; + + + if (nearendClean == NULL) + { + ptrDfaClean = dfaNoisy; + aecm->dfaCleanQDomainOld = aecm->dfaNoisyQDomainOld; + aecm->dfaCleanQDomain = aecm->dfaNoisyQDomain; + dfaCleanSum = dfaNoisySum; + } else + { + // Transform clean near end signal from time domain to frequency domain. + zerosDBufClean = TimeToFrequencyDomain(aecm, + aecm->dBufClean, + dfw, + dfaClean, + &dfaCleanSum); + aecm->dfaCleanQDomainOld = aecm->dfaCleanQDomain; + aecm->dfaCleanQDomain = (int16_t)zerosDBufClean; + } + + // Get the delay + // Save far-end history and estimate delay + WebRtcAecm_UpdateFarHistory(aecm, xfa, far_q); + if (WebRtc_AddFarSpectrumFix(aecm->delay_estimator_farend, + xfa, + PART_LEN1, + far_q) == -1) { + return -1; + } + delay = WebRtc_DelayEstimatorProcessFix(aecm->delay_estimator, + dfaNoisy, + PART_LEN1, + zerosDBufNoisy); + if (delay == -1) + { + return -1; + } + else if (delay == -2) + { + // If the delay is unknown, we assume zero. + // NOTE: this will have to be adjusted if we ever add lookahead. + delay = 0; + } + + if (aecm->fixedDelay >= 0) + { + // Use fixed delay + delay = aecm->fixedDelay; + } + + // Get aligned far end spectrum + far_spectrum_ptr = WebRtcAecm_AlignedFarend(aecm, &far_q, delay); + zerosXBuf = (int16_t) far_q; + if (far_spectrum_ptr == NULL) + { + return -1; + } + + // Calculate log(energy) and update energy threshold levels + WebRtcAecm_CalcEnergies(aecm, + far_spectrum_ptr, + zerosXBuf, + dfaNoisySum, + echoEst32); + + // Calculate stepsize + mu = WebRtcAecm_CalcStepSize(aecm); + + // Update counters + aecm->totCount++; + + // This is the channel estimation algorithm. + // It is base on NLMS but has a variable step length, + // which was calculated above. 
+ WebRtcAecm_UpdateChannel(aecm, + far_spectrum_ptr, + zerosXBuf, + dfaNoisy, + mu, + echoEst32); + supGain = WebRtcAecm_CalcSuppressionGain(aecm); + + + // Calculate Wiener filter hnl[] + for (i = 0; i < PART_LEN1; i++) + { + // Far end signal through channel estimate in Q8 + // How much can we shift right to preserve resolution + tmp32no1 = echoEst32[i] - aecm->echoFilt[i]; + aecm->echoFilt[i] += (tmp32no1 * 50) >> 8; + + zeros32 = WebRtcSpl_NormW32(aecm->echoFilt[i]) + 1; + zeros16 = WebRtcSpl_NormW16(supGain) + 1; + if (zeros32 + zeros16 > 16) + { + // Multiplication is safe + // Result in + // Q(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN+ + // aecm->xfaQDomainBuf[diff]) + echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i], + (uint16_t)supGain); + resolutionDiff = 14 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN; + resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf); + } else + { + tmp16no1 = 17 - zeros32 - zeros16; + resolutionDiff = 14 + tmp16no1 - RESOLUTION_CHANNEL16 - + RESOLUTION_SUPGAIN; + resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf); + if (zeros32 > tmp16no1) + { + echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i], + supGain >> tmp16no1); + } else + { + // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16) + echoEst32Gained = (aecm->echoFilt[i] >> tmp16no1) * supGain; + } + } + + zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]); + RTC_DCHECK_GE(zeros16, 0); // |zeros16| is a norm, hence non-negative. + dfa_clean_q_domain_diff = aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld; + if (zeros16 < dfa_clean_q_domain_diff && aecm->nearFilt[i]) { + tmp16no1 = aecm->nearFilt[i] << zeros16; + qDomainDiff = zeros16 - dfa_clean_q_domain_diff; + tmp16no2 = ptrDfaClean[i] >> -qDomainDiff; + } else { + tmp16no1 = dfa_clean_q_domain_diff < 0 + ? 
aecm->nearFilt[i] >> -dfa_clean_q_domain_diff + : aecm->nearFilt[i] << dfa_clean_q_domain_diff; + qDomainDiff = 0; + tmp16no2 = ptrDfaClean[i]; + } + tmp32no1 = (int32_t)(tmp16no2 - tmp16no1); + tmp16no2 = (int16_t)(tmp32no1 >> 4); + tmp16no2 += tmp16no1; + zeros16 = WebRtcSpl_NormW16(tmp16no2); + if ((tmp16no2) & (-qDomainDiff > zeros16)) { + aecm->nearFilt[i] = WEBRTC_SPL_WORD16_MAX; + } else { + aecm->nearFilt[i] = qDomainDiff < 0 ? tmp16no2 << -qDomainDiff + : tmp16no2 >> qDomainDiff; + } + + // Wiener filter coefficients, resulting hnl in Q14 + if (echoEst32Gained == 0) + { + hnl[i] = ONE_Q14; + } else if (aecm->nearFilt[i] == 0) + { + hnl[i] = 0; + } else + { + // Multiply the suppression gain + // Rounding + echoEst32Gained += (uint32_t)(aecm->nearFilt[i] >> 1); + tmpU32 = WebRtcSpl_DivU32U16(echoEst32Gained, + (uint16_t)aecm->nearFilt[i]); + + // Current resolution is + // Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN- max(0,17-zeros16- zeros32)) + // Make sure we are in Q14 + tmp32no1 = (int32_t)WEBRTC_SPL_SHIFT_W32(tmpU32, resolutionDiff); + if (tmp32no1 > ONE_Q14) + { + hnl[i] = 0; + } else if (tmp32no1 < 0) + { + hnl[i] = ONE_Q14; + } else + { + // 1-echoEst/dfa + hnl[i] = ONE_Q14 - (int16_t)tmp32no1; + if (hnl[i] < 0) + { + hnl[i] = 0; + } + } + } + if (hnl[i]) + { + numPosCoef++; + } + } + // Only in wideband. Prevent the gain in upper band from being larger than + // in lower band. + if (aecm->mult == 2) + { + // TODO(bjornv): Investigate if the scaling of hnl[i] below can cause + // speech distortion in double-talk. 
+ for (i = 0; i < PART_LEN1; i++) + { + hnl[i] = (int16_t)((hnl[i] * hnl[i]) >> 14); + } + + for (i = kMinPrefBand; i <= kMaxPrefBand; i++) + { + avgHnl32 += (int32_t)hnl[i]; + } + RTC_DCHECK_GT(kMaxPrefBand - kMinPrefBand + 1, 0); + avgHnl32 /= (kMaxPrefBand - kMinPrefBand + 1); + + for (i = kMaxPrefBand; i < PART_LEN1; i++) + { + if (hnl[i] > (int16_t)avgHnl32) + { + hnl[i] = (int16_t)avgHnl32; + } + } + } + + // Calculate NLP gain, result is in Q14 + if (aecm->nlpFlag) + { + for (i = 0; i < PART_LEN1; i++) + { + // Truncate values close to zero and one. + if (hnl[i] > NLP_COMP_HIGH) + { + hnl[i] = ONE_Q14; + } else if (hnl[i] < NLP_COMP_LOW) + { + hnl[i] = 0; + } + + // Remove outliers + if (numPosCoef < 3) + { + nlpGain = 0; + } else + { + nlpGain = ONE_Q14; + } + + // NLP + if ((hnl[i] == ONE_Q14) && (nlpGain == ONE_Q14)) + { + hnl[i] = ONE_Q14; + } else + { + hnl[i] = (int16_t)((hnl[i] * nlpGain) >> 14); + } + + // multiply with Wiener coefficients + efw[i].real = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real, + hnl[i], 14)); + efw[i].imag = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag, + hnl[i], 14)); + } + } + else + { + // multiply with Wiener coefficients + for (i = 0; i < PART_LEN1; i++) + { + efw[i].real = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real, + hnl[i], 14)); + efw[i].imag = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag, + hnl[i], 14)); + } + } + + if (aecm->cngMode == AecmTrue) + { + ComfortNoise(aecm, ptrDfaClean, efw, hnl); + } + + InverseFFTAndWindow(aecm, fft, efw, output, nearendClean); + + return 0; +} + +static void ComfortNoise(AecmCore* aecm, + const uint16_t* dfa, + ComplexInt16* out, + const int16_t* lambda) { + int16_t i; + int16_t tmp16; + int32_t tmp32; + + int16_t randW16[PART_LEN]; + int16_t uReal[PART_LEN1]; + int16_t uImag[PART_LEN1]; + int32_t outLShift32; + int16_t noiseRShift16[PART_LEN1]; + + int16_t shiftFromNearToNoise = kNoiseEstQDomain - aecm->dfaCleanQDomain; + 
int16_t minTrackShift; + + RTC_DCHECK_GE(shiftFromNearToNoise, 0); + RTC_DCHECK_LT(shiftFromNearToNoise, 16); + + if (aecm->noiseEstCtr < 100) + { + // Track the minimum more quickly initially. + aecm->noiseEstCtr++; + minTrackShift = 6; + } else + { + minTrackShift = 9; + } + + // Estimate noise power. + for (i = 0; i < PART_LEN1; i++) + { + // Shift to the noise domain. + tmp32 = (int32_t)dfa[i]; + outLShift32 = tmp32 << shiftFromNearToNoise; + + if (outLShift32 < aecm->noiseEst[i]) + { + // Reset "too low" counter + aecm->noiseEstTooLowCtr[i] = 0; + // Track the minimum. + if (aecm->noiseEst[i] < (1 << minTrackShift)) + { + // For small values, decrease noiseEst[i] every + // |kNoiseEstIncCount| block. The regular approach below can not + // go further down due to truncation. + aecm->noiseEstTooHighCtr[i]++; + if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount) + { + aecm->noiseEst[i]--; + aecm->noiseEstTooHighCtr[i] = 0; // Reset the counter + } + } + else + { + aecm->noiseEst[i] -= ((aecm->noiseEst[i] - outLShift32) + >> minTrackShift); + } + } else + { + // Reset "too high" counter + aecm->noiseEstTooHighCtr[i] = 0; + // Ramp slowly upwards until we hit the minimum again. + if ((aecm->noiseEst[i] >> 19) > 0) + { + // Avoid overflow. + // Multiplication with 2049 will cause wrap around. 
Scale + // down first and then multiply + aecm->noiseEst[i] >>= 11; + aecm->noiseEst[i] *= 2049; + } + else if ((aecm->noiseEst[i] >> 11) > 0) + { + // Large enough for relative increase + aecm->noiseEst[i] *= 2049; + aecm->noiseEst[i] >>= 11; + } + else + { + // Make incremental increases based on size every + // |kNoiseEstIncCount| block + aecm->noiseEstTooLowCtr[i]++; + if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount) + { + aecm->noiseEst[i] += (aecm->noiseEst[i] >> 9) + 1; + aecm->noiseEstTooLowCtr[i] = 0; // Reset counter + } + } + } + } + + for (i = 0; i < PART_LEN1; i++) + { + tmp32 = aecm->noiseEst[i] >> shiftFromNearToNoise; + if (tmp32 > 32767) + { + tmp32 = 32767; + aecm->noiseEst[i] = tmp32 << shiftFromNearToNoise; + } + noiseRShift16[i] = (int16_t)tmp32; + + tmp16 = ONE_Q14 - lambda[i]; + noiseRShift16[i] = (int16_t)((tmp16 * noiseRShift16[i]) >> 14); + } + + // Generate a uniform random array on [0 2^15-1]. + WebRtcSpl_RandUArray(randW16, PART_LEN, &aecm->seed); + + // Generate noise according to estimated energy. + uReal[0] = 0; // Reject LF noise. + uImag[0] = 0; + for (i = 1; i < PART_LEN1; i++) + { + // Get a random index for the cos and sin tables over [0 359]. + tmp16 = (int16_t)((359 * randW16[i - 1]) >> 15); + + // Tables are in Q13. 
+    uReal[i] = (int16_t)((noiseRShift16[i] * WebRtcAecm_kCosTable[tmp16]) >>
+        13);
+    uImag[i] = (int16_t)((-noiseRShift16[i] * WebRtcAecm_kSinTable[tmp16]) >>
+        13);
+  }
+  uImag[PART_LEN] = 0;
+
+  for (i = 0; i < PART_LEN1; i++)
+  {
+    out[i].real = WebRtcSpl_AddSatW16(out[i].real, uReal[i]);
+    out[i].imag = WebRtcSpl_AddSatW16(out[i].imag, uImag[i]);
+  }
+}
diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_neon.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_neon.cc
new file mode 100644
index 000000000..5477b39b1
--- /dev/null
+++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_core_neon.cc
@@ -0,0 +1,203 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/aecm/aecm_core.h"
+
+#ifdef WEBRTC_ARCH_ARM_FAMILY
+
+#include <arm_neon.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/common_audio/signal_processing/include/real_fft.h"
+
+// TODO(kma): Re-write the corresponding assembly file, the offset
+// generating script and makefile, to replace these C functions.
+ +static inline void AddLanes(uint32_t* ptr, uint32x4_t v) { +#if defined(WEBRTC_ARCH_ARM64) + *(ptr) = vaddvq_u32(v); +#else + uint32x2_t tmp_v; + tmp_v = vadd_u32(vget_low_u32(v), vget_high_u32(v)); + tmp_v = vpadd_u32(tmp_v, tmp_v); + *(ptr) = vget_lane_u32(tmp_v, 0); +#endif +} + +void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echo_est, + uint32_t* far_energy, + uint32_t* echo_energy_adapt, + uint32_t* echo_energy_stored) { + int16_t* start_stored_p = aecm->channelStored; + int16_t* start_adapt_p = aecm->channelAdapt16; + int32_t* echo_est_p = echo_est; + const int16_t* end_stored_p = aecm->channelStored + PART_LEN; + const uint16_t* far_spectrum_p = far_spectrum; + int16x8_t store_v, adapt_v; + uint16x8_t spectrum_v; + uint32x4_t echo_est_v_low, echo_est_v_high; + uint32x4_t far_energy_v, echo_stored_v, echo_adapt_v; + + far_energy_v = vdupq_n_u32(0); + echo_adapt_v = vdupq_n_u32(0); + echo_stored_v = vdupq_n_u32(0); + + // Get energy for the delayed far end signal and estimated + // echo using both stored and adapted channels. 
+ // The C code: + // for (i = 0; i < PART_LEN1; i++) { + // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], + // far_spectrum[i]); + // (*far_energy) += (uint32_t)(far_spectrum[i]); + // *echo_energy_adapt += aecm->channelAdapt16[i] * far_spectrum[i]; + // (*echo_energy_stored) += (uint32_t)echo_est[i]; + // } + while (start_stored_p < end_stored_p) { + spectrum_v = vld1q_u16(far_spectrum_p); + adapt_v = vld1q_s16(start_adapt_p); + store_v = vld1q_s16(start_stored_p); + + far_energy_v = vaddw_u16(far_energy_v, vget_low_u16(spectrum_v)); + far_energy_v = vaddw_u16(far_energy_v, vget_high_u16(spectrum_v)); + + echo_est_v_low = vmull_u16(vreinterpret_u16_s16(vget_low_s16(store_v)), + vget_low_u16(spectrum_v)); + echo_est_v_high = vmull_u16(vreinterpret_u16_s16(vget_high_s16(store_v)), + vget_high_u16(spectrum_v)); + vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low)); + vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high)); + + echo_stored_v = vaddq_u32(echo_est_v_low, echo_stored_v); + echo_stored_v = vaddq_u32(echo_est_v_high, echo_stored_v); + + echo_adapt_v = vmlal_u16(echo_adapt_v, + vreinterpret_u16_s16(vget_low_s16(adapt_v)), + vget_low_u16(spectrum_v)); + echo_adapt_v = vmlal_u16(echo_adapt_v, + vreinterpret_u16_s16(vget_high_s16(adapt_v)), + vget_high_u16(spectrum_v)); + + start_stored_p += 8; + start_adapt_p += 8; + far_spectrum_p += 8; + echo_est_p += 8; + } + + AddLanes(far_energy, far_energy_v); + AddLanes(echo_energy_stored, echo_stored_v); + AddLanes(echo_energy_adapt, echo_adapt_v); + + echo_est[PART_LEN] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN], + far_spectrum[PART_LEN]); + *echo_energy_stored += (uint32_t)echo_est[PART_LEN]; + *far_energy += (uint32_t)far_spectrum[PART_LEN]; + *echo_energy_adapt += aecm->channelAdapt16[PART_LEN] * far_spectrum[PART_LEN]; +} + +void WebRtcAecm_StoreAdaptiveChannelNeon(AecmCore* aecm, + const uint16_t* far_spectrum, + int32_t* echo_est) { + RTC_DCHECK_EQ(0u, 
(uintptr_t)echo_est % 32); + RTC_DCHECK_EQ(0u, (uintptr_t)aecm->channelStored % 16); + RTC_DCHECK_EQ(0u, (uintptr_t)aecm->channelAdapt16 % 16); + + // This is C code of following optimized code. + // During startup we store the channel every block. + // memcpy(aecm->channelStored, + // aecm->channelAdapt16, + // sizeof(int16_t) * PART_LEN1); + // Recalculate echo estimate + // for (i = 0; i < PART_LEN; i += 4) { + // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], + // far_spectrum[i]); + // echo_est[i + 1] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 1], + // far_spectrum[i + 1]); + // echo_est[i + 2] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 2], + // far_spectrum[i + 2]); + // echo_est[i + 3] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 3], + // far_spectrum[i + 3]); + // } + // echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i], + // far_spectrum[i]); + const uint16_t* far_spectrum_p = far_spectrum; + int16_t* start_adapt_p = aecm->channelAdapt16; + int16_t* start_stored_p = aecm->channelStored; + const int16_t* end_stored_p = aecm->channelStored + PART_LEN; + int32_t* echo_est_p = echo_est; + + uint16x8_t far_spectrum_v; + int16x8_t adapt_v; + uint32x4_t echo_est_v_low, echo_est_v_high; + + while (start_stored_p < end_stored_p) { + far_spectrum_v = vld1q_u16(far_spectrum_p); + adapt_v = vld1q_s16(start_adapt_p); + + vst1q_s16(start_stored_p, adapt_v); + + echo_est_v_low = vmull_u16(vget_low_u16(far_spectrum_v), + vget_low_u16(vreinterpretq_u16_s16(adapt_v))); + echo_est_v_high = vmull_u16(vget_high_u16(far_spectrum_v), + vget_high_u16(vreinterpretq_u16_s16(adapt_v))); + + vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low)); + vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high)); + + far_spectrum_p += 8; + start_adapt_p += 8; + start_stored_p += 8; + echo_est_p += 8; + } + aecm->channelStored[PART_LEN] = aecm->channelAdapt16[PART_LEN]; + echo_est[PART_LEN] = 
WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN], + far_spectrum[PART_LEN]); +} + +void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore* aecm) { + RTC_DCHECK_EQ(0u, (uintptr_t)aecm->channelStored % 16); + RTC_DCHECK_EQ(0u, (uintptr_t)aecm->channelAdapt16 % 16); + RTC_DCHECK_EQ(0u, (uintptr_t)aecm->channelAdapt32 % 32); + + // The C code of following optimized code. + // for (i = 0; i < PART_LEN1; i++) { + // aecm->channelAdapt16[i] = aecm->channelStored[i]; + // aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32( + // (int32_t)aecm->channelStored[i], 16); + // } + + int16_t* start_stored_p = aecm->channelStored; + int16_t* start_adapt16_p = aecm->channelAdapt16; + int32_t* start_adapt32_p = aecm->channelAdapt32; + const int16_t* end_stored_p = start_stored_p + PART_LEN; + + int16x8_t stored_v; + int32x4_t adapt32_v_low, adapt32_v_high; + + while (start_stored_p < end_stored_p) { + stored_v = vld1q_s16(start_stored_p); + vst1q_s16(start_adapt16_p, stored_v); + + adapt32_v_low = vshll_n_s16(vget_low_s16(stored_v), 16); + adapt32_v_high = vshll_n_s16(vget_high_s16(stored_v), 16); + + vst1q_s32(start_adapt32_p, adapt32_v_low); + vst1q_s32(start_adapt32_p + 4, adapt32_v_high); + + start_stored_p += 8; + start_adapt16_p += 8; + start_adapt32_p += 8; + } + aecm->channelAdapt16[PART_LEN] = aecm->channelStored[PART_LEN]; + aecm->channelAdapt32[PART_LEN] = (int32_t)aecm->channelStored[PART_LEN] << 16; +} + +#endif diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_defines.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_defines.h new file mode 100644 index 000000000..6d63990b9 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/aecm_defines.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AECM_AECM_DEFINES_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_AECM_AECM_DEFINES_H_ + +#define AECM_DYNAMIC_Q /* Turn on/off dynamic Q-domain. */ + +/* Algorithm parameters */ +#define FRAME_LEN 80 /* Total frame length, 10 ms. */ + +#define PART_LEN 64 /* Length of partition. */ +#define PART_LEN_SHIFT 7 /* Length of (PART_LEN * 2) in base 2. */ + +#define PART_LEN1 (PART_LEN + 1) /* Unique fft coefficients. */ +#define PART_LEN2 (PART_LEN << 1) /* Length of partition * 2. */ +#define PART_LEN4 (PART_LEN << 2) /* Length of partition * 4. */ +#define FAR_BUF_LEN PART_LEN4 /* Length of buffers. */ +#define MAX_DELAY 100 + +/* Counter parameters */ +#define CONV_LEN 512 /* Convergence length used at startup. */ +#define CONV_LEN2 (CONV_LEN << 1) /* Used at startup. */ + +/* Energy parameters */ +#define MAX_BUF_LEN 64 /* History length of energy signals. */ +#define FAR_ENERGY_MIN 1025 /* Lowest Far energy level: At least 2 */ + /* in energy. */ +#define FAR_ENERGY_DIFF 929 /* Allowed difference between max */ + /* and min. */ +#define ENERGY_DEV_OFFSET 0 /* The energy error offset in Q8. */ +#define ENERGY_DEV_TOL 400 /* The energy estimation tolerance (Q8). */ +#define FAR_ENERGY_VAD_REGION 230 /* Far VAD tolerance region. */ + +/* Stepsize parameters */ +#define MU_MIN 10 /* Min stepsize 2^-MU_MIN (far end energy */ + /* dependent). */ +#define MU_MAX 1 /* Max stepsize 2^-MU_MAX (far end energy */ + /* dependent). 
*/ +#define MU_DIFF 9 /* MU_MIN - MU_MAX */ + +/* Channel parameters */ +#define MIN_MSE_COUNT 20 /* Min number of consecutive blocks with enough */ + /* far end energy to compare channel estimates. */ +#define MIN_MSE_DIFF 29 /* The ratio between adapted and stored channel to */ + /* accept a new storage (0.8 in Q-MSE_RESOLUTION). */ +#define MSE_RESOLUTION 5 /* MSE parameter resolution. */ +#define RESOLUTION_CHANNEL16 12 /* W16 Channel in Q-RESOLUTION_CHANNEL16. */ +#define RESOLUTION_CHANNEL32 28 /* W32 Channel in Q-RESOLUTION_CHANNEL. */ +#define CHANNEL_VAD 16 /* Minimum energy in frequency band */ + /* to update channel. */ + +/* Suppression gain parameters: SUPGAIN parameters in Q-(RESOLUTION_SUPGAIN). */ +#define RESOLUTION_SUPGAIN 8 /* Channel in Q-(RESOLUTION_SUPGAIN). */ +#define SUPGAIN_DEFAULT (1 << RESOLUTION_SUPGAIN) /* Default. */ +#define SUPGAIN_ERROR_PARAM_A 3072 /* Estimation error parameter */ + /* (Maximum gain) (8 in Q8). */ +#define SUPGAIN_ERROR_PARAM_B 1536 /* Estimation error parameter */ + /* (Gain before going down). */ +#define SUPGAIN_ERROR_PARAM_D SUPGAIN_DEFAULT /* Estimation error parameter */ + /* (Should be the same as Default) (1 in Q8). */ +#define SUPGAIN_EPC_DT 200 /* SUPGAIN_ERROR_PARAM_C * ENERGY_DEV_TOL */ + +/* Defines for "check delay estimation" */ +#define CORR_WIDTH 31 /* Number of samples to correlate over. */ +#define CORR_MAX 16 /* Maximum correlation offset. */ +#define CORR_MAX_BUF 63 +#define CORR_DEV 4 +#define CORR_MAX_LEVEL 20 +#define CORR_MAX_LOW 4 +#define CORR_BUF_LEN (CORR_MAX << 1) + 1 +/* Note that CORR_WIDTH + 2*CORR_MAX <= MAX_BUF_LEN. 
*/ + +#define ONE_Q14 (1 << 14) + +/* NLP defines */ +#define NLP_COMP_LOW 3277 /* 0.2 in Q14 */ +#define NLP_COMP_HIGH ONE_Q14 /* 1 in Q14 */ + +#endif diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.cc new file mode 100644 index 000000000..940554022 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.cc @@ -0,0 +1,648 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h" + +#ifdef AEC_DEBUG +#include +#endif +#include + +extern "C" { +#include "webrtc/common_audio/ring_buffer.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +} +#include "webrtc/modules/audio_processing/aecm/aecm_core.h" + +#define BUF_SIZE_FRAMES 50 // buffer size (frames) +// Maximum length of resampled signal. 
Must be an integer multiple of frames +// (ceil(1/(1 + MIN_SKEW)*2) + 1)*FRAME_LEN +// The factor of 2 handles wb, and the + 1 is as a safety margin +#define MAX_RESAMP_LEN (5 * FRAME_LEN) + +static const size_t kBufSizeSamp = BUF_SIZE_FRAMES * FRAME_LEN; // buffer size (samples) +static const int kSampMsNb = 8; // samples per ms in nb +// Target suppression levels for nlp modes +// log{0.001, 0.00001, 0.00000001} +static const int kInitCheck = 42; + +typedef struct +{ + int sampFreq; + int scSampFreq; + short bufSizeStart; + int knownDelay; + + // Stores the last frame added to the farend buffer + short farendOld[2][FRAME_LEN]; + short initFlag; // indicates if AEC has been initialized + + // Variables used for averaging far end buffer size + short counter; + short sum; + short firstVal; + short checkBufSizeCtr; + + // Variables used for delay shifts + short msInSndCardBuf; + short filtDelay; + int timeForDelayChange; + int ECstartup; + int checkBuffSize; + int delayChange; + short lastDelayDiff; + + int16_t echoMode; + +#ifdef AEC_DEBUG + FILE *bufFile; + FILE *delayFile; + FILE *preCompFile; + FILE *postCompFile; +#endif // AEC_DEBUG + // Structures + RingBuffer *farendBuf; + + AecmCore* aecmCore; +} AecMobile; + +// Estimates delay to set the position of the farend buffer read pointer +// (controlled by knownDelay) +static int WebRtcAecm_EstBufDelay(AecMobile* aecmInst, short msInSndCardBuf); + +// Stuffs the farend buffer if the estimated delay is too large +static int WebRtcAecm_DelayComp(AecMobile* aecmInst); + +void* WebRtcAecm_Create() { + AecMobile* aecm = static_cast(malloc(sizeof(AecMobile))); + + WebRtcSpl_Init(); + + aecm->aecmCore = WebRtcAecm_CreateCore(); + if (!aecm->aecmCore) { + WebRtcAecm_Free(aecm); + return NULL; + } + + aecm->farendBuf = WebRtc_CreateBuffer(kBufSizeSamp, + sizeof(int16_t)); + if (!aecm->farendBuf) + { + WebRtcAecm_Free(aecm); + return NULL; + } + + aecm->initFlag = 0; + +#ifdef AEC_DEBUG + aecm->aecmCore->farFile = 
fopen("aecFar.pcm","wb"); + aecm->aecmCore->nearFile = fopen("aecNear.pcm","wb"); + aecm->aecmCore->outFile = fopen("aecOut.pcm","wb"); + //aecm->aecmCore->outLpFile = fopen("aecOutLp.pcm","wb"); + + aecm->bufFile = fopen("aecBuf.dat", "wb"); + aecm->delayFile = fopen("aecDelay.dat", "wb"); + aecm->preCompFile = fopen("preComp.pcm", "wb"); + aecm->postCompFile = fopen("postComp.pcm", "wb"); +#endif // AEC_DEBUG + return aecm; +} + +void WebRtcAecm_Free(void* aecmInst) { + AecMobile* aecm = static_cast(aecmInst); + + if (aecm == NULL) { + return; + } + +#ifdef AEC_DEBUG + fclose(aecm->aecmCore->farFile); + fclose(aecm->aecmCore->nearFile); + fclose(aecm->aecmCore->outFile); + //fclose(aecm->aecmCore->outLpFile); + + fclose(aecm->bufFile); + fclose(aecm->delayFile); + fclose(aecm->preCompFile); + fclose(aecm->postCompFile); +#endif // AEC_DEBUG + WebRtcAecm_FreeCore(aecm->aecmCore); + WebRtc_FreeBuffer(aecm->farendBuf); + free(aecm); +} + +int32_t WebRtcAecm_Init(void *aecmInst, int32_t sampFreq) +{ + AecMobile* aecm = static_cast(aecmInst); + AecmConfig aecConfig; + + if (aecm == NULL) + { + return -1; + } + + /*if (sampFreq != 8000 && sampFreq != 16000) + { + return AECM_BAD_PARAMETER_ERROR; + }*/ + aecm->sampFreq = sampFreq; + + // Initialize AECM core + if (WebRtcAecm_InitCore(aecm->aecmCore, aecm->sampFreq) == -1) + { + return AECM_UNSPECIFIED_ERROR; + } + + // Initialize farend buffer + WebRtc_InitBuffer(aecm->farendBuf); + + aecm->initFlag = kInitCheck; // indicates that initialization has been done + + aecm->delayChange = 1; + + aecm->sum = 0; + aecm->counter = 0; + aecm->checkBuffSize = 1; + aecm->firstVal = 0; + + aecm->ECstartup = 1; + aecm->bufSizeStart = 0; + aecm->checkBufSizeCtr = 0; + aecm->filtDelay = 0; + aecm->timeForDelayChange = 0; + aecm->knownDelay = 0; + aecm->lastDelayDiff = 0; + + memset(&aecm->farendOld[0][0], 0, 160); + + // Default settings. 
+ aecConfig.cngMode = AecmTrue; + aecConfig.echoMode = 3; + + if (WebRtcAecm_set_config(aecm, aecConfig) == -1) + { + return AECM_UNSPECIFIED_ERROR; + } + + return 0; +} + +// Returns any error that is caused when buffering the +// farend signal. +int32_t WebRtcAecm_GetBufferFarendError(void *aecmInst, const int16_t *farend, + size_t nrOfSamples) { + AecMobile* aecm = static_cast(aecmInst); + + if (aecm == NULL) + return -1; + + if (farend == NULL) + return AECM_NULL_POINTER_ERROR; + + if (aecm->initFlag != kInitCheck) + return AECM_UNINITIALIZED_ERROR; + + if (nrOfSamples != 80*aecm->aecmCore->mult && nrOfSamples != 160*aecm->aecmCore->mult) + return AECM_BAD_PARAMETER_ERROR; + + return 0; +} + + +int32_t WebRtcAecm_BufferFarend(void *aecmInst, const int16_t *farend, + size_t nrOfSamples) { + AecMobile* aecm = static_cast(aecmInst); + + const int32_t err = + WebRtcAecm_GetBufferFarendError(aecmInst, farend, nrOfSamples); + + if (err != 0) + return err; + + // TODO(unknown): Is this really a good idea? 
+ if (!aecm->ECstartup) + { + WebRtcAecm_DelayComp(aecm); + } + + WebRtc_WriteBuffer(aecm->farendBuf, farend, nrOfSamples); + + return 0; +} + +int32_t WebRtcAecm_Process(void *aecmInst, const int16_t *nearendNoisy, + const int16_t *nearendClean, int16_t *out, + size_t nrOfSamples, int16_t msInSndCardBuf) +{ + AecMobile* aecm = static_cast(aecmInst); + int32_t retVal = 0; + size_t i; + short nmbrOfFilledBuffers; + size_t nBlocks10ms; + size_t nFrames; +#ifdef AEC_DEBUG + short msInAECBuf; +#endif + + if (aecm == NULL) + { + return -1; + } + + if (nearendNoisy == NULL) + { + return AECM_NULL_POINTER_ERROR; + } + + if (out == NULL) + { + return AECM_NULL_POINTER_ERROR; + } + + if (aecm->initFlag != kInitCheck) + { + return AECM_UNINITIALIZED_ERROR; + } + + if (nrOfSamples != 80*aecm->aecmCore->mult && nrOfSamples != 160*aecm->aecmCore->mult) + { + return AECM_BAD_PARAMETER_ERROR; + } + + if (msInSndCardBuf < 0) + { + msInSndCardBuf = 0; + retVal = AECM_BAD_PARAMETER_WARNING; + } else if (msInSndCardBuf > 500) + { + msInSndCardBuf = 500; + retVal = AECM_BAD_PARAMETER_WARNING; + } + msInSndCardBuf += 10; + aecm->msInSndCardBuf = msInSndCardBuf; + + nFrames = nrOfSamples / FRAME_LEN; + nBlocks10ms = nFrames / aecm->aecmCore->mult; + + if (aecm->ECstartup) + { + if (nearendClean == NULL) + { + if (out != nearendNoisy) + { + memcpy(out, nearendNoisy, sizeof(short) * nrOfSamples); + } + } else if (out != nearendClean) + { + memcpy(out, nearendClean, sizeof(short) * nrOfSamples); + } + + nmbrOfFilledBuffers = + (short) WebRtc_available_read(aecm->farendBuf) / FRAME_LEN; + // The AECM is in the start up mode + // AECM is disabled until the soundcard buffer and farend buffers are OK + + // Mechanism to ensure that the soundcard buffer is reasonably stable. + if (aecm->checkBuffSize) + { + aecm->checkBufSizeCtr++; + // Before we fill up the far end buffer we require the amount of data on the + // sound card to be stable (+/-8 ms) compared to the first value. 
This + // comparison is made during the following 4 consecutive frames. If it seems + // to be stable then we start to fill up the far end buffer. + + if (aecm->counter == 0) + { + aecm->firstVal = aecm->msInSndCardBuf; + aecm->sum = 0; + } + + if (abs(aecm->firstVal - aecm->msInSndCardBuf) + < WEBRTC_SPL_MAX(0.2 * aecm->msInSndCardBuf, kSampMsNb)) + { + aecm->sum += aecm->msInSndCardBuf; + aecm->counter++; + } else + { + aecm->counter = 0; + } + + if (aecm->counter * nBlocks10ms >= 6) + { + // The farend buffer size is determined in blocks of 80 samples + // Use 75% of the average value of the soundcard buffer + aecm->bufSizeStart + = WEBRTC_SPL_MIN((3 * aecm->sum + * aecm->aecmCore->mult) / (aecm->counter * 40), BUF_SIZE_FRAMES); + // buffersize has now been determined + aecm->checkBuffSize = 0; + } + + if (aecm->checkBufSizeCtr * nBlocks10ms > 50) + { + // for really bad sound cards, don't disable echocanceller for more than 0.5 sec + aecm->bufSizeStart = WEBRTC_SPL_MIN((3 * aecm->msInSndCardBuf + * aecm->aecmCore->mult) / 40, BUF_SIZE_FRAMES); + aecm->checkBuffSize = 0; + } + } + + // if checkBuffSize changed in the if-statement above + if (!aecm->checkBuffSize) + { + // soundcard buffer is now reasonably stable + // When the far end buffer is filled with approximately the same amount of + // data as the amount on the sound card we end the start up phase and start + // to cancel echoes. 
+ + if (nmbrOfFilledBuffers == aecm->bufSizeStart) + { + aecm->ECstartup = 0; // Enable the AECM + } else if (nmbrOfFilledBuffers > aecm->bufSizeStart) + { + WebRtc_MoveReadPtr(aecm->farendBuf, + (int) WebRtc_available_read(aecm->farendBuf) + - (int) aecm->bufSizeStart * FRAME_LEN); + aecm->ECstartup = 0; + } + } + + } else + { + // AECM is enabled + + // Note only 1 block supported for nb and 2 blocks for wb + for (i = 0; i < nFrames; i++) + { + int16_t farend[FRAME_LEN]; + const int16_t* farend_ptr = NULL; + + nmbrOfFilledBuffers = + (short) WebRtc_available_read(aecm->farendBuf) / FRAME_LEN; + + // Check that there is data in the far end buffer + if (nmbrOfFilledBuffers > 0) + { + // Get the next 80 samples from the farend buffer + WebRtc_ReadBuffer(aecm->farendBuf, (void**) &farend_ptr, farend, + FRAME_LEN); + + // Always store the last frame for use when we run out of data + memcpy(&(aecm->farendOld[i][0]), farend_ptr, + FRAME_LEN * sizeof(short)); + } else + { + // We have no data so we use the last played frame + memcpy(farend, &(aecm->farendOld[i][0]), FRAME_LEN * sizeof(short)); + farend_ptr = farend; + } + + // Call buffer delay estimator when all data is extracted, + // i,e. i = 0 for NB and i = 1 for WB + if ((i == 0 && aecm->sampFreq == 8000) || (i == 1 && aecm->sampFreq == 16000)) + { + WebRtcAecm_EstBufDelay(aecm, aecm->msInSndCardBuf); + } + + // Call the AECM + /*WebRtcAecm_ProcessFrame(aecm->aecmCore, farend, &nearend[FRAME_LEN * i], + &out[FRAME_LEN * i], aecm->knownDelay);*/ + if (WebRtcAecm_ProcessFrame(aecm->aecmCore, + farend_ptr, + &nearendNoisy[FRAME_LEN * i], + (nearendClean + ? 
&nearendClean[FRAME_LEN * i] + : NULL), + &out[FRAME_LEN * i]) == -1) + return -1; + } + } + +#ifdef AEC_DEBUG + msInAECBuf = (short) WebRtc_available_read(aecm->farendBuf) / + (kSampMsNb * aecm->aecmCore->mult); + fwrite(&msInAECBuf, 2, 1, aecm->bufFile); + fwrite(&(aecm->knownDelay), sizeof(aecm->knownDelay), 1, aecm->delayFile); +#endif + + return retVal; +} + +int32_t WebRtcAecm_set_config(void *aecmInst, AecmConfig config) +{ + AecMobile* aecm = static_cast(aecmInst); + + if (aecm == NULL) + { + return -1; + } + + if (aecm->initFlag != kInitCheck) + { + return AECM_UNINITIALIZED_ERROR; + } + + if (config.cngMode != AecmFalse && config.cngMode != AecmTrue) + { + return AECM_BAD_PARAMETER_ERROR; + } + aecm->aecmCore->cngMode = config.cngMode; + + if (config.echoMode < 0 || config.echoMode > 4) + { + return AECM_BAD_PARAMETER_ERROR; + } + aecm->echoMode = config.echoMode; + + if (aecm->echoMode == 0) + { + aecm->aecmCore->supGain = SUPGAIN_DEFAULT >> 3; + aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT >> 3; + aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A >> 3; + aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D >> 3; + aecm->aecmCore->supGainErrParamDiffAB = (SUPGAIN_ERROR_PARAM_A >> 3) + - (SUPGAIN_ERROR_PARAM_B >> 3); + aecm->aecmCore->supGainErrParamDiffBD = (SUPGAIN_ERROR_PARAM_B >> 3) + - (SUPGAIN_ERROR_PARAM_D >> 3); + } else if (aecm->echoMode == 1) + { + aecm->aecmCore->supGain = SUPGAIN_DEFAULT >> 2; + aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT >> 2; + aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A >> 2; + aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D >> 2; + aecm->aecmCore->supGainErrParamDiffAB = (SUPGAIN_ERROR_PARAM_A >> 2) + - (SUPGAIN_ERROR_PARAM_B >> 2); + aecm->aecmCore->supGainErrParamDiffBD = (SUPGAIN_ERROR_PARAM_B >> 2) + - (SUPGAIN_ERROR_PARAM_D >> 2); + } else if (aecm->echoMode == 2) + { + aecm->aecmCore->supGain = SUPGAIN_DEFAULT >> 1; + aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT >> 1; + 
aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A >> 1; + aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D >> 1; + aecm->aecmCore->supGainErrParamDiffAB = (SUPGAIN_ERROR_PARAM_A >> 1) + - (SUPGAIN_ERROR_PARAM_B >> 1); + aecm->aecmCore->supGainErrParamDiffBD = (SUPGAIN_ERROR_PARAM_B >> 1) + - (SUPGAIN_ERROR_PARAM_D >> 1); + } else if (aecm->echoMode == 3) + { + aecm->aecmCore->supGain = SUPGAIN_DEFAULT; + aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT; + aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A; + aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D; + aecm->aecmCore->supGainErrParamDiffAB = SUPGAIN_ERROR_PARAM_A - SUPGAIN_ERROR_PARAM_B; + aecm->aecmCore->supGainErrParamDiffBD = SUPGAIN_ERROR_PARAM_B - SUPGAIN_ERROR_PARAM_D; + } else if (aecm->echoMode == 4) + { + aecm->aecmCore->supGain = SUPGAIN_DEFAULT << 1; + aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT << 1; + aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A << 1; + aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D << 1; + aecm->aecmCore->supGainErrParamDiffAB = (SUPGAIN_ERROR_PARAM_A << 1) + - (SUPGAIN_ERROR_PARAM_B << 1); + aecm->aecmCore->supGainErrParamDiffBD = (SUPGAIN_ERROR_PARAM_B << 1) + - (SUPGAIN_ERROR_PARAM_D << 1); + } + + return 0; +} + +int32_t WebRtcAecm_InitEchoPath(void* aecmInst, + const void* echo_path, + size_t size_bytes) +{ + AecMobile* aecm = static_cast(aecmInst); + const int16_t* echo_path_ptr = static_cast(echo_path); + + if (aecmInst == NULL) { + return -1; + } + if (echo_path == NULL) { + return AECM_NULL_POINTER_ERROR; + } + if (size_bytes != WebRtcAecm_echo_path_size_bytes()) + { + // Input channel size does not match the size of AECM + return AECM_BAD_PARAMETER_ERROR; + } + if (aecm->initFlag != kInitCheck) + { + return AECM_UNINITIALIZED_ERROR; + } + + WebRtcAecm_InitEchoPathCore(aecm->aecmCore, echo_path_ptr); + + return 0; +} + +int32_t WebRtcAecm_GetEchoPath(void* aecmInst, + void* echo_path, + size_t size_bytes) +{ + AecMobile* 
aecm = static_cast(aecmInst); + int16_t* echo_path_ptr = static_cast(echo_path); + + if (aecmInst == NULL) { + return -1; + } + if (echo_path == NULL) { + return AECM_NULL_POINTER_ERROR; + } + if (size_bytes != WebRtcAecm_echo_path_size_bytes()) + { + // Input channel size does not match the size of AECM + return AECM_BAD_PARAMETER_ERROR; + } + if (aecm->initFlag != kInitCheck) + { + return AECM_UNINITIALIZED_ERROR; + } + + memcpy(echo_path_ptr, aecm->aecmCore->channelStored, size_bytes); + return 0; +} + +size_t WebRtcAecm_echo_path_size_bytes() +{ + return (PART_LEN1 * sizeof(int16_t)); +} + + +static int WebRtcAecm_EstBufDelay(AecMobile* aecm, short msInSndCardBuf) { + short delayNew, nSampSndCard; + short nSampFar = (short) WebRtc_available_read(aecm->farendBuf); + short diff; + + nSampSndCard = msInSndCardBuf * kSampMsNb * aecm->aecmCore->mult; + + delayNew = nSampSndCard - nSampFar; + + if (delayNew < FRAME_LEN) + { + WebRtc_MoveReadPtr(aecm->farendBuf, FRAME_LEN); + delayNew += FRAME_LEN; + } + + aecm->filtDelay = WEBRTC_SPL_MAX(0, (8 * aecm->filtDelay + 2 * delayNew) / 10); + + diff = aecm->filtDelay - aecm->knownDelay; + if (diff > 224) + { + if (aecm->lastDelayDiff < 96) + { + aecm->timeForDelayChange = 0; + } else + { + aecm->timeForDelayChange++; + } + } else if (diff < 96 && aecm->knownDelay > 0) + { + if (aecm->lastDelayDiff > 224) + { + aecm->timeForDelayChange = 0; + } else + { + aecm->timeForDelayChange++; + } + } else + { + aecm->timeForDelayChange = 0; + } + aecm->lastDelayDiff = diff; + + if (aecm->timeForDelayChange > 25) + { + aecm->knownDelay = WEBRTC_SPL_MAX((int)aecm->filtDelay - 160, 0); + } + return 0; +} + +static int WebRtcAecm_DelayComp(AecMobile* aecm) { + int nSampFar = (int) WebRtc_available_read(aecm->farendBuf); + int nSampSndCard, delayNew, nSampAdd; + const int maxStuffSamp = 10 * FRAME_LEN; + + nSampSndCard = aecm->msInSndCardBuf * kSampMsNb * aecm->aecmCore->mult; + delayNew = nSampSndCard - nSampFar; + + if (delayNew > 
FAR_BUF_LEN - FRAME_LEN * aecm->aecmCore->mult) + { + // The difference of the buffer sizes is larger than the maximum + // allowed known delay. Compensate by stuffing the buffer. + nSampAdd = (int)(WEBRTC_SPL_MAX(((nSampSndCard >> 1) - nSampFar), + FRAME_LEN)); + nSampAdd = WEBRTC_SPL_MIN(nSampAdd, maxStuffSamp); + + WebRtc_MoveReadPtr(aecm->farendBuf, -nSampAdd); + aecm->delayChange = 1; // the delay needs to be updated + } + + return 0; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.h new file mode 100644 index 000000000..b45ff5990 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/aecm/echo_control_mobile.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_ + +#include + +#include "webrtc/typedefs.h" + +enum { + AecmFalse = 0, + AecmTrue +}; + +// Errors +#define AECM_UNSPECIFIED_ERROR 12000 +#define AECM_UNSUPPORTED_FUNCTION_ERROR 12001 +#define AECM_UNINITIALIZED_ERROR 12002 +#define AECM_NULL_POINTER_ERROR 12003 +#define AECM_BAD_PARAMETER_ERROR 12004 + +// Warnings +#define AECM_BAD_PARAMETER_WARNING 12100 + +typedef struct { + int16_t cngMode; // AECM_FALSE, AECM_TRUE (default) + int16_t echoMode; // 0, 1, 2, 3 (default), 4 +} AecmConfig; + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Allocates the memory needed by the AECM. 
The memory needs to be + * initialized separately using the WebRtcAecm_Init() function. + * Returns a pointer to the instance and a nullptr at failure. + */ +void* WebRtcAecm_Create(); + +/* + * This function releases the memory allocated by WebRtcAecm_Create() + * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecmInst Pointer to the AECM instance + */ +void WebRtcAecm_Free(void* aecmInst); + +/* + * Initializes an AECM instance. + * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecmInst Pointer to the AECM instance + * int32_t sampFreq Sampling frequency of data + * + * Outputs Description + * ------------------------------------------------------------------- + * int32_t return 0: OK + * 1200-12004,12100: error/warning + */ +int32_t WebRtcAecm_Init(void* aecmInst, int32_t sampFreq); + +/* + * Inserts an 80 or 160 sample block of data into the farend buffer. + * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecmInst Pointer to the AECM instance + * int16_t* farend In buffer containing one frame of + * farend signal + * int16_t nrOfSamples Number of samples in farend buffer + * + * Outputs Description + * ------------------------------------------------------------------- + * int32_t return 0: OK + * 1200-12004,12100: error/warning + */ +int32_t WebRtcAecm_BufferFarend(void* aecmInst, + const int16_t* farend, + size_t nrOfSamples); + +/* + * Reports any errors that would arise when buffering a farend buffer. 
+ * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecmInst Pointer to the AECM instance + * int16_t* farend In buffer containing one frame of + * farend signal + * int16_t nrOfSamples Number of samples in farend buffer + * + * Outputs Description + * ------------------------------------------------------------------- + * int32_t return 0: OK + * 1200-12004,12100: error/warning + */ +int32_t WebRtcAecm_GetBufferFarendError(void* aecmInst, + const int16_t* farend, + size_t nrOfSamples); + +/* + * Runs the AECM on an 80 or 160 sample blocks of data. + * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecmInst Pointer to the AECM instance + * int16_t* nearendNoisy In buffer containing one frame of + * reference nearend+echo signal. If + * noise reduction is active, provide + * the noisy signal here. + * int16_t* nearendClean In buffer containing one frame of + * nearend+echo signal. If noise + * reduction is active, provide the + * clean signal here. Otherwise pass a + * NULL pointer. 
+ * int16_t nrOfSamples Number of samples in nearend buffer + * int16_t msInSndCardBuf Delay estimate for sound card and + * system buffers + * + * Outputs Description + * ------------------------------------------------------------------- + * int16_t* out Out buffer, one frame of processed nearend + * int32_t return 0: OK + * 1200-12004,12100: error/warning + */ +int32_t WebRtcAecm_Process(void* aecmInst, + const int16_t* nearendNoisy, + const int16_t* nearendClean, + int16_t* out, + size_t nrOfSamples, + int16_t msInSndCardBuf); + +/* + * This function enables the user to set certain parameters on-the-fly + * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecmInst Pointer to the AECM instance + * AecmConfig config Config instance that contains all + * properties to be set + * + * Outputs Description + * ------------------------------------------------------------------- + * int32_t return 0: OK + * 1200-12004,12100: error/warning + */ +int32_t WebRtcAecm_set_config(void* aecmInst, AecmConfig config); + +/* + * This function enables the user to set the echo path on-the-fly. 
+ * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecmInst Pointer to the AECM instance + * void* echo_path Pointer to the echo path to be set + * size_t size_bytes Size in bytes of the echo path + * + * Outputs Description + * ------------------------------------------------------------------- + * int32_t return 0: OK + * 1200-12004,12100: error/warning + */ +int32_t WebRtcAecm_InitEchoPath(void* aecmInst, + const void* echo_path, + size_t size_bytes); + +/* + * This function enables the user to get the currently used echo path + * on-the-fly + * + * Inputs Description + * ------------------------------------------------------------------- + * void* aecmInst Pointer to the AECM instance + * void* echo_path Pointer to echo path + * size_t size_bytes Size in bytes of the echo path + * + * Outputs Description + * ------------------------------------------------------------------- + * int32_t return 0: OK + * 1200-12004,12100: error/warning + */ +int32_t WebRtcAecm_GetEchoPath(void* aecmInst, + void* echo_path, + size_t size_bytes); + +/* + * This function enables the user to get the echo path size in bytes + * + * Outputs Description + * ------------------------------------------------------------------- + * size_t return Size in bytes + */ +size_t WebRtcAecm_echo_path_size_bytes(); + + +#ifdef __cplusplus +} +#endif +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.c new file mode 100644 index 000000000..d2155648d --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.c @@ -0,0 +1,1390 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +/* analog_agc.c + * + * Using a feedback system, determines an appropriate analog volume level + * given an input signal and current volume level. Targets a conservative + * signal level and is intended for use with a digital AGC to apply + * additional gain. + * + */ + +#include "webrtc/modules/audio_processing/agc/legacy/analog_agc.h" + +#include +#ifdef WEBRTC_AGC_DEBUG_DUMP +#include +#endif + +#include "webrtc/base/checks.h" + +/* The slope of in Q13*/ +static const int16_t kSlope1[8] = {21793, 12517, 7189, 4129, + 2372, 1362, 472, 78}; + +/* The offset in Q14 */ +static const int16_t kOffset1[8] = {25395, 23911, 22206, 20737, + 19612, 18805, 17951, 17367}; + +/* The slope of in Q13*/ +static const int16_t kSlope2[8] = {2063, 1731, 1452, 1218, 1021, 857, 597, 337}; + +/* The offset in Q14 */ +static const int16_t kOffset2[8] = {18432, 18379, 18290, 18177, + 18052, 17920, 17670, 17286}; + +static const int16_t kMuteGuardTimeMs = 8000; +static const int16_t kInitCheck = 42; +static const size_t kNumSubframes = 10; + +/* Default settings if config is not used */ +#define AGC_DEFAULT_TARGET_LEVEL 3 +#define AGC_DEFAULT_COMP_GAIN 9 +/* This is the target level for the analog part in ENV scale. To convert to RMS + * scale you + * have to add OFFSET_ENV_TO_RMS. + */ +#define ANALOG_TARGET_LEVEL 11 +#define ANALOG_TARGET_LEVEL_2 5 // ANALOG_TARGET_LEVEL / 2 +/* Offset between RMS scale (analog part) and ENV scale (digital part). This + * value actually + * varies with the FIXED_ANALOG_TARGET_LEVEL, hence we should in the future + * replace it with + * a table. 
+ */ +#define OFFSET_ENV_TO_RMS 9 +/* The reference input level at which the digital part gives an output of + * targetLevelDbfs + * (desired level) if we have no compression gain. This level should be set high + * enough not + * to compress the peaks due to the dynamics. + */ +#define DIGITAL_REF_AT_0_COMP_GAIN 4 +/* Speed of reference level decrease. + */ +#define DIFF_REF_TO_ANALOG 5 + +#ifdef MIC_LEVEL_FEEDBACK +#define NUM_BLOCKS_IN_SAT_BEFORE_CHANGE_TARGET 7 +#endif +/* Size of analog gain table */ +#define GAIN_TBL_LEN 32 +/* Matlab code: + * fprintf(1, '\t%i, %i, %i, %i,\n', round(10.^(linspace(0,10,32)/20) * 2^12)); + */ +/* Q12 */ +static const uint16_t kGainTableAnalog[GAIN_TBL_LEN] = { + 4096, 4251, 4412, 4579, 4752, 4932, 5118, 5312, 5513, 5722, 5938, + 6163, 6396, 6638, 6889, 7150, 7420, 7701, 7992, 8295, 8609, 8934, + 9273, 9623, 9987, 10365, 10758, 11165, 11587, 12025, 12480, 12953}; + +/* Gain/Suppression tables for virtual Mic (in Q10) */ +static const uint16_t kGainTableVirtualMic[128] = { + 1052, 1081, 1110, 1141, 1172, 1204, 1237, 1271, 1305, 1341, 1378, + 1416, 1454, 1494, 1535, 1577, 1620, 1664, 1710, 1757, 1805, 1854, + 1905, 1957, 2010, 2065, 2122, 2180, 2239, 2301, 2364, 2428, 2495, + 2563, 2633, 2705, 2779, 2855, 2933, 3013, 3096, 3180, 3267, 3357, + 3449, 3543, 3640, 3739, 3842, 3947, 4055, 4166, 4280, 4397, 4517, + 4640, 4767, 4898, 5032, 5169, 5311, 5456, 5605, 5758, 5916, 6078, + 6244, 6415, 6590, 6770, 6956, 7146, 7341, 7542, 7748, 7960, 8178, + 8402, 8631, 8867, 9110, 9359, 9615, 9878, 10148, 10426, 10711, 11004, + 11305, 11614, 11932, 12258, 12593, 12938, 13292, 13655, 14029, 14412, 14807, + 15212, 15628, 16055, 16494, 16945, 17409, 17885, 18374, 18877, 19393, 19923, + 20468, 21028, 21603, 22194, 22801, 23425, 24065, 24724, 25400, 26095, 26808, + 27541, 28295, 29069, 29864, 30681, 31520, 32382}; +static const uint16_t kSuppressionTableVirtualMic[128] = { + 1024, 1006, 988, 970, 952, 935, 918, 902, 886, 870, 854, 839, 824, 809, 794, 
+ 780, 766, 752, 739, 726, 713, 700, 687, 675, 663, 651, 639, 628, 616, 605, + 594, 584, 573, 563, 553, 543, 533, 524, 514, 505, 496, 487, 478, 470, 461, + 453, 445, 437, 429, 421, 414, 406, 399, 392, 385, 378, 371, 364, 358, 351, + 345, 339, 333, 327, 321, 315, 309, 304, 298, 293, 288, 283, 278, 273, 268, + 263, 258, 254, 249, 244, 240, 236, 232, 227, 223, 219, 215, 211, 208, 204, + 200, 197, 193, 190, 186, 183, 180, 176, 173, 170, 167, 164, 161, 158, 155, + 153, 150, 147, 145, 142, 139, 137, 134, 132, 130, 127, 125, 123, 121, 118, + 116, 114, 112, 110, 108, 106, 104, 102}; + +/* Table for target energy levels. Values in Q(-7) + * Matlab code + * targetLevelTable = fprintf('%d,\t%d,\t%d,\t%d,\n', + * round((32767*10.^(-(0:63)'/20)).^2*16/2^7) */ + +static const int32_t kTargetLevelTable[64] = { + 134209536, 106606424, 84680493, 67264106, 53429779, 42440782, 33711911, + 26778323, 21270778, 16895980, 13420954, 10660642, 8468049, 6726411, + 5342978, 4244078, 3371191, 2677832, 2127078, 1689598, 1342095, + 1066064, 846805, 672641, 534298, 424408, 337119, 267783, + 212708, 168960, 134210, 106606, 84680, 67264, 53430, + 42441, 33712, 26778, 21271, 16896, 13421, 10661, + 8468, 6726, 5343, 4244, 3371, 2678, 2127, + 1690, 1342, 1066, 847, 673, 534, 424, + 337, 268, 213, 169, 134, 107, 85, + 67}; + +int WebRtcAgc_AddMic(void* state, + int16_t* const* in_mic, + size_t num_bands, + size_t samples) { + int32_t nrg, max_nrg, sample, tmp32; + int32_t* ptr; + uint16_t targetGainIdx, gain; + size_t i; + int16_t n, L, tmp16, tmp_speech[16]; + LegacyAgc* stt; + stt = (LegacyAgc*)state; + + if (stt->fs == 8000) { + L = 8; + if (samples != 80) { + return -1; + } + } else { + L = 16; + if (samples != 160) { + return -1; + } + } + + /* apply slowly varying digital gain */ + if (stt->micVol > stt->maxAnalog) { + /* |maxLevel| is strictly >= |micVol|, so this condition should be + * satisfied here, ensuring there is no divide-by-zero. 
*/ + RTC_DCHECK_GT(stt->maxLevel, stt->maxAnalog); + + /* Q1 */ + tmp16 = (int16_t)(stt->micVol - stt->maxAnalog); + tmp32 = (GAIN_TBL_LEN - 1) * tmp16; + tmp16 = (int16_t)(stt->maxLevel - stt->maxAnalog); + targetGainIdx = tmp32 / tmp16; + RTC_DCHECK_LT(targetGainIdx, GAIN_TBL_LEN); + + /* Increment through the table towards the target gain. + * If micVol drops below maxAnalog, we allow the gain + * to be dropped immediately. */ + if (stt->gainTableIdx < targetGainIdx) { + stt->gainTableIdx++; + } else if (stt->gainTableIdx > targetGainIdx) { + stt->gainTableIdx--; + } + + /* Q12 */ + gain = kGainTableAnalog[stt->gainTableIdx]; + + for (i = 0; i < samples; i++) { + size_t j; + for (j = 0; j < num_bands; ++j) { + sample = (in_mic[j][i] * gain) >> 12; + if (sample > 32767) { + in_mic[j][i] = 32767; + } else if (sample < -32768) { + in_mic[j][i] = -32768; + } else { + in_mic[j][i] = (int16_t)sample; + } + } + } + } else { + stt->gainTableIdx = 0; + } + + /* compute envelope */ + if (stt->inQueue > 0) { + ptr = stt->env[1]; + } else { + ptr = stt->env[0]; + } + + for (i = 0; i < kNumSubframes; i++) { + /* iterate over samples */ + max_nrg = 0; + for (n = 0; n < L; n++) { + nrg = in_mic[0][i * L + n] * in_mic[0][i * L + n]; + if (nrg > max_nrg) { + max_nrg = nrg; + } + } + ptr[i] = max_nrg; + } + + /* compute energy */ + if (stt->inQueue > 0) { + ptr = stt->Rxx16w32_array[1]; + } else { + ptr = stt->Rxx16w32_array[0]; + } + + for (i = 0; i < kNumSubframes / 2; i++) { + if (stt->fs == 16000) { + WebRtcSpl_DownsampleBy2(&in_mic[0][i * 32], 32, tmp_speech, + stt->filterState); + } else { + memcpy(tmp_speech, &in_mic[0][i * 16], 16 * sizeof(short)); + } + /* Compute energy in blocks of 16 samples */ + ptr[i] = WebRtcSpl_DotProductWithScale(tmp_speech, tmp_speech, 16, 4); + } + + /* update queue information */ + if (stt->inQueue == 0) { + stt->inQueue = 1; + } else { + stt->inQueue = 2; + } + + /* call VAD (use low band only) */ + WebRtcAgc_ProcessVad(&stt->vadMic, 
in_mic[0], samples); + + return 0; +} + +int WebRtcAgc_AddFarend(void* state, const int16_t* in_far, size_t samples) { + LegacyAgc* stt = (LegacyAgc*)state; + + int err = WebRtcAgc_GetAddFarendError(state, samples); + + if (err != 0) + return err; + + return WebRtcAgc_AddFarendToDigital(&stt->digitalAgc, in_far, samples); +} + +int WebRtcAgc_GetAddFarendError(void* state, size_t samples) { + LegacyAgc* stt; + stt = (LegacyAgc*)state; + + if (stt == NULL) + return -1; + + if (stt->fs == 8000) { + if (samples != 80) + return -1; + } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000) { + if (samples != 160) + return -1; + } else { + return -1; + } + + return 0; +} + +int WebRtcAgc_VirtualMic(void* agcInst, + int16_t* const* in_near, + size_t num_bands, + size_t samples, + int32_t micLevelIn, + int32_t* micLevelOut) { + int32_t tmpFlt, micLevelTmp, gainIdx; + uint16_t gain; + size_t ii, j; + LegacyAgc* stt; + + uint32_t nrg; + size_t sampleCntr; + uint32_t frameNrg = 0; + uint32_t frameNrgLimit = 5500; + int16_t numZeroCrossing = 0; + const int16_t kZeroCrossingLowLim = 15; + const int16_t kZeroCrossingHighLim = 20; + + stt = (LegacyAgc*)agcInst; + + /* + * Before applying gain decide if this is a low-level signal. + * The idea is that digital AGC will not adapt to low-level + * signals. 
+ */ + if (stt->fs != 8000) { + frameNrgLimit = frameNrgLimit << 1; + } + + frameNrg = (uint32_t)(in_near[0][0] * in_near[0][0]); + for (sampleCntr = 1; sampleCntr < samples; sampleCntr++) { + // increment frame energy if it is less than the limit + // the correct value of the energy is not important + if (frameNrg < frameNrgLimit) { + nrg = (uint32_t)(in_near[0][sampleCntr] * in_near[0][sampleCntr]); + frameNrg += nrg; + } + + // Count the zero crossings + numZeroCrossing += + ((in_near[0][sampleCntr] ^ in_near[0][sampleCntr - 1]) < 0); + } + + if ((frameNrg < 500) || (numZeroCrossing <= 5)) { + stt->lowLevelSignal = 1; + } else if (numZeroCrossing <= kZeroCrossingLowLim) { + stt->lowLevelSignal = 0; + } else if (frameNrg <= frameNrgLimit) { + stt->lowLevelSignal = 1; + } else if (numZeroCrossing >= kZeroCrossingHighLim) { + stt->lowLevelSignal = 1; + } else { + stt->lowLevelSignal = 0; + } + + micLevelTmp = micLevelIn << stt->scale; + /* Set desired level */ + gainIdx = stt->micVol; + if (stt->micVol > stt->maxAnalog) { + gainIdx = stt->maxAnalog; + } + if (micLevelTmp != stt->micRef) { + /* Something has happened with the physical level, restart. */ + stt->micRef = micLevelTmp; + stt->micVol = 127; + *micLevelOut = 127; + stt->micGainIdx = 127; + gainIdx = 127; + } + /* Pre-process the signal to emulate the microphone level. */ + /* Take one step at a time in the gain table. 
*/ + if (gainIdx > 127) { + gain = kGainTableVirtualMic[gainIdx - 128]; + } else { + gain = kSuppressionTableVirtualMic[127 - gainIdx]; + } + for (ii = 0; ii < samples; ii++) { + tmpFlt = (in_near[0][ii] * gain) >> 10; + if (tmpFlt > 32767) { + tmpFlt = 32767; + gainIdx--; + if (gainIdx >= 127) { + gain = kGainTableVirtualMic[gainIdx - 127]; + } else { + gain = kSuppressionTableVirtualMic[127 - gainIdx]; + } + } + if (tmpFlt < -32768) { + tmpFlt = -32768; + gainIdx--; + if (gainIdx >= 127) { + gain = kGainTableVirtualMic[gainIdx - 127]; + } else { + gain = kSuppressionTableVirtualMic[127 - gainIdx]; + } + } + in_near[0][ii] = (int16_t)tmpFlt; + for (j = 1; j < num_bands; ++j) { + tmpFlt = (in_near[j][ii] * gain) >> 10; + if (tmpFlt > 32767) { + tmpFlt = 32767; + } + if (tmpFlt < -32768) { + tmpFlt = -32768; + } + in_near[j][ii] = (int16_t)tmpFlt; + } + } + /* Set the level we (finally) used */ + stt->micGainIdx = gainIdx; + // *micLevelOut = stt->micGainIdx; + *micLevelOut = stt->micGainIdx >> stt->scale; + /* Add to Mic as if it was the output from a true microphone */ + if (WebRtcAgc_AddMic(agcInst, in_near, num_bands, samples) != 0) { + return -1; + } + return 0; +} + +void WebRtcAgc_UpdateAgcThresholds(LegacyAgc* stt) { + int16_t tmp16; +#ifdef MIC_LEVEL_FEEDBACK + int zeros; + + if (stt->micLvlSat) { + /* Lower the analog target level since we have reached its maximum */ + zeros = WebRtcSpl_NormW32(stt->Rxx160_LPw32); + stt->targetIdxOffset = (3 * zeros - stt->targetIdx - 2) / 4; + } +#endif + + /* Set analog target level in envelope dBOv scale */ + tmp16 = (DIFF_REF_TO_ANALOG * stt->compressionGaindB) + ANALOG_TARGET_LEVEL_2; + tmp16 = WebRtcSpl_DivW32W16ResW16((int32_t)tmp16, ANALOG_TARGET_LEVEL); + stt->analogTarget = DIGITAL_REF_AT_0_COMP_GAIN + tmp16; + if (stt->analogTarget < DIGITAL_REF_AT_0_COMP_GAIN) { + stt->analogTarget = DIGITAL_REF_AT_0_COMP_GAIN; + } + if (stt->agcMode == kAgcModeFixedDigital) { + /* Adjust for different parameter interpretation 
in FixedDigital mode */ + stt->analogTarget = stt->compressionGaindB; + } +#ifdef MIC_LEVEL_FEEDBACK + stt->analogTarget += stt->targetIdxOffset; +#endif + /* Since the offset between RMS and ENV is not constant, we should make this + * into a + * table, but for now, we'll stick with a constant, tuned for the chosen + * analog + * target level. + */ + stt->targetIdx = ANALOG_TARGET_LEVEL + OFFSET_ENV_TO_RMS; +#ifdef MIC_LEVEL_FEEDBACK + stt->targetIdx += stt->targetIdxOffset; +#endif + /* Analog adaptation limits */ + /* analogTargetLevel = round((32767*10^(-targetIdx/20))^2*16/2^7) */ + stt->analogTargetLevel = + RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx]; /* ex. -20 dBov */ + stt->startUpperLimit = + RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx - 1]; /* -19 dBov */ + stt->startLowerLimit = + RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx + 1]; /* -21 dBov */ + stt->upperPrimaryLimit = + RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx - 2]; /* -18 dBov */ + stt->lowerPrimaryLimit = + RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx + 2]; /* -22 dBov */ + stt->upperSecondaryLimit = + RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx - 5]; /* -15 dBov */ + stt->lowerSecondaryLimit = + RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx + 5]; /* -25 dBov */ + stt->upperLimit = stt->startUpperLimit; + stt->lowerLimit = stt->startLowerLimit; +} + +void WebRtcAgc_SaturationCtrl(LegacyAgc* stt, + uint8_t* saturated, + int32_t* env) { + int16_t i, tmpW16; + + /* Check if the signal is saturated */ + for (i = 0; i < 10; i++) { + tmpW16 = (int16_t)(env[i] >> 20); + if (tmpW16 > 875) { + stt->envSum += tmpW16; + } + } + + if (stt->envSum > 25000) { + *saturated = 1; + stt->envSum = 0; + } + + /* stt->envSum *= 0.99; */ + stt->envSum = (int16_t)((stt->envSum * 32440) >> 15); +} + +void WebRtcAgc_ZeroCtrl(LegacyAgc* stt, int32_t* inMicLevel, int32_t* env) { + int16_t i; + int64_t tmp = 0; + int32_t midVal; + + /* Is the input signal zero? 
*/ + for (i = 0; i < 10; i++) { + tmp += env[i]; + } + + /* Each block is allowed to have a few non-zero + * samples. + */ + if (tmp < 500) { + stt->msZero += 10; + } else { + stt->msZero = 0; + } + + if (stt->muteGuardMs > 0) { + stt->muteGuardMs -= 10; + } + + if (stt->msZero > 500) { + stt->msZero = 0; + + /* Increase microphone level only if it's less than 50% */ + midVal = (stt->maxAnalog + stt->minLevel + 1) / 2; + if (*inMicLevel < midVal) { + /* *inMicLevel *= 1.1; */ + *inMicLevel = (1126 * *inMicLevel) >> 10; + /* Reduces risk of a muted mic repeatedly triggering excessive levels due + * to zero signal detection. */ + *inMicLevel = WEBRTC_SPL_MIN(*inMicLevel, stt->zeroCtrlMax); + stt->micVol = *inMicLevel; + } + +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, + "\t\tAGC->zeroCntrl, frame %d: 500 ms under threshold," + " micVol: %d\n", + stt->fcount, stt->micVol); +#endif + + stt->activeSpeech = 0; + stt->Rxx16_LPw32Max = 0; + + /* The AGC has a tendency (due to problems with the VAD parameters), to + * vastly increase the volume after a muting event. This timer prevents + * upwards adaptation for a short period. */ + stt->muteGuardMs = kMuteGuardTimeMs; + } +} + +void WebRtcAgc_SpeakerInactiveCtrl(LegacyAgc* stt) { + /* Check if the near end speaker is inactive. + * If that is the case the VAD threshold is + * increased since the VAD speech model gets + * more sensitive to any sound after a long + * silence. 
+ */ + + int32_t tmp32; + int16_t vadThresh; + + if (stt->vadMic.stdLongTerm < 2500) { + stt->vadThreshold = 1500; + } else { + vadThresh = kNormalVadThreshold; + if (stt->vadMic.stdLongTerm < 4500) { + /* Scale between min and max threshold */ + vadThresh += (4500 - stt->vadMic.stdLongTerm) / 2; + } + + /* stt->vadThreshold = (31 * stt->vadThreshold + vadThresh) / 32; */ + tmp32 = vadThresh + 31 * stt->vadThreshold; + stt->vadThreshold = (int16_t)(tmp32 >> 5); + } +} + +void WebRtcAgc_ExpCurve(int16_t volume, int16_t* index) { + // volume in Q14 + // index in [0-7] + /* 8 different curves */ + if (volume > 5243) { + if (volume > 7864) { + if (volume > 12124) { + *index = 7; + } else { + *index = 6; + } + } else { + if (volume > 6554) { + *index = 5; + } else { + *index = 4; + } + } + } else { + if (volume > 2621) { + if (volume > 3932) { + *index = 3; + } else { + *index = 2; + } + } else { + if (volume > 1311) { + *index = 1; + } else { + *index = 0; + } + } + } +} + +int32_t WebRtcAgc_ProcessAnalog(void* state, + int32_t inMicLevel, + int32_t* outMicLevel, + int16_t vadLogRatio, + int16_t echo, + uint8_t* saturationWarning) { + uint32_t tmpU32; + int32_t Rxx16w32, tmp32; + int32_t inMicLevelTmp, lastMicVol; + int16_t i; + uint8_t saturated = 0; + LegacyAgc* stt; + + stt = (LegacyAgc*)state; + inMicLevelTmp = inMicLevel << stt->scale; + + if (inMicLevelTmp > stt->maxAnalog) { +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, "\tAGC->ProcessAnalog, frame %d: micLvl > maxAnalog\n", + stt->fcount); +#endif + return -1; + } else if (inMicLevelTmp < stt->minLevel) { +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel\n", + stt->fcount); +#endif + return -1; + } + + if (stt->firstCall == 0) { + int32_t tmpVol; + stt->firstCall = 1; + tmp32 = ((stt->maxLevel - stt->minLevel) * 51) >> 9; + tmpVol = (stt->minLevel + tmp32); + + /* If the mic level is very low at start, increase it! 
*/ + if ((inMicLevelTmp < tmpVol) && (stt->agcMode == kAgcModeAdaptiveAnalog)) { + inMicLevelTmp = tmpVol; + } + stt->micVol = inMicLevelTmp; + } + + /* Set the mic level to the previous output value if there is digital input + * gain */ + if ((inMicLevelTmp == stt->maxAnalog) && (stt->micVol > stt->maxAnalog)) { + inMicLevelTmp = stt->micVol; + } + + /* If the mic level was manually changed to a very low value raise it! */ + if ((inMicLevelTmp != stt->micVol) && (inMicLevelTmp < stt->minOutput)) { + tmp32 = ((stt->maxLevel - stt->minLevel) * 51) >> 9; + inMicLevelTmp = (stt->minLevel + tmp32); + stt->micVol = inMicLevelTmp; +#ifdef MIC_LEVEL_FEEDBACK +// stt->numBlocksMicLvlSat = 0; +#endif +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, + "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel by manual" + " decrease, raise vol\n", + stt->fcount); +#endif + } + + if (inMicLevelTmp != stt->micVol) { + if (inMicLevel == stt->lastInMicLevel) { + // We requested a volume adjustment, but it didn't occur. This is + // probably due to a coarse quantization of the volume slider. + // Restore the requested value to prevent getting stuck. + inMicLevelTmp = stt->micVol; + } else { + // As long as the value changed, update to match. + stt->micVol = inMicLevelTmp; + } + } + + if (inMicLevelTmp > stt->maxLevel) { + // Always allow the user to raise the volume above the maxLevel. + stt->maxLevel = inMicLevelTmp; + } + + // Store last value here, after we've taken care of manual updates etc. + stt->lastInMicLevel = inMicLevel; + lastMicVol = stt->micVol; + + /* Checks if the signal is saturated. Also a check if individual samples + * are larger than 12000 is done. 
If they are the counter for increasing + * the volume level is set to -100ms + */ + WebRtcAgc_SaturationCtrl(stt, &saturated, stt->env[0]); + + /* The AGC is always allowed to lower the level if the signal is saturated */ + if (saturated == 1) { + /* Lower the recording level + * Rxx160_LP is adjusted down because it is so slow it could + * cause the AGC to make wrong decisions. */ + /* stt->Rxx160_LPw32 *= 0.875; */ + stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 8) * 7; + + stt->zeroCtrlMax = stt->micVol; + + /* stt->micVol *= 0.903; */ + tmp32 = inMicLevelTmp - stt->minLevel; + tmpU32 = WEBRTC_SPL_UMUL(29591, (uint32_t)(tmp32)); + stt->micVol = (tmpU32 >> 15) + stt->minLevel; + if (stt->micVol > lastMicVol - 2) { + stt->micVol = lastMicVol - 2; + } + inMicLevelTmp = stt->micVol; + +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, + "\tAGC->ProcessAnalog, frame %d: saturated, micVol = %d\n", + stt->fcount, stt->micVol); +#endif + + if (stt->micVol < stt->minOutput) { + *saturationWarning = 1; + } + + /* Reset counter for decrease of volume level to avoid + * decreasing too much. The saturation control can still + * lower the level if needed. */ + stt->msTooHigh = -100; + + /* Enable the control mechanism to ensure that our measure, + * Rxx160_LP, is in the correct range. This must be done since + * the measure is very slow. */ + stt->activeSpeech = 0; + stt->Rxx16_LPw32Max = 0; + + /* Reset to initial values */ + stt->msecSpeechInnerChange = kMsecSpeechInner; + stt->msecSpeechOuterChange = kMsecSpeechOuter; + stt->changeToSlowMode = 0; + + stt->muteGuardMs = 0; + + stt->upperLimit = stt->startUpperLimit; + stt->lowerLimit = stt->startLowerLimit; +#ifdef MIC_LEVEL_FEEDBACK +// stt->numBlocksMicLvlSat = 0; +#endif + } + + /* Check if the input speech is zero. If so the mic volume + * is increased. On some computers the input is zero up as high + * level as 17% */ + WebRtcAgc_ZeroCtrl(stt, &inMicLevelTmp, stt->env[0]); + + /* Check if the near end speaker is inactive. 
+ * If that is the case the VAD threshold is + * increased since the VAD speech model gets + * more sensitive to any sound after a long + * silence. + */ + WebRtcAgc_SpeakerInactiveCtrl(stt); + + for (i = 0; i < 5; i++) { + /* Computed on blocks of 16 samples */ + + Rxx16w32 = stt->Rxx16w32_array[0][i]; + + /* Rxx160w32 in Q(-7) */ + tmp32 = (Rxx16w32 - stt->Rxx16_vectorw32[stt->Rxx16pos]) >> 3; + stt->Rxx160w32 = stt->Rxx160w32 + tmp32; + stt->Rxx16_vectorw32[stt->Rxx16pos] = Rxx16w32; + + /* Circular buffer */ + stt->Rxx16pos++; + if (stt->Rxx16pos == RXX_BUFFER_LEN) { + stt->Rxx16pos = 0; + } + + /* Rxx16_LPw32 in Q(-4) */ + tmp32 = (Rxx16w32 - stt->Rxx16_LPw32) >> kAlphaShortTerm; + stt->Rxx16_LPw32 = (stt->Rxx16_LPw32) + tmp32; + + if (vadLogRatio > stt->vadThreshold) { + /* Speech detected! */ + + /* Check if Rxx160_LP is in the correct range. If + * it is too high/low then we set it to the maximum of + * Rxx16_LPw32 during the first 200ms of speech. + */ + if (stt->activeSpeech < 250) { + stt->activeSpeech += 2; + + if (stt->Rxx16_LPw32 > stt->Rxx16_LPw32Max) { + stt->Rxx16_LPw32Max = stt->Rxx16_LPw32; + } + } else if (stt->activeSpeech == 250) { + stt->activeSpeech += 2; + tmp32 = stt->Rxx16_LPw32Max >> 3; + stt->Rxx160_LPw32 = tmp32 * RXX_BUFFER_LEN; + } + + tmp32 = (stt->Rxx160w32 - stt->Rxx160_LPw32) >> kAlphaLongTerm; + stt->Rxx160_LPw32 = stt->Rxx160_LPw32 + tmp32; + + if (stt->Rxx160_LPw32 > stt->upperSecondaryLimit) { + stt->msTooHigh += 2; + stt->msTooLow = 0; + stt->changeToSlowMode = 0; + + if (stt->msTooHigh > stt->msecSpeechOuterChange) { + stt->msTooHigh = 0; + + /* Lower the recording level */ + /* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */ + tmp32 = stt->Rxx160_LPw32 >> 6; + stt->Rxx160_LPw32 = tmp32 * 53; + + /* Reduce the max gain to avoid excessive oscillation + * (but never drop below the maximum analog level). 
+ */ + stt->maxLevel = (15 * stt->maxLevel + stt->micVol) / 16; + stt->maxLevel = WEBRTC_SPL_MAX(stt->maxLevel, stt->maxAnalog); + + stt->zeroCtrlMax = stt->micVol; + + /* 0.95 in Q15 */ + tmp32 = inMicLevelTmp - stt->minLevel; + tmpU32 = WEBRTC_SPL_UMUL(31130, (uint32_t)(tmp32)); + stt->micVol = (tmpU32 >> 15) + stt->minLevel; + if (stt->micVol > lastMicVol - 1) { + stt->micVol = lastMicVol - 1; + } + inMicLevelTmp = stt->micVol; + + /* Enable the control mechanism to ensure that our measure, + * Rxx160_LP, is in the correct range. + */ + stt->activeSpeech = 0; + stt->Rxx16_LPw32Max = 0; +#ifdef MIC_LEVEL_FEEDBACK +// stt->numBlocksMicLvlSat = 0; +#endif +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, + "\tAGC->ProcessAnalog, frame %d: measure >" + " 2ndUpperLim, micVol = %d, maxLevel = %d\n", + stt->fcount, stt->micVol, stt->maxLevel); +#endif + } + } else if (stt->Rxx160_LPw32 > stt->upperLimit) { + stt->msTooHigh += 2; + stt->msTooLow = 0; + stt->changeToSlowMode = 0; + + if (stt->msTooHigh > stt->msecSpeechInnerChange) { + /* Lower the recording level */ + stt->msTooHigh = 0; + /* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */ + stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 53; + + /* Reduce the max gain to avoid excessive oscillation + * (but never drop below the maximum analog level). 
+ */ + stt->maxLevel = (15 * stt->maxLevel + stt->micVol) / 16; + stt->maxLevel = WEBRTC_SPL_MAX(stt->maxLevel, stt->maxAnalog); + + stt->zeroCtrlMax = stt->micVol; + + /* 0.965 in Q15 */ + tmp32 = inMicLevelTmp - stt->minLevel; + tmpU32 = + WEBRTC_SPL_UMUL(31621, (uint32_t)(inMicLevelTmp - stt->minLevel)); + stt->micVol = (tmpU32 >> 15) + stt->minLevel; + if (stt->micVol > lastMicVol - 1) { + stt->micVol = lastMicVol - 1; + } + inMicLevelTmp = stt->micVol; + +#ifdef MIC_LEVEL_FEEDBACK +// stt->numBlocksMicLvlSat = 0; +#endif +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, + "\tAGC->ProcessAnalog, frame %d: measure >" + " UpperLim, micVol = %d, maxLevel = %d\n", + stt->fcount, stt->micVol, stt->maxLevel); +#endif + } + } else if (stt->Rxx160_LPw32 < stt->lowerSecondaryLimit) { + stt->msTooHigh = 0; + stt->changeToSlowMode = 0; + stt->msTooLow += 2; + + if (stt->msTooLow > stt->msecSpeechOuterChange) { + /* Raise the recording level */ + int16_t index, weightFIX; + int16_t volNormFIX = 16384; // =1 in Q14. 
+ + stt->msTooLow = 0; + + /* Normalize the volume level */ + tmp32 = (inMicLevelTmp - stt->minLevel) << 14; + if (stt->maxInit != stt->minLevel) { + volNormFIX = tmp32 / (stt->maxInit - stt->minLevel); + } + + /* Find correct curve */ + WebRtcAgc_ExpCurve(volNormFIX, &index); + + /* Compute weighting factor for the volume increase, 32^(-2*X)/2+1.05 + */ + weightFIX = + kOffset1[index] - (int16_t)((kSlope1[index] * volNormFIX) >> 13); + + /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */ + stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 67; + + tmp32 = inMicLevelTmp - stt->minLevel; + tmpU32 = + ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel)); + stt->micVol = (tmpU32 >> 14) + stt->minLevel; + if (stt->micVol < lastMicVol + 2) { + stt->micVol = lastMicVol + 2; + } + + inMicLevelTmp = stt->micVol; + +#ifdef MIC_LEVEL_FEEDBACK + /* Count ms in level saturation */ + // if (stt->micVol > stt->maxAnalog) { + if (stt->micVol > 150) { + /* mic level is saturated */ + stt->numBlocksMicLvlSat++; + fprintf(stderr, "Sat mic Level: %d\n", stt->numBlocksMicLvlSat); + } +#endif +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, + "\tAGC->ProcessAnalog, frame %d: measure <" + " 2ndLowerLim, micVol = %d\n", + stt->fcount, stt->micVol); +#endif + } + } else if (stt->Rxx160_LPw32 < stt->lowerLimit) { + stt->msTooHigh = 0; + stt->changeToSlowMode = 0; + stt->msTooLow += 2; + + if (stt->msTooLow > stt->msecSpeechInnerChange) { + /* Raise the recording level */ + int16_t index, weightFIX; + int16_t volNormFIX = 16384; // =1 in Q14. 
+ + stt->msTooLow = 0; + + /* Normalize the volume level */ + tmp32 = (inMicLevelTmp - stt->minLevel) << 14; + if (stt->maxInit != stt->minLevel) { + volNormFIX = tmp32 / (stt->maxInit - stt->minLevel); + } + + /* Find correct curve */ + WebRtcAgc_ExpCurve(volNormFIX, &index); + + /* Compute weighting factor for the volume increase, (3.^(-2.*X))/8+1 + */ + weightFIX = + kOffset2[index] - (int16_t)((kSlope2[index] * volNormFIX) >> 13); + + /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */ + stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 67; + + tmp32 = inMicLevelTmp - stt->minLevel; + tmpU32 = + ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel)); + stt->micVol = (tmpU32 >> 14) + stt->minLevel; + if (stt->micVol < lastMicVol + 1) { + stt->micVol = lastMicVol + 1; + } + + inMicLevelTmp = stt->micVol; + +#ifdef MIC_LEVEL_FEEDBACK + /* Count ms in level saturation */ + // if (stt->micVol > stt->maxAnalog) { + if (stt->micVol > 150) { + /* mic level is saturated */ + stt->numBlocksMicLvlSat++; + fprintf(stderr, "Sat mic Level: %d\n", stt->numBlocksMicLvlSat); + } +#endif +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, + "\tAGC->ProcessAnalog, frame %d: measure < LowerLim, micVol " + "= %d\n", + stt->fcount, stt->micVol); +#endif + } + } else { + /* The signal is inside the desired range which is: + * lowerLimit < Rxx160_LP/640 < upperLimit + */ + if (stt->changeToSlowMode > 4000) { + stt->msecSpeechInnerChange = 1000; + stt->msecSpeechOuterChange = 500; + stt->upperLimit = stt->upperPrimaryLimit; + stt->lowerLimit = stt->lowerPrimaryLimit; + } else { + stt->changeToSlowMode += 2; // in milliseconds + } + stt->msTooLow = 0; + stt->msTooHigh = 0; + + stt->micVol = inMicLevelTmp; + } +#ifdef MIC_LEVEL_FEEDBACK + if (stt->numBlocksMicLvlSat > NUM_BLOCKS_IN_SAT_BEFORE_CHANGE_TARGET) { + stt->micLvlSat = 1; + fprintf(stderr, "target before = %d (%d)\n", stt->analogTargetLevel, + stt->targetIdx); + WebRtcAgc_UpdateAgcThresholds(stt); + WebRtcAgc_CalculateGainTable( + 
&(stt->digitalAgc.gainTable[0]), stt->compressionGaindB, + stt->targetLevelDbfs, stt->limiterEnable, stt->analogTarget); + stt->numBlocksMicLvlSat = 0; + stt->micLvlSat = 0; + fprintf(stderr, "target offset = %d\n", stt->targetIdxOffset); + fprintf(stderr, "target after = %d (%d)\n", stt->analogTargetLevel, + stt->targetIdx); + } +#endif + } + } + + /* Ensure gain is not increased in presence of echo or after a mute event + * (but allow the zeroCtrl() increase on the frame of a mute detection). + */ + if (echo == 1 || + (stt->muteGuardMs > 0 && stt->muteGuardMs < kMuteGuardTimeMs)) { + if (stt->micVol > lastMicVol) { + stt->micVol = lastMicVol; + } + } + + /* limit the gain */ + if (stt->micVol > stt->maxLevel) { + stt->micVol = stt->maxLevel; + } else if (stt->micVol < stt->minOutput) { + stt->micVol = stt->minOutput; + } + + *outMicLevel = WEBRTC_SPL_MIN(stt->micVol, stt->maxAnalog) >> stt->scale; + + return 0; +} + +int WebRtcAgc_Process(void* agcInst, + const int16_t* const* in_near, + size_t num_bands, + size_t samples, + int16_t* const* out, + int32_t inMicLevel, + int32_t* outMicLevel, + int16_t echo, + uint8_t* saturationWarning) { + LegacyAgc* stt; + + stt = (LegacyAgc*)agcInst; + + // + if (stt == NULL) { + return -1; + } + // + + if (stt->fs == 8000) { + if (samples != 80) { + return -1; + } + } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000) { + if (samples != 160) { + return -1; + } + } else { + return -1; + } + + *saturationWarning = 0; + // TODO(minyue): PUT IN RANGE CHECKING FOR INPUT LEVELS + *outMicLevel = inMicLevel; + +#ifdef WEBRTC_AGC_DEBUG_DUMP + stt->fcount++; +#endif + + if (WebRtcAgc_ProcessDigital(&stt->digitalAgc, in_near, num_bands, out, + stt->fs, stt->lowLevelSignal) == -1) { +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, "AGC->Process, frame %d: Error from DigAGC\n\n", + stt->fcount); +#endif + return -1; + } + if (stt->agcMode < kAgcModeFixedDigital && + (stt->lowLevelSignal == 0 || stt->agcMode != 
kAgcModeAdaptiveDigital)) { + if (WebRtcAgc_ProcessAnalog(agcInst, inMicLevel, outMicLevel, + stt->vadMic.logRatio, echo, + saturationWarning) == -1) { + return -1; + } + } +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->agcLog, "%5d\t%d\t%d\t%d\t%d\n", stt->fcount, inMicLevel, + *outMicLevel, stt->maxLevel, stt->micVol); +#endif + + /* update queue */ + if (stt->inQueue > 1) { + memcpy(stt->env[0], stt->env[1], 10 * sizeof(int32_t)); + memcpy(stt->Rxx16w32_array[0], stt->Rxx16w32_array[1], 5 * sizeof(int32_t)); + } + + if (stt->inQueue > 0) { + stt->inQueue--; + } + + return 0; +} + +int WebRtcAgc_set_config(void* agcInst, WebRtcAgcConfig agcConfig) { + LegacyAgc* stt; + stt = (LegacyAgc*)agcInst; + + if (stt == NULL) { + return -1; + } + + if (stt->initFlag != kInitCheck) { + stt->lastError = AGC_UNINITIALIZED_ERROR; + return -1; + } + + if (agcConfig.limiterEnable != kAgcFalse && + agcConfig.limiterEnable != kAgcTrue) { + stt->lastError = AGC_BAD_PARAMETER_ERROR; + return -1; + } + stt->limiterEnable = agcConfig.limiterEnable; + stt->compressionGaindB = agcConfig.compressionGaindB; + if ((agcConfig.targetLevelDbfs < 0) || (agcConfig.targetLevelDbfs > 31)) { + stt->lastError = AGC_BAD_PARAMETER_ERROR; + return -1; + } + stt->targetLevelDbfs = agcConfig.targetLevelDbfs; + + if (stt->agcMode == kAgcModeFixedDigital) { + /* Adjust for different parameter interpretation in FixedDigital mode */ + stt->compressionGaindB += agcConfig.targetLevelDbfs; + } + + /* Update threshold levels for analog adaptation */ + WebRtcAgc_UpdateAgcThresholds(stt); + + /* Recalculate gain table */ + if (WebRtcAgc_CalculateGainTable( + &(stt->digitalAgc.gainTable[0]), stt->compressionGaindB, + stt->targetLevelDbfs, stt->limiterEnable, stt->analogTarget) == -1) { +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, "AGC->set_config, frame %d: Error from calcGainTable\n\n", + stt->fcount); +#endif + return -1; + } + /* Store the config in a WebRtcAgcConfig */ + stt->usedConfig.compressionGaindB = 
agcConfig.compressionGaindB; + stt->usedConfig.limiterEnable = agcConfig.limiterEnable; + stt->usedConfig.targetLevelDbfs = agcConfig.targetLevelDbfs; + + return 0; +} + +int WebRtcAgc_get_config(void* agcInst, WebRtcAgcConfig* config) { + LegacyAgc* stt; + stt = (LegacyAgc*)agcInst; + + if (stt == NULL) { + return -1; + } + + if (config == NULL) { + stt->lastError = AGC_NULL_POINTER_ERROR; + return -1; + } + + if (stt->initFlag != kInitCheck) { + stt->lastError = AGC_UNINITIALIZED_ERROR; + return -1; + } + + config->limiterEnable = stt->usedConfig.limiterEnable; + config->targetLevelDbfs = stt->usedConfig.targetLevelDbfs; + config->compressionGaindB = stt->usedConfig.compressionGaindB; + + return 0; +} + +void* WebRtcAgc_Create() { + LegacyAgc* stt = malloc(sizeof(LegacyAgc)); + +#ifdef WEBRTC_AGC_DEBUG_DUMP + stt->fpt = fopen("./agc_test_log.txt", "wt"); + stt->agcLog = fopen("./agc_debug_log.txt", "wt"); + stt->digitalAgc.logFile = fopen("./agc_log.txt", "wt"); +#endif + + stt->initFlag = 0; + stt->lastError = 0; + + return stt; +} + +void WebRtcAgc_Free(void* state) { + LegacyAgc* stt; + + stt = (LegacyAgc*)state; +#ifdef WEBRTC_AGC_DEBUG_DUMP + fclose(stt->fpt); + fclose(stt->agcLog); + fclose(stt->digitalAgc.logFile); +#endif + free(stt); +} + +/* minLevel - Minimum volume level + * maxLevel - Maximum volume level + */ +int WebRtcAgc_Init(void* agcInst, + int32_t minLevel, + int32_t maxLevel, + int16_t agcMode, + uint32_t fs) { + int32_t max_add, tmp32; + int16_t i; + int tmpNorm; + LegacyAgc* stt; + + /* typecast state pointer */ + stt = (LegacyAgc*)agcInst; + + if (WebRtcAgc_InitDigital(&stt->digitalAgc, agcMode) != 0) { + stt->lastError = AGC_UNINITIALIZED_ERROR; + return -1; + } + + /* Analog AGC variables */ + stt->envSum = 0; + +/* mode = 0 - Only saturation protection + * 1 - Analog Automatic Gain Control [-targetLevelDbfs (default -3 + * dBOv)] + * 2 - Digital Automatic Gain Control [-targetLevelDbfs (default -3 + * dBOv)] + * 3 - Fixed Digital Gain 
[compressionGaindB (default 8 dB)] + */ +#ifdef WEBRTC_AGC_DEBUG_DUMP + stt->fcount = 0; + fprintf(stt->fpt, "AGC->Init\n"); +#endif + if (agcMode < kAgcModeUnchanged || agcMode > kAgcModeFixedDigital) { +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, "AGC->Init: error, incorrect mode\n\n"); +#endif + return -1; + } + stt->agcMode = agcMode; + stt->fs = fs; + + /* initialize input VAD */ + WebRtcAgc_InitVad(&stt->vadMic); + + /* If the volume range is smaller than 0-256 then + * the levels are shifted up to Q8-domain */ + tmpNorm = WebRtcSpl_NormU32((uint32_t)maxLevel); + stt->scale = tmpNorm - 23; + if (stt->scale < 0) { + stt->scale = 0; + } + // TODO(bjornv): Investigate if we really need to scale up a small range now + // when we have + // a guard against zero-increments. For now, we do not support scale up (scale + // = 0). + stt->scale = 0; + maxLevel <<= stt->scale; + minLevel <<= stt->scale; + + /* Make minLevel and maxLevel static in AdaptiveDigital */ + if (stt->agcMode == kAgcModeAdaptiveDigital) { + minLevel = 0; + maxLevel = 255; + stt->scale = 0; + } + /* The maximum supplemental volume range is based on a vague idea + * of how much lower the gain will be than the real analog gain. 
*/ + max_add = (maxLevel - minLevel) / 4; + + /* Minimum/maximum volume level that can be set */ + stt->minLevel = minLevel; + stt->maxAnalog = maxLevel; + stt->maxLevel = maxLevel + max_add; + stt->maxInit = stt->maxLevel; + + stt->zeroCtrlMax = stt->maxAnalog; + stt->lastInMicLevel = 0; + + /* Initialize micVol parameter */ + stt->micVol = stt->maxAnalog; + if (stt->agcMode == kAgcModeAdaptiveDigital) { + stt->micVol = 127; /* Mid-point of mic level */ + } + stt->micRef = stt->micVol; + stt->micGainIdx = 127; +#ifdef MIC_LEVEL_FEEDBACK + stt->numBlocksMicLvlSat = 0; + stt->micLvlSat = 0; +#endif +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, "AGC->Init: minLevel = %d, maxAnalog = %d, maxLevel = %d\n", + stt->minLevel, stt->maxAnalog, stt->maxLevel); +#endif + + /* Minimum output volume is 4% higher than the available lowest volume level + */ + tmp32 = ((stt->maxLevel - stt->minLevel) * 10) >> 8; + stt->minOutput = (stt->minLevel + tmp32); + + stt->msTooLow = 0; + stt->msTooHigh = 0; + stt->changeToSlowMode = 0; + stt->firstCall = 0; + stt->msZero = 0; + stt->muteGuardMs = 0; + stt->gainTableIdx = 0; + + stt->msecSpeechInnerChange = kMsecSpeechInner; + stt->msecSpeechOuterChange = kMsecSpeechOuter; + + stt->activeSpeech = 0; + stt->Rxx16_LPw32Max = 0; + + stt->vadThreshold = kNormalVadThreshold; + stt->inActive = 0; + + for (i = 0; i < RXX_BUFFER_LEN; i++) { + stt->Rxx16_vectorw32[i] = (int32_t)1000; /* -54dBm0 */ + } + stt->Rxx160w32 = + 125 * RXX_BUFFER_LEN; /* (stt->Rxx16_vectorw32[0]>>3) = 125 */ + + stt->Rxx16pos = 0; + stt->Rxx16_LPw32 = (int32_t)16284; /* Q(-4) */ + + for (i = 0; i < 5; i++) { + stt->Rxx16w32_array[0][i] = 0; + } + for (i = 0; i < 10; i++) { + stt->env[0][i] = 0; + stt->env[1][i] = 0; + } + stt->inQueue = 0; + +#ifdef MIC_LEVEL_FEEDBACK + stt->targetIdxOffset = 0; +#endif + + WebRtcSpl_MemSetW32(stt->filterState, 0, 8); + + stt->initFlag = kInitCheck; + // Default config settings. 
+ stt->defaultConfig.limiterEnable = kAgcTrue; + stt->defaultConfig.targetLevelDbfs = AGC_DEFAULT_TARGET_LEVEL; + stt->defaultConfig.compressionGaindB = AGC_DEFAULT_COMP_GAIN; + + if (WebRtcAgc_set_config(stt, stt->defaultConfig) == -1) { + stt->lastError = AGC_UNSPECIFIED_ERROR; + return -1; + } + stt->Rxx160_LPw32 = stt->analogTargetLevel; // Initialize rms value + + stt->lowLevelSignal = 0; + + /* Only positive values are allowed that are not too large */ + if ((minLevel >= maxLevel) || (maxLevel & 0xFC000000)) { +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, "minLevel, maxLevel value(s) are invalid\n\n"); +#endif + return -1; + } else { +#ifdef WEBRTC_AGC_DEBUG_DUMP + fprintf(stt->fpt, "\n"); +#endif + return 0; + } +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.h new file mode 100644 index 000000000..235fd1430 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/analog_agc.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_ + +//#define MIC_LEVEL_FEEDBACK +#ifdef WEBRTC_AGC_DEBUG_DUMP +#include +#endif + +#include "webrtc/modules/audio_processing/agc/legacy/digital_agc.h" +#include "webrtc/modules/audio_processing/agc/legacy/gain_control.h" +#include "webrtc/typedefs.h" + +/* Analog Automatic Gain Control variables: + * Constant declarations (inner limits inside which no changes are done) + * In the beginning the range is narrower to widen as soon as the measure + * 'Rxx160_LP' is inside it. Currently the starting limits are -22.2+/-1dBm0 + * and the final limits -22.2+/-2.5dBm0. These levels makes the speech signal + * go towards -25.4dBm0 (-31.4dBov). Tuned with wbfile-31.4dBov.pcm + * The limits are created by running the AGC with a file having the desired + * signal level and thereafter plotting Rxx160_LP in the dBm0-domain defined + * by out=10*log10(in/260537279.7); Set the target level to the average level + * of our measure Rxx160_LP. Remember that the levels are in blocks of 16 in + * Q(-7). 
(Example matlab code: round(db2pow(-21.2)*16/2^7) ) + */ +#define RXX_BUFFER_LEN 10 + +static const int16_t kMsecSpeechInner = 520; +static const int16_t kMsecSpeechOuter = 340; + +static const int16_t kNormalVadThreshold = 400; + +static const int16_t kAlphaShortTerm = 6; // 1 >> 6 = 0.0156 +static const int16_t kAlphaLongTerm = 10; // 1 >> 10 = 0.000977 + +typedef struct { + // Configurable parameters/variables + uint32_t fs; // Sampling frequency + int16_t compressionGaindB; // Fixed gain level in dB + int16_t targetLevelDbfs; // Target level in -dBfs of envelope (default -3) + int16_t agcMode; // Hard coded mode (adaptAna/adaptDig/fixedDig) + uint8_t limiterEnable; // Enabling limiter (on/off (default off)) + WebRtcAgcConfig defaultConfig; + WebRtcAgcConfig usedConfig; + + // General variables + int16_t initFlag; + int16_t lastError; + + // Target level parameters + // Based on the above: analogTargetLevel = round((32767*10^(-22/20))^2*16/2^7) + int32_t analogTargetLevel; // = RXX_BUFFER_LEN * 846805; -22 dBfs + int32_t startUpperLimit; // = RXX_BUFFER_LEN * 1066064; -21 dBfs + int32_t startLowerLimit; // = RXX_BUFFER_LEN * 672641; -23 dBfs + int32_t upperPrimaryLimit; // = RXX_BUFFER_LEN * 1342095; -20 dBfs + int32_t lowerPrimaryLimit; // = RXX_BUFFER_LEN * 534298; -24 dBfs + int32_t upperSecondaryLimit; // = RXX_BUFFER_LEN * 2677832; -17 dBfs + int32_t lowerSecondaryLimit; // = RXX_BUFFER_LEN * 267783; -27 dBfs + uint16_t targetIdx; // Table index for corresponding target level +#ifdef MIC_LEVEL_FEEDBACK + uint16_t targetIdxOffset; // Table index offset for level compensation +#endif + int16_t analogTarget; // Digital reference level in ENV scale + + // Analog AGC specific variables + int32_t filterState[8]; // For downsampling wb to nb + int32_t upperLimit; // Upper limit for mic energy + int32_t lowerLimit; // Lower limit for mic energy + int32_t Rxx160w32; // Average energy for one frame + int32_t Rxx16_LPw32; // Low pass filtered subframe energies + 
int32_t Rxx160_LPw32; // Low pass filtered frame energies + int32_t Rxx16_LPw32Max; // Keeps track of largest energy subframe + int32_t Rxx16_vectorw32[RXX_BUFFER_LEN]; // Array with subframe energies + int32_t Rxx16w32_array[2][5]; // Energy values of microphone signal + int32_t env[2][10]; // Envelope values of subframes + + int16_t Rxx16pos; // Current position in the Rxx16_vectorw32 + int16_t envSum; // Filtered scaled envelope in subframes + int16_t vadThreshold; // Threshold for VAD decision + int16_t inActive; // Inactive time in milliseconds + int16_t msTooLow; // Milliseconds of speech at a too low level + int16_t msTooHigh; // Milliseconds of speech at a too high level + int16_t changeToSlowMode; // Change to slow mode after some time at target + int16_t firstCall; // First call to the process-function + int16_t msZero; // Milliseconds of zero input + int16_t msecSpeechOuterChange; // Min ms of speech between volume changes + int16_t msecSpeechInnerChange; // Min ms of speech between volume changes + int16_t activeSpeech; // Milliseconds of active speech + int16_t muteGuardMs; // Counter to prevent mute action + int16_t inQueue; // 10 ms batch indicator + + // Microphone level variables + int32_t micRef; // Remember ref. 
mic level for virtual mic + uint16_t gainTableIdx; // Current position in virtual gain table + int32_t micGainIdx; // Gain index of mic level to increase slowly + int32_t micVol; // Remember volume between frames + int32_t maxLevel; // Max possible vol level, incl dig gain + int32_t maxAnalog; // Maximum possible analog volume level + int32_t maxInit; // Initial value of "max" + int32_t minLevel; // Minimum possible volume level + int32_t minOutput; // Minimum output volume level + int32_t zeroCtrlMax; // Remember max gain => don't amp low input + int32_t lastInMicLevel; + + int16_t scale; // Scale factor for internal volume levels +#ifdef MIC_LEVEL_FEEDBACK + int16_t numBlocksMicLvlSat; + uint8_t micLvlSat; +#endif + // Structs for VAD and digital_agc + AgcVad vadMic; + DigitalAgc digitalAgc; + +#ifdef WEBRTC_AGC_DEBUG_DUMP + FILE* fpt; + FILE* agcLog; + int32_t fcount; +#endif + + int16_t lowLevelSignal; +} LegacyAgc; + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.c new file mode 100644 index 000000000..dd24845cf --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.c @@ -0,0 +1,688 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +/* digital_agc.c + * + */ + +#include "webrtc/modules/audio_processing/agc/legacy/digital_agc.h" + +#include +#ifdef WEBRTC_AGC_DEBUG_DUMP +#include +#endif + +#include "webrtc/base/checks.h" +#include "webrtc/modules/audio_processing/agc/legacy/gain_control.h" + +// To generate the gaintable, copy&paste the following lines to a Matlab window: +// MaxGain = 6; MinGain = 0; CompRatio = 3; Knee = 1; +// zeros = 0:31; lvl = 2.^(1-zeros); +// A = -10*log10(lvl) * (CompRatio - 1) / CompRatio; +// B = MaxGain - MinGain; +// gains = round(2^16*10.^(0.05 * (MinGain + B * ( +// log(exp(-Knee*A)+exp(-Knee*B)) - log(1+exp(-Knee*B)) ) / +// log(1/(1+exp(Knee*B)))))); +// fprintf(1, '\t%i, %i, %i, %i,\n', gains); +// % Matlab code for plotting the gain and input/output level characteristic +// (copy/paste the following 3 lines): +// in = 10*log10(lvl); out = 20*log10(gains/65536); +// subplot(121); plot(in, out); axis([-30, 0, -5, 20]); grid on; xlabel('Input +// (dB)'); ylabel('Gain (dB)'); +// subplot(122); plot(in, in+out); axis([-30, 0, -30, 5]); grid on; +// xlabel('Input (dB)'); ylabel('Output (dB)'); +// zoom on; + +// Generator table for y=log2(1+e^x) in Q8. 
+enum { kGenFuncTableSize = 128 }; +static const uint16_t kGenFuncTable[kGenFuncTableSize] = { + 256, 485, 786, 1126, 1484, 1849, 2217, 2586, 2955, 3324, 3693, + 4063, 4432, 4801, 5171, 5540, 5909, 6279, 6648, 7017, 7387, 7756, + 8125, 8495, 8864, 9233, 9603, 9972, 10341, 10711, 11080, 11449, 11819, + 12188, 12557, 12927, 13296, 13665, 14035, 14404, 14773, 15143, 15512, 15881, + 16251, 16620, 16989, 17359, 17728, 18097, 18466, 18836, 19205, 19574, 19944, + 20313, 20682, 21052, 21421, 21790, 22160, 22529, 22898, 23268, 23637, 24006, + 24376, 24745, 25114, 25484, 25853, 26222, 26592, 26961, 27330, 27700, 28069, + 28438, 28808, 29177, 29546, 29916, 30285, 30654, 31024, 31393, 31762, 32132, + 32501, 32870, 33240, 33609, 33978, 34348, 34717, 35086, 35456, 35825, 36194, + 36564, 36933, 37302, 37672, 38041, 38410, 38780, 39149, 39518, 39888, 40257, + 40626, 40996, 41365, 41734, 42104, 42473, 42842, 43212, 43581, 43950, 44320, + 44689, 45058, 45428, 45797, 46166, 46536, 46905}; + +static const int16_t kAvgDecayTime = 250; // frames; < 3000 + +int32_t WebRtcAgc_CalculateGainTable(int32_t* gainTable, // Q16 + int16_t digCompGaindB, // Q0 + int16_t targetLevelDbfs, // Q0 + uint8_t limiterEnable, + int16_t analogTarget) // Q0 +{ + // This function generates the compressor gain table used in the fixed digital + // part. 
+ uint32_t tmpU32no1, tmpU32no2, absInLevel, logApprox; + int32_t inLevel, limiterLvl; + int32_t tmp32, tmp32no1, tmp32no2, numFIX, den, y32; + const uint16_t kLog10 = 54426; // log2(10) in Q14 + const uint16_t kLog10_2 = 49321; // 10*log10(2) in Q14 + const uint16_t kLogE_1 = 23637; // log2(e) in Q14 + uint16_t constMaxGain; + uint16_t tmpU16, intPart, fracPart; + const int16_t kCompRatio = 3; + const int16_t kSoftLimiterLeft = 1; + int16_t limiterOffset = 0; // Limiter offset + int16_t limiterIdx, limiterLvlX; + int16_t constLinApprox, zeroGainLvl, maxGain, diffGain; + int16_t i, tmp16, tmp16no1; + int zeros, zerosScale; + + // Constants + // kLogE_1 = 23637; // log2(e) in Q14 + // kLog10 = 54426; // log2(10) in Q14 + // kLog10_2 = 49321; // 10*log10(2) in Q14 + + // Calculate maximum digital gain and zero gain level + tmp32no1 = (digCompGaindB - analogTarget) * (kCompRatio - 1); + tmp16no1 = analogTarget - targetLevelDbfs; + tmp16no1 += + WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio); + maxGain = WEBRTC_SPL_MAX(tmp16no1, (analogTarget - targetLevelDbfs)); + tmp32no1 = maxGain * kCompRatio; + zeroGainLvl = digCompGaindB; + zeroGainLvl -= WebRtcSpl_DivW32W16ResW16(tmp32no1 + ((kCompRatio - 1) >> 1), + kCompRatio - 1); + if ((digCompGaindB <= analogTarget) && (limiterEnable)) { + zeroGainLvl += (analogTarget - digCompGaindB + kSoftLimiterLeft); + limiterOffset = 0; + } + + // Calculate the difference between maximum gain and gain at 0dB0v: + // diffGain = maxGain + (compRatio-1)*zeroGainLvl/compRatio + // = (compRatio-1)*digCompGaindB/compRatio + tmp32no1 = digCompGaindB * (kCompRatio - 1); + diffGain = + WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio); + if (diffGain < 0 || diffGain >= kGenFuncTableSize) { + RTC_DCHECK(0); + return -1; + } + + // Calculate the limiter level and index: + // limiterLvlX = analogTarget - limiterOffset + // limiterLvl = targetLevelDbfs + limiterOffset/compRatio + limiterLvlX = analogTarget - 
limiterOffset; + limiterIdx = 2 + WebRtcSpl_DivW32W16ResW16((int32_t)limiterLvlX * (1 << 13), + kLog10_2 / 2); + tmp16no1 = + WebRtcSpl_DivW32W16ResW16(limiterOffset + (kCompRatio >> 1), kCompRatio); + limiterLvl = targetLevelDbfs + tmp16no1; + + // Calculate (through table lookup): + // constMaxGain = log2(1+2^(log2(e)*diffGain)); (in Q8) + constMaxGain = kGenFuncTable[diffGain]; // in Q8 + + // Calculate a parameter used to approximate the fractional part of 2^x with a + // piecewise linear function in Q14: + // constLinApprox = round(3/2*(4*(3-2*sqrt(2))/(log(2)^2)-0.5)*2^14); + constLinApprox = 22817; // in Q14 + + // Calculate a denominator used in the exponential part to convert from dB to + // linear scale: + // den = 20*constMaxGain (in Q8) + den = WEBRTC_SPL_MUL_16_U16(20, constMaxGain); // in Q8 + + for (i = 0; i < 32; i++) { + // Calculate scaled input level (compressor): + // inLevel = + // fix((-constLog10_2*(compRatio-1)*(1-i)+fix(compRatio/2))/compRatio) + tmp16 = (int16_t)((kCompRatio - 1) * (i - 1)); // Q0 + tmp32 = WEBRTC_SPL_MUL_16_U16(tmp16, kLog10_2) + 1; // Q14 + inLevel = WebRtcSpl_DivW32W16(tmp32, kCompRatio); // Q14 + + // Calculate diffGain-inLevel, to map using the genFuncTable + inLevel = (int32_t)diffGain * (1 << 14) - inLevel; // Q14 + + // Make calculations on abs(inLevel) and compensate for the sign afterwards. 
+ absInLevel = (uint32_t)WEBRTC_SPL_ABS_W32(inLevel); // Q14 + + // LUT with interpolation + intPart = (uint16_t)(absInLevel >> 14); + fracPart = + (uint16_t)(absInLevel & 0x00003FFF); // extract the fractional part + tmpU16 = kGenFuncTable[intPart + 1] - kGenFuncTable[intPart]; // Q8 + tmpU32no1 = tmpU16 * fracPart; // Q22 + tmpU32no1 += (uint32_t)kGenFuncTable[intPart] << 14; // Q22 + logApprox = tmpU32no1 >> 8; // Q14 + // Compensate for negative exponent using the relation: + // log2(1 + 2^-x) = log2(1 + 2^x) - x + if (inLevel < 0) { + zeros = WebRtcSpl_NormU32(absInLevel); + zerosScale = 0; + if (zeros < 15) { + // Not enough space for multiplication + tmpU32no2 = absInLevel >> (15 - zeros); // Q(zeros-1) + tmpU32no2 = WEBRTC_SPL_UMUL_32_16(tmpU32no2, kLogE_1); // Q(zeros+13) + if (zeros < 9) { + zerosScale = 9 - zeros; + tmpU32no1 >>= zerosScale; // Q(zeros+13) + } else { + tmpU32no2 >>= zeros - 9; // Q22 + } + } else { + tmpU32no2 = WEBRTC_SPL_UMUL_32_16(absInLevel, kLogE_1); // Q28 + tmpU32no2 >>= 6; // Q22 + } + logApprox = 0; + if (tmpU32no2 < tmpU32no1) { + logApprox = (tmpU32no1 - tmpU32no2) >> (8 - zerosScale); // Q14 + } + } + numFIX = (maxGain * constMaxGain) * (1 << 6); // Q14 + numFIX -= (int32_t)logApprox * diffGain; // Q14 + + // Calculate ratio + // Shift |numFIX| as much as possible. + // Ensure we avoid wrap-around in |den| as well. + if (numFIX > (den >> 8) || -numFIX > (den >> 8)) // |den| is Q8. + { + zeros = WebRtcSpl_NormW32(numFIX); + } else { + zeros = WebRtcSpl_NormW32(den) + 8; + } + numFIX *= 1 << zeros; // Q(14+zeros) + + // Shift den so we end up in Qy1 + tmp32no1 = WEBRTC_SPL_SHIFT_W32(den, zeros - 9); // Q(zeros - 1) + y32 = numFIX / tmp32no1; // in Q15 + // This is to do rounding in Q14. + y32 = y32 >= 0 ? 
(y32 + 1) >> 1 : -((-y32 + 1) >> 1); + + if (limiterEnable && (i < limiterIdx)) { + tmp32 = WEBRTC_SPL_MUL_16_U16(i - 1, kLog10_2); // Q14 + tmp32 -= limiterLvl * (1 << 14); // Q14 + y32 = WebRtcSpl_DivW32W16(tmp32 + 10, 20); + } + if (y32 > 39000) { + tmp32 = (y32 >> 1) * kLog10 + 4096; // in Q27 + tmp32 >>= 13; // In Q14. + } else { + tmp32 = y32 * kLog10 + 8192; // in Q28 + tmp32 >>= 14; // In Q14. + } + tmp32 += 16 << 14; // in Q14 (Make sure final output is in Q16) + + // Calculate power + if (tmp32 > 0) { + intPart = (int16_t)(tmp32 >> 14); + fracPart = (uint16_t)(tmp32 & 0x00003FFF); // in Q14 + if ((fracPart >> 13) != 0) { + tmp16 = (2 << 14) - constLinApprox; + tmp32no2 = (1 << 14) - fracPart; + tmp32no2 *= tmp16; + tmp32no2 >>= 13; + tmp32no2 = (1 << 14) - tmp32no2; + } else { + tmp16 = constLinApprox - (1 << 14); + tmp32no2 = (fracPart * tmp16) >> 13; + } + fracPart = (uint16_t)tmp32no2; + gainTable[i] = + (1 << intPart) + WEBRTC_SPL_SHIFT_W32(fracPart, intPart - 14); + } else { + gainTable[i] = 0; + } + } + + return 0; +} + +int32_t WebRtcAgc_InitDigital(DigitalAgc* stt, int16_t agcMode) { + if (agcMode == kAgcModeFixedDigital) { + // start at minimum to find correct gain faster + stt->capacitorSlow = 0; + } else { + // start out with 0 dB gain + stt->capacitorSlow = 134217728; // (int32_t)(0.125f * 32768.0f * 32768.0f); + } + stt->capacitorFast = 0; + stt->gain = 65536; + stt->gatePrevious = 0; + stt->agcMode = agcMode; +#ifdef WEBRTC_AGC_DEBUG_DUMP + stt->frameCounter = 0; +#endif + + // initialize VADs + WebRtcAgc_InitVad(&stt->vadNearend); + WebRtcAgc_InitVad(&stt->vadFarend); + + return 0; +} + +int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* stt, + const int16_t* in_far, + size_t nrSamples) { + RTC_DCHECK(stt); + // VAD for far end + WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples); + + return 0; +} + +int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt, + const int16_t* const* in_near, + size_t num_bands, + int16_t* const* out, + uint32_t 
FS, + int16_t lowlevelSignal) { + // array for gains (one value per ms, incl start & end) + int32_t gains[11]; + + int32_t out_tmp, tmp32; + int32_t env[10]; + int32_t max_nrg; + int32_t cur_level; + int32_t gain32, delta; + int16_t logratio; + int16_t lower_thr, upper_thr; + int16_t zeros = 0, zeros_fast, frac = 0; + int16_t decay; + int16_t gate, gain_adj; + int16_t k; + size_t n, i, L; + int16_t L2; // samples/subframe + + // determine number of samples per ms + if (FS == 8000) { + L = 8; + L2 = 3; + } else if (FS == 16000 || FS == 32000 || FS == 48000) { + L = 16; + L2 = 4; + } else { + return -1; + } + + for (i = 0; i < num_bands; ++i) { + if (in_near[i] != out[i]) { + // Only needed if they don't already point to the same place. + memcpy(out[i], in_near[i], 10 * L * sizeof(in_near[i][0])); + } + } + // VAD for near end + logratio = WebRtcAgc_ProcessVad(&stt->vadNearend, out[0], L * 10); + + // Account for far end VAD + if (stt->vadFarend.counter > 10) { + tmp32 = 3 * logratio; + logratio = (int16_t)((tmp32 - stt->vadFarend.logRatio) >> 2); + } + + // Determine decay factor depending on VAD + // upper_thr = 1.0f; + // lower_thr = 0.25f; + upper_thr = 1024; // Q10 + lower_thr = 0; // Q10 + if (logratio > upper_thr) { + // decay = -2^17 / DecayTime; -> -65 + decay = -65; + } else if (logratio < lower_thr) { + decay = 0; + } else { + // decay = (int16_t)(((lower_thr - logratio) + // * (2^27/(DecayTime*(upper_thr-lower_thr)))) >> 10); + // SUBSTITUTED: 2^27/(DecayTime*(upper_thr-lower_thr)) -> 65 + tmp32 = (lower_thr - logratio) * 65; + decay = (int16_t)(tmp32 >> 10); + } + + // adjust decay factor for long silence (detected as low standard deviation) + // This is only done in the adaptive modes + if (stt->agcMode != kAgcModeFixedDigital) { + if (stt->vadNearend.stdLongTerm < 4000) { + decay = 0; + } else if (stt->vadNearend.stdLongTerm < 8096) { + // decay = (int16_t)(((stt->vadNearend.stdLongTerm - 4000) * decay) >> + // 12); + tmp32 = 
(stt->vadNearend.stdLongTerm - 4000) * decay; + decay = (int16_t)(tmp32 >> 12); + } + + if (lowlevelSignal != 0) { + decay = 0; + } + } +#ifdef WEBRTC_AGC_DEBUG_DUMP + stt->frameCounter++; + fprintf(stt->logFile, "%5.2f\t%d\t%d\t%d\t", (float)(stt->frameCounter) / 100, + logratio, decay, stt->vadNearend.stdLongTerm); +#endif + // Find max amplitude per sub frame + // iterate over sub frames + for (k = 0; k < 10; k++) { + // iterate over samples + max_nrg = 0; + for (n = 0; n < L; n++) { + int32_t nrg = out[0][k * L + n] * out[0][k * L + n]; + if (nrg > max_nrg) { + max_nrg = nrg; + } + } + env[k] = max_nrg; + } + + // Calculate gain per sub frame + gains[0] = stt->gain; + for (k = 0; k < 10; k++) { + // Fast envelope follower + // decay time = -131000 / -1000 = 131 (ms) + stt->capacitorFast = + AGC_SCALEDIFF32(-1000, stt->capacitorFast, stt->capacitorFast); + if (env[k] > stt->capacitorFast) { + stt->capacitorFast = env[k]; + } + // Slow envelope follower + if (env[k] > stt->capacitorSlow) { + // increase capacitorSlow + stt->capacitorSlow = AGC_SCALEDIFF32(500, (env[k] - stt->capacitorSlow), + stt->capacitorSlow); + } else { + // decrease capacitorSlow + stt->capacitorSlow = + AGC_SCALEDIFF32(decay, stt->capacitorSlow, stt->capacitorSlow); + } + + // use maximum of both capacitors as current level + if (stt->capacitorFast > stt->capacitorSlow) { + cur_level = stt->capacitorFast; + } else { + cur_level = stt->capacitorSlow; + } + // Translate signal level into gain, using a piecewise linear approximation + // find number of leading zeros + zeros = WebRtcSpl_NormU32((uint32_t)cur_level); + if (cur_level == 0) { + zeros = 31; + } + tmp32 = (cur_level << zeros) & 0x7FFFFFFF; + frac = (int16_t)(tmp32 >> 19); // Q12. 
+ tmp32 = (stt->gainTable[zeros - 1] - stt->gainTable[zeros]) * frac; + gains[k + 1] = stt->gainTable[zeros] + (tmp32 >> 12); +#ifdef WEBRTC_AGC_DEBUG_DUMP + if (k == 0) { + fprintf(stt->logFile, "%d\t%d\t%d\t%d\t%d\n", env[0], cur_level, + stt->capacitorFast, stt->capacitorSlow, zeros); + } +#endif + } + + // Gate processing (lower gain during absence of speech) + zeros = (zeros << 9) - (frac >> 3); + // find number of leading zeros + zeros_fast = WebRtcSpl_NormU32((uint32_t)stt->capacitorFast); + if (stt->capacitorFast == 0) { + zeros_fast = 31; + } + tmp32 = (stt->capacitorFast << zeros_fast) & 0x7FFFFFFF; + zeros_fast <<= 9; + zeros_fast -= (int16_t)(tmp32 >> 22); + + gate = 1000 + zeros_fast - zeros - stt->vadNearend.stdShortTerm; + + if (gate < 0) { + stt->gatePrevious = 0; + } else { + tmp32 = stt->gatePrevious * 7; + gate = (int16_t)((gate + tmp32) >> 3); + stt->gatePrevious = gate; + } + // gate < 0 -> no gate + // gate > 2500 -> max gate + if (gate > 0) { + if (gate < 2500) { + gain_adj = (2500 - gate) >> 5; + } else { + gain_adj = 0; + } + for (k = 0; k < 10; k++) { + if ((gains[k + 1] - stt->gainTable[0]) > 8388608) { + // To prevent wraparound + tmp32 = (gains[k + 1] - stt->gainTable[0]) >> 8; + tmp32 *= 178 + gain_adj; + } else { + tmp32 = (gains[k + 1] - stt->gainTable[0]) * (178 + gain_adj); + tmp32 >>= 8; + } + gains[k + 1] = stt->gainTable[0] + tmp32; + } + } + + // Limit gain to avoid overload distortion + for (k = 0; k < 10; k++) { + // To prevent wrap around + zeros = 10; + if (gains[k + 1] > 47453132) { + zeros = 16 - WebRtcSpl_NormW32(gains[k + 1]); + } + gain32 = (gains[k + 1] >> zeros) + 1; + gain32 *= gain32; + // check for overflow + while (AGC_MUL32((env[k] >> 12) + 1, gain32) > + WEBRTC_SPL_SHIFT_W32((int32_t)32767, 2 * (1 - zeros + 10))) { + // multiply by 253/256 ==> -0.1 dB + if (gains[k + 1] > 8388607) { + // Prevent wrap around + gains[k + 1] = (gains[k + 1] / 256) * 253; + } else { + gains[k + 1] = (gains[k + 1] * 253) / 256; + } 
+ gain32 = (gains[k + 1] >> zeros) + 1; + gain32 *= gain32; + } + } + // gain reductions should be done 1 ms earlier than gain increases + for (k = 1; k < 10; k++) { + if (gains[k] > gains[k + 1]) { + gains[k] = gains[k + 1]; + } + } + // save start gain for next frame + stt->gain = gains[10]; + + // Apply gain + // handle first sub frame separately + delta = (gains[1] - gains[0]) * (1 << (4 - L2)); + gain32 = gains[0] * (1 << 4); + // iterate over samples + for (n = 0; n < L; n++) { + for (i = 0; i < num_bands; ++i) { + tmp32 = out[i][n] * ((gain32 + 127) >> 7); + out_tmp = tmp32 >> 16; + if (out_tmp > 4095) { + out[i][n] = (int16_t)32767; + } else if (out_tmp < -4096) { + out[i][n] = (int16_t)-32768; + } else { + tmp32 = out[i][n] * (gain32 >> 4); + out[i][n] = (int16_t)(tmp32 >> 16); + } + } + // + + gain32 += delta; + } + // iterate over subframes + for (k = 1; k < 10; k++) { + delta = (gains[k + 1] - gains[k]) * (1 << (4 - L2)); + gain32 = gains[k] * (1 << 4); + // iterate over samples + for (n = 0; n < L; n++) { + for (i = 0; i < num_bands; ++i) { + tmp32 = out[i][k * L + n] * (gain32 >> 4); + out[i][k * L + n] = (int16_t)(tmp32 >> 16); + } + gain32 += delta; + } + } + + return 0; +} + +void WebRtcAgc_InitVad(AgcVad* state) { + int16_t k; + + state->HPstate = 0; // state of high pass filter + state->logRatio = 0; // log( P(active) / P(inactive) ) + // average input level (Q10) + state->meanLongTerm = 15 << 10; + + // variance of input level (Q8) + state->varianceLongTerm = 500 << 8; + + state->stdLongTerm = 0; // standard deviation of input level in dB + // short-term average input level (Q10) + state->meanShortTerm = 15 << 10; + + // short-term variance of input level (Q8) + state->varianceShortTerm = 500 << 8; + + state->stdShortTerm = + 0; // short-term standard deviation of input level in dB + state->counter = 3; // counts updates + for (k = 0; k < 8; k++) { + // downsampling filter + state->downState[k] = 0; + } +} + +int16_t WebRtcAgc_ProcessVad(AgcVad* 
state, // (i) VAD state + const int16_t* in, // (i) Speech signal + size_t nrSamples) // (i) number of samples +{ + int32_t out, nrg, tmp32, tmp32b; + uint16_t tmpU16; + int16_t k, subfr, tmp16; + int16_t buf1[8]; + int16_t buf2[4]; + int16_t HPstate; + int16_t zeros, dB; + + // process in 10 sub frames of 1 ms (to save on memory) + nrg = 0; + HPstate = state->HPstate; + for (subfr = 0; subfr < 10; subfr++) { + // downsample to 4 kHz + if (nrSamples == 160) { + for (k = 0; k < 8; k++) { + tmp32 = (int32_t)in[2 * k] + (int32_t)in[2 * k + 1]; + tmp32 >>= 1; + buf1[k] = (int16_t)tmp32; + } + in += 16; + + WebRtcSpl_DownsampleBy2(buf1, 8, buf2, state->downState); + } else { + WebRtcSpl_DownsampleBy2(in, 8, buf2, state->downState); + in += 8; + } + + // high pass filter and compute energy + for (k = 0; k < 4; k++) { + out = buf2[k] + HPstate; + tmp32 = 600 * out; + HPstate = (int16_t)((tmp32 >> 10) - buf2[k]); + nrg += (out * out) >> 6; + } + } + state->HPstate = HPstate; + + // find number of leading zeros + if (!(0xFFFF0000 & nrg)) { + zeros = 16; + } else { + zeros = 0; + } + if (!(0xFF000000 & (nrg << zeros))) { + zeros += 8; + } + if (!(0xF0000000 & (nrg << zeros))) { + zeros += 4; + } + if (!(0xC0000000 & (nrg << zeros))) { + zeros += 2; + } + if (!(0x80000000 & (nrg << zeros))) { + zeros += 1; + } + + // energy level (range {-32..30}) (Q10) + dB = (15 - zeros) << 11; + + // Update statistics + + if (state->counter < kAvgDecayTime) { + // decay time = AvgDecTime * 10 ms + state->counter++; + } + + // update short-term estimate of mean energy level (Q10) + tmp32 = state->meanShortTerm * 15 + dB; + state->meanShortTerm = (int16_t)(tmp32 >> 4); + + // update short-term estimate of variance in energy level (Q8) + tmp32 = (dB * dB) >> 12; + tmp32 += state->varianceShortTerm * 15; + state->varianceShortTerm = tmp32 / 16; + + // update short-term estimate of standard deviation in energy level (Q10) + tmp32 = state->meanShortTerm * state->meanShortTerm; + tmp32 = 
(state->varianceShortTerm << 12) - tmp32; + state->stdShortTerm = (int16_t)WebRtcSpl_Sqrt(tmp32); + + // update long-term estimate of mean energy level (Q10) + tmp32 = state->meanLongTerm * state->counter + dB; + state->meanLongTerm = + WebRtcSpl_DivW32W16ResW16(tmp32, WebRtcSpl_AddSatW16(state->counter, 1)); + + // update long-term estimate of variance in energy level (Q8) + tmp32 = (dB * dB) >> 12; + tmp32 += state->varianceLongTerm * state->counter; + state->varianceLongTerm = + WebRtcSpl_DivW32W16(tmp32, WebRtcSpl_AddSatW16(state->counter, 1)); + + // update long-term estimate of standard deviation in energy level (Q10) + tmp32 = state->meanLongTerm * state->meanLongTerm; + tmp32 = (state->varianceLongTerm << 12) - tmp32; + state->stdLongTerm = (int16_t)WebRtcSpl_Sqrt(tmp32); + + // update voice activity measure (Q10) + tmp16 = 3 << 12; + // TODO(bjornv): (dB - state->meanLongTerm) can overflow, e.g., in + // ApmTest.Process unit test. Previously the macro WEBRTC_SPL_MUL_16_16() + // was used, which did an intermediate cast to (int16_t), hence losing + // significant bits. This cause logRatio to max out positive, rather than + // negative. This is a bug, but has very little significance. 
+ tmp32 = tmp16 * (int16_t)(dB - state->meanLongTerm); + tmp32 = WebRtcSpl_DivW32W16(tmp32, state->stdLongTerm); + tmpU16 = (13 << 12); + tmp32b = WEBRTC_SPL_MUL_16_U16(state->logRatio, tmpU16); + tmp32 += tmp32b >> 10; + + state->logRatio = (int16_t)(tmp32 >> 6); + + // limit + if (state->logRatio > 2048) { + state->logRatio = 2048; + } + if (state->logRatio < -2048) { + state->logRatio = -2048; + } + + return state->logRatio; // Q10 +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.h new file mode 100644 index 000000000..4664b59dc --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/digital_agc.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_ + +#ifdef WEBRTC_AGC_DEBUG_DUMP +#include +#endif +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +#include "webrtc/typedefs.h" + +// the 32 most significant bits of A(19) * B(26) >> 13 +#define AGC_MUL32(A, B) (((B) >> 13) * (A) + (((0x00001FFF & (B)) * (A)) >> 13)) +// C + the 32 most significant bits of A * B +#define AGC_SCALEDIFF32(A, B, C) \ + ((C) + ((B) >> 16) * (A) + (((0x0000FFFF & (B)) * (A)) >> 16)) + +typedef struct { + int32_t downState[8]; + int16_t HPstate; + int16_t counter; + int16_t logRatio; // log( P(active) / P(inactive) ) (Q10) + int16_t meanLongTerm; // Q10 + int32_t varianceLongTerm; // Q8 + int16_t stdLongTerm; // Q10 + int16_t meanShortTerm; // Q10 + int32_t varianceShortTerm; // Q8 + int16_t stdShortTerm; // Q10 +} AgcVad; // total = 54 bytes + +typedef struct { + int32_t capacitorSlow; + int32_t capacitorFast; + int32_t gain; + int32_t gainTable[32]; + int16_t gatePrevious; + int16_t agcMode; + AgcVad vadNearend; + AgcVad vadFarend; +#ifdef WEBRTC_AGC_DEBUG_DUMP + FILE* logFile; + int frameCounter; +#endif +} DigitalAgc; + +int32_t WebRtcAgc_InitDigital(DigitalAgc* digitalAgcInst, int16_t agcMode); + +int32_t WebRtcAgc_ProcessDigital(DigitalAgc* digitalAgcInst, + const int16_t* const* inNear, + size_t num_bands, + int16_t* const* out, + uint32_t FS, + int16_t lowLevelSignal); + +int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* digitalAgcInst, + const int16_t* inFar, + size_t nrSamples); + +void WebRtcAgc_InitVad(AgcVad* vadInst); + +int16_t WebRtcAgc_ProcessVad(AgcVad* vadInst, // (i) VAD state + const int16_t* in, // (i) Speech signal + size_t nrSamples); // (i) number of samples + +int32_t WebRtcAgc_CalculateGainTable(int32_t* gainTable, // Q16 + int16_t compressionGaindB, // Q0 (in dB) + int16_t targetLevelDbfs, // Q0 (in dB) + uint8_t limiterEnable, + 
int16_t analogTarget); + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/gain_control.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/gain_control.h new file mode 100644 index 000000000..c3157500c --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/agc/legacy/gain_control.h @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_LEGACY_GAIN_CONTROL_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_LEGACY_GAIN_CONTROL_H_ + +#include "webrtc/typedefs.h" + +// Errors +#define AGC_UNSPECIFIED_ERROR 18000 +#define AGC_UNSUPPORTED_FUNCTION_ERROR 18001 +#define AGC_UNINITIALIZED_ERROR 18002 +#define AGC_NULL_POINTER_ERROR 18003 +#define AGC_BAD_PARAMETER_ERROR 18004 + +// Warnings +#define AGC_BAD_PARAMETER_WARNING 18050 + +enum { + kAgcModeUnchanged, + kAgcModeAdaptiveAnalog, + kAgcModeAdaptiveDigital, + kAgcModeFixedDigital +}; + +enum { kAgcFalse = 0, kAgcTrue }; + +typedef struct { + int16_t targetLevelDbfs; // default 3 (-3 dBOv) + int16_t compressionGaindB; // default 9 dB + uint8_t limiterEnable; // default kAgcTrue (on) +} WebRtcAgcConfig; + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * This function analyses the number of samples passed to + * farend and produces any error code that could arise. + * + * Input: + * - agcInst : AGC instance. + * - samples : Number of samples in input vector. + * + * Return value: + * : 0 - Normal operation. 
+ * : -1 - Error. + */ +int WebRtcAgc_GetAddFarendError(void* state, size_t samples); + +/* + * This function processes a 10 ms frame of far-end speech to determine + * if there is active speech. The length of the input speech vector must be + * given in samples (80 when FS=8000, and 160 when FS=16000, FS=32000 or + * FS=48000). + * + * Input: + * - agcInst : AGC instance. + * - inFar : Far-end input speech vector + * - samples : Number of samples in input vector + * + * Return value: + * : 0 - Normal operation. + * : -1 - Error + */ +int WebRtcAgc_AddFarend(void* agcInst, const int16_t* inFar, size_t samples); + +/* + * This function processes a 10 ms frame of microphone speech to determine + * if there is active speech. The length of the input speech vector must be + * given in samples (80 when FS=8000, and 160 when FS=16000, FS=32000 or + * FS=48000). For very low input levels, the input signal is increased in level + * by multiplying and overwriting the samples in inMic[]. + * + * This function should be called before any further processing of the + * near-end microphone signal. + * + * Input: + * - agcInst : AGC instance. + * - inMic : Microphone input speech vector for each band + * - num_bands : Number of bands in input vector + * - samples : Number of samples in input vector + * + * Return value: + * : 0 - Normal operation. + * : -1 - Error + */ +int WebRtcAgc_AddMic(void* agcInst, + int16_t* const* inMic, + size_t num_bands, + size_t samples); + +/* + * This function replaces the analog microphone with a virtual one. + * It is a digital gain applied to the input signal and is used in the + * agcAdaptiveDigital mode where no microphone level is adjustable. The length + * of the input speech vector must be given in samples (80 when FS=8000, and 160 + * when FS=16000, FS=32000 or FS=48000). + * + * Input: + * - agcInst : AGC instance. 
+ * - inMic : Microphone input speech vector for each band + * - num_bands : Number of bands in input vector + * - samples : Number of samples in input vector + * - micLevelIn : Input level of microphone (static) + * + * Output: + * - inMic : Microphone output after processing (L band) + * - inMic_H : Microphone output after processing (H band) + * - micLevelOut : Adjusted microphone level after processing + * + * Return value: + * : 0 - Normal operation. + * : -1 - Error + */ +int WebRtcAgc_VirtualMic(void* agcInst, + int16_t* const* inMic, + size_t num_bands, + size_t samples, + int32_t micLevelIn, + int32_t* micLevelOut); + +/* + * This function processes a 10 ms frame and adjusts (normalizes) the gain both + * analog and digitally. The gain adjustments are done only during active + * periods of speech. The length of the speech vectors must be given in samples + * (80 when FS=8000, and 160 when FS=16000, FS=32000 or FS=48000). The echo + * parameter can be used to ensure the AGC will not adjust upward in the + * presence of echo. + * + * This function should be called after processing the near-end microphone + * signal, in any case after any echo cancellation. + * + * Input: + * - agcInst : AGC instance + * - inNear : Near-end input speech vector for each band + * - num_bands : Number of bands in input/output vector + * - samples : Number of samples in input/output vector + * - inMicLevel : Current microphone volume level + * - echo : Set to 0 if the signal passed to add_mic is + * almost certainly free of echo; otherwise set + * to 1. If you have no information regarding echo + * set to 0. + * + * Output: + * - outMicLevel : Adjusted microphone volume level + * - out : Gain-adjusted near-end speech vector + * : May be the same vector as the input. + * - saturationWarning : A returned value of 1 indicates a saturation event + * has occurred and the volume cannot be further + * reduced. Otherwise will be set to 0. + * + * Return value: + * : 0 - Normal operation. 
+ * : -1 - Error + */ +int WebRtcAgc_Process(void* agcInst, + const int16_t* const* inNear, + size_t num_bands, + size_t samples, + int16_t* const* out, + int32_t inMicLevel, + int32_t* outMicLevel, + int16_t echo, + uint8_t* saturationWarning); + +/* + * This function sets the config parameters (targetLevelDbfs, + * compressionGaindB and limiterEnable). + * + * Input: + * - agcInst : AGC instance + * - config : config struct + * + * Output: + * + * Return value: + * : 0 - Normal operation. + * : -1 - Error + */ +int WebRtcAgc_set_config(void* agcInst, WebRtcAgcConfig config); + +/* + * This function returns the config parameters (targetLevelDbfs, + * compressionGaindB and limiterEnable). + * + * Input: + * - agcInst : AGC instance + * + * Output: + * - config : config struct + * + * Return value: + * : 0 - Normal operation. + * : -1 - Error + */ +int WebRtcAgc_get_config(void* agcInst, WebRtcAgcConfig* config); + +/* + * This function creates and returns an AGC instance, which will contain the + * state information for one (duplex) channel. + */ +void* WebRtcAgc_Create(); + +/* + * This function frees the AGC instance created at the beginning. + * + * Input: + * - agcInst : AGC instance. + */ +void WebRtcAgc_Free(void* agcInst); + +/* + * This function initializes an AGC instance. + * + * Input: + * - agcInst : AGC instance. 
+ * - minLevel : Minimum possible mic level + * - maxLevel : Maximum possible mic level + * - agcMode : 0 - Unchanged + * : 1 - Adaptive Analog Automatic Gain Control -3dBOv + * : 2 - Adaptive Digital Automatic Gain Control -3dBOv + * : 3 - Fixed Digital Gain 0dB + * - fs : Sampling frequency + * + * Return value : 0 - Ok + * -1 - Error + */ +int WebRtcAgc_Init(void* agcInst, + int32_t minLevel, + int32_t maxLevel, + int16_t agcMode, + uint32_t fs); + +#if defined(__cplusplus) +} +#endif + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_LEGACY_GAIN_CONTROL_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.cc new file mode 100644 index 000000000..66ec5178d --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.cc @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/modules/audio_processing/logging/apm_data_dumper.h" + +#include + +#include "webrtc/base/stringutils.h" + +// Check to verify that the define is properly set. 
+#if !defined(WEBRTC_APM_DEBUG_DUMP) || \ + (WEBRTC_APM_DEBUG_DUMP != 0 && WEBRTC_APM_DEBUG_DUMP != 1) +#error "Set WEBRTC_APM_DEBUG_DUMP to either 0 or 1" +#endif + +namespace webrtc { + +namespace { + +#if WEBRTC_APM_DEBUG_DUMP == 1 +std::string FormFileName(const char* name, + int instance_index, + int reinit_index, + const std::string& suffix) { + std::stringstream ss; + ss << name << "_" << instance_index << "-" << reinit_index << suffix; + return ss.str(); +} +#endif + +} // namespace + +#if WEBRTC_APM_DEBUG_DUMP == 1 +ApmDataDumper::ApmDataDumper(int instance_index) + : instance_index_(instance_index) {} +#else +ApmDataDumper::ApmDataDumper(int instance_index) {} +#endif + +ApmDataDumper::~ApmDataDumper() {} + +#if WEBRTC_APM_DEBUG_DUMP == 1 +FILE* ApmDataDumper::GetRawFile(const char* name) { + std::string filename = + FormFileName(name, instance_index_, recording_set_index_, ".dat"); + auto& f = raw_files_[filename]; + if (!f) { + f.reset(fopen(filename.c_str(), "wb")); + } + return f.get(); +} + +WavWriter* ApmDataDumper::GetWavFile(const char* name, + int sample_rate_hz, + int num_channels) { + std::string filename = + FormFileName(name, instance_index_, recording_set_index_, ".wav"); + auto& f = wav_files_[filename]; + if (!f) { + f.reset(new WavWriter(filename.c_str(), sample_rate_hz, num_channels)); + } + return f.get(); +} + +#endif + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.h new file mode 100644 index 000000000..691c4cec5 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/logging/apm_data_dumper.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_ + +#include + +#include +#include +#include + +#include "webrtc/base/array_view.h" +#include "webrtc/base/constructormagic.h" +#include "webrtc/common_audio/wav_file.h" + +// Check to verify that the define is properly set. +#if !defined(WEBRTC_APM_DEBUG_DUMP) || \ + (WEBRTC_APM_DEBUG_DUMP != 0 && WEBRTC_APM_DEBUG_DUMP != 1) +#error "Set WEBRTC_APM_DEBUG_DUMP to either 0 or 1" +#endif + +namespace webrtc { + +#if WEBRTC_APM_DEBUG_DUMP == 1 +// Functor used to use as a custom deleter in the map of file pointers to raw +// files. +struct RawFileCloseFunctor { + void operator()(FILE* f) const { fclose(f); } +}; +#endif + +// Class that handles dumping of variables into files. +class ApmDataDumper { + public: + // Constructor that takes an instance index that may + // be used to distinguish data dumped from different + // instances of the code. + explicit ApmDataDumper(int instance_index); + + ~ApmDataDumper(); + + // Reinitializes the data dumping such that new versions + // of all files being dumped to are created. + void InitiateNewSetOfRecordings() { +#if WEBRTC_APM_DEBUG_DUMP == 1 + ++recording_set_index_; +#endif + } + + // Methods for performing dumping of data of various types into + // various formats. 
+ void DumpRaw(const char* name, int v_length, const float* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + FILE* file = GetRawFile(name); + fwrite(v, sizeof(v[0]), v_length, file); +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + DumpRaw(name, v.size(), v.data()); +#endif + } + + void DumpRaw(const char* name, int v_length, const bool* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + FILE* file = GetRawFile(name); + for (int k = 0; k < v_length; ++k) { + int16_t value = static_cast(v[k]); + fwrite(&value, sizeof(value), 1, file); + } +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + DumpRaw(name, v.size(), v.data()); +#endif + } + + void DumpRaw(const char* name, int v_length, const int16_t* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + FILE* file = GetRawFile(name); + fwrite(v, sizeof(v[0]), v_length, file); +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + DumpRaw(name, v.size(), v.data()); +#endif + } + + void DumpRaw(const char* name, int v_length, const int32_t* v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + FILE* file = GetRawFile(name); + fwrite(v, sizeof(v[0]), v_length, file); +#endif + } + + void DumpRaw(const char* name, rtc::ArrayView v) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + DumpRaw(name, v.size(), v.data()); +#endif + } + + void DumpWav(const char* name, + int v_length, + const float* v, + int sample_rate_hz, + int num_channels) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + WavWriter* file = GetWavFile(name, sample_rate_hz, num_channels); + file->WriteSamples(v, v_length); +#endif + } + + void DumpWav(const char* name, + rtc::ArrayView v, + int sample_rate_hz, + int num_channels) { +#if WEBRTC_APM_DEBUG_DUMP == 1 + DumpWav(name, v.size(), v.data(), sample_rate_hz, num_channels); +#endif + } + + private: +#if WEBRTC_APM_DEBUG_DUMP == 1 + const int instance_index_; + int recording_set_index_ = 0; + std::unordered_map> + raw_files_; + 
std::unordered_map> wav_files_; + + FILE* GetRawFile(const char* name); + WavWriter* GetWavFile(const char* name, int sample_rate_hz, int num_channels); +#endif + RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ApmDataDumper); +}; + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/defines.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/defines.h new file mode 100644 index 000000000..8271332ce --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/defines.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_DEFINES_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_DEFINES_H_ + +#define BLOCKL_MAX 160 // max processing block length: 160 +#define ANAL_BLOCKL_MAX 256 // max analysis block length: 256 +#define HALF_ANAL_BLOCKL 129 // half max analysis block length + 1 +#define NUM_HIGH_BANDS_MAX 2 // max number of high bands: 2 + +#define QUANTILE (float)0.25 + +#define SIMULT 3 +#define END_STARTUP_LONG 200 +#define END_STARTUP_SHORT 50 +#define FACTOR (float)40.0 +#define WIDTH (float)0.01 + +// Length of fft work arrays. 
+#define IP_LENGTH (ANAL_BLOCKL_MAX >> 1) // must be at least ceil(2 + sqrt(ANAL_BLOCKL_MAX/2)) +#define W_LENGTH (ANAL_BLOCKL_MAX >> 1) + +//PARAMETERS FOR NEW METHOD +#define DD_PR_SNR (float)0.98 // DD update of prior SNR +#define LRT_TAVG (float)0.50 // tavg parameter for LRT (previously 0.90) +#define SPECT_FL_TAVG (float)0.30 // tavg parameter for spectral flatness measure +#define SPECT_DIFF_TAVG (float)0.30 // tavg parameter for spectral difference measure +#define PRIOR_UPDATE (float)0.10 // update parameter of prior model +#define NOISE_UPDATE (float)0.90 // update parameter for noise +#define SPEECH_UPDATE (float)0.99 // update parameter when likely speech +#define WIDTH_PR_MAP (float)4.0 // width parameter in sigmoid map for prior model +#define LRT_FEATURE_THR (float)0.5 // default threshold for LRT feature +#define SF_FEATURE_THR (float)0.5 // default threshold for Spectral Flatness feature +#define SD_FEATURE_THR (float)0.5 // default threshold for Spectral Difference feature +#define PROB_RANGE (float)0.20 // probability threshold for noise state in + // speech/noise likelihood +#define HIST_PAR_EST 1000 // histogram size for estimation of parameters +#define GAMMA_PAUSE (float)0.05 // update for conservative noise estimate +// +#define B_LIM (float)0.5 // threshold in final energy gain factor calculation +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_DEFINES_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.c new file mode 100644 index 000000000..8b6f45fce --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/modules/audio_processing/ns/noise_suppression.h" + +#include +#include + +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +#include "webrtc/modules/audio_processing/ns/defines.h" +#include "webrtc/modules/audio_processing/ns/ns_core.h" + +NsHandle* WebRtcNs_Create() { + NoiseSuppressionC* self = malloc(sizeof(NoiseSuppressionC)); + self->initFlag = 0; + return (NsHandle*)self; +} + +void WebRtcNs_Free(NsHandle* NS_inst) { + free(NS_inst); +} + +int WebRtcNs_Init(NsHandle* NS_inst, uint32_t fs) { + return WebRtcNs_InitCore((NoiseSuppressionC*)NS_inst, fs); +} + +int WebRtcNs_set_policy(NsHandle* NS_inst, int mode) { + return WebRtcNs_set_policy_core((NoiseSuppressionC*)NS_inst, mode); +} + +void WebRtcNs_Analyze(NsHandle* NS_inst, const float* spframe) { + WebRtcNs_AnalyzeCore((NoiseSuppressionC*)NS_inst, spframe); +} + +void WebRtcNs_Process(NsHandle* NS_inst, + const float* const* spframe, + size_t num_bands, + float* const* outframe) { + WebRtcNs_ProcessCore((NoiseSuppressionC*)NS_inst, spframe, num_bands, + outframe); +} + +float WebRtcNs_prior_speech_probability(NsHandle* handle) { + NoiseSuppressionC* self = (NoiseSuppressionC*)handle; + if (handle == NULL) { + return -1; + } + if (self->initFlag == 0) { + return -1; + } + return self->priorSpeechProb; +} + +const float* WebRtcNs_noise_estimate(const NsHandle* handle) { + const NoiseSuppressionC* self = (const NoiseSuppressionC*)handle; + if (handle == NULL || self->initFlag == 0) { + return NULL; + } + return self->noise; +} + +size_t WebRtcNs_num_freq() { + return HALF_ANAL_BLOCKL; +} diff --git 
a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.h new file mode 100644 index 000000000..41cad4ed1 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_H_ + +#include + +#include "webrtc/typedefs.h" + +typedef struct NsHandleT NsHandle; + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This function creates an instance of the floating point Noise Suppression. + */ +NsHandle* WebRtcNs_Create(); + +/* + * This function frees the dynamic memory of a specified noise suppression + * instance. + * + * Input: + * - NS_inst : Pointer to NS instance that should be freed + */ +void WebRtcNs_Free(NsHandle* NS_inst); + +/* + * This function initializes a NS instance and has to be called before any other + * processing is made. + * + * Input: + * - NS_inst : Instance that should be initialized + * - fs : sampling frequency + * + * Output: + * - NS_inst : Initialized instance + * + * Return value : 0 - Ok + * -1 - Error + */ +int WebRtcNs_Init(NsHandle* NS_inst, uint32_t fs); + +/* + * This changes the aggressiveness of the noise suppression method. + * + * Input: + * - NS_inst : Noise suppression instance. + * - mode : 0: Mild, 1: Medium , 2: Aggressive + * + * Output: + * - NS_inst : Updated instance. 
+ * + * Return value : 0 - Ok + * -1 - Error + */ +int WebRtcNs_set_policy(NsHandle* NS_inst, int mode); + +/* + * This functions estimates the background noise for the inserted speech frame. + * The input and output signals should always be 10ms (80 or 160 samples). + * + * Input + * - NS_inst : Noise suppression instance. + * - spframe : Pointer to speech frame buffer for L band + * + * Output: + * - NS_inst : Updated NS instance + */ +void WebRtcNs_Analyze(NsHandle* NS_inst, const float* spframe); + +/* + * This functions does Noise Suppression for the inserted speech frame. The + * input and output signals should always be 10ms (80 or 160 samples). + * + * Input + * - NS_inst : Noise suppression instance. + * - spframe : Pointer to speech frame buffer for each band + * - num_bands : Number of bands + * + * Output: + * - NS_inst : Updated NS instance + * - outframe : Pointer to output frame for each band + */ +void WebRtcNs_Process(NsHandle* NS_inst, + const float* const* spframe, + size_t num_bands, + float* const* outframe); + +/* Returns the internally used prior speech probability of the current frame. + * There is a frequency bin based one as well, with which this should not be + * confused. + * + * Input + * - handle : Noise suppression instance. + * + * Return value : Prior speech probability in interval [0.0, 1.0]. + * -1 - NULL pointer or uninitialized instance. + */ +float WebRtcNs_prior_speech_probability(NsHandle* handle); + +/* Returns a pointer to the noise estimate per frequency bin. The number of + * frequency bins can be provided using WebRtcNs_num_freq(). + * + * Input + * - handle : Noise suppression instance. + * + * Return value : Pointer to the noise estimate per frequency bin. + * Returns NULL if the input is a NULL pointer or an + * uninitialized instance. + */ +const float* WebRtcNs_noise_estimate(const NsHandle* handle); + +/* Returns the number of frequency bins, which is the length of the noise + * estimate for example. 
+ * + * Return value : Number of frequency bins. + */ +size_t WebRtcNs_num_freq(); + +#ifdef __cplusplus +} +#endif + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.c new file mode 100644 index 000000000..28a07e8c1 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h" + +#include + +#include "webrtc/common_audio/signal_processing/include/real_fft.h" +#include "webrtc/modules/audio_processing/ns/nsx_core.h" +#include "webrtc/modules/audio_processing/ns/nsx_defines.h" + +NsxHandle* WebRtcNsx_Create() { + NoiseSuppressionFixedC* self = malloc(sizeof(NoiseSuppressionFixedC)); + WebRtcSpl_Init(); + self->real_fft = NULL; + self->initFlag = 0; + return (NsxHandle*)self; +} + +void WebRtcNsx_Free(NsxHandle* nsxInst) { + WebRtcSpl_FreeRealFFT(((NoiseSuppressionFixedC*)nsxInst)->real_fft); + free(nsxInst); +} + +int WebRtcNsx_Init(NsxHandle* nsxInst, uint32_t fs) { + return WebRtcNsx_InitCore((NoiseSuppressionFixedC*)nsxInst, fs); +} + +int WebRtcNsx_set_policy(NsxHandle* nsxInst, int mode) { + return WebRtcNsx_set_policy_core((NoiseSuppressionFixedC*)nsxInst, mode); +} + +void WebRtcNsx_Process(NsxHandle* nsxInst, + const short* const* speechFrame, + int num_bands, + short* const* outFrame) { 
+ WebRtcNsx_ProcessCore((NoiseSuppressionFixedC*)nsxInst, speechFrame, + num_bands, outFrame); +} + +const uint32_t* WebRtcNsx_noise_estimate(const NsxHandle* nsxInst, + int* q_noise) { + *q_noise = 11; + const NoiseSuppressionFixedC* self = (const NoiseSuppressionFixedC*)nsxInst; + if (nsxInst == NULL || self->initFlag == 0) { + return NULL; + } + *q_noise += self->prevQNoise; + return self->prevNoiseU32; +} + +size_t WebRtcNsx_num_freq() { + return HALF_ANAL_BLOCKL; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.h new file mode 100644 index 000000000..79a5fc626 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/noise_suppression_x.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_X_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_X_H_ + +#include + +#include "webrtc/typedefs.h" + +typedef struct NsxHandleT NsxHandle; + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This function creates an instance of the fixed point Noise Suppression. + */ +NsxHandle* WebRtcNsx_Create(); + +/* + * This function frees the dynamic memory of a specified Noise Suppression + * instance. 
+ * + * Input: + * - nsxInst : Pointer to NS instance that should be freed + */ +void WebRtcNsx_Free(NsxHandle* nsxInst); + +/* + * This function initializes a NS instance + * + * Input: + * - nsxInst : Instance that should be initialized + * - fs : sampling frequency + * + * Output: + * - nsxInst : Initialized instance + * + * Return value : 0 - Ok + * -1 - Error + */ +int WebRtcNsx_Init(NsxHandle* nsxInst, uint32_t fs); + +/* + * This changes the aggressiveness of the noise suppression method. + * + * Input: + * - nsxInst : Instance that should be initialized + * - mode : 0: Mild, 1: Medium , 2: Aggressive + * + * Output: + * - nsxInst : Initialized instance + * + * Return value : 0 - Ok + * -1 - Error + */ +int WebRtcNsx_set_policy(NsxHandle* nsxInst, int mode); + +/* + * This functions does noise suppression for the inserted speech frame. The + * input and output signals should always be 10ms (80 or 160 samples). + * + * Input + * - nsxInst : NSx instance. Needs to be initiated before call. + * - speechFrame : Pointer to speech frame buffer for each band + * - num_bands : Number of bands + * + * Output: + * - nsxInst : Updated NSx instance + * - outFrame : Pointer to output frame for each band + */ +void WebRtcNsx_Process(NsxHandle* nsxInst, + const short* const* speechFrame, + int num_bands, + short* const* outFrame); + +/* Returns a pointer to the noise estimate per frequency bin. The number of + * frequency bins can be provided using WebRtcNsx_num_freq(). + * + * Input + * - nsxInst : NSx instance. Needs to be initiated before call. + * - q_noise : Q value of the noise estimate, which is the number of + * bits that it needs to be right-shifted to be + * normalized. + * + * Return value : Pointer to the noise estimate per frequency bin. + * Returns NULL if the input is a NULL pointer or an + * uninitialized instance. 
+ */ +const uint32_t* WebRtcNsx_noise_estimate(const NsxHandle* nsxInst, + int* q_noise); + +/* Returns the number of frequency bins, which is the length of the noise + * estimate for example. + * + * Return value : Number of frequency bins. + */ +size_t WebRtcNsx_num_freq(); + +#ifdef __cplusplus +} +#endif + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_X_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.c new file mode 100644 index 000000000..76589c5fe --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.c @@ -0,0 +1,1416 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include +#include +#include + +#include "webrtc/base/checks.h" +#include "webrtc/common_audio/fft4g.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +#include "webrtc/modules/audio_processing/ns/noise_suppression.h" +#include "webrtc/modules/audio_processing/ns/ns_core.h" +#include "webrtc/modules/audio_processing/ns/windows_private.h" + +// Set Feature Extraction Parameters. +static void set_feature_extraction_parameters(NoiseSuppressionC* self) { + // Bin size of histogram. + self->featureExtractionParams.binSizeLrt = 0.1f; + self->featureExtractionParams.binSizeSpecFlat = 0.05f; + self->featureExtractionParams.binSizeSpecDiff = 0.1f; + + // Range of histogram over which LRT threshold is computed. 
  self->featureExtractionParams.rangeAvgHistLrt = 1.f;

  // Scale parameters: multiply dominant peaks of the histograms by scale factor
  // to obtain thresholds for prior model.
  // For LRT and spectral difference.
  self->featureExtractionParams.factor1ModelPars = 1.2f;
  // For spectral_flatness: used when noise is flatter than speech.
  self->featureExtractionParams.factor2ModelPars = 0.9f;

  // Peak limit for spectral flatness (varies between 0 and 1).
  self->featureExtractionParams.thresPosSpecFlat = 0.6f;

  // Limit on spacing of two highest peaks in histogram: spacing determined by
  // bin size.
  self->featureExtractionParams.limitPeakSpacingSpecFlat =
      2 * self->featureExtractionParams.binSizeSpecFlat;
  self->featureExtractionParams.limitPeakSpacingSpecDiff =
      2 * self->featureExtractionParams.binSizeSpecDiff;

  // Limit on relevance of second peak.
  self->featureExtractionParams.limitPeakWeightsSpecFlat = 0.5f;
  self->featureExtractionParams.limitPeakWeightsSpecDiff = 0.5f;

  // Fluctuation limit of LRT feature.
  self->featureExtractionParams.thresFluctLrt = 0.05f;

  // Limit on the max and min values for the feature thresholds.
  self->featureExtractionParams.maxLrt = 1.f;
  self->featureExtractionParams.minLrt = 0.2f;

  self->featureExtractionParams.maxSpecFlat = 0.95f;
  self->featureExtractionParams.minSpecFlat = 0.1f;

  self->featureExtractionParams.maxSpecDiff = 1.f;
  self->featureExtractionParams.minSpecDiff = 0.16f;

  // Criteria of weight of histogram peak to accept/reject feature.
  self->featureExtractionParams.thresWeightSpecFlat =
      (int)(0.3 * (self->modelUpdatePars[1]));  // For spectral flatness.
  self->featureExtractionParams.thresWeightSpecDiff =
      (int)(0.3 * (self->modelUpdatePars[1]));  // For spectral difference.
}

// Initialize state.
// Inputs:
//   * |self| is the NS instance to initialize (must be non-NULL).
//   * |fs| is the sampling frequency; only 8000, 16000, 32000 and 48000 Hz
//     are accepted.
// Return value: 0 on success, -1 on NULL instance or unsupported |fs|.
int WebRtcNs_InitCore(NoiseSuppressionC* self, uint32_t fs) {
  int i;
  // Check for valid pointer.
  if (self == NULL) {
    return -1;
  }

  // Initialization of struct.
  if (fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000) {
    self->fs = fs;
  } else {
    return -1;
  }
  self->windShift = 0;
  // We only support 10ms frames.
  if (fs == 8000) {
    self->blockLen = 80;
    self->anaLen = 128;
    self->window = kBlocks80w128;
  } else {
    // 16 kHz and above all use a 160-sample block with a 256-point analysis
    // window (higher bands are handled separately).
    self->blockLen = 160;
    self->anaLen = 256;
    self->window = kBlocks160w256;
  }
  self->magnLen = self->anaLen / 2 + 1;  // Number of frequency bins.

  // Initialize FFT work arrays.
  self->ip[0] = 0;  // Setting this triggers initialization.
  memset(self->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
  WebRtc_rdft(self->anaLen, 1, self->dataBuf, self->ip, self->wfft);

  memset(self->analyzeBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
  memset(self->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
  memset(self->syntBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);

  // For HB processing.
  memset(self->dataBufHB,
         0,
         sizeof(float) * NUM_HIGH_BANDS_MAX * ANAL_BLOCKL_MAX);

  // For quantile noise estimation.
  memset(self->quantile, 0, sizeof(float) * HALF_ANAL_BLOCKL);
  for (i = 0; i < SIMULT * HALF_ANAL_BLOCKL; i++) {
    self->lquantile[i] = 8.f;
    self->density[i] = 0.3f;
  }

  // Stagger the counters of the SIMULT parallel estimators across the
  // startup period so their windows end at different times.
  for (i = 0; i < SIMULT; i++) {
    self->counter[i] =
        (int)floor((float)(END_STARTUP_LONG * (i + 1)) / (float)SIMULT);
  }

  self->updates = 0;

  // Wiener filter initialization.
  for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
    self->smooth[i] = 1.f;
  }

  // Set the aggressiveness: default.
  self->aggrMode = 0;

  // Initialize variables for new method.
  self->priorSpeechProb = 0.5f;  // Prior prob for speech/noise.
  // Previous analyze mag spectrum.
  memset(self->magnPrevAnalyze, 0, sizeof(float) * HALF_ANAL_BLOCKL);
  // Previous process mag spectrum.
  memset(self->magnPrevProcess, 0, sizeof(float) * HALF_ANAL_BLOCKL);
  // Current noise-spectrum.
  memset(self->noise, 0, sizeof(float) * HALF_ANAL_BLOCKL);
  // Previous noise-spectrum.
  memset(self->noisePrev, 0, sizeof(float) * HALF_ANAL_BLOCKL);
  // Conservative noise spectrum estimate.
  memset(self->magnAvgPause, 0, sizeof(float) * HALF_ANAL_BLOCKL);
  // For estimation of HB in second pass.
  memset(self->speechProb, 0, sizeof(float) * HALF_ANAL_BLOCKL);
  // Initial average magnitude spectrum.
  memset(self->initMagnEst, 0, sizeof(float) * HALF_ANAL_BLOCKL);
  for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
    // Smooth LR (same as threshold).
    self->logLrtTimeAvg[i] = LRT_FEATURE_THR;
  }

  // Feature quantities.
  // Spectral flatness (start on threshold).
  self->featureData[0] = SF_FEATURE_THR;
  self->featureData[1] = 0.f;  // Spectral entropy: not used in this version.
  self->featureData[2] = 0.f;  // Spectral variance: not used in this version.
  // Average LRT factor (start on threshold).
  self->featureData[3] = LRT_FEATURE_THR;
  // Spectral template diff (start on threshold).
  self->featureData[4] = SF_FEATURE_THR;
  self->featureData[5] = 0.f;  // Normalization for spectral difference.
  // Window time-average of input magnitude spectrum.
  self->featureData[6] = 0.f;

  // Histogram quantities: used to estimate/update thresholds for features.
  memset(self->histLrt, 0, sizeof(int) * HIST_PAR_EST);
  memset(self->histSpecFlat, 0, sizeof(int) * HIST_PAR_EST);
  memset(self->histSpecDiff, 0, sizeof(int) * HIST_PAR_EST);

  self->blockInd = -1;  // Frame counter.
  // Default threshold for LRT feature.
  self->priorModelPars[0] = LRT_FEATURE_THR;
  // Threshold for spectral flatness: determined on-line.
  self->priorModelPars[1] = 0.5f;
  // sgn_map par for spectral measure: 1 for flatness measure.
  self->priorModelPars[2] = 1.f;
  // Threshold for template-difference feature: determined on-line.
  self->priorModelPars[3] = 0.5f;
  // Default weighting parameter for LRT feature.
  self->priorModelPars[4] = 1.f;
  // Default weighting parameter for spectral flatness feature.
  self->priorModelPars[5] = 0.f;
  // Default weighting parameter for spectral difference feature.
  self->priorModelPars[6] = 0.f;

  // Update flag for parameters:
  // 0 no update, 1 = update once, 2 = update every window.
  self->modelUpdatePars[0] = 2;
  self->modelUpdatePars[1] = 500;  // Window for update.
  // Counter for update of conservative noise spectrum.
  self->modelUpdatePars[2] = 0;
  // Counter if the feature thresholds are updated during the sequence.
  self->modelUpdatePars[3] = self->modelUpdatePars[1];

  self->signalEnergy = 0.0;
  self->sumMagn = 0.0;
  self->whiteNoiseLevel = 0.0;
  self->pinkNoiseNumerator = 0.0;
  self->pinkNoiseExp = 0.0;

  set_feature_extraction_parameters(self);

  // Default mode.
  WebRtcNs_set_policy_core(self, 0);

  self->initFlag = 1;
  return 0;
}

// Estimate noise.
// Quantile-based estimator: SIMULT parallel log-domain quantile trackers per
// frequency bin, staggered in time; the quantile is exported to |noise|.
// Inputs:
//   * |magn| is the current magnitude spectrum (length self->magnLen).
// Output:
//   * |noise| receives the noise magnitude estimate per bin.
static void NoiseEstimation(NoiseSuppressionC* self,
                            float* magn,
                            float* noise) {
  size_t i, s, offset;
  float lmagn[HALF_ANAL_BLOCKL], delta;

  if (self->updates < END_STARTUP_LONG) {
    self->updates++;
  }

  for (i = 0; i < self->magnLen; i++) {
    lmagn[i] = (float)log(magn[i]);
  }

  // Loop over simultaneous estimates.
  for (s = 0; s < SIMULT; s++) {
    offset = s * self->magnLen;

    // newquantest(...)
    for (i = 0; i < self->magnLen; i++) {
      // Compute delta.
      if (self->density[offset + i] > 1.0) {
        delta = FACTOR * 1.f / self->density[offset + i];
      } else {
        delta = FACTOR;
      }

      // Update log quantile estimate.
      if (lmagn[i] > self->lquantile[offset + i]) {
        self->lquantile[offset + i] +=
            QUANTILE * delta / (float)(self->counter[s] + 1);
      } else {
        self->lquantile[offset + i] -=
            (1.f - QUANTILE) * delta / (float)(self->counter[s] + 1);
      }

      // Update density estimate.
      // Only update the density when the sample falls within +/- WIDTH of the
      // current quantile in the log domain.
      if (fabs(lmagn[i] - self->lquantile[offset + i]) < WIDTH) {
        self->density[offset + i] =
            ((float)self->counter[s] * self->density[offset + i] +
             1.f / (2.f * WIDTH)) /
            (float)(self->counter[s] + 1);
      }
    }  // End loop over magnitude spectrum.

    if (self->counter[s] >= END_STARTUP_LONG) {
      self->counter[s] = 0;
      if (self->updates >= END_STARTUP_LONG) {
        for (i = 0; i < self->magnLen; i++) {
          self->quantile[i] = (float)exp(self->lquantile[offset + i]);
        }
      }
    }

    self->counter[s]++;
  }  // End loop over simultaneous estimates.

  // Sequentially update the noise during startup.
  if (self->updates < END_STARTUP_LONG) {
    // Use the last "s" to get noise during startup that differ from zero.
    // NOTE: |offset| intentionally keeps its value from the final loop
    // iteration above (s == SIMULT - 1).
    for (i = 0; i < self->magnLen; i++) {
      self->quantile[i] = (float)exp(self->lquantile[offset + i]);
    }
  }

  for (i = 0; i < self->magnLen; i++) {
    noise[i] = self->quantile[i];
  }
}

// Extract thresholds for feature parameters.
// Histograms are computed over some window size (given by
// self->modelUpdatePars[1]).
// Thresholds and weights are extracted every window.
// |flag| = 0 updates histogram only, |flag| = 1 computes the threshold/weights.
// Threshold and weights are returned in: self->priorModelPars.
static void FeatureParameterExtraction(NoiseSuppressionC* self, int flag) {
  int i, useFeatureSpecFlat, useFeatureSpecDiff, numHistLrt;
  int maxPeak1, maxPeak2;
  int weightPeak1SpecFlat, weightPeak2SpecFlat, weightPeak1SpecDiff,
      weightPeak2SpecDiff;

  float binMid, featureSum;
  float posPeak1SpecFlat, posPeak2SpecFlat, posPeak1SpecDiff, posPeak2SpecDiff;
  float fluctLrt, avgHistLrt, avgSquareHistLrt, avgHistLrtCompl;

  // 3 features: LRT, flatness, difference.
  // lrt_feature = self->featureData[3];
  // flat_feature = self->featureData[0];
  // diff_feature = self->featureData[4];

  // Update histograms.
  if (flag == 0) {
    // Histogram-update pass: bucket each current feature value if it lies in
    // the histogram's range [0, HIST_PAR_EST * binSize).
    // LRT
    if ((self->featureData[3] <
         HIST_PAR_EST * self->featureExtractionParams.binSizeLrt) &&
        (self->featureData[3] >= 0.0)) {
      i = (int)(self->featureData[3] /
                self->featureExtractionParams.binSizeLrt);
      self->histLrt[i]++;
    }
    // Spectral flatness.
    if ((self->featureData[0] <
         HIST_PAR_EST * self->featureExtractionParams.binSizeSpecFlat) &&
        (self->featureData[0] >= 0.0)) {
      i = (int)(self->featureData[0] /
                self->featureExtractionParams.binSizeSpecFlat);
      self->histSpecFlat[i]++;
    }
    // Spectral difference.
    if ((self->featureData[4] <
         HIST_PAR_EST * self->featureExtractionParams.binSizeSpecDiff) &&
        (self->featureData[4] >= 0.0)) {
      i = (int)(self->featureData[4] /
                self->featureExtractionParams.binSizeSpecDiff);
      self->histSpecDiff[i]++;
    }
  }

  // Extract parameters for speech/noise probability.
  if (flag == 1) {
    // LRT feature: compute the average over
    // self->featureExtractionParams.rangeAvgHistLrt.
    avgHistLrt = 0.0;
    avgHistLrtCompl = 0.0;
    avgSquareHistLrt = 0.0;
    numHistLrt = 0;
    for (i = 0; i < HIST_PAR_EST; i++) {
      binMid = ((float)i + 0.5f) * self->featureExtractionParams.binSizeLrt;
      if (binMid <= self->featureExtractionParams.rangeAvgHistLrt) {
        avgHistLrt += self->histLrt[i] * binMid;
        numHistLrt += self->histLrt[i];
      }
      avgSquareHistLrt += self->histLrt[i] * binMid * binMid;
      avgHistLrtCompl += self->histLrt[i] * binMid;
    }
    if (numHistLrt > 0) {
      avgHistLrt = avgHistLrt / ((float)numHistLrt);
    }
    avgHistLrtCompl = avgHistLrtCompl / ((float)self->modelUpdatePars[1]);
    avgSquareHistLrt = avgSquareHistLrt / ((float)self->modelUpdatePars[1]);
    // Fluctuation measure of the LRT feature over the window.
    fluctLrt = avgSquareHistLrt - avgHistLrt * avgHistLrtCompl;
    // Get threshold for LRT feature.
    if (fluctLrt < self->featureExtractionParams.thresFluctLrt) {
      // Very low fluctuation, so likely noise.
      self->priorModelPars[0] = self->featureExtractionParams.maxLrt;
    } else {
      self->priorModelPars[0] =
          self->featureExtractionParams.factor1ModelPars * avgHistLrt;
      // Check if value is within min/max range.
      if (self->priorModelPars[0] < self->featureExtractionParams.minLrt) {
        self->priorModelPars[0] = self->featureExtractionParams.minLrt;
      }
      if (self->priorModelPars[0] > self->featureExtractionParams.maxLrt) {
        self->priorModelPars[0] = self->featureExtractionParams.maxLrt;
      }
    }
    // Done with LRT feature.

    // For spectral flatness and spectral difference: compute the main peaks of
    // histogram.
    maxPeak1 = 0;
    maxPeak2 = 0;
    posPeak1SpecFlat = 0.0;
    posPeak2SpecFlat = 0.0;
    weightPeak1SpecFlat = 0;
    weightPeak2SpecFlat = 0;

    // Peaks for flatness.
    for (i = 0; i < HIST_PAR_EST; i++) {
      binMid =
          (i + 0.5f) * self->featureExtractionParams.binSizeSpecFlat;
      if (self->histSpecFlat[i] > maxPeak1) {
        // Found new "first" peak.
        maxPeak2 = maxPeak1;
        weightPeak2SpecFlat = weightPeak1SpecFlat;
        posPeak2SpecFlat = posPeak1SpecFlat;

        maxPeak1 = self->histSpecFlat[i];
        weightPeak1SpecFlat = self->histSpecFlat[i];
        posPeak1SpecFlat = binMid;
      } else if (self->histSpecFlat[i] > maxPeak2) {
        // Found new "second" peak.
        maxPeak2 = self->histSpecFlat[i];
        weightPeak2SpecFlat = self->histSpecFlat[i];
        posPeak2SpecFlat = binMid;
      }
    }

    // Compute two peaks for spectral difference.
    maxPeak1 = 0;
    maxPeak2 = 0;
    posPeak1SpecDiff = 0.0;
    posPeak2SpecDiff = 0.0;
    weightPeak1SpecDiff = 0;
    weightPeak2SpecDiff = 0;
    // Peaks for spectral difference.
    for (i = 0; i < HIST_PAR_EST; i++) {
      binMid =
          ((float)i + 0.5f) * self->featureExtractionParams.binSizeSpecDiff;
      if (self->histSpecDiff[i] > maxPeak1) {
        // Found new "first" peak.
        maxPeak2 = maxPeak1;
        weightPeak2SpecDiff = weightPeak1SpecDiff;
        posPeak2SpecDiff = posPeak1SpecDiff;

        maxPeak1 = self->histSpecDiff[i];
        weightPeak1SpecDiff = self->histSpecDiff[i];
        posPeak1SpecDiff = binMid;
      } else if (self->histSpecDiff[i] > maxPeak2) {
        // Found new "second" peak.
        maxPeak2 = self->histSpecDiff[i];
        weightPeak2SpecDiff = self->histSpecDiff[i];
        posPeak2SpecDiff = binMid;
      }
    }

    // For spectrum flatness feature.
    useFeatureSpecFlat = 1;
    // Merge the two peaks if they are close.
    if ((fabs(posPeak2SpecFlat - posPeak1SpecFlat) <
         self->featureExtractionParams.limitPeakSpacingSpecFlat) &&
        (weightPeak2SpecFlat >
         self->featureExtractionParams.limitPeakWeightsSpecFlat *
             weightPeak1SpecFlat)) {
      weightPeak1SpecFlat += weightPeak2SpecFlat;
      posPeak1SpecFlat = 0.5f * (posPeak1SpecFlat + posPeak2SpecFlat);
    }
    // Reject if weight of peaks is not large enough, or peak value too small.
    if (weightPeak1SpecFlat <
            self->featureExtractionParams.thresWeightSpecFlat ||
        posPeak1SpecFlat < self->featureExtractionParams.thresPosSpecFlat) {
      useFeatureSpecFlat = 0;
    }
    // If selected, get the threshold.
    if (useFeatureSpecFlat == 1) {
      // Compute the threshold.
      self->priorModelPars[1] =
          self->featureExtractionParams.factor2ModelPars * posPeak1SpecFlat;
      // Check if value is within min/max range.
      if (self->priorModelPars[1] < self->featureExtractionParams.minSpecFlat) {
        self->priorModelPars[1] = self->featureExtractionParams.minSpecFlat;
      }
      if (self->priorModelPars[1] > self->featureExtractionParams.maxSpecFlat) {
        self->priorModelPars[1] = self->featureExtractionParams.maxSpecFlat;
      }
    }
    // Done with flatness feature.

    // For template feature.
    useFeatureSpecDiff = 1;
    // Merge the two peaks if they are close.
    if ((fabs(posPeak2SpecDiff - posPeak1SpecDiff) <
         self->featureExtractionParams.limitPeakSpacingSpecDiff) &&
        (weightPeak2SpecDiff >
         self->featureExtractionParams.limitPeakWeightsSpecDiff *
             weightPeak1SpecDiff)) {
      weightPeak1SpecDiff += weightPeak2SpecDiff;
      posPeak1SpecDiff = 0.5f * (posPeak1SpecDiff + posPeak2SpecDiff);
    }
    // Get the threshold value.
    self->priorModelPars[3] =
        self->featureExtractionParams.factor1ModelPars * posPeak1SpecDiff;
    // Reject if weight of peaks is not large enough.
    if (weightPeak1SpecDiff <
        self->featureExtractionParams.thresWeightSpecDiff) {
      useFeatureSpecDiff = 0;
    }
    // Check if value is within min/max range.
    if (self->priorModelPars[3] < self->featureExtractionParams.minSpecDiff) {
      self->priorModelPars[3] = self->featureExtractionParams.minSpecDiff;
    }
    if (self->priorModelPars[3] > self->featureExtractionParams.maxSpecDiff) {
      self->priorModelPars[3] = self->featureExtractionParams.maxSpecDiff;
    }
    // Done with spectral difference feature.

    // Don't use template feature if fluctuation of LRT feature is very low:
    // most likely just noise state.
    if (fluctLrt < self->featureExtractionParams.thresFluctLrt) {
      useFeatureSpecDiff = 0;
    }

    // Select the weights between the features.
    // self->priorModelPars[4] is weight for LRT: always selected.
    // self->priorModelPars[5] is weight for spectral flatness.
    // self->priorModelPars[6] is weight for spectral difference.
    featureSum = (float)(1 + useFeatureSpecFlat + useFeatureSpecDiff);
    self->priorModelPars[4] = 1.f / featureSum;
    self->priorModelPars[5] = ((float)useFeatureSpecFlat) / featureSum;
    self->priorModelPars[6] = ((float)useFeatureSpecDiff) / featureSum;

    // Set hists to zero for next update.
    if (self->modelUpdatePars[0] >= 1) {
      for (i = 0; i < HIST_PAR_EST; i++) {
        self->histLrt[i] = 0;
        self->histSpecFlat[i] = 0;
        self->histSpecDiff[i] = 0;
      }
    }
  }  // End of flag == 1.
}  // End of FeatureParameterExtraction.

// Compute spectral flatness on input spectrum.
// |magnIn| is the magnitude spectrum.
// Spectral flatness is returned in self->featureData[0].
static void ComputeSpectralFlatness(NoiseSuppressionC* self,
                                    const float* magnIn) {
  size_t i;
  size_t shiftLP = 1;  // Option to remove first bin(s) from spectral measures.
  float avgSpectralFlatnessNum, avgSpectralFlatnessDen, spectralTmp;

  // Compute spectral measures.
  // For flatness.
  avgSpectralFlatnessNum = 0.0;
  // Denominator starts from the cached spectrum sum and drops the skipped
  // low bins.
  avgSpectralFlatnessDen = self->sumMagn;
  for (i = 0; i < shiftLP; i++) {
    avgSpectralFlatnessDen -= magnIn[i];
  }
  // Compute log of ratio of the geometric to arithmetic mean: check for log(0)
  // case.
  for (i = shiftLP; i < self->magnLen; i++) {
    if (magnIn[i] > 0.0) {
      avgSpectralFlatnessNum += (float)log(magnIn[i]);
    } else {
      // A zero bin would make the geometric mean zero: decay the feature
      // towards zero and bail out for this frame.
      self->featureData[0] -= SPECT_FL_TAVG * self->featureData[0];
      return;
    }
  }
  // Normalize.
  avgSpectralFlatnessDen = avgSpectralFlatnessDen / self->magnLen;
  avgSpectralFlatnessNum = avgSpectralFlatnessNum / self->magnLen;

  // Ratio and inverse log: check for case of log(0).
  spectralTmp = (float)exp(avgSpectralFlatnessNum) / avgSpectralFlatnessDen;

  // Time-avg update of spectral flatness feature.
  self->featureData[0] += SPECT_FL_TAVG * (spectralTmp - self->featureData[0]);
  // Done with flatness feature.
}

// Compute prior and post SNR based on quantile noise estimation.
// Compute DD estimate of prior SNR.
// Inputs:
//   * |magn| is the signal magnitude spectrum estimate.
//   * |noise| is the magnitude noise spectrum estimate.
// Outputs:
//   * |snrLocPrior| is the computed prior SNR.
//   * |snrLocPost| is the computed post SNR.
static void ComputeSnr(const NoiseSuppressionC* self,
                       const float* magn,
                       const float* noise,
                       float* snrLocPrior,
                       float* snrLocPost) {
  size_t i;

  for (i = 0; i < self->magnLen; i++) {
    // Previous post SNR.
    // Previous estimate: based on previous frame with gain filter.
    // The 0.0001f terms below guard against division by zero.
    float previousEstimateStsa = self->magnPrevAnalyze[i] /
        (self->noisePrev[i] + 0.0001f) * self->smooth[i];
    // Post SNR.
    snrLocPost[i] = 0.f;
    if (magn[i] > noise[i]) {
      snrLocPost[i] = magn[i] / (noise[i] + 0.0001f) - 1.f;
    }
    // DD estimate is sum of two terms: current estimate and previous estimate.
    // Directed decision update of snrPrior.
    snrLocPrior[i] =
        DD_PR_SNR * previousEstimateStsa + (1.f - DD_PR_SNR) * snrLocPost[i];
  }  // End of loop over frequencies.
}

// Compute the difference measure between input spectrum and a template/learned
// noise spectrum.
// |magnIn| is the input spectrum.
// The reference/template spectrum is self->magnAvgPause[i].
// Returns (normalized) spectral difference in self->featureData[4].
static void ComputeSpectralDifference(NoiseSuppressionC* self,
                                      const float* magnIn) {
  // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 /
  //                   var(magnAvgPause)
  size_t i;
  float avgPause, avgMagn, covMagnPause, varPause, varMagn, avgDiffNormMagn;

  avgPause = 0.0;
  // Average of |magnIn| reuses the spectrum sum cached in self->sumMagn.
  avgMagn = self->sumMagn;
  // Compute average quantities.
  for (i = 0; i < self->magnLen; i++) {
    // Conservative smooth noise spectrum from pause frames.
    avgPause += self->magnAvgPause[i];
  }
  avgPause /= self->magnLen;
  avgMagn /= self->magnLen;

  covMagnPause = 0.0;
  varPause = 0.0;
  varMagn = 0.0;
  // Compute variance and covariance quantities.
  for (i = 0; i < self->magnLen; i++) {
    covMagnPause += (magnIn[i] - avgMagn) * (self->magnAvgPause[i] - avgPause);
    varPause +=
        (self->magnAvgPause[i] - avgPause) * (self->magnAvgPause[i] - avgPause);
    varMagn += (magnIn[i] - avgMagn) * (magnIn[i] - avgMagn);
  }
  covMagnPause /= self->magnLen;
  varPause /= self->magnLen;
  varMagn /= self->magnLen;
  // Update of average magnitude spectrum.
  self->featureData[6] += self->signalEnergy;

  avgDiffNormMagn =
      varMagn - (covMagnPause * covMagnPause) / (varPause + 0.0001f);
  // Normalize and compute time-avg update of difference feature.
  avgDiffNormMagn = (float)(avgDiffNormMagn / (self->featureData[5] + 0.0001f));
  self->featureData[4] +=
      SPECT_DIFF_TAVG * (avgDiffNormMagn - self->featureData[4]);
}

// Compute speech/noise probability.
// Speech/noise probability is returned in |probSpeechFinal|.
// Inputs:
//   * |snrLocPrior| is the prior SNR for each frequency.
//   * |snrLocPost| is the post SNR for each frequency.
// (NOTE(review): upstream comments also described |magn|/|noise| parameters,
// but the signature takes no such arguments; removed the stale lines.)
static void SpeechNoiseProb(NoiseSuppressionC* self,
                            float* probSpeechFinal,
                            const float* snrLocPrior,
                            const float* snrLocPost) {
  size_t i;
  int sgnMap;
  float invLrt, gainPrior, indPrior;
  float logLrtTimeAvgKsum, besselTmp;
  float indicator0, indicator1, indicator2;
  float tmpFloat1, tmpFloat2;
  float weightIndPrior0, weightIndPrior1, weightIndPrior2;
  float threshPrior0, threshPrior1, threshPrior2;
  float widthPrior, widthPrior0, widthPrior1, widthPrior2;

  widthPrior0 = WIDTH_PR_MAP;
  // Width for pause region: lower range, so increase width in tanh map.
  widthPrior1 = 2.f * WIDTH_PR_MAP;
  widthPrior2 = 2.f * WIDTH_PR_MAP;  // For spectral-difference measure.

  // Threshold parameters for features.
  threshPrior0 = self->priorModelPars[0];
  threshPrior1 = self->priorModelPars[1];
  threshPrior2 = self->priorModelPars[3];

  // Sign for flatness feature.
  sgnMap = (int)(self->priorModelPars[2]);

  // Weight parameters for features.
  weightIndPrior0 = self->priorModelPars[4];
  weightIndPrior1 = self->priorModelPars[5];
  weightIndPrior2 = self->priorModelPars[6];

  // Compute feature based on average LR factor.
  // This is the average over all frequencies of the smooth log LRT.
  logLrtTimeAvgKsum = 0.0;
  for (i = 0; i < self->magnLen; i++) {
    tmpFloat1 = 1.f + 2.f * snrLocPrior[i];
    tmpFloat2 = 2.f * snrLocPrior[i] / (tmpFloat1 + 0.0001f);
    besselTmp = (snrLocPost[i] + 1.f) * tmpFloat2;
    self->logLrtTimeAvg[i] +=
        LRT_TAVG * (besselTmp - (float)log(tmpFloat1) - self->logLrtTimeAvg[i]);
    logLrtTimeAvgKsum += self->logLrtTimeAvg[i];
  }
  logLrtTimeAvgKsum = (float)logLrtTimeAvgKsum / (self->magnLen);
  self->featureData[3] = logLrtTimeAvgKsum;
  // Done with computation of LR factor.

  // Compute the indicator functions.
  // Average LRT feature.
  widthPrior = widthPrior0;
  // Use larger width in tanh map for pause regions.
  if (logLrtTimeAvgKsum < threshPrior0) {
    widthPrior = widthPrior1;
  }
  // Compute indicator function: sigmoid map.
  indicator0 =
      0.5f *
      ((float)tanh(widthPrior * (logLrtTimeAvgKsum - threshPrior0)) + 1.f);

  // Spectral flatness feature.
  tmpFloat1 = self->featureData[0];
  widthPrior = widthPrior0;
  // Use larger width in tanh map for pause regions.
  if (sgnMap == 1 && (tmpFloat1 > threshPrior1)) {
    widthPrior = widthPrior1;
  }
  if (sgnMap == -1 && (tmpFloat1 < threshPrior1)) {
    widthPrior = widthPrior1;
  }
  // Compute indicator function: sigmoid map.
  indicator1 =
      0.5f *
      ((float)tanh((float)sgnMap * widthPrior * (threshPrior1 - tmpFloat1)) +
       1.f);

  // For template spectrum-difference.
  tmpFloat1 = self->featureData[4];
  widthPrior = widthPrior0;
  // Use larger width in tanh map for pause regions.
  if (tmpFloat1 < threshPrior2) {
    widthPrior = widthPrior2;
  }
  // Compute indicator function: sigmoid map.
  indicator2 =
      0.5f * ((float)tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.f);

  // Combine the indicator function with the feature weights.
  indPrior = weightIndPrior0 * indicator0 + weightIndPrior1 * indicator1 +
             weightIndPrior2 * indicator2;
  // Done with computing indicator function.

  // Compute the prior probability.
  self->priorSpeechProb += PRIOR_UPDATE * (indPrior - self->priorSpeechProb);
  // Make sure probabilities are within range: keep floor to 0.01.
  if (self->priorSpeechProb > 1.f) {
    self->priorSpeechProb = 1.f;
  }
  if (self->priorSpeechProb < 0.01f) {
    self->priorSpeechProb = 0.01f;
  }

  // Final speech probability: combine prior model with LR factor:.
  gainPrior = (1.f - self->priorSpeechProb) / (self->priorSpeechProb + 0.0001f);
  for (i = 0; i < self->magnLen; i++) {
    invLrt = (float)exp(-self->logLrtTimeAvg[i]);
    invLrt = (float)gainPrior * invLrt;
    probSpeechFinal[i] = 1.f / (1.f + invLrt);
  }
}

// Update the noise features.
// Inputs:
//   * |magn| is the signal magnitude spectrum estimate.
//   * |updateParsFlag| is an update flag for parameters.
static void FeatureUpdate(NoiseSuppressionC* self,
                          const float* magn,
                          int updateParsFlag) {
  // Compute spectral flatness on input spectrum.
  ComputeSpectralFlatness(self, magn);
  // Compute difference of input spectrum with learned/estimated noise spectrum.
  ComputeSpectralDifference(self, magn);
  // Compute histograms for parameter decisions (thresholds and weights for
  // features).
  // Parameters are extracted once every window time.
  // (=self->modelUpdatePars[1])
  if (updateParsFlag >= 1) {
    // Counter update.
    self->modelUpdatePars[3]--;
    // Update histogram.
    if (self->modelUpdatePars[3] > 0) {
      FeatureParameterExtraction(self, 0);
    }
    // Compute model parameters.
    if (self->modelUpdatePars[3] == 0) {
      FeatureParameterExtraction(self, 1);
      self->modelUpdatePars[3] = self->modelUpdatePars[1];
      // If wish to update only once, set flag to zero.
      if (updateParsFlag == 1) {
        self->modelUpdatePars[0] = 0;
      } else {
        // Update every window:
        // Get normalization for spectral difference for next window estimate.
        self->featureData[6] =
            self->featureData[6] / ((float)self->modelUpdatePars[1]);
        self->featureData[5] =
            0.5f * (self->featureData[6] + self->featureData[5]);
        self->featureData[6] = 0.f;
      }
    }
  }
}

// Update the noise estimate.
// Inputs:
//   * |magn| is the signal magnitude spectrum estimate.
//   * |snrLocPrior| is the prior SNR.
//   * |snrLocPost| is the post SNR.
// Output:
//   * |noise| is the updated noise magnitude spectrum estimate.
// NOTE(review): |snrLocPrior| and |snrLocPost| are not referenced in the body
// below — presumably kept for interface symmetry; confirm against callers.
static void UpdateNoiseEstimate(NoiseSuppressionC* self,
                                const float* magn,
                                const float* snrLocPrior,
                                const float* snrLocPost,
                                float* noise) {
  size_t i;
  float probSpeech, probNonSpeech;
  // Time-avg parameter for noise update.
  float gammaNoiseTmp = NOISE_UPDATE;
  float gammaNoiseOld;
  float noiseUpdateTmp;

  for (i = 0; i < self->magnLen; i++) {
    probSpeech = self->speechProb[i];
    probNonSpeech = 1.f - probSpeech;
    // Temporary noise update:
    // Use it for speech frames if update value is less than previous.
    noiseUpdateTmp = gammaNoiseTmp * self->noisePrev[i] +
                     (1.f - gammaNoiseTmp) * (probNonSpeech * magn[i] +
                                              probSpeech * self->noisePrev[i]);
    // Time-constant based on speech/noise state.
    gammaNoiseOld = gammaNoiseTmp;
    gammaNoiseTmp = NOISE_UPDATE;
    // Increase gamma (i.e., less noise update) for frame likely to be speech.
    if (probSpeech > PROB_RANGE) {
      gammaNoiseTmp = SPEECH_UPDATE;
    }
    // Conservative noise update.
    if (probSpeech < PROB_RANGE) {
      self->magnAvgPause[i] += GAMMA_PAUSE * (magn[i] - self->magnAvgPause[i]);
    }
    // Noise update.
    if (gammaNoiseTmp == gammaNoiseOld) {
      noise[i] = noiseUpdateTmp;
    } else {
      noise[i] = gammaNoiseTmp * self->noisePrev[i] +
                 (1.f - gammaNoiseTmp) * (probNonSpeech * magn[i] +
                                          probSpeech * self->noisePrev[i]);
      // Allow for noise update downwards:
      // If noise update decreases the noise, it is safe, so allow it to
      // happen.
      if (noiseUpdateTmp < noise[i]) {
        noise[i] = noiseUpdateTmp;
      }
    }
  }  // End of freq loop.
}

// Updates |buffer| with a new |frame|: shifts the buffer left by
// |frame_length| samples and appends the new frame (sliding analysis window).
// Inputs:
//   * |frame| is a new speech frame or NULL for setting to zero.
//   * |frame_length| is the length of the new frame.
//   * |buffer_length| is the length of the buffer.
// Output:
//   * |buffer| is the updated buffer.
static void UpdateBuffer(const float* frame,
                         size_t frame_length,
                         size_t buffer_length,
                         float* buffer) {
  RTC_DCHECK_LT(buffer_length, 2 * frame_length);

  memcpy(buffer,
         buffer + frame_length,
         sizeof(*buffer) * (buffer_length - frame_length));
  if (frame) {
    memcpy(buffer + buffer_length - frame_length,
           frame,
           sizeof(*buffer) * frame_length);
  } else {
    memset(buffer + buffer_length - frame_length,
           0,
           sizeof(*buffer) * frame_length);
  }
}

// Transforms the signal from time to frequency domain.
// Inputs:
//   * |time_data| is the signal in the time domain.
//   * |time_data_length| is the length of the analysis buffer.
//   * |magnitude_length| is the length of the spectrum magnitude, which equals
//     the length of both |real| and |imag| (time_data_length / 2 + 1).
// Outputs:
//   * |time_data| is the signal in the frequency domain.
//   * |real| is the real part of the frequency domain.
//   * |imag| is the imaginary part of the frequency domain.
//   * |magn| is the calculated signal magnitude in the frequency domain.
static void FFT(NoiseSuppressionC* self,
                float* time_data,
                size_t time_data_length,
                size_t magnitude_length,
                float* real,
                float* imag,
                float* magn) {
  size_t i;

  RTC_DCHECK_EQ(magnitude_length, time_data_length / 2 + 1);

  WebRtc_rdft(time_data_length, 1, time_data, self->ip, self->wfft);

  // rdft packs the purely-real DC and Nyquist bins into time_data[0] and
  // time_data[1]; unpack them with zero imaginary parts.
  imag[0] = 0;
  real[0] = time_data[0];
  magn[0] = fabsf(real[0]) + 1.f;
  imag[magnitude_length - 1] = 0;
  real[magnitude_length - 1] = time_data[1];
  magn[magnitude_length - 1] = fabsf(real[magnitude_length - 1]) + 1.f;
  for (i = 1; i < magnitude_length - 1; ++i) {
    real[i] = time_data[2 * i];
    imag[i] = time_data[2 * i + 1];
    // Magnitude spectrum (offset by 1 to keep it strictly positive).
    magn[i] = sqrtf(real[i] * real[i] + imag[i] * imag[i]) + 1.f;
  }
}

// Transforms the signal from frequency to time domain.
// Inputs:
//   * |real| is the real part of the frequency domain.
//   * |imag| is the imaginary part of the frequency domain.
//   * |magnitude_length| is the length of the spectrum magnitude, which equals
//     the length of both |real| and |imag|.
//   * |time_data_length| is the length of the analysis buffer
//     (2 * (magnitude_length - 1)).
// Output:
//   * |time_data| is the signal in the time domain.
static void IFFT(NoiseSuppressionC* self,
                 const float* real,
                 const float* imag,
                 size_t magnitude_length,
                 size_t time_data_length,
                 float* time_data) {
  size_t i;

  RTC_DCHECK_EQ(time_data_length, 2 * (magnitude_length - 1));

  // Re-pack DC and Nyquist into the first two slots (inverse of FFT above).
  time_data[0] = real[0];
  time_data[1] = real[magnitude_length - 1];
  for (i = 1; i < magnitude_length - 1; ++i) {
    time_data[2 * i] = real[i];
    time_data[2 * i + 1] = imag[i];
  }
  WebRtc_rdft(time_data_length, -1, time_data, self->ip, self->wfft);

  for (i = 0; i < time_data_length; ++i) {
    time_data[i] *= 2.f / time_data_length;  // FFT scaling.
  }
}

// Calculates the energy of a buffer.
// Inputs:
//   * |buffer| is the buffer over which the energy is calculated.
//   * |length| is the length of the buffer.
+// Returns the calculated energy. +static float Energy(const float* buffer, size_t length) { + size_t i; + float energy = 0.f; + + for (i = 0; i < length; ++i) { + energy += buffer[i] * buffer[i]; + } + + return energy; +} + +// Windows a buffer. +// Inputs: +// * |window| is the window by which to multiply. +// * |data| is the data without windowing. +// * |length| is the length of the window and data. +// Output: +// * |data_windowed| is the windowed data. +static void Windowing(const float* window, + const float* data, + size_t length, + float* data_windowed) { + size_t i; + + for (i = 0; i < length; ++i) { + data_windowed[i] = window[i] * data[i]; + } +} + +// Estimate prior SNR decision-directed and compute DD based Wiener Filter. +// Input: +// * |magn| is the signal magnitude spectrum estimate. +// Output: +// * |theFilter| is the frequency response of the computed Wiener filter. +static void ComputeDdBasedWienerFilter(const NoiseSuppressionC* self, + const float* magn, + float* theFilter) { + size_t i; + float snrPrior, previousEstimateStsa, currentEstimateStsa; + + for (i = 0; i < self->magnLen; i++) { + // Previous estimate: based on previous frame with gain filter. + previousEstimateStsa = self->magnPrevProcess[i] / + (self->noisePrev[i] + 0.0001f) * self->smooth[i]; + // Post and prior SNR. + currentEstimateStsa = 0.f; + if (magn[i] > self->noise[i]) { + currentEstimateStsa = magn[i] / (self->noise[i] + 0.0001f) - 1.f; + } + // DD estimate is sum of two terms: current estimate and previous estimate. + // Directed decision update of |snrPrior|. + snrPrior = DD_PR_SNR * previousEstimateStsa + + (1.f - DD_PR_SNR) * currentEstimateStsa; + // Gain filter. + theFilter[i] = snrPrior / (self->overdrive + snrPrior); + } // End of loop over frequencies. +} + +// Changes the aggressiveness of the noise suppression method. +// |mode| = 0 is mild (6dB), |mode| = 1 is medium (10dB) and |mode| = 2 is +// aggressive (15dB). +// Returns 0 on success and -1 otherwise. 
+int WebRtcNs_set_policy_core(NoiseSuppressionC* self, int mode) { + // Allow for modes: 0, 1, 2, 3. + if (mode < 0 || mode > 3) { + return (-1); + } + + self->aggrMode = mode; + if (mode == 0) { + self->overdrive = 1.f; + self->denoiseBound = 0.5f; + self->gainmap = 0; + } else if (mode == 1) { + // self->overdrive = 1.25f; + self->overdrive = 1.f; + self->denoiseBound = 0.25f; + self->gainmap = 1; + } else if (mode == 2) { + // self->overdrive = 1.25f; + self->overdrive = 1.1f; + self->denoiseBound = 0.125f; + self->gainmap = 1; + } else if (mode == 3) { + // self->overdrive = 1.3f; + self->overdrive = 1.25f; + self->denoiseBound = 0.09f; + self->gainmap = 1; + } + return 0; +} + +void WebRtcNs_AnalyzeCore(NoiseSuppressionC* self, const float* speechFrame) { + size_t i; + const size_t kStartBand = 5; // Skip first frequency bins during estimation. + int updateParsFlag; + float energy; + float signalEnergy = 0.f; + float sumMagn = 0.f; + float tmpFloat1, tmpFloat2, tmpFloat3; + float winData[ANAL_BLOCKL_MAX]; + float magn[HALF_ANAL_BLOCKL], noise[HALF_ANAL_BLOCKL]; + float snrLocPost[HALF_ANAL_BLOCKL], snrLocPrior[HALF_ANAL_BLOCKL]; + float real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL]; + // Variables during startup. + float sum_log_i = 0.0; + float sum_log_i_square = 0.0; + float sum_log_magn = 0.0; + float sum_log_i_log_magn = 0.0; + float parametric_exp = 0.0; + float parametric_num = 0.0; + + // Check that initiation has been done. + RTC_DCHECK_EQ(1, self->initFlag); + updateParsFlag = self->modelUpdatePars[0]; + + // Update analysis buffer for L band. + UpdateBuffer(speechFrame, self->blockLen, self->anaLen, self->analyzeBuf); + + Windowing(self->window, self->analyzeBuf, self->anaLen, winData); + energy = Energy(winData, self->anaLen); + if (energy == 0.0) { + // We want to avoid updating statistics in this case: + // Updating feature statistics when we have zeros only will cause + // thresholds to move towards zero signal situations. 
This in turn has the + // effect that once the signal is "turned on" (non-zero values) everything + // will be treated as speech and there is no noise suppression effect. + // Depending on the duration of the inactive signal it takes a + // considerable amount of time for the system to learn what is noise and + // what is speech. + return; + } + + self->blockInd++; // Update the block index only when we process a block. + + FFT(self, winData, self->anaLen, self->magnLen, real, imag, magn); + + for (i = 0; i < self->magnLen; i++) { + signalEnergy += real[i] * real[i] + imag[i] * imag[i]; + sumMagn += magn[i]; + if (self->blockInd < END_STARTUP_SHORT) { + if (i >= kStartBand) { + tmpFloat2 = logf((float)i); + sum_log_i += tmpFloat2; + sum_log_i_square += tmpFloat2 * tmpFloat2; + tmpFloat1 = logf(magn[i]); + sum_log_magn += tmpFloat1; + sum_log_i_log_magn += tmpFloat2 * tmpFloat1; + } + } + } + signalEnergy /= self->magnLen; + self->signalEnergy = signalEnergy; + self->sumMagn = sumMagn; + + // Quantile noise estimate. + NoiseEstimation(self, magn, noise); + // Compute simplified noise model during startup. + if (self->blockInd < END_STARTUP_SHORT) { + // Estimate White noise. + self->whiteNoiseLevel += sumMagn / self->magnLen * self->overdrive; + // Estimate Pink noise parameters. + tmpFloat1 = sum_log_i_square * (self->magnLen - kStartBand); + tmpFloat1 -= (sum_log_i * sum_log_i); + tmpFloat2 = + (sum_log_i_square * sum_log_magn - sum_log_i * sum_log_i_log_magn); + tmpFloat3 = tmpFloat2 / tmpFloat1; + // Constrain the estimated spectrum to be positive. + if (tmpFloat3 < 0.f) { + tmpFloat3 = 0.f; + } + self->pinkNoiseNumerator += tmpFloat3; + tmpFloat2 = (sum_log_i * sum_log_magn); + tmpFloat2 -= (self->magnLen - kStartBand) * sum_log_i_log_magn; + tmpFloat3 = tmpFloat2 / tmpFloat1; + // Constrain the pink noise power to be in the interval [0, 1]. 
+ if (tmpFloat3 < 0.f) { + tmpFloat3 = 0.f; + } + if (tmpFloat3 > 1.f) { + tmpFloat3 = 1.f; + } + self->pinkNoiseExp += tmpFloat3; + + // Calculate frequency independent parts of parametric noise estimate. + if (self->pinkNoiseExp > 0.f) { + // Use pink noise estimate. + parametric_num = + expf(self->pinkNoiseNumerator / (float)(self->blockInd + 1)); + parametric_num *= (float)(self->blockInd + 1); + parametric_exp = self->pinkNoiseExp / (float)(self->blockInd + 1); + } + for (i = 0; i < self->magnLen; i++) { + // Estimate the background noise using the white and pink noise + // parameters. + if (self->pinkNoiseExp == 0.f) { + // Use white noise estimate. + self->parametricNoise[i] = self->whiteNoiseLevel; + } else { + // Use pink noise estimate. + float use_band = (float)(i < kStartBand ? kStartBand : i); + self->parametricNoise[i] = + parametric_num / powf(use_band, parametric_exp); + } + // Weight quantile noise with modeled noise. + noise[i] *= (self->blockInd); + tmpFloat2 = + self->parametricNoise[i] * (END_STARTUP_SHORT - self->blockInd); + noise[i] += (tmpFloat2 / (float)(self->blockInd + 1)); + noise[i] /= END_STARTUP_SHORT; + } + } + // Compute average signal during END_STARTUP_LONG time: + // used to normalize spectral difference measure. + if (self->blockInd < END_STARTUP_LONG) { + self->featureData[5] *= self->blockInd; + self->featureData[5] += signalEnergy; + self->featureData[5] /= (self->blockInd + 1); + } + + // Post and prior SNR needed for SpeechNoiseProb. + ComputeSnr(self, magn, noise, snrLocPrior, snrLocPost); + + FeatureUpdate(self, magn, updateParsFlag); + SpeechNoiseProb(self, self->speechProb, snrLocPrior, snrLocPost); + UpdateNoiseEstimate(self, magn, snrLocPrior, snrLocPost, noise); + + // Keep track of noise spectrum for next frame. 
+ memcpy(self->noise, noise, sizeof(*noise) * self->magnLen); + memcpy(self->magnPrevAnalyze, magn, sizeof(*magn) * self->magnLen); +} + +void WebRtcNs_ProcessCore(NoiseSuppressionC* self, + const float* const* speechFrame, + size_t num_bands, + float* const* outFrame) { + // Main routine for noise reduction. + int flagHB = 0; + size_t i, j; + + float energy1, energy2, gain, factor, factor1, factor2; + float fout[BLOCKL_MAX]; + float winData[ANAL_BLOCKL_MAX]; + float magn[HALF_ANAL_BLOCKL]; + float theFilter[HALF_ANAL_BLOCKL], theFilterTmp[HALF_ANAL_BLOCKL]; + float real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL]; + + // SWB variables. + int deltaBweHB = 1; + int deltaGainHB = 1; + float decayBweHB = 1.0; + float gainMapParHB = 1.0; + float gainTimeDomainHB = 1.0; + float avgProbSpeechHB, avgProbSpeechHBTmp, avgFilterGainHB, gainModHB; + float sumMagnAnalyze, sumMagnProcess; + + // Check that initiation has been done. + RTC_DCHECK_EQ(1, self->initFlag); + RTC_DCHECK_LE(num_bands - 1, NUM_HIGH_BANDS_MAX); + + const float* const* speechFrameHB = NULL; + float* const* outFrameHB = NULL; + size_t num_high_bands = 0; + if (num_bands > 1) { + speechFrameHB = &speechFrame[1]; + outFrameHB = &outFrame[1]; + num_high_bands = num_bands - 1; + flagHB = 1; + // Range for averaging low band quantities for H band gain. + deltaBweHB = (int)self->magnLen / 4; + deltaGainHB = deltaBweHB; + } + + // Update analysis buffer for L band. + UpdateBuffer(speechFrame[0], self->blockLen, self->anaLen, self->dataBuf); + + if (flagHB == 1) { + // Update analysis buffer for H bands. + for (i = 0; i < num_high_bands; ++i) { + UpdateBuffer(speechFrameHB[i], + self->blockLen, + self->anaLen, + self->dataBufHB[i]); + } + } + + Windowing(self->window, self->dataBuf, self->anaLen, winData); + energy1 = Energy(winData, self->anaLen); + if (energy1 == 0.0) { + // Synthesize the special case of zero input. + // Read out fully processed segment. 
+ for (i = self->windShift; i < self->blockLen + self->windShift; i++) { + fout[i - self->windShift] = self->syntBuf[i]; + } + // Update synthesis buffer. + UpdateBuffer(NULL, self->blockLen, self->anaLen, self->syntBuf); + + for (i = 0; i < self->blockLen; ++i) + outFrame[0][i] = + WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN); + + // For time-domain gain of HB. + if (flagHB == 1) { + for (i = 0; i < num_high_bands; ++i) { + for (j = 0; j < self->blockLen; ++j) { + outFrameHB[i][j] = WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, + self->dataBufHB[i][j], + WEBRTC_SPL_WORD16_MIN); + } + } + } + + return; + } + + FFT(self, winData, self->anaLen, self->magnLen, real, imag, magn); + + if (self->blockInd < END_STARTUP_SHORT) { + for (i = 0; i < self->magnLen; i++) { + self->initMagnEst[i] += magn[i]; + } + } + + ComputeDdBasedWienerFilter(self, magn, theFilter); + + for (i = 0; i < self->magnLen; i++) { + // Flooring bottom. + if (theFilter[i] < self->denoiseBound) { + theFilter[i] = self->denoiseBound; + } + // Flooring top. + if (theFilter[i] > 1.f) { + theFilter[i] = 1.f; + } + if (self->blockInd < END_STARTUP_SHORT) { + theFilterTmp[i] = + (self->initMagnEst[i] - self->overdrive * self->parametricNoise[i]); + theFilterTmp[i] /= (self->initMagnEst[i] + 0.0001f); + // Flooring bottom. + if (theFilterTmp[i] < self->denoiseBound) { + theFilterTmp[i] = self->denoiseBound; + } + // Flooring top. + if (theFilterTmp[i] > 1.f) { + theFilterTmp[i] = 1.f; + } + // Weight the two suppression filters. + theFilter[i] *= (self->blockInd); + theFilterTmp[i] *= (END_STARTUP_SHORT - self->blockInd); + theFilter[i] += theFilterTmp[i]; + theFilter[i] /= (END_STARTUP_SHORT); + } + + self->smooth[i] = theFilter[i]; + real[i] *= self->smooth[i]; + imag[i] *= self->smooth[i]; + } + // Keep track of |magn| spectrum for next frame. 
+ memcpy(self->magnPrevProcess, magn, sizeof(*magn) * self->magnLen); + memcpy(self->noisePrev, self->noise, sizeof(self->noise[0]) * self->magnLen); + // Back to time domain. + IFFT(self, real, imag, self->magnLen, self->anaLen, winData); + + // Scale factor: only do it after END_STARTUP_LONG time. + factor = 1.f; + if (self->gainmap == 1 && self->blockInd > END_STARTUP_LONG) { + factor1 = 1.f; + factor2 = 1.f; + + energy2 = Energy(winData, self->anaLen); + gain = (float)sqrt(energy2 / (energy1 + 1.f)); + + // Scaling for new version. + if (gain > B_LIM) { + factor1 = 1.f + 1.3f * (gain - B_LIM); + if (gain * factor1 > 1.f) { + factor1 = 1.f / gain; + } + } + if (gain < B_LIM) { + // Don't reduce scale too much for pause regions: + // attenuation here should be controlled by flooring. + if (gain <= self->denoiseBound) { + gain = self->denoiseBound; + } + factor2 = 1.f - 0.3f * (B_LIM - gain); + } + // Combine both scales with speech/noise prob: + // note prior (priorSpeechProb) is not frequency dependent. + factor = self->priorSpeechProb * factor1 + + (1.f - self->priorSpeechProb) * factor2; + } // Out of self->gainmap == 1. + + Windowing(self->window, winData, self->anaLen, winData); + + // Synthesis. + for (i = 0; i < self->anaLen; i++) { + self->syntBuf[i] += factor * winData[i]; + } + // Read out fully processed segment. + for (i = self->windShift; i < self->blockLen + self->windShift; i++) { + fout[i - self->windShift] = self->syntBuf[i]; + } + // Update synthesis buffer. + UpdateBuffer(NULL, self->blockLen, self->anaLen, self->syntBuf); + + for (i = 0; i < self->blockLen; ++i) + outFrame[0][i] = + WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN); + + // For time-domain gain of HB. + if (flagHB == 1) { + // Average speech prob from low band. + // Average over second half (i.e., 4->8kHz) of frequencies spectrum. 
+ avgProbSpeechHB = 0.0; + for (i = self->magnLen - deltaBweHB - 1; i < self->magnLen - 1; i++) { + avgProbSpeechHB += self->speechProb[i]; + } + avgProbSpeechHB = avgProbSpeechHB / ((float)deltaBweHB); + // If the speech was suppressed by a component between Analyze and + // Process, for example the AEC, then it should not be considered speech + // for high band suppression purposes. + sumMagnAnalyze = 0; + sumMagnProcess = 0; + for (i = 0; i < self->magnLen; ++i) { + sumMagnAnalyze += self->magnPrevAnalyze[i]; + sumMagnProcess += self->magnPrevProcess[i]; + } + avgProbSpeechHB *= sumMagnProcess / sumMagnAnalyze; + // Average filter gain from low band. + // Average over second half (i.e., 4->8kHz) of frequencies spectrum. + avgFilterGainHB = 0.0; + for (i = self->magnLen - deltaGainHB - 1; i < self->magnLen - 1; i++) { + avgFilterGainHB += self->smooth[i]; + } + avgFilterGainHB = avgFilterGainHB / ((float)(deltaGainHB)); + avgProbSpeechHBTmp = 2.f * avgProbSpeechHB - 1.f; + // Gain based on speech probability. + gainModHB = 0.5f * (1.f + (float)tanh(gainMapParHB * avgProbSpeechHBTmp)); + // Combine gain with low band gain. + gainTimeDomainHB = 0.5f * gainModHB + 0.5f * avgFilterGainHB; + if (avgProbSpeechHB >= 0.5f) { + gainTimeDomainHB = 0.25f * gainModHB + 0.75f * avgFilterGainHB; + } + gainTimeDomainHB = gainTimeDomainHB * decayBweHB; + // Make sure gain is within flooring range. + // Flooring bottom. + if (gainTimeDomainHB < self->denoiseBound) { + gainTimeDomainHB = self->denoiseBound; + } + // Flooring top. + if (gainTimeDomainHB > 1.f) { + gainTimeDomainHB = 1.f; + } + // Apply gain. + for (i = 0; i < num_high_bands; ++i) { + for (j = 0; j < self->blockLen; j++) { + outFrameHB[i][j] = + WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, + gainTimeDomainHB * self->dataBufHB[i][j], + WEBRTC_SPL_WORD16_MIN); + } + } + } // End of H band gain computation. 
+} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.h new file mode 100644 index 000000000..aba1c468e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/ns_core.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_NS_CORE_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_NS_CORE_H_ + +#include "webrtc/modules/audio_processing/ns/defines.h" + +typedef struct NSParaExtract_ { + // Bin size of histogram. + float binSizeLrt; + float binSizeSpecFlat; + float binSizeSpecDiff; + // Range of histogram over which LRT threshold is computed. + float rangeAvgHistLrt; + // Scale parameters: multiply dominant peaks of the histograms by scale factor + // to obtain thresholds for prior model. + float factor1ModelPars; // For LRT and spectral difference. + float factor2ModelPars; // For spectral_flatness: used when noise is flatter + // than speech. + // Peak limit for spectral flatness (varies between 0 and 1). + float thresPosSpecFlat; + // Limit on spacing of two highest peaks in histogram: spacing determined by + // bin size. + float limitPeakSpacingSpecFlat; + float limitPeakSpacingSpecDiff; + // Limit on relevance of second peak. + float limitPeakWeightsSpecFlat; + float limitPeakWeightsSpecDiff; + // Limit on fluctuation of LRT feature. + float thresFluctLrt; + // Limit on the max and min values for the feature thresholds. 
+ float maxLrt; + float minLrt; + float maxSpecFlat; + float minSpecFlat; + float maxSpecDiff; + float minSpecDiff; + // Criteria of weight of histogram peak to accept/reject feature. + int thresWeightSpecFlat; + int thresWeightSpecDiff; + +} NSParaExtract; + +typedef struct NoiseSuppressionC_ { + uint32_t fs; + size_t blockLen; + size_t windShift; + size_t anaLen; + size_t magnLen; + int aggrMode; + const float* window; + float analyzeBuf[ANAL_BLOCKL_MAX]; + float dataBuf[ANAL_BLOCKL_MAX]; + float syntBuf[ANAL_BLOCKL_MAX]; + + int initFlag; + // Parameters for quantile noise estimation. + float density[SIMULT * HALF_ANAL_BLOCKL]; + float lquantile[SIMULT * HALF_ANAL_BLOCKL]; + float quantile[HALF_ANAL_BLOCKL]; + int counter[SIMULT]; + int updates; + // Parameters for Wiener filter. + float smooth[HALF_ANAL_BLOCKL]; + float overdrive; + float denoiseBound; + int gainmap; + // FFT work arrays. + size_t ip[IP_LENGTH]; + float wfft[W_LENGTH]; + + // Parameters for new method: some not needed, will reduce/cleanup later. + int32_t blockInd; // Frame index counter. + int modelUpdatePars[4]; // Parameters for updating or estimating. + // Thresholds/weights for prior model. + float priorModelPars[7]; // Parameters for prior model. + float noise[HALF_ANAL_BLOCKL]; // Noise spectrum from current frame. + float noisePrev[HALF_ANAL_BLOCKL]; // Noise spectrum from previous frame. + // Magnitude spectrum of previous analyze frame. + float magnPrevAnalyze[HALF_ANAL_BLOCKL]; + // Magnitude spectrum of previous process frame. + float magnPrevProcess[HALF_ANAL_BLOCKL]; + float logLrtTimeAvg[HALF_ANAL_BLOCKL]; // Log LRT factor with time-smoothing. + float priorSpeechProb; // Prior speech/noise probability. + float featureData[7]; + // Conservative noise spectrum estimate. + float magnAvgPause[HALF_ANAL_BLOCKL]; + float signalEnergy; // Energy of |magn|. + float sumMagn; + float whiteNoiseLevel; // Initial noise estimate. 
+ float initMagnEst[HALF_ANAL_BLOCKL]; // Initial magnitude spectrum estimate. + float pinkNoiseNumerator; // Pink noise parameter: numerator. + float pinkNoiseExp; // Pink noise parameter: power of frequencies. + float parametricNoise[HALF_ANAL_BLOCKL]; + // Parameters for feature extraction. + NSParaExtract featureExtractionParams; + // Histograms for parameter estimation. + int histLrt[HIST_PAR_EST]; + int histSpecFlat[HIST_PAR_EST]; + int histSpecDiff[HIST_PAR_EST]; + // Quantities for high band estimate. + float speechProb[HALF_ANAL_BLOCKL]; // Final speech/noise prob: prior + LRT. + // Buffering data for HB. + float dataBufHB[NUM_HIGH_BANDS_MAX][ANAL_BLOCKL_MAX]; + +} NoiseSuppressionC; + +#ifdef __cplusplus +extern "C" { +#endif + +/**************************************************************************** + * WebRtcNs_InitCore(...) + * + * This function initializes a noise suppression instance + * + * Input: + * - self : Instance that should be initialized + * - fs : Sampling frequency + * + * Output: + * - self : Initialized instance + * + * Return value : 0 - Ok + * -1 - Error + */ +int WebRtcNs_InitCore(NoiseSuppressionC* self, uint32_t fs); + +/**************************************************************************** + * WebRtcNs_set_policy_core(...) + * + * This changes the aggressiveness of the noise suppression method. + * + * Input: + * - self : Instance that should be initialized + * - mode : 0: Mild (6dB), 1: Medium (10dB), 2: Aggressive (15dB) + * + * Output: + * - self : Initialized instance + * + * Return value : 0 - Ok + * -1 - Error + */ +int WebRtcNs_set_policy_core(NoiseSuppressionC* self, int mode); + +/**************************************************************************** + * WebRtcNs_AnalyzeCore + * + * Estimate the background noise. 
+ * + * Input: + * - self : Instance that should be initialized + * - speechFrame : Input speech frame for lower band + * + * Output: + * - self : Updated instance + */ +void WebRtcNs_AnalyzeCore(NoiseSuppressionC* self, const float* speechFrame); + +/**************************************************************************** + * WebRtcNs_ProcessCore + * + * Do noise suppression. + * + * Input: + * - self : Instance that should be initialized + * - inFrame : Input speech frame for each band + * - num_bands : Number of bands + * + * Output: + * - self : Updated instance + * - outFrame : Output speech frame for each band + */ +void WebRtcNs_ProcessCore(NoiseSuppressionC* self, + const float* const* inFrame, + size_t num_bands, + float* const* outFrame); + +#ifdef __cplusplus +} +#endif +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_NS_CORE_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.c b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.c new file mode 100644 index 000000000..c58fc39ba --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.c @@ -0,0 +1,2107 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h" + +#include +#include +#include + +#include "webrtc/base/checks.h" +#include "webrtc/common_audio/signal_processing/include/real_fft.h" +#include "webrtc/modules/audio_processing/ns/nsx_core.h" +#include "webrtc/system_wrappers/include/cpu_features_wrapper.h" + +#if defined(WEBRTC_HAS_NEON) +/* Tables are defined in ARM assembly files. */ +extern const int16_t WebRtcNsx_kLogTable[9]; +extern const int16_t WebRtcNsx_kCounterDiv[201]; +extern const int16_t WebRtcNsx_kLogTableFrac[256]; +#else +static const int16_t WebRtcNsx_kLogTable[9] = { + 0, 177, 355, 532, 710, 887, 1065, 1242, 1420 +}; + +static const int16_t WebRtcNsx_kCounterDiv[201] = { + 32767, 16384, 10923, 8192, 6554, 5461, 4681, 4096, 3641, 3277, 2979, 2731, + 2521, 2341, 2185, 2048, 1928, 1820, 1725, 1638, 1560, 1489, 1425, 1365, 1311, + 1260, 1214, 1170, 1130, 1092, 1057, 1024, 993, 964, 936, 910, 886, 862, 840, + 819, 799, 780, 762, 745, 728, 712, 697, 683, 669, 655, 643, 630, 618, 607, + 596, 585, 575, 565, 555, 546, 537, 529, 520, 512, 504, 496, 489, 482, 475, + 468, 462, 455, 449, 443, 437, 431, 426, 420, 415, 410, 405, 400, 395, 390, + 386, 381, 377, 372, 368, 364, 360, 356, 352, 349, 345, 341, 338, 334, 331, + 328, 324, 321, 318, 315, 312, 309, 306, 303, 301, 298, 295, 293, 290, 287, + 285, 282, 280, 278, 275, 273, 271, 269, 266, 264, 262, 260, 258, 256, 254, + 252, 250, 248, 246, 245, 243, 241, 239, 237, 236, 234, 232, 231, 229, 228, + 226, 224, 223, 221, 220, 218, 217, 216, 214, 213, 211, 210, 209, 207, 206, + 205, 204, 202, 201, 200, 199, 197, 196, 195, 194, 193, 192, 191, 189, 188, + 187, 186, 185, 184, 183, 182, 181, 180, 179, 178, 177, 176, 175, 174, 173, + 172, 172, 171, 170, 169, 168, 167, 166, 165, 165, 164, 163 +}; + +static const int16_t WebRtcNsx_kLogTableFrac[256] = { + 0, 1, 3, 4, 6, 7, 9, 10, 11, 13, 14, 16, 17, 18, 20, 21, + 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, + 44, 45, 46, 47, 
49, 50, 51, 52, 54, 55, 56, 57, 59, 60, 61, 62, + 63, 65, 66, 67, 68, 69, 71, 72, 73, 74, 75, 77, 78, 79, 80, 81, + 82, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 178, 179, 180, 181, 182, 183, 184, 185, 185, 186, 187, + 188, 189, 190, 191, 192, 192, 193, 194, 195, 196, 197, 198, 198, 199, 200, + 201, 202, 203, 203, 204, 205, 206, 207, 208, 208, 209, 210, 211, 212, 212, + 213, 214, 215, 216, 216, 217, 218, 219, 220, 220, 221, 222, 223, 224, 224, + 225, 226, 227, 228, 228, 229, 230, 231, 231, 232, 233, 234, 234, 235, 236, + 237, 238, 238, 239, 240, 241, 241, 242, 243, 244, 244, 245, 246, 247, 247, + 248, 249, 249, 250, 251, 252, 252, 253, 254, 255, 255 +}; +#endif // WEBRTC_HAS_NEON + +// Skip first frequency bins during estimation. 
(0 <= value < 64) +static const size_t kStartBand = 5; + +// hybrib Hanning & flat window +static const int16_t kBlocks80w128x[128] = { + 0, 536, 1072, 1606, 2139, 2669, 3196, 3720, 4240, 4756, 5266, + 5771, 6270, 6762, 7246, 7723, 8192, 8652, 9102, 9543, 9974, 10394, + 10803, 11200, 11585, 11958, 12318, 12665, 12998, 13318, 13623, 13913, 14189, + 14449, 14694, 14924, 15137, 15334, 15515, 15679, 15826, 15956, 16069, 16165, + 16244, 16305, 16349, 16375, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 16384, 16375, 16349, 16305, 16244, 16165, 16069, 15956, + 15826, 15679, 15515, 15334, 15137, 14924, 14694, 14449, 14189, 13913, 13623, + 13318, 12998, 12665, 12318, 11958, 11585, 11200, 10803, 10394, 9974, 9543, + 9102, 8652, 8192, 7723, 7246, 6762, 6270, 5771, 5266, 4756, 4240, + 3720, 3196, 2669, 2139, 1606, 1072, 536 +}; + +// hybrib Hanning & flat window +static const int16_t kBlocks160w256x[256] = { + 0, 268, 536, 804, 1072, 1339, 1606, 1872, + 2139, 2404, 2669, 2933, 3196, 3459, 3720, 3981, + 4240, 4499, 4756, 5012, 5266, 5520, 5771, 6021, + 6270, 6517, 6762, 7005, 7246, 7486, 7723, 7959, + 8192, 8423, 8652, 8878, 9102, 9324, 9543, 9760, + 9974, 10185, 10394, 10600, 10803, 11003, 11200, 11394, + 11585, 11773, 11958, 12140, 12318, 12493, 12665, 12833, + 12998, 13160, 13318, 13472, 13623, 13770, 13913, 14053, + 14189, 14321, 14449, 14574, 14694, 14811, 14924, 15032, + 15137, 15237, 15334, 15426, 15515, 15599, 15679, 15754, + 15826, 15893, 15956, 16015, 16069, 16119, 16165, 16207, + 16244, 16277, 16305, 16329, 16349, 16364, 16375, 16382, + 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 
16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384, + 16384, 16382, 16375, 16364, 16349, 16329, 16305, 16277, + 16244, 16207, 16165, 16119, 16069, 16015, 15956, 15893, + 15826, 15754, 15679, 15599, 15515, 15426, 15334, 15237, + 15137, 15032, 14924, 14811, 14694, 14574, 14449, 14321, + 14189, 14053, 13913, 13770, 13623, 13472, 13318, 13160, + 12998, 12833, 12665, 12493, 12318, 12140, 11958, 11773, + 11585, 11394, 11200, 11003, 10803, 10600, 10394, 10185, + 9974, 9760, 9543, 9324, 9102, 8878, 8652, 8423, + 8192, 7959, 7723, 7486, 7246, 7005, 6762, 6517, + 6270, 6021, 5771, 5520, 5266, 5012, 4756, 4499, + 4240, 3981, 3720, 3459, 3196, 2933, 2669, 2404, + 2139, 1872, 1606, 1339, 1072, 804, 536, 268 +}; + +// Gain factor1 table: Input value in Q8 and output value in Q13 +// original floating point code +// if (gain > blim) { +// factor1 = 1.0 + 1.3 * (gain - blim); +// if (gain * factor1 > 1.0) { +// factor1 = 1.0 / gain; +// } +// } else { +// factor1 = 1.0; +// } +static const int16_t kFactor1Table[257] = { + 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8233, 8274, 8315, 8355, 8396, 8436, 8475, 8515, 8554, 8592, 8631, 8669, + 8707, 8745, 8783, 8820, 8857, 8894, 8931, 8967, 9003, 9039, 9075, 9111, 9146, 9181, + 9216, 9251, 9286, 9320, 9354, 9388, 9422, 9456, 9489, 9523, 9556, 9589, 9622, 9655, + 9687, 9719, 9752, 9784, 9816, 9848, 9879, 9911, 9942, 9973, 10004, 10035, 10066, + 10097, 10128, 10158, 10188, 10218, 10249, 10279, 10308, 10338, 10368, 10397, 10426, + 
10456, 10485, 10514, 10543, 10572, 10600, 10629, 10657, 10686, 10714, 10742, 10770, + 10798, 10826, 10854, 10882, 10847, 10810, 10774, 10737, 10701, 10666, 10631, 10596, + 10562, 10527, 10494, 10460, 10427, 10394, 10362, 10329, 10297, 10266, 10235, 10203, + 10173, 10142, 10112, 10082, 10052, 10023, 9994, 9965, 9936, 9908, 9879, 9851, 9824, + 9796, 9769, 9742, 9715, 9689, 9662, 9636, 9610, 9584, 9559, 9534, 9508, 9484, 9459, + 9434, 9410, 9386, 9362, 9338, 9314, 9291, 9268, 9245, 9222, 9199, 9176, 9154, 9132, + 9110, 9088, 9066, 9044, 9023, 9002, 8980, 8959, 8939, 8918, 8897, 8877, 8857, 8836, + 8816, 8796, 8777, 8757, 8738, 8718, 8699, 8680, 8661, 8642, 8623, 8605, 8586, 8568, + 8550, 8532, 8514, 8496, 8478, 8460, 8443, 8425, 8408, 8391, 8373, 8356, 8339, 8323, + 8306, 8289, 8273, 8256, 8240, 8224, 8208, 8192 +}; + +// For Factor2 tables +// original floating point code +// if (gain > blim) { +// factor2 = 1.0; +// } else { +// factor2 = 1.0 - 0.3 * (blim - gain); +// if (gain <= inst->denoiseBound) { +// factor2 = 1.0 - 0.3 * (blim - inst->denoiseBound); +// } +// } +// +// Gain factor table: Input value in Q8 and output value in Q13 +static const int16_t kFactor2Aggressiveness1[257] = { + 7577, 7577, 7577, 7577, 7577, 7577, + 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7596, 7614, 7632, + 7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845, + 7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016, + 8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162, + 8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 
8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192 +}; + +// Gain factor table: Input value in Q8 and output value in Q13 +static const int16_t kFactor2Aggressiveness2[257] = { + 7270, 7270, 7270, 7270, 7270, 7306, + 7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632, + 7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845, + 7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016, + 8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162, + 8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 
8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192 +}; + +// Gain factor table: Input value in Q8 and output value in Q13 +static const int16_t kFactor2Aggressiveness3[257] = { + 7184, 7184, 7184, 7229, 7270, 7306, + 7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632, + 7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845, + 7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016, + 8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162, + 8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 
8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, + 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192 +}; + +// sum of log2(i) from table index to inst->anaLen2 in Q5 +// Note that the first table value is invalid, since log2(0) = -infinity +static const int16_t kSumLogIndex[66] = { + 0, 22917, 22917, 22885, 22834, 22770, 22696, 22613, + 22524, 22428, 22326, 22220, 22109, 21994, 21876, 21754, + 21629, 21501, 21370, 21237, 21101, 20963, 20822, 20679, + 20535, 20388, 20239, 20089, 19937, 19783, 19628, 19470, + 19312, 19152, 18991, 18828, 18664, 18498, 18331, 18164, + 17994, 17824, 17653, 17480, 17306, 17132, 16956, 16779, + 16602, 16423, 16243, 16063, 15881, 15699, 15515, 15331, + 15146, 14960, 14774, 14586, 14398, 14209, 14019, 13829, + 13637, 13445 +}; + +// sum of log2(i)^2 from table index to inst->anaLen2 in Q2 +// Note that the first table value is invalid, since log2(0) = -infinity +static const int16_t kSumSquareLogIndex[66] = { + 0, 16959, 16959, 16955, 16945, 16929, 16908, 16881, + 16850, 16814, 16773, 16729, 16681, 16630, 16575, 16517, + 16456, 16392, 16325, 16256, 16184, 16109, 16032, 15952, + 15870, 15786, 15700, 15612, 15521, 15429, 15334, 15238, + 15140, 15040, 14938, 14834, 14729, 14622, 14514, 14404, + 14292, 14179, 14064, 13947, 13830, 13710, 13590, 13468, + 13344, 13220, 13094, 12966, 12837, 12707, 12576, 12444, + 12310, 12175, 12039, 11902, 11763, 11624, 11483, 11341, + 11198, 11054 +}; + +// log2(table index) in Q12 +// Note that the first table value is invalid, since log2(0) = -infinity +static const int16_t kLogIndex[129] = { + 0, 0, 4096, 6492, 8192, 9511, 10588, 11499, + 12288, 12984, 13607, 14170, 14684, 15157, 15595, 16003, + 16384, 16742, 17080, 17400, 17703, 17991, 18266, 18529, + 18780, 19021, 19253, 19476, 19691, 19898, 20099, 20292, + 20480, 20662, 20838, 21010, 21176, 21338, 21496, 21649, + 21799, 21945, 22087, 22226, 22362, 
22495, 22625, 22752, + 22876, 22998, 23117, 23234, 23349, 23462, 23572, 23680, + 23787, 23892, 23994, 24095, 24195, 24292, 24388, 24483, + 24576, 24668, 24758, 24847, 24934, 25021, 25106, 25189, + 25272, 25354, 25434, 25513, 25592, 25669, 25745, 25820, + 25895, 25968, 26041, 26112, 26183, 26253, 26322, 26390, + 26458, 26525, 26591, 26656, 26721, 26784, 26848, 26910, + 26972, 27033, 27094, 27154, 27213, 27272, 27330, 27388, + 27445, 27502, 27558, 27613, 27668, 27722, 27776, 27830, + 27883, 27935, 27988, 28039, 28090, 28141, 28191, 28241, + 28291, 28340, 28388, 28437, 28484, 28532, 28579, 28626, + 28672 +}; + +// determinant of estimation matrix in Q0 corresponding to the log2 tables above +// Note that the first table value is invalid, since log2(0) = -infinity +static const int16_t kDeterminantEstMatrix[66] = { + 0, 29814, 25574, 22640, 20351, 18469, 16873, 15491, + 14277, 13199, 12233, 11362, 10571, 9851, 9192, 8587, + 8030, 7515, 7038, 6596, 6186, 5804, 5448, 5115, + 4805, 4514, 4242, 3988, 3749, 3524, 3314, 3116, + 2930, 2755, 2590, 2435, 2289, 2152, 2022, 1900, + 1785, 1677, 1575, 1478, 1388, 1302, 1221, 1145, + 1073, 1005, 942, 881, 825, 771, 721, 674, + 629, 587, 547, 510, 475, 442, 411, 382, + 355, 330 +}; + +// Update the noise estimation information. 
// Update the noise estimation information.
// Converts the log-domain quantile estimates (inst->noiseEstLogQuantile) at
// |offset| into linear-domain values in inst->noiseEstQuantile, and picks
// inst->qNoise so the converted values use as much of the int16 range as
// possible without overflow.
static void UpdateNoiseEstimate(NoiseSuppressionFixedC* inst, int offset) {
  int32_t tmp32no1 = 0;
  int32_t tmp32no2 = 0;
  int16_t tmp16 = 0;
  // 11819/2^13 ~= 1.4427 ~= log2(e), used to turn a natural-log value into a
  // base-2 exponent so the "exp" below can be done as shifts.
  const int16_t kExp2Const = 11819; // Q13

  size_t i = 0;

  tmp16 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset,
                                inst->magnLen);
  // Guarantee a Q-domain as high as possible and still fit in int16
  inst->qNoise = 14 - (int) WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                   kExp2Const, tmp16, 21);
  for (i = 0; i < inst->magnLen; i++) {
    // inst->quantile[i]=exp(inst->lquantile[offset+i]);
    // in Q21
    tmp32no2 = kExp2Const * inst->noiseEstLogQuantile[offset + i];
    // Split into mantissa (2^21 | fractional bits) and integer exponent.
    tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac
    tmp16 = (int16_t)(tmp32no2 >> 21);
    tmp16 -= 21;// shift 21 to get result in Q0
    tmp16 += (int16_t) inst->qNoise; //shift to get result in Q(qNoise)
    if (tmp16 < 0) {
      tmp32no1 >>= -tmp16;
    } else {
      tmp32no1 <<= tmp16;
    }
    // Saturate to int16 range; stored per-bin in Q(qNoise).
    inst->noiseEstQuantile[i] = WebRtcSpl_SatW32ToW16(tmp32no1);
  }
}

// Noise Estimation
// Quantile-based noise estimation: maintains SIMULT simultaneous log-quantile
// estimates of the magnitude spectrum |magn| (one per counter phase) and
// writes the current linear-domain estimate into |noise| with its Q-domain in
// |q_noise|. C reference implementation behind the WebRtcNsx_NoiseEstimation
// function pointer.
static void NoiseEstimationC(NoiseSuppressionFixedC* inst,
                             uint16_t* magn,
                             uint32_t* noise,
                             int16_t* q_noise) {
  int16_t lmagn[HALF_ANAL_BLOCKL], counter, countDiv;
  int16_t countProd, delta, zeros, frac;
  int16_t log2, tabind, logval, tmp16, tmp16no1, tmp16no2;
  const int16_t log2_const = 22713; // Q15
  const int16_t width_factor = 21845;

  size_t i, s, offset;

  // Compensation term for the normalization of magn: log(2^(stages-normData)).
  tabind = inst->stages - inst->normData;
  RTC_DCHECK_LT(tabind, 9);
  RTC_DCHECK_GT(tabind, -9);
  if (tabind < 0) {
    logval = -WebRtcNsx_kLogTable[-tabind];
  } else {
    logval = WebRtcNsx_kLogTable[tabind];
  }

  // lmagn(i)=log(magn(i))=log(2)*log2(magn(i))
  // magn is in Q(-stages), and the real lmagn values are:
  // real_lmagn(i)=log(magn(i)*2^stages)=log(magn(i))+log(2^stages)
  // lmagn in Q8
  for (i = 0; i < inst->magnLen; i++) {
    if (magn[i]) {
      zeros = WebRtcSpl_NormU32((uint32_t)magn[i]);
      frac = (int16_t)((((uint32_t)magn[i] << zeros)
                        & 0x7FFFFFFF) >> 23);
      // log2(magn(i))
      RTC_DCHECK_LT(frac, 256);
      log2 = (int16_t)(((31 - zeros) << 8)
                       + WebRtcNsx_kLogTableFrac[frac]);
      // log2(magn(i))*log(2)
      lmagn[i] = (int16_t)((log2 * log2_const) >> 15);
      // + log(2^stages)
      lmagn[i] += logval;
    } else {
      // Zero magnitude: fall back to the normalization offset alone.
      lmagn[i] = logval;//0;
    }
  }

  // loop over simultaneous estimates
  for (s = 0; s < SIMULT; s++) {
    offset = s * inst->magnLen;

    // Get counter values from state
    counter = inst->noiseEstCounter[s];
    RTC_DCHECK_LT(counter, 201);
    countDiv = WebRtcNsx_kCounterDiv[counter]; // 1/(counter+1) in Q15.
    countProd = (int16_t)(counter * countDiv);

    // quant_est(...)
    for (i = 0; i < inst->magnLen; i++) {
      // compute delta
      if (inst->noiseEstDensity[offset + i] > 512) {
        // Get the value for delta by shifting intead of dividing.
        int factor = WebRtcSpl_NormW16(inst->noiseEstDensity[offset + i]);
        delta = (int16_t)(FACTOR_Q16 >> (14 - factor));
      } else {
        delta = FACTOR_Q7;
        if (inst->blockIndex < END_STARTUP_LONG) {
          // Smaller step size during startup. This prevents from using
          // unrealistic values causing overflow.
          delta = FACTOR_Q7_STARTUP;
        }
      }

      // update log quantile estimate
      tmp16 = (int16_t)((delta * countDiv) >> 14);
      if (lmagn[i] > inst->noiseEstLogQuantile[offset + i]) {
        // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
        // CounterDiv=1/(inst->counter[s]+1) in Q15
        tmp16 += 2;
        inst->noiseEstLogQuantile[offset + i] += tmp16 / 4;
      } else {
        tmp16 += 1;
        // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
        // TODO(bjornv): investigate why we need to truncate twice.
        tmp16no2 = (int16_t)((tmp16 / 2) * 3 / 2);
        inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
        if (inst->noiseEstLogQuantile[offset + i] < logval) {
          // This is the smallest fixed point representation we can
          // have, hence we limit the output.
          inst->noiseEstLogQuantile[offset + i] = logval;
        }
      }

      // update density estimate
      if (WEBRTC_SPL_ABS_W16(lmagn[i] - inst->noiseEstLogQuantile[offset + i])
          < WIDTH_Q8) {
        tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                     inst->noiseEstDensity[offset + i], countProd, 15);
        tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
                     width_factor, countDiv, 15);
        inst->noiseEstDensity[offset + i] = tmp16no1 + tmp16no2;
      }
    }  // end loop over magnitude spectrum

    if (counter >= END_STARTUP_LONG) {
      inst->noiseEstCounter[s] = 0;
      if (inst->blockIndex >= END_STARTUP_LONG) {
        UpdateNoiseEstimate(inst, offset);
      }
    }
    inst->noiseEstCounter[s]++;

  }  // end loop over simultaneous estimates

  // Sequentially update the noise during startup
  // NOTE: uses |offset| from the last loop iteration (s == SIMULT-1), i.e.
  // the startup update is applied to the last simultaneous estimate only.
  if (inst->blockIndex < END_STARTUP_LONG) {
    UpdateNoiseEstimate(inst, offset);
  }

  for (i = 0; i < inst->magnLen; i++) {
    noise[i] = (uint32_t)(inst->noiseEstQuantile[i]); // Q(qNoise)
  }
  (*q_noise) = (int16_t)inst->qNoise;
}

// Filter the data in the frequency domain, and create spectrum.
// Scales inst->real/inst->imag per-bin by the suppression filter gains
// (noiseSupFilter, Q14) and packs them into |freq_buf| as interleaved
// [re, -im] pairs, the layout expected by the inverse FFT.
static void PrepareSpectrumC(NoiseSuppressionFixedC* inst, int16_t* freq_buf) {
  size_t i = 0, j = 0;

  for (i = 0; i < inst->magnLen; i++) {
    inst->real[i] = (int16_t)((inst->real[i] *
        (int16_t)(inst->noiseSupFilter[i])) >> 14); // Q(normData-stages)
    inst->imag[i] = (int16_t)((inst->imag[i] *
        (int16_t)(inst->noiseSupFilter[i])) >> 14); // Q(normData-stages)
  }

  // Interleave, negating the imaginary part (conjugate) for the inverse
  // transform; DC and Nyquist bins are handled outside the loop.
  freq_buf[0] = inst->real[0];
  freq_buf[1] = -inst->imag[0];
  for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) {
    freq_buf[j] = inst->real[i];
    freq_buf[j + 1] = -inst->imag[i];
  }
  freq_buf[inst->anaLen] = inst->real[inst->anaLen2];
  freq_buf[inst->anaLen + 1] = -inst->imag[inst->anaLen2];
}

// Denormalize the real-valued signal |in|, the output from inverse FFT.
// Undoes the pre-FFT normalization: shifts each sample of |in| by
// (factor - normData) bits (left when positive, right when negative, via
// WEBRTC_SPL_SHIFT_W32) and stores the saturated result in inst->real (Q0).
static void DenormalizeC(NoiseSuppressionFixedC* inst,
                         int16_t* in,
                         int factor) {
  size_t i = 0;
  int32_t tmp32 = 0;
  for (i = 0; i < inst->anaLen; i += 1) {
    tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t)in[i],
                                 factor - inst->normData);
    inst->real[i] = WebRtcSpl_SatW32ToW16(tmp32); // Q0
  }
}

// For the noise supression process, synthesis, read out fully processed
// segment, and update synthesis buffer.
// Windows inst->real (window in Q14), applies |gain_factor| (Q13), and
// overlap-adds into the synthesis buffer with saturation; the first
// blockLen10ms samples are emitted to |out_frame| and the buffer is shifted
// for the next frame.
static void SynthesisUpdateC(NoiseSuppressionFixedC* inst,
                             int16_t* out_frame,
                             int16_t gain_factor) {
  size_t i = 0;
  int16_t tmp16a = 0;
  int16_t tmp16b = 0;
  int32_t tmp32 = 0;

  // synthesis
  for (i = 0; i < inst->anaLen; i++) {
    tmp16a = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
               inst->window[i], inst->real[i], 14); // Q0, window in Q14
    tmp32 = WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(tmp16a, gain_factor, 13); // Q0
    // Down shift with rounding
    tmp16b = WebRtcSpl_SatW32ToW16(tmp32); // Q0
    // Overlap-add with saturating addition.
    inst->synthesisBuffer[i] = WebRtcSpl_AddSatW16(inst->synthesisBuffer[i],
                                                   tmp16b); // Q0
  }

  // read out fully processed segment
  for (i = 0; i < inst->blockLen10ms; i++) {
    out_frame[i] = inst->synthesisBuffer[i]; // Q0
  }

  // update synthesis buffer: slide left by one 10 ms block and zero the tail.
  memcpy(inst->synthesisBuffer, inst->synthesisBuffer + inst->blockLen10ms,
         (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->synthesisBuffer));
  WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer
      + inst->anaLen - inst->blockLen10ms, inst->blockLen10ms);
}

// Update analysis buffer for lower band, and window data before FFT.
// Slides the analysis buffer by one 10 ms block, appends |new_speech|, and
// writes the Q14-windowed frame to |out| ready for the forward FFT.
static void AnalysisUpdateC(NoiseSuppressionFixedC* inst,
                            int16_t* out,
                            int16_t* new_speech) {
  size_t i = 0;

  // For lower band update analysis buffer.
  memcpy(inst->analysisBuffer, inst->analysisBuffer + inst->blockLen10ms,
         (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->analysisBuffer));
  memcpy(inst->analysisBuffer + inst->anaLen - inst->blockLen10ms, new_speech,
         inst->blockLen10ms * sizeof(*inst->analysisBuffer));

  // Window data before FFT.
  for (i = 0; i < inst->anaLen; i++) {
    out[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
               inst->window[i], inst->analysisBuffer[i], 14); // Q0
  }
}

// Normalize the real-valued signal |in|, the input to forward FFT.
// Left-shifts every sample by inst->normData bits (normData is asserted
// non-negative), producing |out| in Q(normData).
static void NormalizeRealBufferC(NoiseSuppressionFixedC* inst,
                                 const int16_t* in,
                                 int16_t* out) {
  size_t i = 0;
  RTC_DCHECK_GE(inst->normData, 0);
  for (i = 0; i < inst->anaLen; ++i) {
    out[i] = in[i] << inst->normData; // Q(normData)
  }
}

// Declare function pointers.
// These are bound to the C reference implementations in WebRtcNsx_InitCore
// and may then be rebound to platform-optimized variants (NEON/MIPS) below.
NoiseEstimation WebRtcNsx_NoiseEstimation;
PrepareSpectrum WebRtcNsx_PrepareSpectrum;
SynthesisUpdate WebRtcNsx_SynthesisUpdate;
AnalysisUpdate WebRtcNsx_AnalysisUpdate;
Denormalize WebRtcNsx_Denormalize;
NormalizeRealBuffer WebRtcNsx_NormalizeRealBuffer;

#if defined(WEBRTC_HAS_NEON)
// Initialize function pointers for ARM Neon platform.
// Note: Denormalize and NormalizeRealBuffer keep their C implementations.
static void WebRtcNsx_InitNeon(void) {
  WebRtcNsx_NoiseEstimation = WebRtcNsx_NoiseEstimationNeon;
  WebRtcNsx_PrepareSpectrum = WebRtcNsx_PrepareSpectrumNeon;
  WebRtcNsx_SynthesisUpdate = WebRtcNsx_SynthesisUpdateNeon;
  WebRtcNsx_AnalysisUpdate = WebRtcNsx_AnalysisUpdateNeon;
}
#endif

#if defined(MIPS32_LE)
// Initialize function pointers for MIPS platform.
// Rebinds the per-platform hooks to the MIPS-optimized implementations; the
// Denormalize variant additionally requires the MIPS DSP R1 extension.
static void WebRtcNsx_InitMips(void) {
  WebRtcNsx_PrepareSpectrum = WebRtcNsx_PrepareSpectrum_mips;
  WebRtcNsx_SynthesisUpdate = WebRtcNsx_SynthesisUpdate_mips;
  WebRtcNsx_AnalysisUpdate = WebRtcNsx_AnalysisUpdate_mips;
  WebRtcNsx_NormalizeRealBuffer = WebRtcNsx_NormalizeRealBuffer_mips;
#if defined(MIPS_DSP_R1_LE)
  WebRtcNsx_Denormalize = WebRtcNsx_Denormalize_mips;
#endif
}
#endif

// Evaluates the parametric (pink-noise) model
// noise = 2^(pink_noise_num_avg + pink_noise_exp_avg * log2(freq_index))
// in fixed point, writing the block-averaged value to |noise_estimate_avg|
// and the value scaled by (blockIndex + 1) to |noise_estimate|. If the
// exponent underflows (tmp32no1 <= 0) the outputs are left untouched.
void WebRtcNsx_CalcParametricNoiseEstimate(NoiseSuppressionFixedC* inst,
                                           int16_t pink_noise_exp_avg,
                                           int32_t pink_noise_num_avg,
                                           int freq_index,
                                           uint32_t* noise_estimate,
                                           uint32_t* noise_estimate_avg) {
  int32_t tmp32no1 = 0;
  int32_t tmp32no2 = 0;

  int16_t int_part = 0;
  int16_t frac_part = 0;

  // Use pink noise estimate
  // noise_estimate = 2^(pinkNoiseNumerator + pinkNoiseExp * log2(j))
  RTC_DCHECK_GE(freq_index, 0);
  RTC_DCHECK_LT(freq_index, 129);
  tmp32no2 = (pink_noise_exp_avg * kLogIndex[freq_index]) >> 15; // Q11
  tmp32no1 = pink_noise_num_avg - tmp32no2; // Q11

  // Calculate output: 2^tmp32no1
  // Output in Q(minNorm-stages)
  tmp32no1 += (inst->minNorm - inst->stages) << 11;
  if (tmp32no1 > 0) {
    int_part = (int16_t)(tmp32no1 >> 11);
    frac_part = (int16_t)(tmp32no1 & 0x000007ff); // Q11
    // Piecewise linear approximation of 'b' in
    // 2^(int_part+frac_part) = 2^int_part * (1 + b)
    // 'b' is given in Q11 and below stored in frac_part.
    if (frac_part >> 10) {
      // Upper fractional part
      tmp32no2 = (2048 - frac_part) * 1244; // Q21
      tmp32no2 = 2048 - (tmp32no2 >> 10);
    } else {
      // Lower fractional part
      tmp32no2 = (frac_part * 804) >> 10;
    }
    // Shift fractional part to Q(minNorm-stages)
    tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, int_part - 11);
    *noise_estimate_avg = (1 << int_part) + (uint32_t)tmp32no2;
    // Scale up to initMagnEst, which is not block averaged
    *noise_estimate = (*noise_estimate_avg) * (uint32_t)(inst->blockIndex + 1);
  }
}

// Initialize state
// Resets every field of |inst| for sample rate |fs| (8/16/32/48 kHz; rates
// above 8 kHz share the 16 kHz analysis setup), (re)creates the FFT state,
// binds the function pointers to the C implementations (then lets the
// NEON/MIPS init hooks override them), and applies the default policy.
// Returns 0 on success, -1 on NULL |inst|, unsupported |fs|, or FFT
// allocation failure.
int32_t WebRtcNsx_InitCore(NoiseSuppressionFixedC* inst, uint32_t fs) {
  int i;

  //check for valid pointer
  if (inst == NULL) {
    return -1;
  }
  //

  // Initialization of struct
  if (fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000) {
    inst->fs = fs;
  } else {
    return -1;
  }

  if (fs == 8000) {
    inst->blockLen10ms = 80;
    inst->anaLen = 128;
    inst->stages = 7;
    inst->window = kBlocks80w128x;
    inst->thresholdLogLrt = 131072; //default threshold for LRT feature
    inst->maxLrt = 0x0040000;
    inst->minLrt = 52429;
  } else {
    inst->blockLen10ms = 160;
    inst->anaLen = 256;
    inst->stages = 8;
    inst->window = kBlocks160w256x;
    inst->thresholdLogLrt = 212644; //default threshold for LRT feature
    inst->maxLrt = 0x0080000;
    inst->minLrt = 104858;
  }
  inst->anaLen2 = inst->anaLen / 2;
  inst->magnLen = inst->anaLen2 + 1;

  // Re-init safe: free a previously created FFT state before replacing it.
  if (inst->real_fft != NULL) {
    WebRtcSpl_FreeRealFFT(inst->real_fft);
  }
  inst->real_fft = WebRtcSpl_CreateRealFFT(inst->stages);
  if (inst->real_fft == NULL) {
    return -1;
  }

  WebRtcSpl_ZerosArrayW16(inst->analysisBuffer, ANAL_BLOCKL_MAX);
  WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer, ANAL_BLOCKL_MAX);

  // for HB processing
  WebRtcSpl_ZerosArrayW16(inst->dataBufHBFX[0],
                          NUM_HIGH_BANDS_MAX * ANAL_BLOCKL_MAX);
  // for quantile noise estimation
  WebRtcSpl_ZerosArrayW16(inst->noiseEstQuantile, HALF_ANAL_BLOCKL);
  for (i = 0; i < SIMULT * HALF_ANAL_BLOCKL; i++) {
    inst->noiseEstLogQuantile[i] = 2048; // Q8
    inst->noiseEstDensity[i] = 153; // Q9
  }
  // Stagger the counters so the SIMULT estimates wrap at different times.
  for (i = 0; i < SIMULT; i++) {
    inst->noiseEstCounter[i] = (int16_t)(END_STARTUP_LONG * (i + 1)) / SIMULT;
  }

  // Initialize suppression filter with ones
  WebRtcSpl_MemSetW16((int16_t*)inst->noiseSupFilter, 16384, HALF_ANAL_BLOCKL);

  // Set the aggressiveness: default
  inst->aggrMode = 0;

  //initialize variables for new method
  inst->priorNonSpeechProb = 8192; // Q14(0.5) prior probability for speech/noise
  for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
    inst->prevMagnU16[i] = 0;
    inst->prevNoiseU32[i] = 0; //previous noise-spectrum
    inst->logLrtTimeAvgW32[i] = 0; //smooth LR ratio
    inst->avgMagnPause[i] = 0; //conservative noise spectrum estimate
    inst->initMagnEst[i] = 0; //initial average magnitude spectrum
  }

  //feature quantities
  inst->thresholdSpecDiff = 50; //threshold for difference feature: determined on-line
  inst->thresholdSpecFlat = 20480; //threshold for flatness: determined on-line
  inst->featureLogLrt = inst->thresholdLogLrt; //average LRT factor (= threshold)
  inst->featureSpecFlat = inst->thresholdSpecFlat; //spectral flatness (= threshold)
  inst->featureSpecDiff = inst->thresholdSpecDiff; //spectral difference (= threshold)
  inst->weightLogLrt = 6; //default weighting par for LRT feature
  inst->weightSpecFlat = 0; //default weighting par for spectral flatness feature
  inst->weightSpecDiff = 0; //default weighting par for spectral difference feature

  inst->curAvgMagnEnergy = 0; //window time-average of input magnitude spectrum
  inst->timeAvgMagnEnergy = 0; //normalization for spectral difference
  inst->timeAvgMagnEnergyTmp = 0; //normalization for spectral difference

  //histogram quantities: used to estimate/update thresholds for features
  WebRtcSpl_ZerosArrayW16(inst->histLrt, HIST_PAR_EST);
  WebRtcSpl_ZerosArrayW16(inst->histSpecDiff, HIST_PAR_EST);
  WebRtcSpl_ZerosArrayW16(inst->histSpecFlat, HIST_PAR_EST);

  inst->blockIndex = -1; //frame counter

  //inst->modelUpdate = 500; //window for update
  inst->modelUpdate = (1 << STAT_UPDATES); //window for update
  inst->cntThresUpdate = 0; //counter feature thresholds updates

  inst->sumMagn = 0;
  inst->magnEnergy = 0;
  inst->prevQMagn = 0;
  inst->qNoise = 0;
  inst->prevQNoise = 0;

  inst->energyIn = 0;
  inst->scaleEnergyIn = 0;

  inst->whiteNoiseLevel = 0;
  inst->pinkNoiseNumerator = 0;
  inst->pinkNoiseExp = 0;
  inst->minNorm = 15; // Start with full scale
  inst->zeroInputSignal = 0;

  //default mode
  WebRtcNsx_set_policy_core(inst, 0);

#ifdef NS_FILEDEBUG
  inst->infile = fopen("indebug.pcm", "wb");
  inst->outfile = fopen("outdebug.pcm", "wb");
  inst->file1 = fopen("file1.pcm", "wb");
  inst->file2 = fopen("file2.pcm", "wb");
  inst->file3 = fopen("file3.pcm", "wb");
  inst->file4 = fopen("file4.pcm", "wb");
  inst->file5 = fopen("file5.pcm", "wb");
#endif

  // Initialize function pointers.
  WebRtcNsx_NoiseEstimation = NoiseEstimationC;
  WebRtcNsx_PrepareSpectrum = PrepareSpectrumC;
  WebRtcNsx_SynthesisUpdate = SynthesisUpdateC;
  WebRtcNsx_AnalysisUpdate = AnalysisUpdateC;
  WebRtcNsx_Denormalize = DenormalizeC;
  WebRtcNsx_NormalizeRealBuffer = NormalizeRealBufferC;

#if defined(WEBRTC_HAS_NEON)
  WebRtcNsx_InitNeon();
#endif

#if defined(MIPS32_LE)
  WebRtcNsx_InitMips();
#endif

  inst->initFlag = 1;

  return 0;
}

// Sets the aggressiveness policy (0 mildest .. 3 most aggressive) by picking
// the overdrive, denoise floor and gain-compensation table.
// Returns 0 on success, -1 for a mode outside [0, 3].
// NOTE(review): mode 0 sets gainMap = 0 but does not assign factor2Table —
// presumably the table is unused when gainMap is 0; verify against the
// processing path before relying on factor2Table in mode 0.
int WebRtcNsx_set_policy_core(NoiseSuppressionFixedC* inst, int mode) {
  // allow for modes:0,1,2,3
  if (mode < 0 || mode > 3) {
    return -1;
  }

  inst->aggrMode = mode;
  if (mode == 0) {
    inst->overdrive = 256; // Q8(1.0)
    inst->denoiseBound = 8192; // Q14(0.5)
    inst->gainMap = 0; // No gain compensation
  } else if (mode == 1) {
    inst->overdrive = 256; // Q8(1.0)
    inst->denoiseBound = 4096; // Q14(0.25)
    inst->factor2Table = kFactor2Aggressiveness1;
    inst->gainMap = 1;
  } else if (mode == 2) {
    inst->overdrive = 282; // ~= Q8(1.1)
    inst->denoiseBound = 2048; // Q14(0.125)
    inst->factor2Table = kFactor2Aggressiveness2;
    inst->gainMap = 1;
  } else if (mode == 3) {
    inst->overdrive = 320; // Q8(1.25)
    inst->denoiseBound = 1475; // ~= Q14(0.09)
    inst->factor2Table = kFactor2Aggressiveness3;
    inst->gainMap = 1;
  }
  return 0;
}

// Extract thresholds for feature parameters
// histograms are computed over some window_size (given by window_pars)
// thresholds and weights are extracted every window
// flag 0 means update histogram only, flag 1 means compute the thresholds/weights
// threshold and weights are returned in: inst->priorModelPars
void WebRtcNsx_FeatureParameterExtraction(NoiseSuppressionFixedC* inst,
                                          int flag) {
  uint32_t tmpU32;
  uint32_t histIndex;
  uint32_t posPeak1SpecFlatFX, posPeak2SpecFlatFX;
  uint32_t posPeak1SpecDiffFX, posPeak2SpecDiffFX;

  int32_t tmp32;
  int32_t fluctLrtFX, thresFluctLrtFX;
  int32_t avgHistLrtFX, avgSquareHistLrtFX, avgHistLrtComplFX;

  int16_t j;
  int16_t numHistLrt;

  int i;
  int useFeatureSpecFlat, useFeatureSpecDiff, featureSum;
  int maxPeak1, maxPeak2;
  int weightPeak1SpecFlat, weightPeak2SpecFlat;
  int weightPeak1SpecDiff, weightPeak2SpecDiff;

  //update histograms
  if (!flag) {
    // LRT
    // Type casting to UWord32 is safe since negative values will not be wrapped to larger
    // values than HIST_PAR_EST
    histIndex = (uint32_t)(inst->featureLogLrt);
    if (histIndex < HIST_PAR_EST) {
      inst->histLrt[histIndex]++;
    }
    // Spectral flatness
    // (inst->featureSpecFlat*20)>>10 = (inst->featureSpecFlat*5)>>8
    histIndex = (inst->featureSpecFlat * 5) >> 8;
    if (histIndex < HIST_PAR_EST) {
      inst->histSpecFlat[histIndex]++;
    }
    // Spectral difference
    histIndex = HIST_PAR_EST;
    if (inst->timeAvgMagnEnergy > 0) {
      // Guard against division by zero
      // If timeAvgMagnEnergy == 0 we have no normalizing statistics and
      // therefore can't update the histogram
      histIndex = ((inst->featureSpecDiff * 5) >> inst->stages) /
          inst->timeAvgMagnEnergy;
    }
    if (histIndex < HIST_PAR_EST) {
      inst->histSpecDiff[histIndex]++;
    }
  }

  // extract parameters for speech/noise probability
  if (flag) {
    useFeatureSpecDiff = 1;
    //for LRT feature:
    // compute the average over inst->featureExtractionParams.rangeAvgHistLrt
    avgHistLrtFX = 0;
    avgSquareHistLrtFX = 0;
    numHistLrt = 0;
    for (i = 0; i < BIN_SIZE_LRT; i++) {
      // j = bin-center position (2*i + 1); accumulate first/second moments.
      j = (2 * i + 1);
      tmp32 = inst->histLrt[i] * j;
      avgHistLrtFX += tmp32;
      numHistLrt += inst->histLrt[i];
      avgSquareHistLrtFX += tmp32 * j;
    }
    avgHistLrtComplFX = avgHistLrtFX;
    for (; i < HIST_PAR_EST; i++) {
      j = (2 * i + 1);
      tmp32 = inst->histLrt[i] * j;
      avgHistLrtComplFX += tmp32;
      avgSquareHistLrtFX += tmp32 * j;
    }
    // Fluctuation ~ second moment minus (restricted mean * full-range mean).
    fluctLrtFX = avgSquareHistLrtFX * numHistLrt -
        avgHistLrtFX * avgHistLrtComplFX;
    thresFluctLrtFX = THRES_FLUCT_LRT * numHistLrt;
    // get threshold for LRT feature:
    tmpU32 = (FACTOR_1_LRT_DIFF * (uint32_t)avgHistLrtFX);
    if ((fluctLrtFX < thresFluctLrtFX) || (numHistLrt == 0) ||
        (tmpU32 > (uint32_t)(100 * numHistLrt))) {
      //very low fluctuation, so likely noise
      inst->thresholdLogLrt = inst->maxLrt;
    } else {
      tmp32 = (int32_t)((tmpU32 << (9 + inst->stages)) / numHistLrt /
                        25);
      // check if value is within min/max range
      inst->thresholdLogLrt = WEBRTC_SPL_SAT(inst->maxLrt,
                                             tmp32,
                                             inst->minLrt);
    }
    if (fluctLrtFX < thresFluctLrtFX) {
      // Do not use difference feature if fluctuation of LRT feature is very low:
      // most likely just noise state
      useFeatureSpecDiff = 0;
    }

    // for spectral flatness and spectral difference: compute the main peaks of histogram
    maxPeak1 = 0;
    maxPeak2 = 0;
    posPeak1SpecFlatFX = 0;
    posPeak2SpecFlatFX = 0;
    weightPeak1SpecFlat = 0;
    weightPeak2SpecFlat = 0;

    // peaks for flatness
    for (i = 0; i < HIST_PAR_EST; i++) {
      if (inst->histSpecFlat[i] > maxPeak1) {
        // Found new "first" peak
        maxPeak2 = maxPeak1;
        weightPeak2SpecFlat = weightPeak1SpecFlat;
        posPeak2SpecFlatFX = posPeak1SpecFlatFX;

        maxPeak1 = inst->histSpecFlat[i];
        weightPeak1SpecFlat = inst->histSpecFlat[i];
        posPeak1SpecFlatFX = (uint32_t)(2 * i + 1);
      } else if (inst->histSpecFlat[i] > maxPeak2) {
        // Found new "second" peak
        maxPeak2 = inst->histSpecFlat[i];
        weightPeak2SpecFlat = inst->histSpecFlat[i];
        posPeak2SpecFlatFX = (uint32_t)(2 * i + 1);
      }
    }

    // for spectral flatness feature
    useFeatureSpecFlat = 1;
    // merge the two peaks if they are close
    if ((posPeak1SpecFlatFX - posPeak2SpecFlatFX < LIM_PEAK_SPACE_FLAT_DIFF)
        && (weightPeak2SpecFlat * LIM_PEAK_WEIGHT_FLAT_DIFF > weightPeak1SpecFlat)) {
      weightPeak1SpecFlat += weightPeak2SpecFlat;
      posPeak1SpecFlatFX = (posPeak1SpecFlatFX + posPeak2SpecFlatFX) >> 1;
    }
    //reject if weight of peaks is not large enough, or peak value too small
    if (weightPeak1SpecFlat < THRES_WEIGHT_FLAT_DIFF || posPeak1SpecFlatFX
        < THRES_PEAK_FLAT) {
      useFeatureSpecFlat = 0;
    } else { // if selected, get the threshold
      // compute the threshold and check if value is within min/max range
      inst->thresholdSpecFlat = WEBRTC_SPL_SAT(MAX_FLAT_Q10, FACTOR_2_FLAT_Q10
                                               * posPeak1SpecFlatFX, MIN_FLAT_Q10); //Q10
    }
    // done with flatness feature

    if (useFeatureSpecDiff) {
      //compute two peaks for spectral difference
      maxPeak1 = 0;
      maxPeak2 = 0;
      posPeak1SpecDiffFX = 0;
      posPeak2SpecDiffFX = 0;
      weightPeak1SpecDiff = 0;
      weightPeak2SpecDiff = 0;
      // peaks for spectral difference
      for (i = 0; i < HIST_PAR_EST; i++) {
        if (inst->histSpecDiff[i] > maxPeak1) {
          // Found new "first" peak
          maxPeak2 = maxPeak1;
          weightPeak2SpecDiff = weightPeak1SpecDiff;
          posPeak2SpecDiffFX = posPeak1SpecDiffFX;

          maxPeak1 = inst->histSpecDiff[i];
          weightPeak1SpecDiff = inst->histSpecDiff[i];
          posPeak1SpecDiffFX = (uint32_t)(2 * i + 1);
        } else if (inst->histSpecDiff[i] > maxPeak2) {
          // Found new "second" peak
          maxPeak2 = inst->histSpecDiff[i];
          weightPeak2SpecDiff = inst->histSpecDiff[i];
          posPeak2SpecDiffFX = (uint32_t)(2 * i + 1);
        }
      }

      // merge the two peaks if they are close
      if ((posPeak1SpecDiffFX - posPeak2SpecDiffFX < LIM_PEAK_SPACE_FLAT_DIFF)
          && (weightPeak2SpecDiff * LIM_PEAK_WEIGHT_FLAT_DIFF > weightPeak1SpecDiff)) {
        weightPeak1SpecDiff += weightPeak2SpecDiff;
        posPeak1SpecDiffFX = (posPeak1SpecDiffFX + posPeak2SpecDiffFX) >> 1;
      }
      // get the threshold value and check if value is within min/max range
      inst->thresholdSpecDiff = WEBRTC_SPL_SAT(MAX_DIFF, FACTOR_1_LRT_DIFF
                                               * posPeak1SpecDiffFX, MIN_DIFF); //5x bigger
      //reject if weight of peaks is not large enough
      if (weightPeak1SpecDiff < THRES_WEIGHT_FLAT_DIFF) {
        useFeatureSpecDiff = 0;
      }
      // done with spectral difference feature
    }

    // select the weights between the features
    // inst->priorModelPars[4] is weight for LRT: always selected
    featureSum = 6 / (1 + useFeatureSpecFlat + useFeatureSpecDiff);
    inst->weightLogLrt = featureSum;
    inst->weightSpecFlat = useFeatureSpecFlat * featureSum;
    inst->weightSpecDiff = useFeatureSpecDiff * featureSum;

    // set histograms to zero for next update
    WebRtcSpl_ZerosArrayW16(inst->histLrt, HIST_PAR_EST);
    WebRtcSpl_ZerosArrayW16(inst->histSpecDiff, HIST_PAR_EST);
    WebRtcSpl_ZerosArrayW16(inst->histSpecFlat, HIST_PAR_EST);
  } // end of flag == 1
}


// Compute spectral flatness on input spectrum
// magn is the magnitude spectrum
// spectral flatness is returned in inst->featureSpecFlat
// The result is a smoothed (SPECT_FLAT_TAVG_Q14 time constant) ratio of the
// geometric to arithmetic mean of magn[1..magnLen-1], in Q10. If any bin is
// zero, the feature decays toward zero instead.
void WebRtcNsx_ComputeSpectralFlatness(NoiseSuppressionFixedC* inst,
                                       uint16_t* magn) {
  uint32_t tmpU32;
  uint32_t avgSpectralFlatnessNum, avgSpectralFlatnessDen;

  int32_t tmp32;
  int32_t currentSpectralFlatness, logCurSpectralFlatness;

  int16_t zeros, frac, intPart;

  size_t i;

  // for flatness
  avgSpectralFlatnessNum = 0;
  avgSpectralFlatnessDen = inst->sumMagn - (uint32_t)magn[0]; // Q(normData-stages)

  // compute log of ratio of the geometric to arithmetic mean: check for log(0) case
  // flatness = exp( sum(log(magn[i]))/N - log(sum(magn[i])/N) )
  //          = exp( sum(log(magn[i]))/N ) * N / sum(magn[i])
  //          = 2^( sum(log2(magn[i]))/N - (log2(sum(magn[i])) - log2(N)) ) [This is used]
  for (i = 1; i < inst->magnLen; i++) {
    // First bin is excluded from spectrum measures. Number of bins is now a power of 2
    if (magn[i]) {
      zeros = WebRtcSpl_NormU32((uint32_t)magn[i]);
      frac = (int16_t)(((uint32_t)((uint32_t)(magn[i]) << zeros)
                        & 0x7FFFFFFF) >> 23);
      // log2(magn(i))
      RTC_DCHECK_LT(frac, 256);
      tmpU32 = (uint32_t)(((31 - zeros) << 8)
                          + WebRtcNsx_kLogTableFrac[frac]); // Q8
      avgSpectralFlatnessNum += tmpU32; // Q8
    } else {
      //if at least one frequency component is zero, treat separately
      tmpU32 = WEBRTC_SPL_UMUL_32_16(inst->featureSpecFlat, SPECT_FLAT_TAVG_Q14); // Q24
      inst->featureSpecFlat -= tmpU32 >> 14; // Q10
      return;
    }
  }
  //ratio and inverse log: check for case of log(0)
  zeros = WebRtcSpl_NormU32(avgSpectralFlatnessDen);
  frac = (int16_t)(((avgSpectralFlatnessDen << zeros) & 0x7FFFFFFF) >> 23);
  // log2(avgSpectralFlatnessDen)
  RTC_DCHECK_LT(frac, 256);
  tmp32 = (int32_t)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); // Q8
  logCurSpectralFlatness = (int32_t)avgSpectralFlatnessNum;
  logCurSpectralFlatness += ((int32_t)(inst->stages - 1) << (inst->stages + 7)); // Q(8+stages-1)
  logCurSpectralFlatness -= (tmp32 << (inst->stages - 1));
  logCurSpectralFlatness <<= (10 - inst->stages); // Q17
  // Build 2^logCurSpectralFlatness from mantissa (2^17 | frac) and exponent.
  tmp32 = (int32_t)(0x00020000 | (WEBRTC_SPL_ABS_W32(logCurSpectralFlatness)
                                  & 0x0001FFFF)); //Q17
  intPart = 7 - (logCurSpectralFlatness >> 17); // Add 7 for output in Q10.
  if (intPart > 0) {
    currentSpectralFlatness = tmp32 >> intPart;
  } else {
    currentSpectralFlatness = tmp32 << -intPart;
  }

  //time average update of spectral flatness feature
  tmp32 = currentSpectralFlatness - (int32_t)inst->featureSpecFlat; // Q10
  tmp32 *= SPECT_FLAT_TAVG_Q14; // Q24
  inst->featureSpecFlat += tmp32 >> 14; // Q10
  // done with flatness feature
}


// Compute the difference measure between input spectrum and a template/learned noise spectrum
// magn_tmp is the input spectrum
// the reference/template spectrum is inst->magn_avg_pause[i]
// returns (normalized) spectral difference in inst->featureSpecDiff
void WebRtcNsx_ComputeSpectralDifference(NoiseSuppressionFixedC* inst,
                                         uint16_t* magnIn) {
  // This is to be calculated:
  // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 / var(magnAvgPause)

  uint32_t tmpU32no1, tmpU32no2;
  uint32_t varMagnUFX, varPauseUFX, avgDiffNormMagnUFX;

  int32_t tmp32no1, tmp32no2;
  int32_t avgPauseFX, avgMagnFX, covMagnPauseFX;
  int32_t maxPause, minPause;

  int16_t tmp16no1;

  size_t i;
  int norm32, nShifts;

  avgPauseFX = 0;
  maxPause = 0;
  minPause = inst->avgMagnPause[0]; // Q(prevQMagn)
  // compute average quantities
  for (i = 0; i < inst->magnLen; i++) {
    // Compute mean of magn_pause
    avgPauseFX += inst->avgMagnPause[i]; // in Q(prevQMagn)
    maxPause = WEBRTC_SPL_MAX(maxPause, inst->avgMagnPause[i]);
    minPause = WEBRTC_SPL_MIN(minPause, inst->avgMagnPause[i]);
  }
  // normalize by replacing div of "inst->magnLen" with "inst->stages-1" shifts
  avgPauseFX >>= inst->stages - 1;
  avgMagnFX = inst->sumMagn >> (inst->stages - 1);
  // Largest possible deviation in magnPause for (co)var calculations
  tmp32no1 = WEBRTC_SPL_MAX(maxPause - avgPauseFX, avgPauseFX - minPause);
  // Get number of shifts to make sure we don't get wrap around in varPause
  nShifts = WEBRTC_SPL_MAX(0, 10 + inst->stages - WebRtcSpl_NormW32(tmp32no1));

  varMagnUFX = 0;
  varPauseUFX = 0;
  covMagnPauseFX = 0;
  for (i = 0; i < inst->magnLen; i++) {
    // Compute var and cov of magn and magn_pause
    tmp16no1 = (int16_t)((int32_t)magnIn[i] - avgMagnFX);
    tmp32no2 = inst->avgMagnPause[i] - avgPauseFX;
    varMagnUFX += (uint32_t)(tmp16no1 * tmp16no1); // Q(2*qMagn)
    tmp32no1 = tmp32no2 * tmp16no1; // Q(prevQMagn+qMagn)
    covMagnPauseFX += tmp32no1; // Q(prevQMagn+qMagn)
    tmp32no1 = tmp32no2 >> nShifts; // Q(prevQMagn-minPause).
    varPauseUFX += tmp32no1 * tmp32no1; // Q(2*(prevQMagn-minPause))
  }
  //update of average magnitude spectrum: Q(-2*stages) and averaging replaced by shifts
  inst->curAvgMagnEnergy +=
      inst->magnEnergy >> (2 * inst->normData + inst->stages - 1);

  avgDiffNormMagnUFX = varMagnUFX; // Q(2*qMagn)
  if ((varPauseUFX) && (covMagnPauseFX)) {
    // Subtract cov^2 / var(pause), renormalizing cov to avoid overflow.
    tmpU32no1 = (uint32_t)WEBRTC_SPL_ABS_W32(covMagnPauseFX); // Q(prevQMagn+qMagn)
    norm32 = WebRtcSpl_NormU32(tmpU32no1) - 16;
    if (norm32 > 0) {
      tmpU32no1 <<= norm32; // Q(prevQMagn+qMagn+norm32)
    } else {
      tmpU32no1 >>= -norm32; // Q(prevQMagn+qMagn+norm32)
    }
    tmpU32no2 = WEBRTC_SPL_UMUL(tmpU32no1, tmpU32no1); // Q(2*(prevQMagn+qMagn-norm32))

    nShifts += norm32;
    nShifts <<= 1;
    if (nShifts < 0) {
      varPauseUFX >>= (-nShifts); // Q(2*(qMagn+norm32+minPause))
      nShifts = 0;
    }
    if (varPauseUFX > 0) {
      // Q(2*(qMagn+norm32-16+minPause))
      tmpU32no1 = tmpU32no2 / varPauseUFX;
      tmpU32no1 >>= nShifts;

      // Q(2*qMagn)
      avgDiffNormMagnUFX -= WEBRTC_SPL_MIN(avgDiffNormMagnUFX, tmpU32no1);
    } else {
      avgDiffNormMagnUFX = 0;
    }
  }
  //normalize and compute time average update of difference feature
  tmpU32no1 = avgDiffNormMagnUFX >> (2 * inst->normData);
  if (inst->featureSpecDiff > tmpU32no1) {
    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(inst->featureSpecDiff - tmpU32no1,
                                      SPECT_DIFF_TAVG_Q8); // Q(8-2*stages)
    inst->featureSpecDiff -= tmpU32no2 >> 8; // Q(-2*stages)
  } else {
    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(tmpU32no1 - inst->featureSpecDiff,
                                      SPECT_DIFF_TAVG_Q8); // Q(8-2*stages)
    inst->featureSpecDiff += tmpU32no2 >> 8; // Q(-2*stages)
  }
}

// Transform input (speechFrame) to frequency domain magnitude (magnU16)
void WebRtcNsx_DataAnalysis(NoiseSuppressionFixedC* inst,
                            short* speechFrame,
                            uint16_t* magnU16) {
  uint32_t tmpU32no1;

  int32_t tmp_1_w32 = 0;
  int32_t tmp_2_w32 = 0;
  int32_t sum_log_magn = 0;
  int32_t sum_log_i_log_magn = 0;

  uint16_t sum_log_magn_u16 = 0;
  uint16_t tmp_u16 = 0;

  int16_t sum_log_i = 0;
  int16_t sum_log_i_square = 0;
  int16_t frac = 0;
  int16_t log2 = 0;
  int16_t matrix_determinant = 0;
  int16_t maxWinData;

  size_t i, j;
  int zeros;
  int net_norm = 0;
  int right_shifts_in_magnU16 = 0;
  int right_shifts_in_initMagnEst = 0;

  // Over-allocated by 16 samples so the pointers below can be aligned.
  int16_t winData_buff[ANAL_BLOCKL_MAX * 2 + 16];
  int16_t realImag_buff[ANAL_BLOCKL_MAX * 2 + 16];

  // Align the structures to 32-byte boundary for the FFT function.
  int16_t* winData = (int16_t*) (((uintptr_t)winData_buff + 31) & ~31);
  int16_t* realImag = (int16_t*) (((uintptr_t) realImag_buff + 31) & ~31);

  // Update analysis buffer for lower band, and window data before FFT.
  WebRtcNsx_AnalysisUpdate(inst, winData, speechFrame);

  // Get input energy
  inst->energyIn =
      WebRtcSpl_Energy(winData, inst->anaLen, &inst->scaleEnergyIn);

  // Reset zero input flag
  inst->zeroInputSignal = 0;
  // Acquire norm for winData
  maxWinData = WebRtcSpl_MaxAbsValueW16(winData, inst->anaLen);
  inst->normData = WebRtcSpl_NormW16(maxWinData);
  if (maxWinData == 0) {
    // Treat zero input separately.
+ inst->zeroInputSignal = 1; + return; + } + + // Determine the net normalization in the frequency domain + net_norm = inst->stages - inst->normData; + // Track lowest normalization factor and use it to prevent wrap around in shifting + right_shifts_in_magnU16 = inst->normData - inst->minNorm; + right_shifts_in_initMagnEst = WEBRTC_SPL_MAX(-right_shifts_in_magnU16, 0); + inst->minNorm -= right_shifts_in_initMagnEst; + right_shifts_in_magnU16 = WEBRTC_SPL_MAX(right_shifts_in_magnU16, 0); + + // create realImag as winData interleaved with zeros (= imag. part), normalize it + WebRtcNsx_NormalizeRealBuffer(inst, winData, realImag); + + // FFT output will be in winData[]. + WebRtcSpl_RealForwardFFT(inst->real_fft, realImag, winData); + + inst->imag[0] = 0; // Q(normData-stages) + inst->imag[inst->anaLen2] = 0; + inst->real[0] = winData[0]; // Q(normData-stages) + inst->real[inst->anaLen2] = winData[inst->anaLen]; + // Q(2*(normData-stages)) + inst->magnEnergy = (uint32_t)(inst->real[0] * inst->real[0]); + inst->magnEnergy += (uint32_t)(inst->real[inst->anaLen2] * + inst->real[inst->anaLen2]); + magnU16[0] = (uint16_t)WEBRTC_SPL_ABS_W16(inst->real[0]); // Q(normData-stages) + magnU16[inst->anaLen2] = (uint16_t)WEBRTC_SPL_ABS_W16(inst->real[inst->anaLen2]); + inst->sumMagn = (uint32_t)magnU16[0]; // Q(normData-stages) + inst->sumMagn += (uint32_t)magnU16[inst->anaLen2]; + + if (inst->blockIndex >= END_STARTUP_SHORT) { + for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) { + inst->real[i] = winData[j]; + inst->imag[i] = -winData[j + 1]; + // magnitude spectrum + // energy in Q(2*(normData-stages)) + tmpU32no1 = (uint32_t)(winData[j] * winData[j]); + tmpU32no1 += (uint32_t)(winData[j + 1] * winData[j + 1]); + inst->magnEnergy += tmpU32no1; // Q(2*(normData-stages)) + + magnU16[i] = (uint16_t)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages) + inst->sumMagn += (uint32_t)magnU16[i]; // Q(normData-stages) + } + } else { + // + // Gather information during startup for 
noise parameter estimation + // + + // Switch initMagnEst to Q(minNorm-stages) + inst->initMagnEst[0] >>= right_shifts_in_initMagnEst; + inst->initMagnEst[inst->anaLen2] >>= right_shifts_in_initMagnEst; + + // Update initMagnEst with magnU16 in Q(minNorm-stages). + inst->initMagnEst[0] += magnU16[0] >> right_shifts_in_magnU16; + inst->initMagnEst[inst->anaLen2] += + magnU16[inst->anaLen2] >> right_shifts_in_magnU16; + + log2 = 0; + if (magnU16[inst->anaLen2]) { + // Calculate log2(magnU16[inst->anaLen2]) + zeros = WebRtcSpl_NormU32((uint32_t)magnU16[inst->anaLen2]); + frac = (int16_t)((((uint32_t)magnU16[inst->anaLen2] << zeros) & + 0x7FFFFFFF) >> 23); // Q8 + // log2(magnU16(i)) in Q8 + RTC_DCHECK_LT(frac, 256); + log2 = (int16_t)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); + } + + sum_log_magn = (int32_t)log2; // Q8 + // sum_log_i_log_magn in Q17 + sum_log_i_log_magn = (kLogIndex[inst->anaLen2] * log2) >> 3; + + for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) { + inst->real[i] = winData[j]; + inst->imag[i] = -winData[j + 1]; + // magnitude spectrum + // energy in Q(2*(normData-stages)) + tmpU32no1 = (uint32_t)(winData[j] * winData[j]); + tmpU32no1 += (uint32_t)(winData[j + 1] * winData[j + 1]); + inst->magnEnergy += tmpU32no1; // Q(2*(normData-stages)) + + magnU16[i] = (uint16_t)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages) + inst->sumMagn += (uint32_t)magnU16[i]; // Q(normData-stages) + + // Switch initMagnEst to Q(minNorm-stages) + inst->initMagnEst[i] >>= right_shifts_in_initMagnEst; + + // Update initMagnEst with magnU16 in Q(minNorm-stages). + inst->initMagnEst[i] += magnU16[i] >> right_shifts_in_magnU16; + + if (i >= kStartBand) { + // For pink noise estimation. 
Collect data neglecting lower frequency band + log2 = 0; + if (magnU16[i]) { + zeros = WebRtcSpl_NormU32((uint32_t)magnU16[i]); + frac = (int16_t)((((uint32_t)magnU16[i] << zeros) & + 0x7FFFFFFF) >> 23); + // log2(magnU16(i)) in Q8 + RTC_DCHECK_LT(frac, 256); + log2 = (int16_t)(((31 - zeros) << 8) + + WebRtcNsx_kLogTableFrac[frac]); + } + sum_log_magn += (int32_t)log2; // Q8 + // sum_log_i_log_magn in Q17 + sum_log_i_log_magn += (kLogIndex[i] * log2) >> 3; + } + } + + // + //compute simplified noise model during startup + // + + // Estimate White noise + + // Switch whiteNoiseLevel to Q(minNorm-stages) + inst->whiteNoiseLevel >>= right_shifts_in_initMagnEst; + + // Update the average magnitude spectrum, used as noise estimate. + tmpU32no1 = WEBRTC_SPL_UMUL_32_16(inst->sumMagn, inst->overdrive); + tmpU32no1 >>= inst->stages + 8; + + // Replacing division above with 'stages' shifts + // Shift to same Q-domain as whiteNoiseLevel + tmpU32no1 >>= right_shifts_in_magnU16; + // This operation is safe from wrap around as long as END_STARTUP_SHORT < 128 + RTC_DCHECK_LT(END_STARTUP_SHORT, 128); + inst->whiteNoiseLevel += tmpU32no1; // Q(minNorm-stages) + + // Estimate Pink noise parameters + // Denominator used in both parameter estimates. + // The value is only dependent on the size of the frequency band (kStartBand) + // and to reduce computational complexity stored in a table (kDeterminantEstMatrix[]) + RTC_DCHECK_LT(kStartBand, 66); + matrix_determinant = kDeterminantEstMatrix[kStartBand]; // Q0 + sum_log_i = kSumLogIndex[kStartBand]; // Q5 + sum_log_i_square = kSumSquareLogIndex[kStartBand]; // Q2 + if (inst->fs == 8000) { + // Adjust values to shorter blocks in narrow band. 
+ tmp_1_w32 = (int32_t)matrix_determinant; + tmp_1_w32 += (kSumLogIndex[65] * sum_log_i) >> 9; + tmp_1_w32 -= (kSumLogIndex[65] * kSumLogIndex[65]) >> 10; + tmp_1_w32 -= (int32_t)sum_log_i_square << 4; + tmp_1_w32 -= ((inst->magnLen - kStartBand) * kSumSquareLogIndex[65]) >> 2; + matrix_determinant = (int16_t)tmp_1_w32; + sum_log_i -= kSumLogIndex[65]; // Q5 + sum_log_i_square -= kSumSquareLogIndex[65]; // Q2 + } + + // Necessary number of shifts to fit sum_log_magn in a word16 + zeros = 16 - WebRtcSpl_NormW32(sum_log_magn); + if (zeros < 0) { + zeros = 0; + } + tmp_1_w32 = sum_log_magn << 1; // Q9 + sum_log_magn_u16 = (uint16_t)(tmp_1_w32 >> zeros); // Q(9-zeros). + + // Calculate and update pinkNoiseNumerator. Result in Q11. + tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i_square, sum_log_magn_u16); // Q(11-zeros) + tmpU32no1 = sum_log_i_log_magn >> 12; // Q5 + + // Shift the largest value of sum_log_i and tmp32no3 before multiplication + tmp_u16 = ((uint16_t)sum_log_i << 1); // Q6 + if ((uint32_t)sum_log_i > tmpU32no1) { + tmp_u16 >>= zeros; + } else { + tmpU32no1 >>= zeros; + } + tmp_2_w32 -= (int32_t)WEBRTC_SPL_UMUL_32_16(tmpU32no1, tmp_u16); // Q(11-zeros) + matrix_determinant >>= zeros; // Q(-zeros) + tmp_2_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q11 + tmp_2_w32 += (int32_t)net_norm << 11; // Q11 + if (tmp_2_w32 < 0) { + tmp_2_w32 = 0; + } + inst->pinkNoiseNumerator += tmp_2_w32; // Q11 + + // Calculate and update pinkNoiseExp. Result in Q14. + tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i, sum_log_magn_u16); // Q(14-zeros) + tmp_1_w32 = sum_log_i_log_magn >> (3 + zeros); + tmp_1_w32 *= inst->magnLen - kStartBand; + tmp_2_w32 -= tmp_1_w32; // Q(14-zeros) + if (tmp_2_w32 > 0) { + // If the exponential parameter is negative force it to zero, which means a + // flat spectrum. 
+ tmp_1_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q14 + inst->pinkNoiseExp += WEBRTC_SPL_SAT(16384, tmp_1_w32, 0); // Q14 + } + } +} + +void WebRtcNsx_DataSynthesis(NoiseSuppressionFixedC* inst, short* outFrame) { + int32_t energyOut; + + int16_t realImag_buff[ANAL_BLOCKL_MAX * 2 + 16]; + int16_t rfft_out_buff[ANAL_BLOCKL_MAX * 2 + 16]; + + // Align the structures to 32-byte boundary for the FFT function. + int16_t* realImag = (int16_t*) (((uintptr_t)realImag_buff + 31) & ~31); + int16_t* rfft_out = (int16_t*) (((uintptr_t) rfft_out_buff + 31) & ~31); + + int16_t tmp16no1, tmp16no2; + int16_t energyRatio; + int16_t gainFactor, gainFactor1, gainFactor2; + + size_t i; + int outCIFFT; + int scaleEnergyOut = 0; + + if (inst->zeroInputSignal) { + // synthesize the special case of zero input + // read out fully processed segment + for (i = 0; i < inst->blockLen10ms; i++) { + outFrame[i] = inst->synthesisBuffer[i]; // Q0 + } + // update synthesis buffer + memcpy(inst->synthesisBuffer, inst->synthesisBuffer + inst->blockLen10ms, + (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->synthesisBuffer)); + WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer + inst->anaLen - inst->blockLen10ms, + inst->blockLen10ms); + return; + } + + // Filter the data in the frequency domain, and create spectrum. + WebRtcNsx_PrepareSpectrum(inst, realImag); + + // Inverse FFT output will be in rfft_out[]. 
+ outCIFFT = WebRtcSpl_RealInverseFFT(inst->real_fft, realImag, rfft_out); + + WebRtcNsx_Denormalize(inst, rfft_out, outCIFFT); + + //scale factor: only do it after END_STARTUP_LONG time + gainFactor = 8192; // 8192 = Q13(1.0) + if (inst->gainMap == 1 && + inst->blockIndex > END_STARTUP_LONG && + inst->energyIn > 0) { + // Q(-scaleEnergyOut) + energyOut = WebRtcSpl_Energy(inst->real, inst->anaLen, &scaleEnergyOut); + if (scaleEnergyOut == 0 && !(energyOut & 0x7f800000)) { + energyOut = WEBRTC_SPL_SHIFT_W32(energyOut, 8 + scaleEnergyOut + - inst->scaleEnergyIn); + } else { + // |energyIn| is currently in Q(|scaleEnergyIn|), but to later on end up + // with an |energyRatio| in Q8 we need to change the Q-domain to + // Q(-8-scaleEnergyOut). + inst->energyIn >>= 8 + scaleEnergyOut - inst->scaleEnergyIn; + } + + RTC_DCHECK_GT(inst->energyIn, 0); + energyRatio = (energyOut + inst->energyIn / 2) / inst->energyIn; // Q8 + // Limit the ratio to [0, 1] in Q8, i.e., [0, 256] + energyRatio = WEBRTC_SPL_SAT(256, energyRatio, 0); + + // all done in lookup tables now + RTC_DCHECK_LT(energyRatio, 257); + gainFactor1 = kFactor1Table[energyRatio]; // Q8 + gainFactor2 = inst->factor2Table[energyRatio]; // Q8 + + //combine both scales with speech/noise prob: note prior (priorSpeechProb) is not frequency dependent + + // factor = inst->priorSpeechProb*factor1 + (1.0-inst->priorSpeechProb)*factor2; // original code + tmp16no1 = (int16_t)(((16384 - inst->priorNonSpeechProb) * gainFactor1) >> + 14); // in Q13, where 16384 = Q14(1.0) + tmp16no2 = (int16_t)((inst->priorNonSpeechProb * gainFactor2) >> 14); + gainFactor = tmp16no1 + tmp16no2; // Q13 + } // out of flag_gain_map==1 + + // Synthesis, read out fully processed segment, and update synthesis buffer. 
+ WebRtcNsx_SynthesisUpdate(inst, outFrame, gainFactor); +} + +void WebRtcNsx_ProcessCore(NoiseSuppressionFixedC* inst, + const short* const* speechFrame, + int num_bands, + short* const* outFrame) { + // main routine for noise suppression + + uint32_t tmpU32no1, tmpU32no2, tmpU32no3; + uint32_t satMax, maxNoiseU32; + uint32_t tmpMagnU32, tmpNoiseU32; + uint32_t nearMagnEst; + uint32_t noiseUpdateU32; + uint32_t noiseU32[HALF_ANAL_BLOCKL]; + uint32_t postLocSnr[HALF_ANAL_BLOCKL]; + uint32_t priorLocSnr[HALF_ANAL_BLOCKL]; + uint32_t prevNearSnr[HALF_ANAL_BLOCKL]; + uint32_t curNearSnr; + uint32_t priorSnr; + uint32_t noise_estimate = 0; + uint32_t noise_estimate_avg = 0; + uint32_t numerator = 0; + + int32_t tmp32no1, tmp32no2; + int32_t pink_noise_num_avg = 0; + + uint16_t tmpU16no1; + uint16_t magnU16[HALF_ANAL_BLOCKL]; + uint16_t prevNoiseU16[HALF_ANAL_BLOCKL]; + uint16_t nonSpeechProbFinal[HALF_ANAL_BLOCKL]; + uint16_t gammaNoise, prevGammaNoise; + uint16_t noiseSupFilterTmp[HALF_ANAL_BLOCKL]; + + int16_t qMagn, qNoise; + int16_t avgProbSpeechHB, gainModHB, avgFilterGainHB, gainTimeDomainHB; + int16_t pink_noise_exp_avg = 0; + + size_t i, j; + int nShifts, postShifts; + int norm32no1, norm32no2; + int flag, sign; + int q_domain_to_use = 0; + + // Code for ARMv7-Neon platform assumes the following: + RTC_DCHECK_GT(inst->anaLen, 0); + RTC_DCHECK_GT(inst->anaLen2, 0); + RTC_DCHECK_EQ(0, inst->anaLen % 16); + RTC_DCHECK_EQ(0, inst->anaLen2 % 8); + RTC_DCHECK_GT(inst->blockLen10ms, 0); + RTC_DCHECK_EQ(0, inst->blockLen10ms % 16); + RTC_DCHECK_EQ(inst->magnLen, inst->anaLen2 + 1); + +#ifdef NS_FILEDEBUG + if (fwrite(spframe, sizeof(short), + inst->blockLen10ms, inst->infile) != inst->blockLen10ms) { + RTC_DCHECK(false); + } +#endif + + // Check that initialization has been done + RTC_DCHECK_EQ(1, inst->initFlag); + RTC_DCHECK_LE(num_bands - 1, NUM_HIGH_BANDS_MAX); + + const short* const* speechFrameHB = NULL; + short* const* outFrameHB = NULL; + size_t num_high_bands 
= 0; + if (num_bands > 1) { + speechFrameHB = &speechFrame[1]; + outFrameHB = &outFrame[1]; + num_high_bands = (size_t)(num_bands - 1); + } + + // Store speechFrame and transform to frequency domain + WebRtcNsx_DataAnalysis(inst, (short*)speechFrame[0], magnU16); + + if (inst->zeroInputSignal) { + WebRtcNsx_DataSynthesis(inst, outFrame[0]); + + if (num_bands > 1) { + // update analysis buffer for H band + // append new data to buffer FX + for (i = 0; i < num_high_bands; ++i) { + int block_shift = inst->anaLen - inst->blockLen10ms; + memcpy(inst->dataBufHBFX[i], inst->dataBufHBFX[i] + inst->blockLen10ms, + block_shift * sizeof(*inst->dataBufHBFX[i])); + memcpy(inst->dataBufHBFX[i] + block_shift, speechFrameHB[i], + inst->blockLen10ms * sizeof(*inst->dataBufHBFX[i])); + for (j = 0; j < inst->blockLen10ms; j++) { + outFrameHB[i][j] = inst->dataBufHBFX[i][j]; // Q0 + } + } + } // end of H band gain computation + return; + } + + // Update block index when we have something to process + inst->blockIndex++; + // + + // Norm of magn + qMagn = inst->normData - inst->stages; + + // Compute spectral flatness on input spectrum + WebRtcNsx_ComputeSpectralFlatness(inst, magnU16); + + // quantile noise estimate + WebRtcNsx_NoiseEstimation(inst, magnU16, noiseU32, &qNoise); + + //noise estimate from previous frame + for (i = 0; i < inst->magnLen; i++) { + prevNoiseU16[i] = (uint16_t)(inst->prevNoiseU32[i] >> 11); // Q(prevQNoise) + } + + if (inst->blockIndex < END_STARTUP_SHORT) { + // Noise Q-domain to be used later; see description at end of section. 
+ q_domain_to_use = WEBRTC_SPL_MIN((int)qNoise, inst->minNorm - inst->stages); + + // Calculate frequency independent parts in parametric noise estimate and calculate + // the estimate for the lower frequency band (same values for all frequency bins) + if (inst->pinkNoiseExp) { + pink_noise_exp_avg = (int16_t)WebRtcSpl_DivW32W16(inst->pinkNoiseExp, + (int16_t)(inst->blockIndex + 1)); // Q14 + pink_noise_num_avg = WebRtcSpl_DivW32W16(inst->pinkNoiseNumerator, + (int16_t)(inst->blockIndex + 1)); // Q11 + WebRtcNsx_CalcParametricNoiseEstimate(inst, + pink_noise_exp_avg, + pink_noise_num_avg, + kStartBand, + &noise_estimate, + &noise_estimate_avg); + } else { + // Use white noise estimate if we have poor pink noise parameter estimates + noise_estimate = inst->whiteNoiseLevel; // Q(minNorm-stages) + noise_estimate_avg = noise_estimate / (inst->blockIndex + 1); // Q(minNorm-stages) + } + for (i = 0; i < inst->magnLen; i++) { + // Estimate the background noise using the pink noise parameters if permitted + if ((inst->pinkNoiseExp) && (i >= kStartBand)) { + // Reset noise_estimate + noise_estimate = 0; + noise_estimate_avg = 0; + // Calculate the parametric noise estimate for current frequency bin + WebRtcNsx_CalcParametricNoiseEstimate(inst, + pink_noise_exp_avg, + pink_noise_num_avg, + i, + &noise_estimate, + &noise_estimate_avg); + } + // Calculate parametric Wiener filter + noiseSupFilterTmp[i] = inst->denoiseBound; + if (inst->initMagnEst[i]) { + // numerator = (initMagnEst - noise_estimate * overdrive) + // Result in Q(8+minNorm-stages) + tmpU32no1 = WEBRTC_SPL_UMUL_32_16(noise_estimate, inst->overdrive); + numerator = inst->initMagnEst[i] << 8; + if (numerator > tmpU32no1) { + // Suppression filter coefficient larger than zero, so calculate. 
+ numerator -= tmpU32no1; + + // Determine number of left shifts in numerator for best accuracy after + // division + nShifts = WebRtcSpl_NormU32(numerator); + nShifts = WEBRTC_SPL_SAT(6, nShifts, 0); + + // Shift numerator to Q(nShifts+8+minNorm-stages) + numerator <<= nShifts; + + // Shift denominator to Q(nShifts-6+minNorm-stages) + tmpU32no1 = inst->initMagnEst[i] >> (6 - nShifts); + if (tmpU32no1 == 0) { + // This is only possible if numerator = 0, in which case + // we don't need any division. + tmpU32no1 = 1; + } + tmpU32no2 = numerator / tmpU32no1; // Q14 + noiseSupFilterTmp[i] = (uint16_t)WEBRTC_SPL_SAT(16384, tmpU32no2, + (uint32_t)(inst->denoiseBound)); // Q14 + } + } + // Weight quantile noise 'noiseU32' with modeled noise 'noise_estimate_avg' + // 'noiseU32 is in Q(qNoise) and 'noise_estimate' in Q(minNorm-stages) + // To guarantee that we do not get wrap around when shifting to the same domain + // we use the lowest one. Furthermore, we need to save 6 bits for the weighting. + // 'noise_estimate_avg' can handle this operation by construction, but 'noiseU32' + // may not. 
+ + // Shift 'noiseU32' to 'q_domain_to_use' + tmpU32no1 = noiseU32[i] >> (qNoise - q_domain_to_use); + // Shift 'noise_estimate_avg' to 'q_domain_to_use' + tmpU32no2 = noise_estimate_avg >> + (inst->minNorm - inst->stages - q_domain_to_use); + // Make a simple check to see if we have enough room for weighting 'tmpU32no1' + // without wrap around + nShifts = 0; + if (tmpU32no1 & 0xfc000000) { + tmpU32no1 >>= 6; + tmpU32no2 >>= 6; + nShifts = 6; + } + tmpU32no1 *= inst->blockIndex; + tmpU32no2 *= (END_STARTUP_SHORT - inst->blockIndex); + // Add them together and divide by startup length + noiseU32[i] = WebRtcSpl_DivU32U16(tmpU32no1 + tmpU32no2, END_STARTUP_SHORT); + // Shift back if necessary + noiseU32[i] <<= nShifts; + } + // Update new Q-domain for 'noiseU32' + qNoise = q_domain_to_use; + } + // compute average signal during END_STARTUP_LONG time: + // used to normalize spectral difference measure + if (inst->blockIndex < END_STARTUP_LONG) { + // substituting division with shift ending up in Q(-2*stages) + inst->timeAvgMagnEnergyTmp += + inst->magnEnergy >> (2 * inst->normData + inst->stages - 1); + inst->timeAvgMagnEnergy = WebRtcSpl_DivU32U16(inst->timeAvgMagnEnergyTmp, + inst->blockIndex + 1); + } + + //start processing at frames == converged+1 + // STEP 1: compute prior and post SNR based on quantile noise estimates + + // compute direct decision (DD) estimate of prior SNR: needed for new method + satMax = (uint32_t)1048575;// Largest possible value without getting overflow despite shifting 12 steps + postShifts = 6 + qMagn - qNoise; + nShifts = 5 - inst->prevQMagn + inst->prevQNoise; + for (i = 0; i < inst->magnLen; i++) { + // FLOAT: + // post SNR + // postLocSnr[i] = 0.0; + // if (magn[i] > noise[i]) + // { + // postLocSnr[i] = magn[i] / (noise[i] + 0.0001); + // } + // // previous post SNR + // // previous estimate: based on previous frame with gain filter (smooth is previous filter) + // + // prevNearSnr[i] = inst->prevMagnU16[i] / (inst->noisePrev[i] + 
0.0001) * (inst->smooth[i]); + // + // // DD estimate is sum of two terms: current estimate and previous estimate + // // directed decision update of priorSnr (or we actually store [2*priorSnr+1]) + // + // priorLocSnr[i] = DD_PR_SNR * prevNearSnr[i] + (1.0 - DD_PR_SNR) * (postLocSnr[i] - 1.0); + + // calculate post SNR: output in Q11 + postLocSnr[i] = 2048; // 1.0 in Q11 + tmpU32no1 = (uint32_t)magnU16[i] << 6; // Q(6+qMagn) + if (postShifts < 0) { + tmpU32no2 = noiseU32[i] >> -postShifts; // Q(6+qMagn) + } else { + tmpU32no2 = noiseU32[i] << postShifts; // Q(6+qMagn) + } + if (tmpU32no1 > tmpU32no2) { + // Current magnitude larger than noise + tmpU32no1 <<= 11; // Q(17+qMagn) + if (tmpU32no2 > 0) { + tmpU32no1 /= tmpU32no2; // Q11 + postLocSnr[i] = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11 + } else { + postLocSnr[i] = satMax; + } + } + + // calculate prevNearSnr[i] and save for later instead of recalculating it later + // |nearMagnEst| in Q(prevQMagn + 14) + nearMagnEst = inst->prevMagnU16[i] * inst->noiseSupFilter[i]; + tmpU32no1 = nearMagnEst << 3; // Q(prevQMagn+17) + tmpU32no2 = inst->prevNoiseU32[i] >> nShifts; // Q(prevQMagn+6) + + if (tmpU32no2 > 0) { + tmpU32no1 /= tmpU32no2; // Q11 + tmpU32no1 = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11 + } else { + tmpU32no1 = satMax; // Q11 + } + prevNearSnr[i] = tmpU32no1; // Q11 + + //directed decision update of priorSnr + tmpU32no1 = WEBRTC_SPL_UMUL_32_16(prevNearSnr[i], DD_PR_SNR_Q11); // Q22 + tmpU32no2 = WEBRTC_SPL_UMUL_32_16(postLocSnr[i] - 2048, ONE_MINUS_DD_PR_SNR_Q11); // Q22 + priorSnr = tmpU32no1 + tmpU32no2 + 512; // Q22 (added 512 for rounding) + // priorLocSnr = 1 + 2*priorSnr + priorLocSnr[i] = 2048 + (priorSnr >> 10); // Q11 + } // end of loop over frequencies + // done with step 1: DD computation of prior and post SNR + + // STEP 2: compute speech/noise likelihood + + //compute difference of input spectrum with learned/estimated noise spectrum + WebRtcNsx_ComputeSpectralDifference(inst, magnU16); + 
//compute histograms for determination of parameters (thresholds and weights for features) + //parameters are extracted once every window time (=inst->modelUpdate) + //counter update + inst->cntThresUpdate++; + flag = (int)(inst->cntThresUpdate == inst->modelUpdate); + //update histogram + WebRtcNsx_FeatureParameterExtraction(inst, flag); + //compute model parameters + if (flag) { + inst->cntThresUpdate = 0; // Reset counter + //update every window: + // get normalization for spectral difference for next window estimate + + // Shift to Q(-2*stages) + inst->curAvgMagnEnergy >>= STAT_UPDATES; + + tmpU32no1 = (inst->curAvgMagnEnergy + inst->timeAvgMagnEnergy + 1) >> 1; //Q(-2*stages) + // Update featureSpecDiff + if ((tmpU32no1 != inst->timeAvgMagnEnergy) && (inst->featureSpecDiff) && + (inst->timeAvgMagnEnergy > 0)) { + norm32no1 = 0; + tmpU32no3 = tmpU32no1; + while (0xFFFF0000 & tmpU32no3) { + tmpU32no3 >>= 1; + norm32no1++; + } + tmpU32no2 = inst->featureSpecDiff; + while (0xFFFF0000 & tmpU32no2) { + tmpU32no2 >>= 1; + norm32no1++; + } + tmpU32no3 = WEBRTC_SPL_UMUL(tmpU32no3, tmpU32no2); + tmpU32no3 /= inst->timeAvgMagnEnergy; + if (WebRtcSpl_NormU32(tmpU32no3) < norm32no1) { + inst->featureSpecDiff = 0x007FFFFF; + } else { + inst->featureSpecDiff = WEBRTC_SPL_MIN(0x007FFFFF, + tmpU32no3 << norm32no1); + } + } + + inst->timeAvgMagnEnergy = tmpU32no1; // Q(-2*stages) + inst->curAvgMagnEnergy = 0; + } + + //compute speech/noise probability + WebRtcNsx_SpeechNoiseProb(inst, nonSpeechProbFinal, priorLocSnr, postLocSnr); + + //time-avg parameter for noise update + gammaNoise = NOISE_UPDATE_Q8; // Q8 + + maxNoiseU32 = 0; + postShifts = inst->prevQNoise - qMagn; + nShifts = inst->prevQMagn - qMagn; + for (i = 0; i < inst->magnLen; i++) { + // temporary noise update: use it for speech frames if update value is less than previous + // the formula has been rewritten into: + // noiseUpdate = noisePrev[i] + (1 - gammaNoise) * nonSpeechProb * (magn[i] - noisePrev[i]) + + if 
(postShifts < 0) { + tmpU32no2 = magnU16[i] >> -postShifts; // Q(prevQNoise) + } else { + tmpU32no2 = (uint32_t)magnU16[i] << postShifts; // Q(prevQNoise) + } + if (prevNoiseU16[i] > tmpU32no2) { + sign = -1; + tmpU32no1 = prevNoiseU16[i] - tmpU32no2; + } else { + sign = 1; + tmpU32no1 = tmpU32no2 - prevNoiseU16[i]; + } + noiseUpdateU32 = inst->prevNoiseU32[i]; // Q(prevQNoise+11) + tmpU32no3 = 0; + if ((tmpU32no1) && (nonSpeechProbFinal[i])) { + // This value will be used later, if gammaNoise changes + tmpU32no3 = WEBRTC_SPL_UMUL_32_16(tmpU32no1, nonSpeechProbFinal[i]); // Q(prevQNoise+8) + if (0x7c000000 & tmpU32no3) { + // Shifting required before multiplication + tmpU32no2 = (tmpU32no3 >> 5) * gammaNoise; // Q(prevQNoise+11) + } else { + // We can do shifting after multiplication + tmpU32no2 = (tmpU32no3 * gammaNoise) >> 5; // Q(prevQNoise+11) + } + if (sign > 0) { + noiseUpdateU32 += tmpU32no2; // Q(prevQNoise+11) + } else { + // This operation is safe. We can never get wrap around, since worst + // case scenario means magnU16 = 0 + noiseUpdateU32 -= tmpU32no2; // Q(prevQNoise+11) + } + } + + //increase gamma (i.e., less noise update) for frame likely to be speech + prevGammaNoise = gammaNoise; + gammaNoise = NOISE_UPDATE_Q8; + //time-constant based on speech/noise state + //increase gamma (i.e., less noise update) for frames likely to be speech + if (nonSpeechProbFinal[i] < ONE_MINUS_PROB_RANGE_Q8) { + gammaNoise = GAMMA_NOISE_TRANS_AND_SPEECH_Q8; + } + + if (prevGammaNoise != gammaNoise) { + // new noise update + // this line is the same as above, only that the result is stored in a different variable and the gammaNoise + // has changed + // + // noiseUpdate = noisePrev[i] + (1 - gammaNoise) * nonSpeechProb * (magn[i] - noisePrev[i]) + + if (0x7c000000 & tmpU32no3) { + // Shifting required before multiplication + tmpU32no2 = (tmpU32no3 >> 5) * gammaNoise; // Q(prevQNoise+11) + } else { + // We can do shifting after multiplication + tmpU32no2 = (tmpU32no3 * 
gammaNoise) >> 5; // Q(prevQNoise+11) + } + if (sign > 0) { + tmpU32no1 = inst->prevNoiseU32[i] + tmpU32no2; // Q(prevQNoise+11) + } else { + tmpU32no1 = inst->prevNoiseU32[i] - tmpU32no2; // Q(prevQNoise+11) + } + if (noiseUpdateU32 > tmpU32no1) { + noiseUpdateU32 = tmpU32no1; // Q(prevQNoise+11) + } + } + noiseU32[i] = noiseUpdateU32; // Q(prevQNoise+11) + if (noiseUpdateU32 > maxNoiseU32) { + maxNoiseU32 = noiseUpdateU32; + } + + // conservative noise update + // // original FLOAT code + // if (prob_speech < PROB_RANGE) { + // inst->avgMagnPause[i] = inst->avgMagnPause[i] + (1.0 - gamma_pause)*(magn[i] - inst->avgMagnPause[i]); + // } + + tmp32no2 = WEBRTC_SPL_SHIFT_W32(inst->avgMagnPause[i], -nShifts); + if (nonSpeechProbFinal[i] > ONE_MINUS_PROB_RANGE_Q8) { + if (nShifts < 0) { + tmp32no1 = (int32_t)magnU16[i] - tmp32no2; // Q(qMagn) + tmp32no1 *= ONE_MINUS_GAMMA_PAUSE_Q8; // Q(8+prevQMagn+nShifts) + tmp32no1 = (tmp32no1 + 128) >> 8; // Q(qMagn). + } else { + // In Q(qMagn+nShifts) + tmp32no1 = ((int32_t)magnU16[i] << nShifts) - inst->avgMagnPause[i]; + tmp32no1 *= ONE_MINUS_GAMMA_PAUSE_Q8; // Q(8+prevQMagn+nShifts) + tmp32no1 = (tmp32no1 + (128 << nShifts)) >> (8 + nShifts); // Q(qMagn). 
+ } + tmp32no2 += tmp32no1; // Q(qMagn) + } + inst->avgMagnPause[i] = tmp32no2; + } // end of frequency loop + + norm32no1 = WebRtcSpl_NormU32(maxNoiseU32); + qNoise = inst->prevQNoise + norm32no1 - 5; + // done with step 2: noise update + + // STEP 3: compute dd update of prior snr and post snr based on new noise estimate + nShifts = inst->prevQNoise + 11 - qMagn; + for (i = 0; i < inst->magnLen; i++) { + // FLOAT code + // // post and prior SNR + // curNearSnr = 0.0; + // if (magn[i] > noise[i]) + // { + // curNearSnr = magn[i] / (noise[i] + 0.0001) - 1.0; + // } + // // DD estimate is sum of two terms: current estimate and previous estimate + // // directed decision update of snrPrior + // snrPrior = DD_PR_SNR * prevNearSnr[i] + (1.0 - DD_PR_SNR) * curNearSnr; + // // gain filter + // tmpFloat1 = inst->overdrive + snrPrior; + // tmpFloat2 = snrPrior / tmpFloat1; + // theFilter[i] = tmpFloat2; + + // calculate curNearSnr again, this is necessary because a new noise estimate has been made since then. 
for the original + curNearSnr = 0; // Q11 + if (nShifts < 0) { + // This case is equivalent with magn < noise which implies curNearSnr = 0; + tmpMagnU32 = (uint32_t)magnU16[i]; // Q(qMagn) + tmpNoiseU32 = noiseU32[i] << -nShifts; // Q(qMagn) + } else if (nShifts > 17) { + tmpMagnU32 = (uint32_t)magnU16[i] << 17; // Q(qMagn+17) + tmpNoiseU32 = noiseU32[i] >> (nShifts - 17); // Q(qMagn+17) + } else { + tmpMagnU32 = (uint32_t)magnU16[i] << nShifts; // Q(qNoise_prev+11) + tmpNoiseU32 = noiseU32[i]; // Q(qNoise_prev+11) + } + if (tmpMagnU32 > tmpNoiseU32) { + tmpU32no1 = tmpMagnU32 - tmpNoiseU32; // Q(qCur) + norm32no2 = WEBRTC_SPL_MIN(11, WebRtcSpl_NormU32(tmpU32no1)); + tmpU32no1 <<= norm32no2; // Q(qCur+norm32no2) + tmpU32no2 = tmpNoiseU32 >> (11 - norm32no2); // Q(qCur+norm32no2-11) + if (tmpU32no2 > 0) { + tmpU32no1 /= tmpU32no2; // Q11 + } + curNearSnr = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11 + } + + //directed decision update of priorSnr + // FLOAT + // priorSnr = DD_PR_SNR * prevNearSnr + (1.0-DD_PR_SNR) * curNearSnr; + + tmpU32no1 = WEBRTC_SPL_UMUL_32_16(prevNearSnr[i], DD_PR_SNR_Q11); // Q22 + tmpU32no2 = WEBRTC_SPL_UMUL_32_16(curNearSnr, ONE_MINUS_DD_PR_SNR_Q11); // Q22 + priorSnr = tmpU32no1 + tmpU32no2; // Q22 + + //gain filter + tmpU32no1 = inst->overdrive + ((priorSnr + 8192) >> 14); // Q8 + RTC_DCHECK_GT(inst->overdrive, 0); + tmpU16no1 = (priorSnr + tmpU32no1 / 2) / tmpU32no1; // Q14 + inst->noiseSupFilter[i] = WEBRTC_SPL_SAT(16384, tmpU16no1, inst->denoiseBound); // 16384 = Q14(1.0) // Q14 + + // Weight in the parametric Wiener filter during startup + if (inst->blockIndex < END_STARTUP_SHORT) { + // Weight the two suppression filters + tmpU32no1 = inst->noiseSupFilter[i] * inst->blockIndex; + tmpU32no2 = noiseSupFilterTmp[i] * + (END_STARTUP_SHORT - inst->blockIndex); + tmpU32no1 += tmpU32no2; + inst->noiseSupFilter[i] = (uint16_t)WebRtcSpl_DivU32U16(tmpU32no1, + END_STARTUP_SHORT); + } + } // end of loop over frequencies + //done with step3 + + // 
save noise and magnitude spectrum for next frame + inst->prevQNoise = qNoise; + inst->prevQMagn = qMagn; + if (norm32no1 > 5) { + for (i = 0; i < inst->magnLen; i++) { + inst->prevNoiseU32[i] = noiseU32[i] << (norm32no1 - 5); // Q(qNoise+11) + inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn) + } + } else { + for (i = 0; i < inst->magnLen; i++) { + inst->prevNoiseU32[i] = noiseU32[i] >> (5 - norm32no1); // Q(qNoise+11) + inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn) + } + } + + WebRtcNsx_DataSynthesis(inst, outFrame[0]); +#ifdef NS_FILEDEBUG + if (fwrite(outframe, sizeof(short), + inst->blockLen10ms, inst->outfile) != inst->blockLen10ms) { + RTC_DCHECK(false); + } +#endif + + //for H band: + // only update data buffer, then apply time-domain gain is applied derived from L band + if (num_bands > 1) { + // update analysis buffer for H band + // append new data to buffer FX + for (i = 0; i < num_high_bands; ++i) { + memcpy(inst->dataBufHBFX[i], inst->dataBufHBFX[i] + inst->blockLen10ms, + (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->dataBufHBFX[i])); + memcpy(inst->dataBufHBFX[i] + inst->anaLen - inst->blockLen10ms, + speechFrameHB[i], inst->blockLen10ms * sizeof(*inst->dataBufHBFX[i])); + } + // range for averaging low band quantities for H band gain + + gainTimeDomainHB = 16384; // 16384 = Q14(1.0) + //average speech prob from low band + //average filter gain from low band + //avg over second half (i.e., 4->8kHz) of freq. 
spectrum + tmpU32no1 = 0; // Q12 + tmpU16no1 = 0; // Q8 + for (i = inst->anaLen2 - (inst->anaLen2 >> 2); i < inst->anaLen2; i++) { + tmpU16no1 += nonSpeechProbFinal[i]; // Q8 + tmpU32no1 += (uint32_t)(inst->noiseSupFilter[i]); // Q14 + } + RTC_DCHECK_GE(inst->stages, 7); + avgProbSpeechHB = (4096 - (tmpU16no1 >> (inst->stages - 7))); // Q12 + avgFilterGainHB = (int16_t)(tmpU32no1 >> (inst->stages - 3)); // Q14 + + // // original FLOAT code + // // gain based on speech probability: + // avg_prob_speech_tt=(float)2.0*avg_prob_speech-(float)1.0; + // gain_mod=(float)0.5*((float)1.0+(float)tanh(avg_prob_speech_tt)); // between 0 and 1 + + // gain based on speech probability: + // original expression: "0.5 * (1 + tanh(2x-1))" + // avgProbSpeechHB has been anyway saturated to a value between 0 and 1 so the other cases don't have to be dealt with + // avgProbSpeechHB and gainModHB are in Q12, 3607 = Q12(0.880615234375) which is a zero point of + // |0.5 * (1 + tanh(2x-1)) - x| - |0.5 * (1 + tanh(2x-1)) - 0.880615234375| meaning that from that point the error of approximating + // the expression with f(x) = x would be greater than the error of approximating the expression with f(x) = 0.880615234375 + // error: "|0.5 * (1 + tanh(2x-1)) - x| from x=0 to 0.880615234375" -> http://www.wolframalpha.com/input/?i=|0.5+*+(1+%2B+tanh(2x-1))+-+x|+from+x%3D0+to+0.880615234375 + // and: "|0.5 * (1 + tanh(2x-1)) - 0.880615234375| from x=0.880615234375 to 1" -> http://www.wolframalpha.com/input/?i=+|0.5+*+(1+%2B+tanh(2x-1))+-+0.880615234375|+from+x%3D0.880615234375+to+1 + gainModHB = WEBRTC_SPL_MIN(avgProbSpeechHB, 3607); + + // // original FLOAT code + // //combine gain with low band gain + // if (avg_prob_speech < (float)0.5) { + // gain_time_domain_HB=(float)0.5*gain_mod+(float)0.5*avg_filter_gain; + // } + // else { + // gain_time_domain_HB=(float)0.25*gain_mod+(float)0.75*avg_filter_gain; + // } + + + //combine gain with low band gain + if (avgProbSpeechHB < 2048) { + // 2048 = 
Q12(0.5) + // the next two lines in float are "gain_time_domain = 0.5 * gain_mod + 0.5 * avg_filter_gain"; Q2(0.5) = 2 equals one left shift + gainTimeDomainHB = (gainModHB << 1) + (avgFilterGainHB >> 1); // Q14 + } else { + // "gain_time_domain = 0.25 * gain_mod + 0.75 * agv_filter_gain;" + gainTimeDomainHB = (int16_t)((3 * avgFilterGainHB) >> 2); // 3 = Q2(0.75) + gainTimeDomainHB += gainModHB; // Q14 + } + //make sure gain is within flooring range + gainTimeDomainHB + = WEBRTC_SPL_SAT(16384, gainTimeDomainHB, (int16_t)(inst->denoiseBound)); // 16384 = Q14(1.0) + + + //apply gain + for (i = 0; i < num_high_bands; ++i) { + for (j = 0; j < inst->blockLen10ms; j++) { + outFrameHB[i][j] = (int16_t)((gainTimeDomainHB * + inst->dataBufHBFX[i][j]) >> 14); // Q0 + } + } + } // end of H band gain computation +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.h new file mode 100644 index 000000000..d1754f31e --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_core.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
 */

#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_

#ifdef NS_FILEDEBUG
#include <stdio.h>  /* NOTE(review): include target was lost in patch mangling;
                     * upstream WebRTC uses <stdio.h> here for the debug FILE
                     * dumps — confirm against commit 651594b3. */
#endif

#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/ns/nsx_defines.h"
#include "webrtc/typedefs.h"

// Complete state of one fixed-point noise-suppression instance. Comments of
// the form "Qn" give the fixed-point format (n fractional bits) of a field.
typedef struct NoiseSuppressionFixedC_ {
  uint32_t fs;  // Sampling frequency (Hz) the instance was initialized with.

  const int16_t* window;  // Analysis/synthesis window (set at init).
  int16_t analysisBuffer[ANAL_BLOCKL_MAX];   // Rolling input for the FFT.
  int16_t synthesisBuffer[ANAL_BLOCKL_MAX];  // Overlap-add output buffer.
  uint16_t noiseSupFilter[HALF_ANAL_BLOCKL];  // Per-bin Wiener gain, Q14.
  uint16_t overdrive; /* Q8 */
  uint16_t denoiseBound; /* Q14 */
  const int16_t* factor2Table;
  // Quantile noise estimator state: SIMULT parallel estimates, each over
  // HALF_ANAL_BLOCKL frequency bins.
  int16_t noiseEstLogQuantile[SIMULT* HALF_ANAL_BLOCKL];
  int16_t noiseEstDensity[SIMULT* HALF_ANAL_BLOCKL];
  int16_t noiseEstCounter[SIMULT];
  int16_t noiseEstQuantile[HALF_ANAL_BLOCKL];

  size_t anaLen;    // Analysis block length.
  size_t anaLen2;   // anaLen / 2.
  size_t magnLen;   // Number of magnitude bins.
  int aggrMode;     // Aggressiveness mode (see WebRtcNsx_set_policy_core).
  int stages;       // FFT stages (log2 of FFT length).
  int initFlag;
  int gainMap;

  int32_t maxLrt;
  int32_t minLrt;
  // Log LRT factor with time-smoothing in Q8.
  int32_t logLrtTimeAvgW32[HALF_ANAL_BLOCKL];
  int32_t featureLogLrt;
  int32_t thresholdLogLrt;
  int16_t weightLogLrt;

  uint32_t featureSpecDiff;
  uint32_t thresholdSpecDiff;
  int16_t weightSpecDiff;

  uint32_t featureSpecFlat;
  uint32_t thresholdSpecFlat;
  int16_t weightSpecFlat;

  // Conservative estimate of noise spectrum.
  int32_t avgMagnPause[HALF_ANAL_BLOCKL];
  uint32_t magnEnergy;
  uint32_t sumMagn;
  uint32_t curAvgMagnEnergy;
  uint32_t timeAvgMagnEnergy;
  uint32_t timeAvgMagnEnergyTmp;

  uint32_t whiteNoiseLevel;  // Initial noise estimate.
  // Initial magnitude spectrum estimate.
  uint32_t initMagnEst[HALF_ANAL_BLOCKL];
  // Pink noise parameters:
  int32_t pinkNoiseNumerator;  // Numerator.
  int32_t pinkNoiseExp;  // Power of freq.
  int minNorm;  // Smallest normalization factor.
  int zeroInputSignal;  // Zero input signal flag.

  // Noise spectrum from previous frame.
  uint32_t prevNoiseU32[HALF_ANAL_BLOCKL];
  // Magnitude spectrum from previous frame.
  uint16_t prevMagnU16[HALF_ANAL_BLOCKL];
  // Prior speech/noise probability in Q14.
  int16_t priorNonSpeechProb;

  int blockIndex;  // Frame index counter.
  // Parameter for updating or estimating thresholds/weights for prior model.
  int modelUpdate;
  int cntThresUpdate;

  // Histograms for parameter estimation.
  int16_t histLrt[HIST_PAR_EST];
  int16_t histSpecFlat[HIST_PAR_EST];
  int16_t histSpecDiff[HIST_PAR_EST];

  // Quantities for high band estimate.
  int16_t dataBufHBFX[NUM_HIGH_BANDS_MAX][ANAL_BLOCKL_MAX];

  int qNoise;       // Q-domain of the current noise estimate.
  int prevQNoise;   // Q-domain of the previous frame's noise estimate.
  int prevQMagn;    // Q-domain of the previous frame's magnitude spectrum.
  size_t blockLen10ms;

  int16_t real[ANAL_BLOCKL_MAX];  // FFT real part, Q(normData-stages).
  int16_t imag[ANAL_BLOCKL_MAX];  // FFT imaginary part, Q(normData-stages).
  int32_t energyIn;
  int scaleEnergyIn;
  int normData;

  struct RealFFT* real_fft;
} NoiseSuppressionFixedC;

#ifdef __cplusplus
extern "C"
{
#endif

/****************************************************************************
 * WebRtcNsx_InitCore(...)
 *
 * This function initializes a noise suppression instance
 *
 * Input:
 *      - inst          : Instance that should be initialized
 *      - fs            : Sampling frequency
 *
 * Output:
 *      - inst          : Initialized instance
 *
 * Return value         :  0 - Ok
 *                        -1 - Error
 */
int32_t WebRtcNsx_InitCore(NoiseSuppressionFixedC* inst, uint32_t fs);

/****************************************************************************
 * WebRtcNsx_set_policy_core(...)
 *
 * This changes the aggressiveness of the noise suppression method.
 *
 * Input:
 *      - inst          : Instance that should be initialized
 *      - mode          : 0: Mild (6 dB), 1: Medium (10 dB), 2: Aggressive (15 dB)
 *
 * Output:
 *      - inst          : Initialized instance
 *
 * Return value         :  0 - Ok
 *                        -1 - Error
 */
int WebRtcNsx_set_policy_core(NoiseSuppressionFixedC* inst, int mode);

/****************************************************************************
 * WebRtcNsx_ProcessCore
 *
 * Do noise suppression.
 *
 * Input:
 *      - inst          : Instance that should be initialized
 *      - inFrame       : Input speech frame for each band
 *      - num_bands     : Number of bands
 *
 * Output:
 *      - inst          : Updated instance
 *      - outFrame      : Output speech frame for each band
 */
void WebRtcNsx_ProcessCore(NoiseSuppressionFixedC* inst,
                           const short* const* inFrame,
                           int num_bands,
                           short* const* outFrame);

/****************************************************************************
 * Some function pointers, for internal functions shared by ARM NEON and
 * generic C code.
 */
// Noise Estimation.
typedef void (*NoiseEstimation)(NoiseSuppressionFixedC* inst,
                                uint16_t* magn,
                                uint32_t* noise,
                                int16_t* q_noise);
extern NoiseEstimation WebRtcNsx_NoiseEstimation;

// Filter the data in the frequency domain, and create spectrum.
typedef void (*PrepareSpectrum)(NoiseSuppressionFixedC* inst,
                                int16_t* freq_buff);
extern PrepareSpectrum WebRtcNsx_PrepareSpectrum;

// For the noise suppression process, synthesis, read out fully processed
// segment, and update synthesis buffer.
typedef void (*SynthesisUpdate)(NoiseSuppressionFixedC* inst,
                                int16_t* out_frame,
                                int16_t gain_factor);
extern SynthesisUpdate WebRtcNsx_SynthesisUpdate;

// Update analysis buffer for lower band, and window data before FFT.
typedef void (*AnalysisUpdate)(NoiseSuppressionFixedC* inst,
                               int16_t* out,
                               int16_t* new_speech);
extern AnalysisUpdate WebRtcNsx_AnalysisUpdate;

// Denormalize the real-valued signal |in|, the output from inverse FFT.
typedef void (*Denormalize)(NoiseSuppressionFixedC* inst,
                            int16_t* in,
                            int factor);
extern Denormalize WebRtcNsx_Denormalize;

// Normalize the real-valued signal |in|, the input to forward FFT.
typedef void (*NormalizeRealBuffer)(NoiseSuppressionFixedC* inst,
                                    const int16_t* in,
                                    int16_t* out);
extern NormalizeRealBuffer WebRtcNsx_NormalizeRealBuffer;

// Compute speech/noise probability.
// Intended to be private.
void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
                               uint16_t* nonSpeechProbFinal,
                               uint32_t* priorLocSnr,
                               uint32_t* postLocSnr);

#if defined(WEBRTC_HAS_NEON)
// For the above function pointers, functions for generic platforms are declared
// and defined as static in file nsx_core.c, while those for ARM Neon platforms
// are declared below and defined in file nsx_core_neon.c.
void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
                                   uint16_t* magn,
                                   uint32_t* noise,
                                   int16_t* q_noise);
void WebRtcNsx_SynthesisUpdateNeon(NoiseSuppressionFixedC* inst,
                                   int16_t* out_frame,
                                   int16_t gain_factor);
void WebRtcNsx_AnalysisUpdateNeon(NoiseSuppressionFixedC* inst,
                                  int16_t* out,
                                  int16_t* new_speech);
void WebRtcNsx_PrepareSpectrumNeon(NoiseSuppressionFixedC* inst,
                                   int16_t* freq_buff);
#endif

#if defined(MIPS32_LE)
// For the above function pointers, functions for generic platforms are declared
// and defined as static in file nsx_core.c, while those for MIPS platforms
// are declared below and defined in file nsx_core_mips.c.
void WebRtcNsx_SynthesisUpdate_mips(NoiseSuppressionFixedC* inst,
                                    int16_t* out_frame,
                                    int16_t gain_factor);
void WebRtcNsx_AnalysisUpdate_mips(NoiseSuppressionFixedC* inst,
                                   int16_t* out,
                                   int16_t* new_speech);
void WebRtcNsx_PrepareSpectrum_mips(NoiseSuppressionFixedC* inst,
                                    int16_t* freq_buff);
void WebRtcNsx_NormalizeRealBuffer_mips(NoiseSuppressionFixedC* inst,
                                        const int16_t* in,
                                        int16_t* out);
#if defined(MIPS_DSP_R1_LE)
void WebRtcNsx_Denormalize_mips(NoiseSuppressionFixedC* inst,
                                int16_t* in,
                                int factor);
#endif

#endif

#ifdef __cplusplus
}
#endif

#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_

/* ==== File: webrtc/modules/audio_processing/ns/nsx_core_c.c (added as a new
 * file by this patch). Generic C implementations for the fixed-point noise
 * suppressor. ==== */
/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h"
#include "webrtc/modules/audio_processing/ns/nsx_core.h"
#include "webrtc/modules/audio_processing/ns/nsx_defines.h"

// Q14 lookup table for the sigmoid map 0.5 * (tanh(x) + 1) - 0.5, sampled at
// integer x; interpolated linearly below.
static const int16_t kIndicatorTable[17] = {
  0, 2017, 3809, 5227, 6258, 6963, 7424, 7718,
  7901, 8014, 8084, 8126, 8152, 8168, 8177, 8183, 8187
};

// Compute speech/noise probability
// speech/noise probability is returned in: probSpeechFinal
//snrLocPrior is the prior SNR for each frequency (in Q11)
//snrLocPost is the post SNR for each frequency (in Q11)
//
// nonSpeechProbFinal receives the per-bin non-speech probability in Q8
// (zeroed first, then filled only when priorNonSpeechProb > 0).
// Updates inst->logLrtTimeAvgW32, inst->featureLogLrt and
// inst->priorNonSpeechProb as side effects.
void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
                               uint16_t* nonSpeechProbFinal,
                               uint32_t* priorLocSnr,
                               uint32_t* postLocSnr) {
  uint32_t zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3;
  int32_t invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32;
  int32_t frac32, logTmp;
  int32_t logLrtTimeAvgKsumFX;
  int16_t indPriorFX16;
  int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
  size_t i;
  int normTmp, normTmp2, nShifts;

  // compute feature based on average LR factor
  // this is the average over all frequencies of the smooth log LRT
  logLrtTimeAvgKsumFX = 0;
  for (i = 0; i < inst->magnLen; i++) {
    besselTmpFX32 = (int32_t)postLocSnr[i]; // Q11
    normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
    num = postLocSnr[i] << normTmp;  // Q(11+normTmp)
    if (normTmp > 10) {
      den = priorLocSnr[i] << (normTmp - 11);  // Q(normTmp)
    } else {
      den = priorLocSnr[i] >> (11 - normTmp);  // Q(normTmp)
    }
    if (den > 0) {
      besselTmpFX32 -= num / den;  // Q11
    } else {
      besselTmpFX32 = 0;
    }

    // inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - log(snrLocPrior)
    //                                       - inst->logLrtTimeAvg[i]);
    // Here, LRT_TAVG = 0.5
    zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
    frac32 = (int32_t)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
    // Quadratic polynomial approximation of the fractional part of log2.
    tmp32 = (frac32 * frac32 * -43) >> 19;
    tmp32 += ((int16_t)frac32 * 5412) >> 12;
    frac32 = tmp32 + 37;
    // tmp32 = log2(priorLocSnr[i])
    tmp32 = (int32_t)(((31 - zeros) << 12) + frac32) - (11 << 12);  // Q12
    logTmp = (tmp32 * 178) >> 8;  // log2(priorLocSnr[i])*log(2)
    // tmp32no1 = LRT_TAVG * (log(snrLocPrior) + inst->logLrtTimeAvg[i]) in Q12.
    tmp32no1 = (logTmp + inst->logLrtTimeAvgW32[i]) / 2;
    inst->logLrtTimeAvgW32[i] += (besselTmpFX32 - tmp32no1);  // Q12

    logLrtTimeAvgKsumFX += inst->logLrtTimeAvgW32[i];  // Q12
  }
  inst->featureLogLrt = (logLrtTimeAvgKsumFX * BIN_SIZE_LRT) >>
      (inst->stages + 11);

  // done with computation of LR factor

  //
  // compute the indicator functions
  //

  // average LRT feature
  // FLOAT code
  // indicator0 = 0.5 * (tanh(widthPrior *
  //                     (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
  tmpIndFX = 16384;  // Q14(1.0)
  tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt;  // Q12
  nShifts = 7 - inst->stages;  // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
  // use larger width in tanh map for pause regions
  if (tmp32no1 < 0) {
    tmpIndFX = 0;
    tmp32no1 = -tmp32no1;
    // widthPrior = widthPrior * 2.0;
    nShifts++;
  }
  tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts);  // Q14
  // compute indicator function: sigmoid map
  if (tmp32no1 < (16 << 14) && tmp32no1 >= 0) {
    tableIndex = (int16_t)(tmp32no1 >> 14);
    tmp16no2 = kIndicatorTable[tableIndex];
    tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
    frac = (int16_t)(tmp32no1 & 0x00003fff);  // Q14
    tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
    if (tmpIndFX == 0) {
      tmpIndFX = 8192 - tmp16no2;  // Q14
    } else {
      tmpIndFX = 8192 + tmp16no2;  // Q14
    }
  }
  indPriorFX = inst->weightLogLrt * tmpIndFX;  // 6*Q14

  // spectral flatness feature
  if (inst->weightSpecFlat) {
    tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400);  // Q10
    tmpIndFX = 16384;  // Q14(1.0)
    // use larger width in tanh map for pause regions
    tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1;  // Q10
    nShifts = 4;
    if (inst->thresholdSpecFlat < tmpU32no1) {
      tmpIndFX = 0;
      tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
      // widthPrior = widthPrior * 2.0;
      nShifts++;
    }
    tmpU32no1 = WebRtcSpl_DivU32U16(tmpU32no2 << nShifts, 25);  // Q14
    // compute indicator function: sigmoid map
    // FLOAT code
    // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
    //                     (threshPrior1 - tmpFloat1)) + 1.0);
    if (tmpU32no1 < (16 << 14)) {
      tableIndex = (int16_t)(tmpU32no1 >> 14);
      tmp16no2 = kIndicatorTable[tableIndex];
      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
      frac = (int16_t)(tmpU32no1 & 0x00003fff);  // Q14
      tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
      if (tmpIndFX) {
        tmpIndFX = 8192 + tmp16no2;  // Q14
      } else {
        tmpIndFX = 8192 - tmp16no2;  // Q14
      }
    }
    indPriorFX += inst->weightSpecFlat * tmpIndFX;  // 6*Q14
  }

  // for template spectral-difference
  if (inst->weightSpecDiff) {
    tmpU32no1 = 0;
    if (inst->featureSpecDiff) {
      normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
                               WebRtcSpl_NormU32(inst->featureSpecDiff));
      RTC_DCHECK_GE(normTmp, 0);
      tmpU32no1 = inst->featureSpecDiff << normTmp;  // Q(normTmp-2*stages)
      tmpU32no2 = inst->timeAvgMagnEnergy >> (20 - inst->stages - normTmp);
      if (tmpU32no2 > 0) {
        // Q(20 - inst->stages)
        tmpU32no1 /= tmpU32no2;
      } else {
        tmpU32no1 = (uint32_t)(0x7fffffff);
      }
    }
    tmpU32no3 = (inst->thresholdSpecDiff << 17) / 25;
    tmpU32no2 = tmpU32no1 - tmpU32no3;
    nShifts = 1;
    tmpIndFX = 16384;  // Q14(1.0)
    // use larger width in tanh map for pause regions
    if (tmpU32no2 & 0x80000000) {
      tmpIndFX = 0;
      tmpU32no2 = tmpU32no3 - tmpU32no1;
      // widthPrior = widthPrior * 2.0;
      nShifts--;
    }
    tmpU32no1 = tmpU32no2 >> nShifts;
    // compute indicator function: sigmoid map
    /* FLOAT code
     indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
     */
    if (tmpU32no1 < (16 << 14)) {
      tableIndex = (int16_t)(tmpU32no1 >> 14);
      tmp16no2 = kIndicatorTable[tableIndex];
      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
      frac = (int16_t)(tmpU32no1 & 0x00003fff);  // Q14
      tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
          tmp16no1, frac, 14);
      if (tmpIndFX) {
        tmpIndFX = 8192 + tmp16no2;
      } else {
        tmpIndFX = 8192 - tmp16no2;
      }
    }
    indPriorFX += inst->weightSpecDiff * tmpIndFX;  // 6*Q14
  }

  // combine the indicator function with the feature weights
  // FLOAT code
  // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 *
  //                 indicator1 + weightIndPrior2 * indicator2);
  indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6);  // Q14
  // done with computing indicator function

  // compute the prior probability
  // FLOAT code
  // inst->priorNonSpeechProb += PRIOR_UPDATE *
  //                             (indPriorNonSpeech - inst->priorNonSpeechProb);
  tmp16 = indPriorFX16 - inst->priorNonSpeechProb;  // Q14
  inst->priorNonSpeechProb += (int16_t)((PRIOR_UPDATE_Q14 * tmp16) >> 14);

  // final speech probability: combine prior model with LR factor:

  memset(nonSpeechProbFinal, 0, sizeof(uint16_t) * inst->magnLen);

  if (inst->priorNonSpeechProb > 0) {
    for (i = 0; i < inst->magnLen; i++) {
      // FLOAT code
      // invLrt = exp(inst->logLrtTimeAvg[i]);
      // invLrt = inst->priorSpeechProb * invLrt;
      // nonSpeechProbFinal[i] = (1.0 - inst->priorSpeechProb) /
      //                         (1.0 - inst->priorSpeechProb + invLrt);
      // invLrt = (1.0 - inst->priorNonSpeechProb) * invLrt;
      // nonSpeechProbFinal[i] = inst->priorNonSpeechProb /
      //                         (inst->priorNonSpeechProb + invLrt);
      if (inst->logLrtTimeAvgW32[i] < 65300) {
        tmp32no1 = (inst->logLrtTimeAvgW32[i] * 23637) >> 14;  // Q12
        intPart = (int16_t)(tmp32no1 >> 12);
        if (intPart < -8) {
          intPart = -8;
        }
        frac = (int16_t)(tmp32no1 & 0x00000fff);  // Q12

        // Quadratic approximation of 2^frac
        tmp32no2 = (frac * frac * 44) >> 19;  // Q12.
        tmp32no2 += (frac * 84) >> 7;  // Q12
        invLrtFX = (1 << (8 + intPart)) +
            WEBRTC_SPL_SHIFT_W32(tmp32no2, intPart - 4);  // Q8

        normTmp = WebRtcSpl_NormW32(invLrtFX);
        normTmp2 = WebRtcSpl_NormW16((16384 - inst->priorNonSpeechProb));
        if (normTmp + normTmp2 >= 7) {
          if (normTmp + normTmp2 < 15) {
            invLrtFX >>= 15 - normTmp2 - normTmp;
            // Q(normTmp+normTmp2-7)
            tmp32no1 = invLrtFX * (16384 - inst->priorNonSpeechProb);
            // Q(normTmp+normTmp2+7)
            invLrtFX = WEBRTC_SPL_SHIFT_W32(tmp32no1, 7 - normTmp - normTmp2);
            // Q14
          } else {
            tmp32no1 = invLrtFX * (16384 - inst->priorNonSpeechProb);
            // Q22
            invLrtFX = tmp32no1 >> 8;  // Q14.
          }

          tmp32no1 = (int32_t)inst->priorNonSpeechProb << 8;  // Q22

          nonSpeechProbFinal[i] = tmp32no1 /
              (inst->priorNonSpeechProb + invLrtFX);  // Q8
        }
      }
    }
  }
}

/* ==== File: webrtc/modules/audio_processing/ns/nsx_core_neon.c (added as a
 * new file by this patch). ARM NEON implementations of the nsx core hooks
 * declared in nsx_core.h. ==== */
/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifdef WEBRTC_ARCH_ARM_FAMILY

#include "webrtc/modules/audio_processing/ns/nsx_core.h"

#include <arm_neon.h>  /* NOTE(review): include target was lost in patch
                        * mangling; upstream WebRTC includes <arm_neon.h>
                        * here — confirm against commit 651594b3. */

#include "webrtc/base/checks.h"

// Constants to compensate for shifting signal log(2^shifts).
+const int16_t WebRtcNsx_kLogTable[9] = { + 0, 177, 355, 532, 710, 887, 1065, 1242, 1420 +}; + +const int16_t WebRtcNsx_kCounterDiv[201] = { + 32767, 16384, 10923, 8192, 6554, 5461, 4681, 4096, 3641, 3277, 2979, 2731, + 2521, 2341, 2185, 2048, 1928, 1820, 1725, 1638, 1560, 1489, 1425, 1365, 1311, + 1260, 1214, 1170, 1130, 1092, 1057, 1024, 993, 964, 936, 910, 886, 862, 840, + 819, 799, 780, 762, 745, 728, 712, 697, 683, 669, 655, 643, 630, 618, 607, + 596, 585, 575, 565, 555, 546, 537, 529, 520, 512, 504, 496, 489, 482, 475, + 468, 462, 455, 449, 443, 437, 431, 426, 420, 415, 410, 405, 400, 395, 390, + 386, 381, 377, 372, 368, 364, 360, 356, 352, 349, 345, 341, 338, 334, 331, + 328, 324, 321, 318, 315, 312, 309, 306, 303, 301, 298, 295, 293, 290, 287, + 285, 282, 280, 278, 275, 273, 271, 269, 266, 264, 262, 260, 258, 256, 254, + 252, 250, 248, 246, 245, 243, 241, 239, 237, 236, 234, 232, 231, 229, 228, + 226, 224, 223, 221, 220, 218, 217, 216, 214, 213, 211, 210, 209, 207, 206, + 205, 204, 202, 201, 200, 199, 197, 196, 195, 194, 193, 192, 191, 189, 188, + 187, 186, 185, 184, 183, 182, 181, 180, 179, 178, 177, 176, 175, 174, 173, + 172, 172, 171, 170, 169, 168, 167, 166, 165, 165, 164, 163 +}; + +const int16_t WebRtcNsx_kLogTableFrac[256] = { + 0, 1, 3, 4, 6, 7, 9, 10, 11, 13, 14, 16, 17, 18, 20, 21, + 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, + 44, 45, 46, 47, 49, 50, 51, 52, 54, 55, 56, 57, 59, 60, 61, 62, + 63, 65, 66, 67, 68, 69, 71, 72, 73, 74, 75, 77, 78, 79, 80, 81, + 82, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 178, 
179, 180, 181, 182, 183, 184, 185, 185, 186, 187,
  188, 189, 190, 191, 192, 192, 193, 194, 195, 196, 197, 198, 198, 199, 200,
  201, 202, 203, 203, 204, 205, 206, 207, 208, 208, 209, 210, 211, 212, 212,
  213, 214, 215, 216, 216, 217, 218, 219, 220, 220, 221, 222, 223, 224, 224,
  225, 226, 227, 228, 228, 229, 230, 231, 231, 232, 233, 234, 234, 235, 236,
  237, 238, 238, 239, 240, 241, 241, 242, 243, 244, 244, 245, 246, 247, 247,
  248, 249, 249, 250, 251, 252, 252, 253, 254, 255, 255
};

// Update the noise estimation information.
// Converts the log-quantile estimates at |offset| (one of the SIMULT banks)
// back to the linear domain, writing inst->noiseEstQuantile and setting
// inst->qNoise to the highest Q-domain that still fits in int16.
// Vectorized 4 bins at a time; the final (odd) bin is handled in scalar code.
static void UpdateNoiseEstimateNeon(NoiseSuppressionFixedC* inst, int offset) {
  const int16_t kExp2Const = 11819;  // Q13, log2(e) scaling for exp().
  int16_t* ptr_noiseEstLogQuantile = NULL;
  int16_t* ptr_noiseEstQuantile = NULL;
  int16x4_t kExp2Const16x4 = vdup_n_s16(kExp2Const);
  int32x4_t twentyOne32x4 = vdupq_n_s32(21);
  int32x4_t constA32x4 = vdupq_n_s32(0x1fffff);
  int32x4_t constB32x4 = vdupq_n_s32(0x200000);

  int16_t tmp16 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset,
                                        inst->magnLen);

  // Guarantee a Q-domain as high as possible and still fit in int16
  inst->qNoise = 14 - (int) WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2Const,
                                                                 tmp16,
                                                                 21);

  int32x4_t qNoise32x4 = vdupq_n_s32(inst->qNoise);

  for (ptr_noiseEstLogQuantile = &inst->noiseEstLogQuantile[offset],
       ptr_noiseEstQuantile = &inst->noiseEstQuantile[0];
       ptr_noiseEstQuantile < &inst->noiseEstQuantile[inst->magnLen - 3];
       ptr_noiseEstQuantile += 4, ptr_noiseEstLogQuantile += 4) {

    // tmp32no2 = kExp2Const * inst->noiseEstLogQuantile[offset + i];
    int16x4_t v16x4 = vld1_s16(ptr_noiseEstLogQuantile);
    int32x4_t v32x4B = vmull_s16(v16x4, kExp2Const16x4);

    // tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac
    int32x4_t v32x4A = vandq_s32(v32x4B, constA32x4);
    v32x4A = vorrq_s32(v32x4A, constB32x4);

    // tmp16 = (int16_t)(tmp32no2 >> 21);
    v32x4B = vshrq_n_s32(v32x4B, 21);

    // tmp16 -= 21;// shift 21 to get result in Q0
    v32x4B = vsubq_s32(v32x4B, twentyOne32x4);

    // tmp16 += (int16_t) inst->qNoise;
    // shift to get result in Q(qNoise)
    v32x4B = vaddq_s32(v32x4B, qNoise32x4);

    // if (tmp16 < 0) {
    //   tmp32no1 >>= -tmp16;
    // } else {
    //   tmp32no1 <<= tmp16;
    // }
    // vshlq_s32 shifts left for positive and right for negative counts,
    // covering both branches in one instruction.
    v32x4B = vshlq_s32(v32x4A, v32x4B);

    // tmp16 = WebRtcSpl_SatW32ToW16(tmp32no1);
    v16x4 = vqmovn_s32(v32x4B);

    //inst->noiseEstQuantile[i] = tmp16;
    vst1_s16(ptr_noiseEstQuantile, v16x4);
  }

  // Last iteration:

  // inst->quantile[i]=exp(inst->lquantile[offset+i]);
  // in Q21
  int32_t tmp32no2 = kExp2Const * *ptr_noiseEstLogQuantile;
  int32_t tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF));  // 2^21 + frac

  tmp16 = (int16_t)(tmp32no2 >> 21);
  tmp16 -= 21;// shift 21 to get result in Q0
  tmp16 += (int16_t) inst->qNoise;  // shift to get result in Q(qNoise)
  if (tmp16 < 0) {
    tmp32no1 >>= -tmp16;
  } else {
    tmp32no1 <<= tmp16;
  }
  *ptr_noiseEstQuantile = WebRtcSpl_SatW32ToW16(tmp32no1);
}

// Noise Estimation
// NEON port of the quantile-based noise estimator: maintains SIMULT parallel
// log-domain quantile estimates of the magnitude spectrum |magn| and returns
// the current noise spectrum in |noise| with its Q-domain in |q_noise|.
// The vector loop handles 8 bins per iteration; the final bin (magnLen is
// odd) is processed by the scalar tail after the loop.
void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
                                   uint16_t* magn,
                                   uint32_t* noise,
                                   int16_t* q_noise) {
  int16_t lmagn[HALF_ANAL_BLOCKL], counter, countDiv;
  int16_t countProd, delta, zeros, frac;
  int16_t log2, tabind, logval, tmp16, tmp16no1, tmp16no2;
  const int16_t log2_const = 22713;   // Q15 of ln(2).
  const int16_t width_factor = 21845;

  size_t i, s, offset;

  tabind = inst->stages - inst->normData;
  RTC_DCHECK_LT(tabind, 9);
  RTC_DCHECK_GT(tabind, -9);
  if (tabind < 0) {
    logval = -WebRtcNsx_kLogTable[-tabind];
  } else {
    logval = WebRtcNsx_kLogTable[tabind];
  }

  int16x8_t logval_16x8 = vdupq_n_s16(logval);

  // lmagn(i)=log(magn(i))=log(2)*log2(magn(i))
  // magn is in Q(-stages), and the real lmagn values are:
  // real_lmagn(i)=log(magn(i)*2^stages)=log(magn(i))+log(2^stages)
  // lmagn in Q8
  for (i = 0; i < inst->magnLen; i++) {
    if (magn[i]) {
      zeros = WebRtcSpl_NormU32((uint32_t)magn[i]);
      frac = (int16_t)((((uint32_t)magn[i] << zeros)
                        & 0x7FFFFFFF) >> 23);
      RTC_DCHECK_LT(frac, 256);
      // log2(magn(i))
      log2 = (int16_t)(((31 - zeros) << 8)
                       + WebRtcNsx_kLogTableFrac[frac]);
      // log2(magn(i))*log(2)
      lmagn[i] = (int16_t)((log2 * log2_const) >> 15);
      // + log(2^stages)
      lmagn[i] += logval;
    } else {
      lmagn[i] = logval;
    }
  }

  int16x4_t Q3_16x4 = vdup_n_s16(3);
  int16x8_t WIDTHQ8_16x8 = vdupq_n_s16(WIDTH_Q8);
  int16x8_t WIDTHFACTOR_16x8 = vdupq_n_s16(width_factor);

  int16_t factor = FACTOR_Q7;
  if (inst->blockIndex < END_STARTUP_LONG)
    factor = FACTOR_Q7_STARTUP;

  // Loop over simultaneous estimates
  for (s = 0; s < SIMULT; s++) {
    offset = s * inst->magnLen;

    // Get counter values from state
    counter = inst->noiseEstCounter[s];
    RTC_DCHECK_LT(counter, 201);
    countDiv = WebRtcNsx_kCounterDiv[counter];
    countProd = (int16_t)(counter * countDiv);

    // quant_est(...)
    int16_t deltaBuff[8];
    int16x4_t tmp16x4_0;
    int16x4_t tmp16x4_1;
    int16x4_t countDiv_16x4 = vdup_n_s16(countDiv);
    int16x8_t countProd_16x8 = vdupq_n_s16(countProd);
    int16x8_t tmp16x8_0 = vdupq_n_s16(countDiv);
    int16x8_t prod16x8 = vqrdmulhq_s16(WIDTHFACTOR_16x8, tmp16x8_0);
    int16x8_t tmp16x8_1;
    int16x8_t tmp16x8_2;
    int16x8_t tmp16x8_3;
    uint16x8_t tmp16x8_4;
    int32x4_t tmp32x4;

    for (i = 0; i + 7 < inst->magnLen; i += 8) {
      // Compute delta.
      // Smaller step size during startup. This prevents from using
      // unrealistic values causing overflow.
      tmp16x8_0 = vdupq_n_s16(factor);
      vst1q_s16(deltaBuff, tmp16x8_0);

      int j;
      for (j = 0; j < 8; j++) {
        if (inst->noiseEstDensity[offset + i + j] > 512) {
          // Get values for deltaBuff by shifting instead of dividing.
          int factor = WebRtcSpl_NormW16(inst->noiseEstDensity[offset + i + j]);
          deltaBuff[j] = (int16_t)(FACTOR_Q16 >> (14 - factor));
        }
      }

      // Update log quantile estimate

      // tmp16 = (int16_t)((delta * countDiv) >> 14);
      tmp32x4 = vmull_s16(vld1_s16(&deltaBuff[0]), countDiv_16x4);
      tmp16x4_1 = vshrn_n_s32(tmp32x4, 14);
      tmp32x4 = vmull_s16(vld1_s16(&deltaBuff[4]), countDiv_16x4);
      tmp16x4_0 = vshrn_n_s32(tmp32x4, 14);
      tmp16x8_0 = vcombine_s16(tmp16x4_1, tmp16x4_0);  // Keep for several lines.

      // prepare for the "if" branch
      // tmp16 += 2;
      // tmp16_1 = (Word16)(tmp16>>2);
      tmp16x8_1 = vrshrq_n_s16(tmp16x8_0, 2);

      // inst->noiseEstLogQuantile[offset+i] + tmp16_1;
      tmp16x8_2 = vld1q_s16(&inst->noiseEstLogQuantile[offset + i]);  // Keep
      tmp16x8_1 = vaddq_s16(tmp16x8_2, tmp16x8_1);  // Keep for several lines

      // Prepare for the "else" branch
      // tmp16 += 1;
      // tmp16_1 = (Word16)(tmp16>>1);
      tmp16x8_0 = vrshrq_n_s16(tmp16x8_0, 1);

      // tmp16_2 = (int16_t)((tmp16_1 * 3) >> 1);
      tmp32x4 = vmull_s16(vget_low_s16(tmp16x8_0), Q3_16x4);
      tmp16x4_1 = vshrn_n_s32(tmp32x4, 1);

      // tmp16_2 = (int16_t)((tmp16_1 * 3) >> 1);
      tmp32x4 = vmull_s16(vget_high_s16(tmp16x8_0), Q3_16x4);
      tmp16x4_0 = vshrn_n_s32(tmp32x4, 1);

      // inst->noiseEstLogQuantile[offset + i] - tmp16_2;
      tmp16x8_0 = vcombine_s16(tmp16x4_1, tmp16x4_0);  // keep
      tmp16x8_0 = vsubq_s16(tmp16x8_2, tmp16x8_0);

      // logval is the smallest fixed point representation we can have. Values
      // below that will correspond to values in the interval [0, 1], which
      // can't possibly occur.
      tmp16x8_0 = vmaxq_s16(tmp16x8_0, logval_16x8);

      // Do the if-else branches:
      tmp16x8_3 = vld1q_s16(&lmagn[i]);  // keep for several lines
      tmp16x8_4 = vcgtq_s16(tmp16x8_3, tmp16x8_2);
      tmp16x8_2 = vbslq_s16(tmp16x8_4, tmp16x8_1, tmp16x8_0);
      vst1q_s16(&inst->noiseEstLogQuantile[offset + i], tmp16x8_2);

      // Update density estimate
      // tmp16_1 + tmp16_2
      tmp16x8_1 = vld1q_s16(&inst->noiseEstDensity[offset + i]);
      tmp16x8_0 = vqrdmulhq_s16(tmp16x8_1, countProd_16x8);
      tmp16x8_0 = vaddq_s16(tmp16x8_0, prod16x8);

      // lmagn[i] - inst->noiseEstLogQuantile[offset + i]
      tmp16x8_3 = vsubq_s16(tmp16x8_3, tmp16x8_2);
      tmp16x8_3 = vabsq_s16(tmp16x8_3);
      tmp16x8_4 = vcgtq_s16(WIDTHQ8_16x8, tmp16x8_3);
      tmp16x8_1 = vbslq_s16(tmp16x8_4, tmp16x8_0, tmp16x8_1);
      vst1q_s16(&inst->noiseEstDensity[offset + i], tmp16x8_1);
    }  // End loop over magnitude spectrum

    // Last iteration over magnitude spectrum:
    // compute delta
    if (inst->noiseEstDensity[offset + i] > 512) {
      // Get values for deltaBuff by shifting instead of dividing.
      int factor = WebRtcSpl_NormW16(inst->noiseEstDensity[offset + i]);
      delta = (int16_t)(FACTOR_Q16 >> (14 - factor));
    } else {
      delta = FACTOR_Q7;
      if (inst->blockIndex < END_STARTUP_LONG) {
        // Smaller step size during startup. This prevents from using
        // unrealistic values causing overflow.
        delta = FACTOR_Q7_STARTUP;
      }
    }
    // update log quantile estimate
    tmp16 = (int16_t)((delta * countDiv) >> 14);
    if (lmagn[i] > inst->noiseEstLogQuantile[offset + i]) {
      // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
      // CounterDiv=1/(inst->counter[s]+1) in Q15
      tmp16 += 2;
      inst->noiseEstLogQuantile[offset + i] += tmp16 / 4;
    } else {
      tmp16 += 1;
      // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
      // TODO(bjornv): investigate why we need to truncate twice.
      tmp16no2 = (int16_t)((tmp16 / 2) * 3 / 2);
      inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
      if (inst->noiseEstLogQuantile[offset + i] < logval) {
        // logval is the smallest fixed point representation we can have.
        // Values below that will correspond to values in the interval
        // [0, 1], which can't possibly occur.
        inst->noiseEstLogQuantile[offset + i] = logval;
      }
    }

    // update density estimate
    if (WEBRTC_SPL_ABS_W16(lmagn[i] - inst->noiseEstLogQuantile[offset + i])
        < WIDTH_Q8) {
      tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
          inst->noiseEstDensity[offset + i], countProd, 15);
      tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
          width_factor, countDiv, 15);
      inst->noiseEstDensity[offset + i] = tmp16no1 + tmp16no2;
    }


    if (counter >= END_STARTUP_LONG) {
      inst->noiseEstCounter[s] = 0;
      if (inst->blockIndex >= END_STARTUP_LONG) {
        UpdateNoiseEstimateNeon(inst, offset);
      }
    }
    inst->noiseEstCounter[s]++;

  }  // end loop over simultaneous estimates

  // Sequentially update the noise during startup
  // (uses |offset| from the last loop iteration above).
  if (inst->blockIndex < END_STARTUP_LONG) {
    UpdateNoiseEstimateNeon(inst, offset);
  }

  for (i = 0; i < inst->magnLen; i++) {
    noise[i] = (uint32_t)(inst->noiseEstQuantile[i]);  // Q(qNoise)
  }
  (*q_noise) = (int16_t)inst->qNoise;
}

// Filter the data in the frequency domain, and create spectrum.
void WebRtcNsx_PrepareSpectrumNeon(NoiseSuppressionFixedC* inst,
                                   int16_t* freq_buf) {
  RTC_DCHECK_EQ(1, inst->magnLen % 8);
  RTC_DCHECK_EQ(0, inst->anaLen2 % 16);

  // (1) Filtering.
+ + // Fixed point C code for the next block is as follows: + // for (i = 0; i < inst->magnLen; i++) { + // inst->real[i] = (int16_t)((inst->real[i] * + // (int16_t)(inst->noiseSupFilter[i])) >> 14); // Q(normData-stages) + // inst->imag[i] = (int16_t)((inst->imag[i] * + // (int16_t)(inst->noiseSupFilter[i])) >> 14); // Q(normData-stages) + // } + + int16_t* preal = &inst->real[0]; + int16_t* pimag = &inst->imag[0]; + int16_t* pns_filter = (int16_t*)&inst->noiseSupFilter[0]; + int16_t* pimag_end = pimag + inst->magnLen - 4; + + while (pimag < pimag_end) { + int16x8_t real = vld1q_s16(preal); + int16x8_t imag = vld1q_s16(pimag); + int16x8_t ns_filter = vld1q_s16(pns_filter); + + int32x4_t tmp_r_0 = vmull_s16(vget_low_s16(real), vget_low_s16(ns_filter)); + int32x4_t tmp_i_0 = vmull_s16(vget_low_s16(imag), vget_low_s16(ns_filter)); + int32x4_t tmp_r_1 = vmull_s16(vget_high_s16(real), + vget_high_s16(ns_filter)); + int32x4_t tmp_i_1 = vmull_s16(vget_high_s16(imag), + vget_high_s16(ns_filter)); + + int16x4_t result_r_0 = vshrn_n_s32(tmp_r_0, 14); + int16x4_t result_i_0 = vshrn_n_s32(tmp_i_0, 14); + int16x4_t result_r_1 = vshrn_n_s32(tmp_r_1, 14); + int16x4_t result_i_1 = vshrn_n_s32(tmp_i_1, 14); + + vst1q_s16(preal, vcombine_s16(result_r_0, result_r_1)); + vst1q_s16(pimag, vcombine_s16(result_i_0, result_i_1)); + preal += 8; + pimag += 8; + pns_filter += 8; + } + + // Filter the last element + *preal = (int16_t)((*preal * *pns_filter) >> 14); + *pimag = (int16_t)((*pimag * *pns_filter) >> 14); + + // (2) Create spectrum. 
+ + // Fixed point C code for the rest of the function is as follows: + // freq_buf[0] = inst->real[0]; + // freq_buf[1] = -inst->imag[0]; + // for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) { + // freq_buf[j] = inst->real[i]; + // freq_buf[j + 1] = -inst->imag[i]; + // } + // freq_buf[inst->anaLen] = inst->real[inst->anaLen2]; + // freq_buf[inst->anaLen + 1] = -inst->imag[inst->anaLen2]; + + preal = &inst->real[0]; + pimag = &inst->imag[0]; + pimag_end = pimag + inst->anaLen2; + int16_t * freq_buf_start = freq_buf; + while (pimag < pimag_end) { + // loop unroll + int16x8x2_t real_imag_0; + int16x8x2_t real_imag_1; + real_imag_0.val[1] = vld1q_s16(pimag); + real_imag_0.val[0] = vld1q_s16(preal); + preal += 8; + pimag += 8; + real_imag_1.val[1] = vld1q_s16(pimag); + real_imag_1.val[0] = vld1q_s16(preal); + preal += 8; + pimag += 8; + + real_imag_0.val[1] = vnegq_s16(real_imag_0.val[1]); + real_imag_1.val[1] = vnegq_s16(real_imag_1.val[1]); + vst2q_s16(freq_buf_start, real_imag_0); + freq_buf_start += 16; + vst2q_s16(freq_buf_start, real_imag_1); + freq_buf_start += 16; + } + freq_buf[inst->anaLen] = inst->real[inst->anaLen2]; + freq_buf[inst->anaLen + 1] = -inst->imag[inst->anaLen2]; +} + +// For the noise supress process, synthesis, read out fully processed segment, +// and update synthesis buffer. +void WebRtcNsx_SynthesisUpdateNeon(NoiseSuppressionFixedC* inst, + int16_t* out_frame, + int16_t gain_factor) { + RTC_DCHECK_EQ(0, inst->anaLen % 16); + RTC_DCHECK_EQ(0, inst->blockLen10ms % 16); + + int16_t* preal_start = inst->real; + const int16_t* pwindow = inst->window; + int16_t* preal_end = preal_start + inst->anaLen; + int16_t* psynthesis_buffer = inst->synthesisBuffer; + + while (preal_start < preal_end) { + // Loop unroll. 
+ int16x8_t window_0 = vld1q_s16(pwindow); + int16x8_t real_0 = vld1q_s16(preal_start); + int16x8_t synthesis_buffer_0 = vld1q_s16(psynthesis_buffer); + + int16x8_t window_1 = vld1q_s16(pwindow + 8); + int16x8_t real_1 = vld1q_s16(preal_start + 8); + int16x8_t synthesis_buffer_1 = vld1q_s16(psynthesis_buffer + 8); + + int32x4_t tmp32a_0_low = vmull_s16(vget_low_s16(real_0), + vget_low_s16(window_0)); + int32x4_t tmp32a_0_high = vmull_s16(vget_high_s16(real_0), + vget_high_s16(window_0)); + + int32x4_t tmp32a_1_low = vmull_s16(vget_low_s16(real_1), + vget_low_s16(window_1)); + int32x4_t tmp32a_1_high = vmull_s16(vget_high_s16(real_1), + vget_high_s16(window_1)); + + int16x4_t tmp16a_0_low = vqrshrn_n_s32(tmp32a_0_low, 14); + int16x4_t tmp16a_0_high = vqrshrn_n_s32(tmp32a_0_high, 14); + + int16x4_t tmp16a_1_low = vqrshrn_n_s32(tmp32a_1_low, 14); + int16x4_t tmp16a_1_high = vqrshrn_n_s32(tmp32a_1_high, 14); + + int32x4_t tmp32b_0_low = vmull_n_s16(tmp16a_0_low, gain_factor); + int32x4_t tmp32b_0_high = vmull_n_s16(tmp16a_0_high, gain_factor); + + int32x4_t tmp32b_1_low = vmull_n_s16(tmp16a_1_low, gain_factor); + int32x4_t tmp32b_1_high = vmull_n_s16(tmp16a_1_high, gain_factor); + + int16x4_t tmp16b_0_low = vqrshrn_n_s32(tmp32b_0_low, 13); + int16x4_t tmp16b_0_high = vqrshrn_n_s32(tmp32b_0_high, 13); + + int16x4_t tmp16b_1_low = vqrshrn_n_s32(tmp32b_1_low, 13); + int16x4_t tmp16b_1_high = vqrshrn_n_s32(tmp32b_1_high, 13); + + synthesis_buffer_0 = vqaddq_s16(vcombine_s16(tmp16b_0_low, tmp16b_0_high), + synthesis_buffer_0); + synthesis_buffer_1 = vqaddq_s16(vcombine_s16(tmp16b_1_low, tmp16b_1_high), + synthesis_buffer_1); + vst1q_s16(psynthesis_buffer, synthesis_buffer_0); + vst1q_s16(psynthesis_buffer + 8, synthesis_buffer_1); + + pwindow += 16; + preal_start += 16; + psynthesis_buffer += 16; + } + + // Read out fully processed segment. 
+ int16_t * p_start = inst->synthesisBuffer; + int16_t * p_end = inst->synthesisBuffer + inst->blockLen10ms; + int16_t * p_frame = out_frame; + while (p_start < p_end) { + int16x8_t frame_0 = vld1q_s16(p_start); + vst1q_s16(p_frame, frame_0); + p_start += 8; + p_frame += 8; + } + + // Update synthesis buffer. + int16_t* p_start_src = inst->synthesisBuffer + inst->blockLen10ms; + int16_t* p_end_src = inst->synthesisBuffer + inst->anaLen; + int16_t* p_start_dst = inst->synthesisBuffer; + while (p_start_src < p_end_src) { + int16x8_t frame = vld1q_s16(p_start_src); + vst1q_s16(p_start_dst, frame); + p_start_src += 8; + p_start_dst += 8; + } + + p_start = inst->synthesisBuffer + inst->anaLen - inst->blockLen10ms; + p_end = p_start + inst->blockLen10ms; + int16x8_t zero = vdupq_n_s16(0); + for (;p_start < p_end; p_start += 8) { + vst1q_s16(p_start, zero); + } +} + +// Update analysis buffer for lower band, and window data before FFT. +void WebRtcNsx_AnalysisUpdateNeon(NoiseSuppressionFixedC* inst, + int16_t* out, + int16_t* new_speech) { + RTC_DCHECK_EQ(0, inst->blockLen10ms % 16); + RTC_DCHECK_EQ(0, inst->anaLen % 16); + + // For lower band update analysis buffer. 
+ // memcpy(inst->analysisBuffer, inst->analysisBuffer + inst->blockLen10ms, + // (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->analysisBuffer)); + int16_t* p_start_src = inst->analysisBuffer + inst->blockLen10ms; + int16_t* p_end_src = inst->analysisBuffer + inst->anaLen; + int16_t* p_start_dst = inst->analysisBuffer; + while (p_start_src < p_end_src) { + int16x8_t frame = vld1q_s16(p_start_src); + vst1q_s16(p_start_dst, frame); + + p_start_src += 8; + p_start_dst += 8; + } + + // memcpy(inst->analysisBuffer + inst->anaLen - inst->blockLen10ms, + // new_speech, inst->blockLen10ms * sizeof(*inst->analysisBuffer)); + p_start_src = new_speech; + p_end_src = new_speech + inst->blockLen10ms; + p_start_dst = inst->analysisBuffer + inst->anaLen - inst->blockLen10ms; + while (p_start_src < p_end_src) { + int16x8_t frame = vld1q_s16(p_start_src); + vst1q_s16(p_start_dst, frame); + + p_start_src += 8; + p_start_dst += 8; + } + + // Window data before FFT. + int16_t* p_start_window = (int16_t*) inst->window; + int16_t* p_start_buffer = inst->analysisBuffer; + int16_t* p_end_buffer = inst->analysisBuffer + inst->anaLen; + int16_t* p_start_out = out; + + // Load the first element to reduce pipeline bubble. + int16x8_t window = vld1q_s16(p_start_window); + int16x8_t buffer = vld1q_s16(p_start_buffer); + p_start_window += 8; + p_start_buffer += 8; + + while (p_start_buffer < p_end_buffer) { + // Unroll loop. 
+ int32x4_t tmp32_low = vmull_s16(vget_low_s16(window), vget_low_s16(buffer)); + int32x4_t tmp32_high = vmull_s16(vget_high_s16(window), + vget_high_s16(buffer)); + window = vld1q_s16(p_start_window); + buffer = vld1q_s16(p_start_buffer); + + int16x4_t result_low = vrshrn_n_s32(tmp32_low, 14); + int16x4_t result_high = vrshrn_n_s32(tmp32_high, 14); + vst1q_s16(p_start_out, vcombine_s16(result_low, result_high)); + + p_start_buffer += 8; + p_start_window += 8; + p_start_out += 8; + } + int32x4_t tmp32_low = vmull_s16(vget_low_s16(window), vget_low_s16(buffer)); + int32x4_t tmp32_high = vmull_s16(vget_high_s16(window), + vget_high_s16(buffer)); + + int16x4_t result_low = vrshrn_n_s32(tmp32_low, 14); + int16x4_t result_high = vrshrn_n_s32(tmp32_high, 14); + vst1q_s16(p_start_out, vcombine_s16(result_low, result_high)); +} + +#endif diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_defines.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_defines.h new file mode 100644 index 000000000..862dc3cab --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/nsx_defines.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_DEFINES_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_DEFINES_H_ + +#define ANAL_BLOCKL_MAX 256 /* Max analysis block length */ +#define HALF_ANAL_BLOCKL 129 /* Half max analysis block length + 1 */ +#define NUM_HIGH_BANDS_MAX 2 /* Max number of high bands */ +#define SIMULT 3 +#define END_STARTUP_LONG 200 +#define END_STARTUP_SHORT 50 +#define FACTOR_Q16 2621440 /* 40 in Q16 */ +#define FACTOR_Q7 5120 /* 40 in Q7 */ +#define FACTOR_Q7_STARTUP 1024 /* 8 in Q7 */ +#define WIDTH_Q8 3 /* 0.01 in Q8 (or 25 ) */ + +/* PARAMETERS FOR NEW METHOD */ +#define DD_PR_SNR_Q11 2007 /* ~= Q11(0.98) DD update of prior SNR */ +#define ONE_MINUS_DD_PR_SNR_Q11 41 /* DD update of prior SNR */ +#define SPECT_FLAT_TAVG_Q14 4915 /* (0.30) tavg parameter for spectral flatness measure */ +#define SPECT_DIFF_TAVG_Q8 77 /* (0.30) tavg parameter for spectral flatness measure */ +#define PRIOR_UPDATE_Q14 1638 /* Q14(0.1) Update parameter of prior model */ +#define NOISE_UPDATE_Q8 26 /* 26 ~= Q8(0.1) Update parameter for noise */ + +/* Probability threshold for noise state in speech/noise likelihood. */ +#define ONE_MINUS_PROB_RANGE_Q8 205 /* 205 ~= Q8(0.8) */ +#define HIST_PAR_EST 1000 /* Histogram size for estimation of parameters */ + +/* FEATURE EXTRACTION CONFIG */ +/* Bin size of histogram */ +#define BIN_SIZE_LRT 10 +/* Scale parameters: multiply dominant peaks of the histograms by scale factor to obtain. */ +/* Thresholds for prior model */ +#define FACTOR_1_LRT_DIFF 6 /* For LRT and spectral difference (5 times bigger) */ +/* For spectral_flatness: used when noise is flatter than speech (10 times bigger). */ +#define FACTOR_2_FLAT_Q10 922 +/* Peak limit for spectral flatness (varies between 0 and 1) */ +#define THRES_PEAK_FLAT 24 /* * 2 * BIN_SIZE_FLAT_FX */ +/* Limit on spacing of two highest peaks in histogram: spacing determined by bin size. 
*/ +#define LIM_PEAK_SPACE_FLAT_DIFF 4 /* * 2 * BIN_SIZE_DIFF_FX */ +/* Limit on relevance of second peak */ +#define LIM_PEAK_WEIGHT_FLAT_DIFF 2 +#define THRES_FLUCT_LRT 10240 /* = 20 * inst->modelUpdate; fluctuation limit of LRT feat. */ +/* Limit on the max and min values for the feature thresholds */ +#define MAX_FLAT_Q10 38912 /* * 2 * BIN_SIZE_FLAT_FX */ +#define MIN_FLAT_Q10 4096 /* * 2 * BIN_SIZE_FLAT_FX */ +#define MAX_DIFF 100 /* * 2 * BIN_SIZE_DIFF_FX */ +#define MIN_DIFF 16 /* * 2 * BIN_SIZE_DIFF_FX */ +/* Criteria of weight of histogram peak to accept/reject feature */ +#define THRES_WEIGHT_FLAT_DIFF 154 /*(int)(0.3*(inst->modelUpdate)) for flatness and difference */ + +#define STAT_UPDATES 9 /* Update every 512 = 1 << 9 block */ +#define ONE_MINUS_GAMMA_PAUSE_Q8 13 /* ~= Q8(0.05) Update for conservative noise estimate */ +#define GAMMA_NOISE_TRANS_AND_SPEECH_Q8 3 /* ~= Q8(0.01) Update for transition and noise region */ + +#endif /* WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_DEFINES_H_ */ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/windows_private.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/windows_private.h new file mode 100644 index 000000000..44c2e846b --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/ns/windows_private.h @@ -0,0 +1,574 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_ + +// Hanning window for 4ms 16kHz +static const float kHanning64w128[128] = { + 0.00000000000000f, 0.02454122852291f, 0.04906767432742f, + 0.07356456359967f, 0.09801714032956f, 0.12241067519922f, + 0.14673047445536f, 0.17096188876030f, 0.19509032201613f, + 0.21910124015687f, 0.24298017990326f, 0.26671275747490f, + 0.29028467725446f, 0.31368174039889f, 0.33688985339222f, + 0.35989503653499f, 0.38268343236509f, 0.40524131400499f, + 0.42755509343028f, 0.44961132965461f, 0.47139673682600f, + 0.49289819222978f, 0.51410274419322f, 0.53499761988710f, + 0.55557023301960f, 0.57580819141785f, 0.59569930449243f, + 0.61523159058063f, 0.63439328416365f, 0.65317284295378f, + 0.67155895484702f, 0.68954054473707f, 0.70710678118655f, + 0.72424708295147f, 0.74095112535496f, 0.75720884650648f, + 0.77301045336274f, 0.78834642762661f, 0.80320753148064f, + 0.81758481315158f, 0.83146961230255f, 0.84485356524971f, + 0.85772861000027f, 0.87008699110871f, 0.88192126434835f, + 0.89322430119552f, 0.90398929312344f, 0.91420975570353f, + 0.92387953251129f, 0.93299279883474f, 0.94154406518302f, + 0.94952818059304f, 0.95694033573221f, 0.96377606579544f, + 0.97003125319454f, 0.97570213003853f, 0.98078528040323f, + 0.98527764238894f, 0.98917650996478f, 0.99247953459871f, + 0.99518472667220f, 0.99729045667869f, 0.99879545620517f, + 0.99969881869620f, 1.00000000000000f, + 0.99969881869620f, 0.99879545620517f, 0.99729045667869f, + 0.99518472667220f, 0.99247953459871f, 0.98917650996478f, + 0.98527764238894f, 0.98078528040323f, 0.97570213003853f, + 0.97003125319454f, 0.96377606579544f, 0.95694033573221f, + 0.94952818059304f, 0.94154406518302f, 0.93299279883474f, + 0.92387953251129f, 0.91420975570353f, 0.90398929312344f, + 0.89322430119552f, 0.88192126434835f, 0.87008699110871f, + 0.85772861000027f, 0.84485356524971f, 0.83146961230255f, + 
0.81758481315158f, 0.80320753148064f, 0.78834642762661f, + 0.77301045336274f, 0.75720884650648f, 0.74095112535496f, + 0.72424708295147f, 0.70710678118655f, 0.68954054473707f, + 0.67155895484702f, 0.65317284295378f, 0.63439328416365f, + 0.61523159058063f, 0.59569930449243f, 0.57580819141785f, + 0.55557023301960f, 0.53499761988710f, 0.51410274419322f, + 0.49289819222978f, 0.47139673682600f, 0.44961132965461f, + 0.42755509343028f, 0.40524131400499f, 0.38268343236509f, + 0.35989503653499f, 0.33688985339222f, 0.31368174039889f, + 0.29028467725446f, 0.26671275747490f, 0.24298017990326f, + 0.21910124015687f, 0.19509032201613f, 0.17096188876030f, + 0.14673047445536f, 0.12241067519922f, 0.09801714032956f, + 0.07356456359967f, 0.04906767432742f, 0.02454122852291f +}; + + + +// hybrib Hanning & flat window +static const float kBlocks80w128[128] = { + (float)0.00000000, (float)0.03271908, (float)0.06540313, (float)0.09801714, (float)0.13052619, + (float)0.16289547, (float)0.19509032, (float)0.22707626, (float)0.25881905, (float)0.29028468, + (float)0.32143947, (float)0.35225005, (float)0.38268343, (float)0.41270703, (float)0.44228869, + (float)0.47139674, (float)0.50000000, (float)0.52806785, (float)0.55557023, (float)0.58247770, + (float)0.60876143, (float)0.63439328, (float)0.65934582, (float)0.68359230, (float)0.70710678, + (float)0.72986407, (float)0.75183981, (float)0.77301045, (float)0.79335334, (float)0.81284668, + (float)0.83146961, (float)0.84920218, (float)0.86602540, (float)0.88192126, (float)0.89687274, + (float)0.91086382, (float)0.92387953, (float)0.93590593, (float)0.94693013, (float)0.95694034, + (float)0.96592583, (float)0.97387698, (float)0.98078528, (float)0.98664333, (float)0.99144486, + (float)0.99518473, (float)0.99785892, (float)0.99946459, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, 
(float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)0.99946459, (float)0.99785892, (float)0.99518473, (float)0.99144486, + (float)0.98664333, (float)0.98078528, (float)0.97387698, (float)0.96592583, (float)0.95694034, + (float)0.94693013, (float)0.93590593, (float)0.92387953, (float)0.91086382, (float)0.89687274, + (float)0.88192126, (float)0.86602540, (float)0.84920218, (float)0.83146961, (float)0.81284668, + (float)0.79335334, (float)0.77301045, (float)0.75183981, (float)0.72986407, (float)0.70710678, + (float)0.68359230, (float)0.65934582, (float)0.63439328, (float)0.60876143, (float)0.58247770, + (float)0.55557023, (float)0.52806785, (float)0.50000000, (float)0.47139674, (float)0.44228869, + (float)0.41270703, (float)0.38268343, (float)0.35225005, (float)0.32143947, (float)0.29028468, + (float)0.25881905, (float)0.22707626, (float)0.19509032, (float)0.16289547, (float)0.13052619, + (float)0.09801714, (float)0.06540313, (float)0.03271908 +}; + +// hybrib Hanning & flat window +static const float kBlocks160w256[256] = { + (float)0.00000000, (float)0.01636173, (float)0.03271908, (float)0.04906767, (float)0.06540313, + (float)0.08172107, (float)0.09801714, (float)0.11428696, (float)0.13052619, (float)0.14673047, + (float)0.16289547, (float)0.17901686, (float)0.19509032, (float)0.21111155, (float)0.22707626, + (float)0.24298018, (float)0.25881905, (float)0.27458862, (float)0.29028468, (float)0.30590302, + (float)0.32143947, (float)0.33688985, (float)0.35225005, (float)0.36751594, (float)0.38268343, + (float)0.39774847, (float)0.41270703, (float)0.42755509, (float)0.44228869, 
(float)0.45690388, + (float)0.47139674, (float)0.48576339, (float)0.50000000, (float)0.51410274, (float)0.52806785, + (float)0.54189158, (float)0.55557023, (float)0.56910015, (float)0.58247770, (float)0.59569930, + (float)0.60876143, (float)0.62166057, (float)0.63439328, (float)0.64695615, (float)0.65934582, + (float)0.67155895, (float)0.68359230, (float)0.69544264, (float)0.70710678, (float)0.71858162, + (float)0.72986407, (float)0.74095113, (float)0.75183981, (float)0.76252720, (float)0.77301045, + (float)0.78328675, (float)0.79335334, (float)0.80320753, (float)0.81284668, (float)0.82226822, + (float)0.83146961, (float)0.84044840, (float)0.84920218, (float)0.85772861, (float)0.86602540, + (float)0.87409034, (float)0.88192126, (float)0.88951608, (float)0.89687274, (float)0.90398929, + (float)0.91086382, (float)0.91749450, (float)0.92387953, (float)0.93001722, (float)0.93590593, + (float)0.94154407, (float)0.94693013, (float)0.95206268, (float)0.95694034, (float)0.96156180, + (float)0.96592583, (float)0.97003125, (float)0.97387698, (float)0.97746197, (float)0.98078528, + (float)0.98384601, (float)0.98664333, (float)0.98917651, (float)0.99144486, (float)0.99344778, + (float)0.99518473, (float)0.99665524, (float)0.99785892, (float)0.99879546, (float)0.99946459, + (float)0.99986614, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, 
(float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)0.99986614, (float)0.99946459, (float)0.99879546, (float)0.99785892, + (float)0.99665524, (float)0.99518473, (float)0.99344778, (float)0.99144486, (float)0.98917651, + (float)0.98664333, (float)0.98384601, (float)0.98078528, (float)0.97746197, (float)0.97387698, + (float)0.97003125, (float)0.96592583, (float)0.96156180, (float)0.95694034, (float)0.95206268, + (float)0.94693013, (float)0.94154407, (float)0.93590593, (float)0.93001722, (float)0.92387953, + (float)0.91749450, (float)0.91086382, (float)0.90398929, (float)0.89687274, (float)0.88951608, + (float)0.88192126, (float)0.87409034, (float)0.86602540, (float)0.85772861, (float)0.84920218, + (float)0.84044840, (float)0.83146961, (float)0.82226822, (float)0.81284668, (float)0.80320753, + (float)0.79335334, (float)0.78328675, (float)0.77301045, (float)0.76252720, (float)0.75183981, + (float)0.74095113, (float)0.72986407, (float)0.71858162, (float)0.70710678, (float)0.69544264, + (float)0.68359230, (float)0.67155895, (float)0.65934582, (float)0.64695615, (float)0.63439328, + (float)0.62166057, (float)0.60876143, (float)0.59569930, (float)0.58247770, (float)0.56910015, + (float)0.55557023, (float)0.54189158, (float)0.52806785, (float)0.51410274, (float)0.50000000, + (float)0.48576339, (float)0.47139674, (float)0.45690388, (float)0.44228869, (float)0.42755509, + (float)0.41270703, (float)0.39774847, (float)0.38268343, (float)0.36751594, (float)0.35225005, + 
(float)0.33688985, (float)0.32143947, (float)0.30590302, (float)0.29028468, (float)0.27458862, + (float)0.25881905, (float)0.24298018, (float)0.22707626, (float)0.21111155, (float)0.19509032, + (float)0.17901686, (float)0.16289547, (float)0.14673047, (float)0.13052619, (float)0.11428696, + (float)0.09801714, (float)0.08172107, (float)0.06540313, (float)0.04906767, (float)0.03271908, + (float)0.01636173 +}; + +// hybrib Hanning & flat window: for 20ms +static const float kBlocks320w512[512] = { + (float)0.00000000, (float)0.00818114, (float)0.01636173, (float)0.02454123, (float)0.03271908, + (float)0.04089475, (float)0.04906767, (float)0.05723732, (float)0.06540313, (float)0.07356456, + (float)0.08172107, (float)0.08987211, (float)0.09801714, (float)0.10615561, (float)0.11428696, + (float)0.12241068, (float)0.13052619, (float)0.13863297, (float)0.14673047, (float)0.15481816, + (float)0.16289547, (float)0.17096189, (float)0.17901686, (float)0.18705985, (float)0.19509032, + (float)0.20310773, (float)0.21111155, (float)0.21910124, (float)0.22707626, (float)0.23503609, + (float)0.24298018, (float)0.25090801, (float)0.25881905, (float)0.26671276, (float)0.27458862, + (float)0.28244610, (float)0.29028468, (float)0.29810383, (float)0.30590302, (float)0.31368174, + (float)0.32143947, (float)0.32917568, (float)0.33688985, (float)0.34458148, (float)0.35225005, + (float)0.35989504, (float)0.36751594, (float)0.37511224, (float)0.38268343, (float)0.39022901, + (float)0.39774847, (float)0.40524131, (float)0.41270703, (float)0.42014512, (float)0.42755509, + (float)0.43493645, (float)0.44228869, (float)0.44961133, (float)0.45690388, (float)0.46416584, + (float)0.47139674, (float)0.47859608, (float)0.48576339, (float)0.49289819, (float)0.50000000, + (float)0.50706834, (float)0.51410274, (float)0.52110274, (float)0.52806785, (float)0.53499762, + (float)0.54189158, (float)0.54874927, (float)0.55557023, (float)0.56235401, (float)0.56910015, + (float)0.57580819, (float)0.58247770, 
(float)0.58910822, (float)0.59569930, (float)0.60225052, + (float)0.60876143, (float)0.61523159, (float)0.62166057, (float)0.62804795, (float)0.63439328, + (float)0.64069616, (float)0.64695615, (float)0.65317284, (float)0.65934582, (float)0.66547466, + (float)0.67155895, (float)0.67759830, (float)0.68359230, (float)0.68954054, (float)0.69544264, + (float)0.70129818, (float)0.70710678, (float)0.71286806, (float)0.71858162, (float)0.72424708, + (float)0.72986407, (float)0.73543221, (float)0.74095113, (float)0.74642045, (float)0.75183981, + (float)0.75720885, (float)0.76252720, (float)0.76779452, (float)0.77301045, (float)0.77817464, + (float)0.78328675, (float)0.78834643, (float)0.79335334, (float)0.79830715, (float)0.80320753, + (float)0.80805415, (float)0.81284668, (float)0.81758481, (float)0.82226822, (float)0.82689659, + (float)0.83146961, (float)0.83598698, (float)0.84044840, (float)0.84485357, (float)0.84920218, + (float)0.85349396, (float)0.85772861, (float)0.86190585, (float)0.86602540, (float)0.87008699, + (float)0.87409034, (float)0.87803519, (float)0.88192126, (float)0.88574831, (float)0.88951608, + (float)0.89322430, (float)0.89687274, (float)0.90046115, (float)0.90398929, (float)0.90745693, + (float)0.91086382, (float)0.91420976, (float)0.91749450, (float)0.92071783, (float)0.92387953, + (float)0.92697940, (float)0.93001722, (float)0.93299280, (float)0.93590593, (float)0.93875641, + (float)0.94154407, (float)0.94426870, (float)0.94693013, (float)0.94952818, (float)0.95206268, + (float)0.95453345, (float)0.95694034, (float)0.95928317, (float)0.96156180, (float)0.96377607, + (float)0.96592583, (float)0.96801094, (float)0.97003125, (float)0.97198664, (float)0.97387698, + (float)0.97570213, (float)0.97746197, (float)0.97915640, (float)0.98078528, (float)0.98234852, + (float)0.98384601, (float)0.98527764, (float)0.98664333, (float)0.98794298, (float)0.98917651, + (float)0.99034383, (float)0.99144486, (float)0.99247953, (float)0.99344778, (float)0.99434953, + 
(float)0.99518473, (float)0.99595331, (float)0.99665524, (float)0.99729046, (float)0.99785892, + (float)0.99836060, (float)0.99879546, (float)0.99916346, (float)0.99946459, (float)0.99969882, + (float)0.99986614, (float)0.99996653, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, 
(float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, + (float)1.00000000, (float)0.99996653, (float)0.99986614, (float)0.99969882, (float)0.99946459, + (float)0.99916346, (float)0.99879546, (float)0.99836060, (float)0.99785892, (float)0.99729046, + (float)0.99665524, (float)0.99595331, (float)0.99518473, (float)0.99434953, (float)0.99344778, + (float)0.99247953, (float)0.99144486, (float)0.99034383, (float)0.98917651, (float)0.98794298, + (float)0.98664333, (float)0.98527764, (float)0.98384601, (float)0.98234852, (float)0.98078528, + (float)0.97915640, (float)0.97746197, (float)0.97570213, (float)0.97387698, (float)0.97198664, + (float)0.97003125, (float)0.96801094, (float)0.96592583, (float)0.96377607, (float)0.96156180, + (float)0.95928317, (float)0.95694034, (float)0.95453345, (float)0.95206268, (float)0.94952818, + (float)0.94693013, (float)0.94426870, (float)0.94154407, (float)0.93875641, (float)0.93590593, + (float)0.93299280, (float)0.93001722, (float)0.92697940, (float)0.92387953, (float)0.92071783, + (float)0.91749450, (float)0.91420976, (float)0.91086382, (float)0.90745693, (float)0.90398929, + (float)0.90046115, (float)0.89687274, (float)0.89322430, (float)0.88951608, (float)0.88574831, + (float)0.88192126, (float)0.87803519, (float)0.87409034, (float)0.87008699, (float)0.86602540, + (float)0.86190585, 
(float)0.85772861, (float)0.85349396, (float)0.84920218, (float)0.84485357, + (float)0.84044840, (float)0.83598698, (float)0.83146961, (float)0.82689659, (float)0.82226822, + (float)0.81758481, (float)0.81284668, (float)0.80805415, (float)0.80320753, (float)0.79830715, + (float)0.79335334, (float)0.78834643, (float)0.78328675, (float)0.77817464, (float)0.77301045, + (float)0.76779452, (float)0.76252720, (float)0.75720885, (float)0.75183981, (float)0.74642045, + (float)0.74095113, (float)0.73543221, (float)0.72986407, (float)0.72424708, (float)0.71858162, + (float)0.71286806, (float)0.70710678, (float)0.70129818, (float)0.69544264, (float)0.68954054, + (float)0.68359230, (float)0.67759830, (float)0.67155895, (float)0.66547466, (float)0.65934582, + (float)0.65317284, (float)0.64695615, (float)0.64069616, (float)0.63439328, (float)0.62804795, + (float)0.62166057, (float)0.61523159, (float)0.60876143, (float)0.60225052, (float)0.59569930, + (float)0.58910822, (float)0.58247770, (float)0.57580819, (float)0.56910015, (float)0.56235401, + (float)0.55557023, (float)0.54874927, (float)0.54189158, (float)0.53499762, (float)0.52806785, + (float)0.52110274, (float)0.51410274, (float)0.50706834, (float)0.50000000, (float)0.49289819, + (float)0.48576339, (float)0.47859608, (float)0.47139674, (float)0.46416584, (float)0.45690388, + (float)0.44961133, (float)0.44228869, (float)0.43493645, (float)0.42755509, (float)0.42014512, + (float)0.41270703, (float)0.40524131, (float)0.39774847, (float)0.39022901, (float)0.38268343, + (float)0.37511224, (float)0.36751594, (float)0.35989504, (float)0.35225005, (float)0.34458148, + (float)0.33688985, (float)0.32917568, (float)0.32143947, (float)0.31368174, (float)0.30590302, + (float)0.29810383, (float)0.29028468, (float)0.28244610, (float)0.27458862, (float)0.26671276, + (float)0.25881905, (float)0.25090801, (float)0.24298018, (float)0.23503609, (float)0.22707626, + (float)0.21910124, (float)0.21111155, (float)0.20310773, (float)0.19509032, 
(float)0.18705985, + (float)0.17901686, (float)0.17096189, (float)0.16289547, (float)0.15481816, (float)0.14673047, + (float)0.13863297, (float)0.13052619, (float)0.12241068, (float)0.11428696, (float)0.10615561, + (float)0.09801714, (float)0.08987211, (float)0.08172107, (float)0.07356456, (float)0.06540313, + (float)0.05723732, (float)0.04906767, (float)0.04089475, (float)0.03271908, (float)0.02454123, + (float)0.01636173, (float)0.00818114 +}; + + +// Hanning window: for 15ms at 16kHz with symmetric zeros +static const float kBlocks240w512[512] = { + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00654494, (float)0.01308960, (float)0.01963369, + (float)0.02617695, (float)0.03271908, (float)0.03925982, (float)0.04579887, (float)0.05233596, + (float)0.05887080, (float)0.06540313, (float)0.07193266, (float)0.07845910, (float)0.08498218, + (float)0.09150162, (float)0.09801714, (float)0.10452846, (float)0.11103531, (float)0.11753740, + (float)0.12403446, (float)0.13052620, (float)0.13701233, (float)0.14349262, (float)0.14996676, + (float)0.15643448, (float)0.16289547, (float)0.16934951, (float)0.17579629, (float)0.18223552, + (float)0.18866697, (float)0.19509032, (float)0.20150533, (float)0.20791170, (float)0.21430916, + (float)0.22069745, (float)0.22707628, (float)0.23344538, (float)0.23980446, (float)0.24615330, + (float)0.25249159, (float)0.25881904, (float)0.26513544, (float)0.27144045, (float)0.27773386, + (float)0.28401536, (float)0.29028466, (float)0.29654160, (float)0.30278578, (float)0.30901700, + (float)0.31523499, (float)0.32143945, (float)0.32763019, (float)0.33380687, (float)0.33996925, + (float)0.34611708, (float)0.35225007, (float)0.35836795, (float)0.36447051, 
(float)0.37055743, + (float)0.37662852, (float)0.38268346, (float)0.38872197, (float)0.39474389, (float)0.40074885, + (float)0.40673664, (float)0.41270703, (float)0.41865975, (float)0.42459452, (float)0.43051112, + (float)0.43640924, (float)0.44228873, (float)0.44814920, (float)0.45399052, (float)0.45981237, + (float)0.46561453, (float)0.47139674, (float)0.47715878, (float)0.48290035, (float)0.48862126, + (float)0.49432120, (float)0.50000000, (float)0.50565743, (float)0.51129311, (float)0.51690692, + (float)0.52249855, (float)0.52806789, (float)0.53361452, (float)0.53913832, (float)0.54463905, + (float)0.55011642, (float)0.55557024, (float)0.56100029, (float)0.56640625, (float)0.57178795, + (float)0.57714522, (float)0.58247769, (float)0.58778524, (float)0.59306765, (float)0.59832460, + (float)0.60355598, (float)0.60876143, (float)0.61394083, (float)0.61909395, (float)0.62422055, + (float)0.62932038, (float)0.63439333, (float)0.63943899, (float)0.64445734, (float)0.64944810, + (float)0.65441096, (float)0.65934587, (float)0.66425246, (float)0.66913062, (float)0.67398012, + (float)0.67880076, (float)0.68359232, (float)0.68835455, (float)0.69308740, (float)0.69779050, + (float)0.70246369, (float)0.70710677, (float)0.71171963, (float)0.71630198, (float)0.72085363, + (float)0.72537440, (float)0.72986406, (float)0.73432255, (float)0.73874950, (float)0.74314487, + (float)0.74750835, (float)0.75183982, (float)0.75613910, (float)0.76040596, (float)0.76464027, + (float)0.76884186, (float)0.77301043, (float)0.77714598, (float)0.78124821, (float)0.78531694, + (float)0.78935206, (float)0.79335338, (float)0.79732066, (float)0.80125386, (float)0.80515265, + (float)0.80901700, (float)0.81284672, (float)0.81664157, (float)0.82040149, (float)0.82412618, + (float)0.82781565, (float)0.83146966, (float)0.83508795, (float)0.83867061, (float)0.84221727, + (float)0.84572780, (float)0.84920216, (float)0.85264021, (float)0.85604161, (float)0.85940641, + (float)0.86273444, (float)0.86602545, 
(float)0.86927933, (float)0.87249607, (float)0.87567532, + (float)0.87881714, (float)0.88192129, (float)0.88498765, (float)0.88801610, (float)0.89100653, + (float)0.89395881, (float)0.89687276, (float)0.89974827, (float)0.90258533, (float)0.90538365, + (float)0.90814316, (float)0.91086388, (float)0.91354549, (float)0.91618794, (float)0.91879123, + (float)0.92135513, (float)0.92387950, (float)0.92636442, (float)0.92880958, (float)0.93121493, + (float)0.93358046, (float)0.93590593, (float)0.93819135, (float)0.94043654, (float)0.94264150, + (float)0.94480604, (float)0.94693011, (float)0.94901365, (float)0.95105654, (float)0.95305866, + (float)0.95501995, (float)0.95694035, (float)0.95881975, (float)0.96065807, (float)0.96245527, + (float)0.96421117, (float)0.96592581, (float)0.96759909, (float)0.96923089, (float)0.97082120, + (float)0.97236991, (float)0.97387701, (float)0.97534233, (float)0.97676587, (float)0.97814763, + (float)0.97948742, (float)0.98078531, (float)0.98204112, (float)0.98325491, (float)0.98442656, + (float)0.98555607, (float)0.98664331, (float)0.98768836, (float)0.98869103, (float)0.98965138, + (float)0.99056935, (float)0.99144489, (float)0.99227792, (float)0.99306846, (float)0.99381649, + (float)0.99452192, (float)0.99518472, (float)0.99580491, (float)0.99638247, (float)0.99691731, + (float)0.99740952, (float)0.99785894, (float)0.99826562, (float)0.99862951, (float)0.99895066, + (float)0.99922901, (float)0.99946457, (float)0.99965733, (float)0.99980724, (float)0.99991435, + (float)0.99997860, (float)1.00000000, (float)0.99997860, (float)0.99991435, (float)0.99980724, + (float)0.99965733, (float)0.99946457, (float)0.99922901, (float)0.99895066, (float)0.99862951, + (float)0.99826562, (float)0.99785894, (float)0.99740946, (float)0.99691731, (float)0.99638247, + (float)0.99580491, (float)0.99518472, (float)0.99452192, (float)0.99381644, (float)0.99306846, + (float)0.99227792, (float)0.99144489, (float)0.99056935, (float)0.98965138, (float)0.98869103, + 
(float)0.98768836, (float)0.98664331, (float)0.98555607, (float)0.98442656, (float)0.98325491, + (float)0.98204112, (float)0.98078525, (float)0.97948742, (float)0.97814757, (float)0.97676587, + (float)0.97534227, (float)0.97387695, (float)0.97236991, (float)0.97082120, (float)0.96923089, + (float)0.96759909, (float)0.96592581, (float)0.96421117, (float)0.96245521, (float)0.96065807, + (float)0.95881969, (float)0.95694029, (float)0.95501995, (float)0.95305860, (float)0.95105648, + (float)0.94901365, (float)0.94693011, (float)0.94480604, (float)0.94264150, (float)0.94043654, + (float)0.93819129, (float)0.93590593, (float)0.93358046, (float)0.93121493, (float)0.92880952, + (float)0.92636436, (float)0.92387950, (float)0.92135507, (float)0.91879123, (float)0.91618794, + (float)0.91354543, (float)0.91086382, (float)0.90814310, (float)0.90538365, (float)0.90258527, + (float)0.89974827, (float)0.89687276, (float)0.89395875, (float)0.89100647, (float)0.88801610, + (float)0.88498759, (float)0.88192123, (float)0.87881714, (float)0.87567532, (float)0.87249595, + (float)0.86927933, (float)0.86602539, (float)0.86273432, (float)0.85940641, (float)0.85604161, + (float)0.85264009, (float)0.84920216, (float)0.84572780, (float)0.84221715, (float)0.83867055, + (float)0.83508795, (float)0.83146954, (float)0.82781565, (float)0.82412612, (float)0.82040137, + (float)0.81664157, (float)0.81284660, (float)0.80901700, (float)0.80515265, (float)0.80125374, + (float)0.79732066, (float)0.79335332, (float)0.78935200, (float)0.78531694, (float)0.78124815, + (float)0.77714586, (float)0.77301049, (float)0.76884180, (float)0.76464021, (float)0.76040596, + (float)0.75613904, (float)0.75183970, (float)0.74750835, (float)0.74314481, (float)0.73874938, + (float)0.73432249, (float)0.72986400, (float)0.72537428, (float)0.72085363, (float)0.71630186, + (float)0.71171951, (float)0.70710677, (float)0.70246363, (float)0.69779032, (float)0.69308734, + (float)0.68835449, (float)0.68359220, (float)0.67880070, 
(float)0.67398006, (float)0.66913044, + (float)0.66425240, (float)0.65934575, (float)0.65441096, (float)0.64944804, (float)0.64445722, + (float)0.63943905, (float)0.63439327, (float)0.62932026, (float)0.62422055, (float)0.61909389, + (float)0.61394072, (float)0.60876143, (float)0.60355592, (float)0.59832448, (float)0.59306765, + (float)0.58778518, (float)0.58247757, (float)0.57714522, (float)0.57178789, (float)0.56640613, + (float)0.56100023, (float)0.55557019, (float)0.55011630, (float)0.54463905, (float)0.53913826, + (float)0.53361434, (float)0.52806783, (float)0.52249849, (float)0.51690674, (float)0.51129305, + (float)0.50565726, (float)0.50000006, (float)0.49432117, (float)0.48862115, (float)0.48290038, + (float)0.47715873, (float)0.47139663, (float)0.46561456, (float)0.45981231, (float)0.45399037, + (float)0.44814920, (float)0.44228864, (float)0.43640912, (float)0.43051112, (float)0.42459446, + (float)0.41865960, (float)0.41270703, (float)0.40673658, (float)0.40074870, (float)0.39474386, + (float)0.38872188, (float)0.38268328, (float)0.37662849, (float)0.37055734, (float)0.36447033, + (float)0.35836792, (float)0.35224995, (float)0.34611690, (float)0.33996922, (float)0.33380675, + (float)0.32763001, (float)0.32143945, (float)0.31523487, (float)0.30901679, (float)0.30278572, + (float)0.29654145, (float)0.29028472, (float)0.28401530, (float)0.27773371, (float)0.27144048, + (float)0.26513538, (float)0.25881892, (float)0.25249159, (float)0.24615324, (float)0.23980433, + (float)0.23344538, (float)0.22707619, (float)0.22069728, (float)0.21430916, (float)0.20791161, + (float)0.20150517, (float)0.19509031, (float)0.18866688, (float)0.18223536, (float)0.17579627, + (float)0.16934940, (float)0.16289529, (float)0.15643445, (float)0.14996666, (float)0.14349243, + (float)0.13701232, (float)0.13052608, (float)0.12403426, (float)0.11753736, (float)0.11103519, + (float)0.10452849, (float)0.09801710, (float)0.09150149, (float)0.08498220, (float)0.07845904, + (float)0.07193252, 
(float)0.06540315, (float)0.05887074, (float)0.05233581, (float)0.04579888, + (float)0.03925974, (float)0.03271893, (float)0.02617695, (float)0.01963361, (float)0.01308943, + (float)0.00654493, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000 +}; + + +// Hanning window: for 30ms with 1024 fft with symmetric zeros at 16kHz +static const float kBlocks480w1024[1024] = { + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00327249, (float)0.00654494, + (float)0.00981732, (float)0.01308960, (float)0.01636173, (float)0.01963369, (float)0.02290544, + (float)0.02617695, (float)0.02944817, (float)0.03271908, (float)0.03598964, (float)0.03925982, + (float)0.04252957, (float)0.04579887, (float)0.04906768, (float)0.05233596, (float)0.05560368, + (float)0.05887080, (float)0.06213730, (float)0.06540313, (float)0.06866825, (float)0.07193266, + (float)0.07519628, (float)0.07845910, (float)0.08172107, (float)0.08498218, (float)0.08824237, + (float)0.09150162, (float)0.09475989, (float)0.09801714, (float)0.10127335, (float)0.10452846, + (float)0.10778246, (float)0.11103531, (float)0.11428697, (float)0.11753740, (float)0.12078657, + 
(float)0.12403446, (float)0.12728101, (float)0.13052620, (float)0.13376999, (float)0.13701233, + (float)0.14025325, (float)0.14349262, (float)0.14673047, (float)0.14996676, (float)0.15320145, + (float)0.15643448, (float)0.15966582, (float)0.16289547, (float)0.16612339, (float)0.16934951, + (float)0.17257382, (float)0.17579629, (float)0.17901687, (float)0.18223552, (float)0.18545224, + (float)0.18866697, (float)0.19187967, (float)0.19509032, (float)0.19829889, (float)0.20150533, + (float)0.20470962, (float)0.20791170, (float)0.21111156, (float)0.21430916, (float)0.21750447, + (float)0.22069745, (float)0.22388805, (float)0.22707628, (float)0.23026206, (float)0.23344538, + (float)0.23662618, (float)0.23980446, (float)0.24298020, (float)0.24615330, (float)0.24932377, + (float)0.25249159, (float)0.25565669, (float)0.25881904, (float)0.26197866, (float)0.26513544, + (float)0.26828939, (float)0.27144045, (float)0.27458861, (float)0.27773386, (float)0.28087610, + (float)0.28401536, (float)0.28715158, (float)0.29028466, (float)0.29341471, (float)0.29654160, + (float)0.29966527, (float)0.30278578, (float)0.30590302, (float)0.30901700, (float)0.31212768, + (float)0.31523499, (float)0.31833893, (float)0.32143945, (float)0.32453656, (float)0.32763019, + (float)0.33072028, (float)0.33380687, (float)0.33688986, (float)0.33996925, (float)0.34304500, + (float)0.34611708, (float)0.34918544, (float)0.35225007, (float)0.35531089, (float)0.35836795, + (float)0.36142117, (float)0.36447051, (float)0.36751595, (float)0.37055743, (float)0.37359497, + (float)0.37662852, (float)0.37965801, (float)0.38268346, (float)0.38570479, (float)0.38872197, + (float)0.39173502, (float)0.39474389, (float)0.39774847, (float)0.40074885, (float)0.40374491, + (float)0.40673664, (float)0.40972406, (float)0.41270703, (float)0.41568562, (float)0.41865975, + (float)0.42162940, (float)0.42459452, (float)0.42755508, (float)0.43051112, (float)0.43346250, + (float)0.43640924, (float)0.43935132, (float)0.44228873, 
(float)0.44522133, (float)0.44814920, + (float)0.45107228, (float)0.45399052, (float)0.45690390, (float)0.45981237, (float)0.46271592, + (float)0.46561453, (float)0.46850815, (float)0.47139674, (float)0.47428030, (float)0.47715878, + (float)0.48003215, (float)0.48290035, (float)0.48576337, (float)0.48862126, (float)0.49147385, + (float)0.49432120, (float)0.49716330, (float)0.50000000, (float)0.50283140, (float)0.50565743, + (float)0.50847799, (float)0.51129311, (float)0.51410276, (float)0.51690692, (float)0.51970553, + (float)0.52249855, (float)0.52528602, (float)0.52806789, (float)0.53084403, (float)0.53361452, + (float)0.53637928, (float)0.53913832, (float)0.54189163, (float)0.54463905, (float)0.54738063, + (float)0.55011642, (float)0.55284631, (float)0.55557024, (float)0.55828828, (float)0.56100029, + (float)0.56370628, (float)0.56640625, (float)0.56910014, (float)0.57178795, (float)0.57446963, + (float)0.57714522, (float)0.57981455, (float)0.58247769, (float)0.58513463, (float)0.58778524, + (float)0.59042960, (float)0.59306765, (float)0.59569931, (float)0.59832460, (float)0.60094351, + (float)0.60355598, (float)0.60616195, (float)0.60876143, (float)0.61135441, (float)0.61394083, + (float)0.61652070, (float)0.61909395, (float)0.62166059, (float)0.62422055, (float)0.62677383, + (float)0.62932038, (float)0.63186020, (float)0.63439333, (float)0.63691956, (float)0.63943899, + (float)0.64195162, (float)0.64445734, (float)0.64695615, (float)0.64944810, (float)0.65193301, + (float)0.65441096, (float)0.65688187, (float)0.65934587, (float)0.66180271, (float)0.66425246, + (float)0.66669512, (float)0.66913062, (float)0.67155898, (float)0.67398012, (float)0.67639405, + (float)0.67880076, (float)0.68120021, (float)0.68359232, (float)0.68597710, (float)0.68835455, + (float)0.69072467, (float)0.69308740, (float)0.69544262, (float)0.69779050, (float)0.70013082, + (float)0.70246369, (float)0.70478904, (float)0.70710677, (float)0.70941699, (float)0.71171963, + (float)0.71401459, 
(float)0.71630198, (float)0.71858168, (float)0.72085363, (float)0.72311789, + (float)0.72537440, (float)0.72762316, (float)0.72986406, (float)0.73209721, (float)0.73432255, + (float)0.73653996, (float)0.73874950, (float)0.74095118, (float)0.74314487, (float)0.74533057, + (float)0.74750835, (float)0.74967808, (float)0.75183982, (float)0.75399351, (float)0.75613910, + (float)0.75827658, (float)0.76040596, (float)0.76252723, (float)0.76464027, (float)0.76674515, + (float)0.76884186, (float)0.77093029, (float)0.77301043, (float)0.77508241, (float)0.77714598, + (float)0.77920127, (float)0.78124821, (float)0.78328675, (float)0.78531694, (float)0.78733873, + (float)0.78935206, (float)0.79135692, (float)0.79335338, (float)0.79534125, (float)0.79732066, + (float)0.79929149, (float)0.80125386, (float)0.80320752, (float)0.80515265, (float)0.80708915, + (float)0.80901700, (float)0.81093621, (float)0.81284672, (float)0.81474853, (float)0.81664157, + (float)0.81852591, (float)0.82040149, (float)0.82226825, (float)0.82412618, (float)0.82597536, + (float)0.82781565, (float)0.82964706, (float)0.83146966, (float)0.83328325, (float)0.83508795, + (float)0.83688378, (float)0.83867061, (float)0.84044838, (float)0.84221727, (float)0.84397703, + (float)0.84572780, (float)0.84746957, (float)0.84920216, (float)0.85092574, (float)0.85264021, + (float)0.85434544, (float)0.85604161, (float)0.85772866, (float)0.85940641, (float)0.86107504, + (float)0.86273444, (float)0.86438453, (float)0.86602545, (float)0.86765707, (float)0.86927933, + (float)0.87089235, (float)0.87249607, (float)0.87409031, (float)0.87567532, (float)0.87725097, + (float)0.87881714, (float)0.88037390, (float)0.88192129, (float)0.88345921, (float)0.88498765, + (float)0.88650668, (float)0.88801610, (float)0.88951612, (float)0.89100653, (float)0.89248741, + (float)0.89395881, (float)0.89542055, (float)0.89687276, (float)0.89831537, (float)0.89974827, + (float)0.90117162, (float)0.90258533, (float)0.90398932, (float)0.90538365, 
(float)0.90676826, + (float)0.90814316, (float)0.90950841, (float)0.91086388, (float)0.91220951, (float)0.91354549, + (float)0.91487163, (float)0.91618794, (float)0.91749454, (float)0.91879123, (float)0.92007810, + (float)0.92135513, (float)0.92262226, (float)0.92387950, (float)0.92512691, (float)0.92636442, + (float)0.92759192, (float)0.92880958, (float)0.93001723, (float)0.93121493, (float)0.93240267, + (float)0.93358046, (float)0.93474817, (float)0.93590593, (float)0.93705362, (float)0.93819135, + (float)0.93931901, (float)0.94043654, (float)0.94154406, (float)0.94264150, (float)0.94372880, + (float)0.94480604, (float)0.94587320, (float)0.94693011, (float)0.94797695, (float)0.94901365, + (float)0.95004016, (float)0.95105654, (float)0.95206273, (float)0.95305866, (float)0.95404440, + (float)0.95501995, (float)0.95598525, (float)0.95694035, (float)0.95788521, (float)0.95881975, + (float)0.95974404, (float)0.96065807, (float)0.96156180, (float)0.96245527, (float)0.96333838, + (float)0.96421117, (float)0.96507370, (float)0.96592581, (float)0.96676767, (float)0.96759909, + (float)0.96842021, (float)0.96923089, (float)0.97003126, (float)0.97082120, (float)0.97160077, + (float)0.97236991, (float)0.97312868, (float)0.97387701, (float)0.97461486, (float)0.97534233, + (float)0.97605932, (float)0.97676587, (float)0.97746199, (float)0.97814763, (float)0.97882277, + (float)0.97948742, (float)0.98014158, (float)0.98078531, (float)0.98141843, (float)0.98204112, + (float)0.98265332, (float)0.98325491, (float)0.98384601, (float)0.98442656, (float)0.98499662, + (float)0.98555607, (float)0.98610497, (float)0.98664331, (float)0.98717111, (float)0.98768836, + (float)0.98819500, (float)0.98869103, (float)0.98917651, (float)0.98965138, (float)0.99011570, + (float)0.99056935, (float)0.99101239, (float)0.99144489, (float)0.99186671, (float)0.99227792, + (float)0.99267852, (float)0.99306846, (float)0.99344778, (float)0.99381649, (float)0.99417448, + (float)0.99452192, (float)0.99485862, 
(float)0.99518472, (float)0.99550015, (float)0.99580491, + (float)0.99609905, (float)0.99638247, (float)0.99665523, (float)0.99691731, (float)0.99716878, + (float)0.99740952, (float)0.99763954, (float)0.99785894, (float)0.99806762, (float)0.99826562, + (float)0.99845290, (float)0.99862951, (float)0.99879545, (float)0.99895066, (float)0.99909520, + (float)0.99922901, (float)0.99935216, (float)0.99946457, (float)0.99956632, (float)0.99965733, + (float)0.99973762, (float)0.99980724, (float)0.99986613, (float)0.99991435, (float)0.99995178, + (float)0.99997860, (float)0.99999464, (float)1.00000000, (float)0.99999464, (float)0.99997860, + (float)0.99995178, (float)0.99991435, (float)0.99986613, (float)0.99980724, (float)0.99973762, + (float)0.99965733, (float)0.99956632, (float)0.99946457, (float)0.99935216, (float)0.99922901, + (float)0.99909520, (float)0.99895066, (float)0.99879545, (float)0.99862951, (float)0.99845290, + (float)0.99826562, (float)0.99806762, (float)0.99785894, (float)0.99763954, (float)0.99740946, + (float)0.99716872, (float)0.99691731, (float)0.99665523, (float)0.99638247, (float)0.99609905, + (float)0.99580491, (float)0.99550015, (float)0.99518472, (float)0.99485862, (float)0.99452192, + (float)0.99417448, (float)0.99381644, (float)0.99344778, (float)0.99306846, (float)0.99267852, + (float)0.99227792, (float)0.99186671, (float)0.99144489, (float)0.99101239, (float)0.99056935, + (float)0.99011564, (float)0.98965138, (float)0.98917651, (float)0.98869103, (float)0.98819494, + (float)0.98768836, (float)0.98717111, (float)0.98664331, (float)0.98610497, (float)0.98555607, + (float)0.98499656, (float)0.98442656, (float)0.98384601, (float)0.98325491, (float)0.98265326, + (float)0.98204112, (float)0.98141843, (float)0.98078525, (float)0.98014158, (float)0.97948742, + (float)0.97882277, (float)0.97814757, (float)0.97746193, (float)0.97676587, (float)0.97605932, + (float)0.97534227, (float)0.97461486, (float)0.97387695, (float)0.97312862, (float)0.97236991, + 
(float)0.97160077, (float)0.97082120, (float)0.97003126, (float)0.96923089, (float)0.96842015, + (float)0.96759909, (float)0.96676761, (float)0.96592581, (float)0.96507365, (float)0.96421117, + (float)0.96333838, (float)0.96245521, (float)0.96156180, (float)0.96065807, (float)0.95974404, + (float)0.95881969, (float)0.95788515, (float)0.95694029, (float)0.95598525, (float)0.95501995, + (float)0.95404440, (float)0.95305860, (float)0.95206267, (float)0.95105648, (float)0.95004016, + (float)0.94901365, (float)0.94797695, (float)0.94693011, (float)0.94587314, (float)0.94480604, + (float)0.94372880, (float)0.94264150, (float)0.94154406, (float)0.94043654, (float)0.93931895, + (float)0.93819129, (float)0.93705362, (float)0.93590593, (float)0.93474817, (float)0.93358046, + (float)0.93240267, (float)0.93121493, (float)0.93001723, (float)0.92880952, (float)0.92759192, + (float)0.92636436, (float)0.92512691, (float)0.92387950, (float)0.92262226, (float)0.92135507, + (float)0.92007804, (float)0.91879123, (float)0.91749448, (float)0.91618794, (float)0.91487157, + (float)0.91354543, (float)0.91220951, (float)0.91086382, (float)0.90950835, (float)0.90814310, + (float)0.90676820, (float)0.90538365, (float)0.90398932, (float)0.90258527, (float)0.90117157, + (float)0.89974827, (float)0.89831525, (float)0.89687276, (float)0.89542055, (float)0.89395875, + (float)0.89248741, (float)0.89100647, (float)0.88951600, (float)0.88801610, (float)0.88650662, + (float)0.88498759, (float)0.88345915, (float)0.88192123, (float)0.88037384, (float)0.87881714, + (float)0.87725091, (float)0.87567532, (float)0.87409031, (float)0.87249595, (float)0.87089223, + (float)0.86927933, (float)0.86765701, (float)0.86602539, (float)0.86438447, (float)0.86273432, + (float)0.86107504, (float)0.85940641, (float)0.85772860, (float)0.85604161, (float)0.85434544, + (float)0.85264009, (float)0.85092574, (float)0.84920216, (float)0.84746951, (float)0.84572780, + (float)0.84397697, (float)0.84221715, (float)0.84044844, 
(float)0.83867055, (float)0.83688372, + (float)0.83508795, (float)0.83328319, (float)0.83146954, (float)0.82964706, (float)0.82781565, + (float)0.82597530, (float)0.82412612, (float)0.82226813, (float)0.82040137, (float)0.81852591, + (float)0.81664157, (float)0.81474847, (float)0.81284660, (float)0.81093609, (float)0.80901700, + (float)0.80708915, (float)0.80515265, (float)0.80320752, (float)0.80125374, (float)0.79929143, + (float)0.79732066, (float)0.79534125, (float)0.79335332, (float)0.79135686, (float)0.78935200, + (float)0.78733861, (float)0.78531694, (float)0.78328675, (float)0.78124815, (float)0.77920121, + (float)0.77714586, (float)0.77508223, (float)0.77301049, (float)0.77093029, (float)0.76884180, + (float)0.76674509, (float)0.76464021, (float)0.76252711, (float)0.76040596, (float)0.75827658, + (float)0.75613904, (float)0.75399339, (float)0.75183970, (float)0.74967796, (float)0.74750835, + (float)0.74533057, (float)0.74314481, (float)0.74095106, (float)0.73874938, (float)0.73653996, + (float)0.73432249, (float)0.73209721, (float)0.72986400, (float)0.72762305, (float)0.72537428, + (float)0.72311789, (float)0.72085363, (float)0.71858162, (float)0.71630186, (float)0.71401453, + (float)0.71171951, (float)0.70941705, (float)0.70710677, (float)0.70478898, (float)0.70246363, + (float)0.70013070, (float)0.69779032, (float)0.69544268, (float)0.69308734, (float)0.69072461, + (float)0.68835449, (float)0.68597704, (float)0.68359220, (float)0.68120021, (float)0.67880070, + (float)0.67639399, (float)0.67398006, (float)0.67155886, (float)0.66913044, (float)0.66669512, + (float)0.66425240, (float)0.66180259, (float)0.65934575, (float)0.65688181, (float)0.65441096, + (float)0.65193301, (float)0.64944804, (float)0.64695609, (float)0.64445722, (float)0.64195150, + (float)0.63943905, (float)0.63691956, (float)0.63439327, (float)0.63186014, (float)0.62932026, + (float)0.62677372, (float)0.62422055, (float)0.62166059, (float)0.61909389, (float)0.61652064, + (float)0.61394072, 
(float)0.61135429, (float)0.60876143, (float)0.60616189, (float)0.60355592, + (float)0.60094339, (float)0.59832448, (float)0.59569913, (float)0.59306765, (float)0.59042960, + (float)0.58778518, (float)0.58513451, (float)0.58247757, (float)0.57981461, (float)0.57714522, + (float)0.57446963, (float)0.57178789, (float)0.56910002, (float)0.56640613, (float)0.56370628, + (float)0.56100023, (float)0.55828822, (float)0.55557019, (float)0.55284619, (float)0.55011630, + (float)0.54738069, (float)0.54463905, (float)0.54189152, (float)0.53913826, (float)0.53637916, + (float)0.53361434, (float)0.53084403, (float)0.52806783, (float)0.52528596, (float)0.52249849, + (float)0.51970541, (float)0.51690674, (float)0.51410276, (float)0.51129305, (float)0.50847787, + (float)0.50565726, (float)0.50283122, (float)0.50000006, (float)0.49716327, (float)0.49432117, + (float)0.49147379, (float)0.48862115, (float)0.48576325, (float)0.48290038, (float)0.48003212, + (float)0.47715873, (float)0.47428021, (float)0.47139663, (float)0.46850798, (float)0.46561456, + (float)0.46271589, (float)0.45981231, (float)0.45690379, (float)0.45399037, (float)0.45107210, + (float)0.44814920, (float)0.44522130, (float)0.44228864, (float)0.43935123, (float)0.43640912, + (float)0.43346232, (float)0.43051112, (float)0.42755505, (float)0.42459446, (float)0.42162928, + (float)0.41865960, (float)0.41568545, (float)0.41270703, (float)0.40972400, (float)0.40673658, + (float)0.40374479, (float)0.40074870, (float)0.39774850, (float)0.39474386, (float)0.39173496, + (float)0.38872188, (float)0.38570464, (float)0.38268328, (float)0.37965804, (float)0.37662849, + (float)0.37359491, (float)0.37055734, (float)0.36751580, (float)0.36447033, (float)0.36142117, + (float)0.35836792, (float)0.35531086, (float)0.35224995, (float)0.34918529, (float)0.34611690, + (float)0.34304500, (float)0.33996922, (float)0.33688980, (float)0.33380675, (float)0.33072016, + (float)0.32763001, (float)0.32453656, (float)0.32143945, (float)0.31833887, 
(float)0.31523487, + (float)0.31212750, (float)0.30901679, (float)0.30590302, (float)0.30278572, (float)0.29966521, + (float)0.29654145, (float)0.29341453, (float)0.29028472, (float)0.28715155, (float)0.28401530, + (float)0.28087601, (float)0.27773371, (float)0.27458847, (float)0.27144048, (float)0.26828936, + (float)0.26513538, (float)0.26197854, (float)0.25881892, (float)0.25565651, (float)0.25249159, + (float)0.24932374, (float)0.24615324, (float)0.24298008, (float)0.23980433, (float)0.23662600, + (float)0.23344538, (float)0.23026201, (float)0.22707619, (float)0.22388794, (float)0.22069728, + (float)0.21750426, (float)0.21430916, (float)0.21111152, (float)0.20791161, (float)0.20470949, + (float)0.20150517, (float)0.19829892, (float)0.19509031, (float)0.19187963, (float)0.18866688, + (float)0.18545210, (float)0.18223536, (float)0.17901689, (float)0.17579627, (float)0.17257376, + (float)0.16934940, (float)0.16612324, (float)0.16289529, (float)0.15966584, (float)0.15643445, + (float)0.15320137, (float)0.14996666, (float)0.14673033, (float)0.14349243, (float)0.14025325, + (float)0.13701232, (float)0.13376991, (float)0.13052608, (float)0.12728085, (float)0.12403426, + (float)0.12078657, (float)0.11753736, (float)0.11428688, (float)0.11103519, (float)0.10778230, + (float)0.10452849, (float)0.10127334, (float)0.09801710, (float)0.09475980, (float)0.09150149, + (float)0.08824220, (float)0.08498220, (float)0.08172106, (float)0.07845904, (float)0.07519618, + (float)0.07193252, (float)0.06866808, (float)0.06540315, (float)0.06213728, (float)0.05887074, + (float)0.05560357, (float)0.05233581, (float)0.04906749, (float)0.04579888, (float)0.04252954, + (float)0.03925974, (float)0.03598953, (float)0.03271893, (float)0.02944798, (float)0.02617695, + (float)0.02290541, (float)0.01963361, (float)0.01636161, (float)0.01308943, (float)0.00981712, + (float)0.00654493, (float)0.00327244, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, 
(float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, + (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000 +}; + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.cc new file mode 100644 index 000000000..9f1093569 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.cc @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/modules/audio_processing/splitting_filter.h" + +#include "webrtc/base/checks.h" +#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +#include "webrtc/common_audio/channel_buffer.h" + +namespace webrtc { + +SplittingFilter::SplittingFilter(size_t num_channels, + size_t num_bands, + size_t num_frames) + : num_bands_(num_bands) { + RTC_CHECK(num_bands_ == 2 || num_bands_ == 3); + if (num_bands_ == 2) { + two_bands_states_.resize(num_channels); + } else if (num_bands_ == 3) { + for (size_t i = 0; i < num_channels; ++i) { + three_band_filter_banks_.push_back(std::unique_ptr( + new ThreeBandFilterBank(num_frames))); + } + } +} + +SplittingFilter::~SplittingFilter() = default; + +void SplittingFilter::Analysis(const IFChannelBuffer* data, + IFChannelBuffer* bands) { + RTC_DCHECK_EQ(num_bands_, bands->num_bands()); + RTC_DCHECK_EQ(data->num_channels(), bands->num_channels()); + RTC_DCHECK_EQ(data->num_frames(), + bands->num_frames_per_band() * bands->num_bands()); + if (bands->num_bands() == 2) { + TwoBandsAnalysis(data, bands); + } else if (bands->num_bands() == 3) { + ThreeBandsAnalysis(data, bands); + } +} + +void SplittingFilter::Synthesis(const IFChannelBuffer* bands, + IFChannelBuffer* data) { + RTC_DCHECK_EQ(num_bands_, bands->num_bands()); + RTC_DCHECK_EQ(data->num_channels(), bands->num_channels()); + RTC_DCHECK_EQ(data->num_frames(), + bands->num_frames_per_band() * bands->num_bands()); + if (bands->num_bands() == 2) { + TwoBandsSynthesis(bands, data); + } else if (bands->num_bands() == 3) { + ThreeBandsSynthesis(bands, data); + } +} + +void SplittingFilter::TwoBandsAnalysis(const IFChannelBuffer* data, + IFChannelBuffer* bands) { + RTC_DCHECK_EQ(two_bands_states_.size(), data->num_channels()); + for (size_t i = 0; i < two_bands_states_.size(); ++i) { + WebRtcSpl_AnalysisQMF(data->ibuf_const()->channels()[i], + data->num_frames(), + bands->ibuf()->channels(0)[i], + bands->ibuf()->channels(1)[i], + 
two_bands_states_[i].analysis_state1, + two_bands_states_[i].analysis_state2); + } +} + +void SplittingFilter::TwoBandsSynthesis(const IFChannelBuffer* bands, + IFChannelBuffer* data) { + RTC_DCHECK_LE(data->num_channels(), two_bands_states_.size()); + for (size_t i = 0; i < data->num_channels(); ++i) { + WebRtcSpl_SynthesisQMF(bands->ibuf_const()->channels(0)[i], + bands->ibuf_const()->channels(1)[i], + bands->num_frames_per_band(), + data->ibuf()->channels()[i], + two_bands_states_[i].synthesis_state1, + two_bands_states_[i].synthesis_state2); + } +} + +void SplittingFilter::ThreeBandsAnalysis(const IFChannelBuffer* data, + IFChannelBuffer* bands) { + RTC_DCHECK_EQ(three_band_filter_banks_.size(), data->num_channels()); + for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) { + three_band_filter_banks_[i]->Analysis(data->fbuf_const()->channels()[i], + data->num_frames(), + bands->fbuf()->bands(i)); + } +} + +void SplittingFilter::ThreeBandsSynthesis(const IFChannelBuffer* bands, + IFChannelBuffer* data) { + RTC_DCHECK_LE(data->num_channels(), three_band_filter_banks_.size()); + for (size_t i = 0; i < data->num_channels(); ++i) { + three_band_filter_banks_[i]->Synthesis(bands->fbuf_const()->bands(i), + bands->num_frames_per_band(), + data->fbuf()->channels()[i]); + } +} + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.h new file mode 100644 index 000000000..2d3750ae6 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/splitting_filter.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_ + +#include +#include +#include + +#include "webrtc/modules/audio_processing/three_band_filter_bank.h" + +namespace webrtc { + +class IFChannelBuffer; + +struct TwoBandsStates { + TwoBandsStates() { + memset(analysis_state1, 0, sizeof(analysis_state1)); + memset(analysis_state2, 0, sizeof(analysis_state2)); + memset(synthesis_state1, 0, sizeof(synthesis_state1)); + memset(synthesis_state2, 0, sizeof(synthesis_state2)); + } + + static const int kStateSize = 6; + int analysis_state1[kStateSize]; + int analysis_state2[kStateSize]; + int synthesis_state1[kStateSize]; + int synthesis_state2[kStateSize]; +}; + +// Splitting filter which is able to split into and merge from 2 or 3 frequency +// bands. The number of channels needs to be provided at construction time. +// +// For each block, Analysis() is called to split into bands and then Synthesis() +// to merge these bands again. The input and output signals are contained in +// IFChannelBuffers and for the different bands an array of IFChannelBuffers is +// used. +class SplittingFilter { + public: + SplittingFilter(size_t num_channels, size_t num_bands, size_t num_frames); + ~SplittingFilter(); + + void Analysis(const IFChannelBuffer* data, IFChannelBuffer* bands); + void Synthesis(const IFChannelBuffer* bands, IFChannelBuffer* data); + + private: + // Two-band analysis and synthesis work for 640 samples or less. 
+ void TwoBandsAnalysis(const IFChannelBuffer* data, IFChannelBuffer* bands); + void TwoBandsSynthesis(const IFChannelBuffer* bands, IFChannelBuffer* data); + void ThreeBandsAnalysis(const IFChannelBuffer* data, IFChannelBuffer* bands); + void ThreeBandsSynthesis(const IFChannelBuffer* bands, IFChannelBuffer* data); + void InitBuffers(); + + const size_t num_bands_; + std::vector two_bands_states_; + std::vector> three_band_filter_banks_; +}; + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.cc new file mode 100644 index 000000000..61071bbff --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.cc @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// An implementation of a 3-band FIR filter-bank with DCT modulation, similar to +// the proposed in "Multirate Signal Processing for Communication Systems" by +// Fredric J Harris. +// +// The idea is to take a heterodyne system and change the order of the +// components to get something which is efficient to implement digitally. 
+// +// It is possible to separate the filter using the noble identity as follows: +// +// H(z) = H0(z^3) + z^-1 * H1(z^3) + z^-2 * H2(z^3) +// +// This is used in the analysis stage to first downsample serial to parallel +// and then filter each branch with one of these polyphase decompositions of the +// lowpass prototype. Because each filter is only a modulation of the prototype, +// it is enough to multiply each coefficient by the respective cosine value to +// shift it to the desired band. But because the cosine period is 12 samples, +// it requires separating the prototype even further using the noble identity. +// After filtering and modulating for each band, the output of all filters is +// accumulated to get the downsampled bands. +// +// A similar logic can be applied to the synthesis stage. + +// MSVC++ requires this to be set before any other includes to get M_PI. +#define _USE_MATH_DEFINES + +#include "webrtc/modules/audio_processing/three_band_filter_bank.h" + +#include + +#include "webrtc/base/checks.h" + +namespace webrtc { +namespace { + +const size_t kNumBands = 3; +const size_t kSparsity = 4; + +// Factors to take into account when choosing |kNumCoeffs|: +// 1. Higher |kNumCoeffs|, means faster transition, which ensures less +// aliasing. This is especially important when there is non-linear +// processing between the splitting and merging. +// 2. The delay that this filter bank introduces is +// |kNumBands| * |kSparsity| * |kNumCoeffs| / 2, so it increases linearly +// with |kNumCoeffs|. +// 3. The computation complexity also increases linearly with |kNumCoeffs|. 
+const size_t kNumCoeffs = 4; + +// The Matlab code to generate these |kLowpassCoeffs| is: +// +// N = kNumBands * kSparsity * kNumCoeffs - 1; +// h = fir1(N, 1 / (2 * kNumBands), kaiser(N + 1, 3.5)); +// reshape(h, kNumBands * kSparsity, kNumCoeffs); +// +// Because the total bandwidth of the lower and higher band is double the middle +// one (because of the spectrum parity), the low-pass prototype is half the +// bandwidth of 1 / (2 * |kNumBands|) and is then shifted with cosine modulation +// to the right places. +// A Kaiser window is used because of its flexibility and the alpha is set to +// 3.5, since that sets a stop band attenuation of 40dB ensuring a fast +// transition. +const float kLowpassCoeffs[kNumBands * kSparsity][kNumCoeffs] = + {{-0.00047749f, -0.00496888f, +0.16547118f, +0.00425496f}, + {-0.00173287f, -0.01585778f, +0.14989004f, +0.00994113f}, + {-0.00304815f, -0.02536082f, +0.12154542f, +0.01157993f}, + {-0.00383509f, -0.02982767f, +0.08543175f, +0.00983212f}, + {-0.00346946f, -0.02587886f, +0.04760441f, +0.00607594f}, + {-0.00154717f, -0.01136076f, +0.01387458f, +0.00186353f}, + {+0.00186353f, +0.01387458f, -0.01136076f, -0.00154717f}, + {+0.00607594f, +0.04760441f, -0.02587886f, -0.00346946f}, + {+0.00983212f, +0.08543175f, -0.02982767f, -0.00383509f}, + {+0.01157993f, +0.12154542f, -0.02536082f, -0.00304815f}, + {+0.00994113f, +0.14989004f, -0.01585778f, -0.00173287f}, + {+0.00425496f, +0.16547118f, -0.00496888f, -0.00047749f}}; + +// Downsamples |in| into |out|, taking one every |kNumbands| starting from +// |offset|. |split_length| is the |out| length. |in| has to be at least +// |kNumBands| * |split_length| long. +void Downsample(const float* in, + size_t split_length, + size_t offset, + float* out) { + for (size_t i = 0; i < split_length; ++i) { + out[i] = in[kNumBands * i + offset]; + } +} + +// Upsamples |in| into |out|, scaling by |kNumBands| and accumulating it every +// |kNumBands| starting from |offset|. 
|split_length| is the |in| length. |out| +// has to be at least |kNumBands| * |split_length| long. +void Upsample(const float* in, size_t split_length, size_t offset, float* out) { + for (size_t i = 0; i < split_length; ++i) { + out[kNumBands * i + offset] += kNumBands * in[i]; + } +} + +} // namespace + +// Because the low-pass filter prototype has half bandwidth it is possible to +// use a DCT to shift it in both directions at the same time, to the center +// frequencies [1 / 12, 3 / 12, 5 / 12]. +ThreeBandFilterBank::ThreeBandFilterBank(size_t length) + : in_buffer_(rtc::CheckedDivExact(length, kNumBands)), + out_buffer_(in_buffer_.size()) { + for (size_t i = 0; i < kSparsity; ++i) { + for (size_t j = 0; j < kNumBands; ++j) { + analysis_filters_.push_back( + std::unique_ptr(new SparseFIRFilter( + kLowpassCoeffs[i * kNumBands + j], kNumCoeffs, kSparsity, i))); + synthesis_filters_.push_back( + std::unique_ptr(new SparseFIRFilter( + kLowpassCoeffs[i * kNumBands + j], kNumCoeffs, kSparsity, i))); + } + } + dct_modulation_.resize(kNumBands * kSparsity); + for (size_t i = 0; i < dct_modulation_.size(); ++i) { + dct_modulation_[i].resize(kNumBands); + for (size_t j = 0; j < kNumBands; ++j) { + dct_modulation_[i][j] = + 2.f * cos(2.f * M_PI * i * (2.f * j + 1.f) / dct_modulation_.size()); + } + } +} + +ThreeBandFilterBank::~ThreeBandFilterBank() = default; + +// The analysis can be separated in these steps: +// 1. Serial to parallel downsampling by a factor of |kNumBands|. +// 2. Filtering of |kSparsity| different delayed signals with polyphase +// decomposition of the low-pass prototype filter and upsampled by a factor +// of |kSparsity|. +// 3. Modulating with cosines and accumulating to get the desired band. 
+void ThreeBandFilterBank::Analysis(const float* in, + size_t length, + float* const* out) { + RTC_CHECK_EQ(in_buffer_.size(), rtc::CheckedDivExact(length, kNumBands)); + for (size_t i = 0; i < kNumBands; ++i) { + memset(out[i], 0, in_buffer_.size() * sizeof(*out[i])); + } + for (size_t i = 0; i < kNumBands; ++i) { + Downsample(in, in_buffer_.size(), kNumBands - i - 1, &in_buffer_[0]); + for (size_t j = 0; j < kSparsity; ++j) { + const size_t offset = i + j * kNumBands; + analysis_filters_[offset]->Filter(&in_buffer_[0], + in_buffer_.size(), + &out_buffer_[0]); + DownModulate(&out_buffer_[0], out_buffer_.size(), offset, out); + } + } +} + +// The synthesis can be separated in these steps: +// 1. Modulating with cosines. +// 2. Filtering each one with a polyphase decomposition of the low-pass +// prototype filter upsampled by a factor of |kSparsity| and accumulating +// |kSparsity| signals with different delays. +// 3. Parallel to serial upsampling by a factor of |kNumBands|. +void ThreeBandFilterBank::Synthesis(const float* const* in, + size_t split_length, + float* out) { + RTC_CHECK_EQ(in_buffer_.size(), split_length); + memset(out, 0, kNumBands * in_buffer_.size() * sizeof(*out)); + for (size_t i = 0; i < kNumBands; ++i) { + for (size_t j = 0; j < kSparsity; ++j) { + const size_t offset = i + j * kNumBands; + UpModulate(in, in_buffer_.size(), offset, &in_buffer_[0]); + synthesis_filters_[offset]->Filter(&in_buffer_[0], + in_buffer_.size(), + &out_buffer_[0]); + Upsample(&out_buffer_[0], out_buffer_.size(), i, out); + } + } +} + + +// Modulates |in| by |dct_modulation_| and accumulates it in each of the +// |kNumBands| bands of |out|. |offset| is the index in the period of the +// cosines used for modulation. |split_length| is the length of |in| and each +// band of |out|. 
+void ThreeBandFilterBank::DownModulate(const float* in, + size_t split_length, + size_t offset, + float* const* out) { + for (size_t i = 0; i < kNumBands; ++i) { + for (size_t j = 0; j < split_length; ++j) { + out[i][j] += dct_modulation_[offset][i] * in[j]; + } + } +} + +// Modulates each of the |kNumBands| bands of |in| by |dct_modulation_| and +// accumulates them in |out|. |out| is cleared before starting to accumulate. +// |offset| is the index in the period of the cosines used for modulation. +// |split_length| is the length of each band of |in| and |out|. +void ThreeBandFilterBank::UpModulate(const float* const* in, + size_t split_length, + size_t offset, + float* out) { + memset(out, 0, split_length * sizeof(*out)); + for (size_t i = 0; i < kNumBands; ++i) { + for (size_t j = 0; j < split_length; ++j) { + out[j] += dct_modulation_[offset][i] * in[i][j]; + } + } +} + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.h new file mode 100644 index 000000000..24e7831d7 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/three_band_filter_bank.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_ + +#include +#include +#include + +#include "webrtc/common_audio/sparse_fir_filter.h" + +namespace webrtc { + +// An implementation of a 3-band FIR filter-bank with DCT modulation, similar to +// the proposed in "Multirate Signal Processing for Communication Systems" by +// Fredric J Harris. +// The low-pass filter prototype has these characteristics: +// * Pass-band ripple = 0.3dB +// * Pass-band frequency = 0.147 (7kHz at 48kHz) +// * Stop-band attenuation = 40dB +// * Stop-band frequency = 0.192 (9.2kHz at 48kHz) +// * Delay = 24 samples (500us at 48kHz) +// * Linear phase +// This filter bank does not satisfy perfect reconstruction. The SNR after +// analysis and synthesis (with no processing in between) is approximately 9.5dB +// depending on the input signal after compensating for the delay. +class ThreeBandFilterBank final { + public: + explicit ThreeBandFilterBank(size_t length); + ~ThreeBandFilterBank(); + + // Splits |in| into 3 downsampled frequency bands in |out|. + // |length| is the |in| length. Each of the 3 bands of |out| has to have a + // length of |length| / 3. + void Analysis(const float* in, size_t length, float* const* out); + + // Merges the 3 downsampled frequency bands in |in| into |out|. + // |split_length| is the length of each band of |in|. |out| has to have at + // least a length of 3 * |split_length|. 
+ void Synthesis(const float* const* in, size_t split_length, float* out); + + private: + void DownModulate(const float* in, + size_t split_length, + size_t offset, + float* const* out); + void UpModulate(const float* const* in, + size_t split_length, + size_t offset, + float* out); + + std::vector in_buffer_; + std::vector out_buffer_; + std::vector> analysis_filters_; + std::vector> synthesis_filters_; + std::vector> dct_modulation_; +}; + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.cc new file mode 100644 index 000000000..7f4508ecc --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.cc @@ -0,0 +1,53 @@ +/* + * Copyright 2016 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "webrtc/modules/audio_processing/utility/block_mean_calculator.h" + +#include "webrtc/base/checks.h" + +namespace webrtc { + +BlockMeanCalculator::BlockMeanCalculator(size_t block_length) + : block_length_(block_length), + count_(0), + sum_(0.0), + mean_(0.0) { + RTC_DCHECK(block_length_ != 0); +} + +void BlockMeanCalculator::Reset() { + Clear(); + mean_ = 0.0; +} + +void BlockMeanCalculator::AddValue(float value) { + sum_ += value; + ++count_; + if (count_ == block_length_) { + mean_ = sum_ / block_length_; + Clear(); + } +} + +bool BlockMeanCalculator::EndOfBlock() const { + return count_ == 0; +} + +float BlockMeanCalculator::GetLatestMean() const { + return mean_; +} + +// Flush all samples added. +void BlockMeanCalculator::Clear() { + count_ = 0; + sum_ = 0.0; +} + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.h new file mode 100644 index 000000000..71e8b63ce --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/block_mean_calculator.h @@ -0,0 +1,52 @@ +/* + * Copyright 2016 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_BLOCK_MEAN_CALCULATOR_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_BLOCK_MEAN_CALCULATOR_H_ + +#include + +#include "webrtc/base/constructormagic.h" + +namespace webrtc { + +// BlockMeanCalculator calculates the mean of a block of values. 
Values are +// added one after another, and the mean is updated at the end of every block. +class BlockMeanCalculator { + public: + explicit BlockMeanCalculator(size_t block_length); + + // Reset. + void Reset(); + + // Add one value to the sequence. + void AddValue(float value); + + // Return whether the latest added value was at the end of a block. + bool EndOfBlock() const; + + // Return the latest mean. + float GetLatestMean() const; + + private: + // Clear all values added. + void Clear(); + + const size_t block_length_; + size_t count_; + float sum_; + float mean_; + + RTC_DISALLOW_COPY_AND_ASSIGN(BlockMeanCalculator); +}; + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_BLOCK_MEAN_CALCULATOR_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.cc new file mode 100644 index 000000000..bc67ba1fe --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.cc @@ -0,0 +1,703 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/modules/audio_processing/utility/delay_estimator.h" + +#include +#include +#include + +#include "webrtc/base/checks.h" + +// Number of right shifts for scaling is linearly depending on number of bits in +// the far-end binary spectrum. +static const int kShiftsAtZero = 13; // Right shifts at zero binary spectrum. +static const int kShiftsLinearSlope = 3; + +static const int32_t kProbabilityOffset = 1024; // 2 in Q9. 
+static const int32_t kProbabilityLowerLimit = 8704; // 17 in Q9. +static const int32_t kProbabilityMinSpread = 2816; // 5.5 in Q9. + +// Robust validation settings +static const float kHistogramMax = 3000.f; +static const float kLastHistogramMax = 250.f; +static const float kMinHistogramThreshold = 1.5f; +static const int kMinRequiredHits = 10; +static const int kMaxHitsWhenPossiblyNonCausal = 10; +static const int kMaxHitsWhenPossiblyCausal = 1000; +static const float kQ14Scaling = 1.f / (1 << 14); // Scaling by 2^14 to get Q0. +static const float kFractionSlope = 0.05f; +static const float kMinFractionWhenPossiblyCausal = 0.5f; +static const float kMinFractionWhenPossiblyNonCausal = 0.25f; + +// Counts and returns number of bits of a 32-bit word. +static int BitCount(uint32_t u32) { + uint32_t tmp = u32 - ((u32 >> 1) & 033333333333) - + ((u32 >> 2) & 011111111111); + tmp = ((tmp + (tmp >> 3)) & 030707070707); + tmp = (tmp + (tmp >> 6)); + tmp = (tmp + (tmp >> 12) + (tmp >> 24)) & 077; + + return ((int) tmp); +} + +// Compares the |binary_vector| with all rows of the |binary_matrix| and counts +// per row the number of times they have the same value. +// +// Inputs: +// - binary_vector : binary "vector" stored in a long +// - binary_matrix : binary "matrix" stored as a vector of long +// - matrix_size : size of binary "matrix" +// +// Output: +// - bit_counts : "Vector" stored as a long, containing for each +// row the number of times the matrix row and the +// input vector have the same value +// +static void BitCountComparison(uint32_t binary_vector, + const uint32_t* binary_matrix, + int matrix_size, + int32_t* bit_counts) { + int n = 0; + + // Compare |binary_vector| with all rows of the |binary_matrix| + for (; n < matrix_size; n++) { + bit_counts[n] = (int32_t) BitCount(binary_vector ^ binary_matrix[n]); + } +} + +// Collects necessary statistics for the HistogramBasedValidation(). 
This +// function has to be called prior to calling HistogramBasedValidation(). The +// statistics updated and used by the HistogramBasedValidation() are: +// 1. the number of |candidate_hits|, which states for how long we have had the +// same |candidate_delay| +// 2. the |histogram| of candidate delays over time. This histogram is +// weighted with respect to a reliability measure and time-varying to cope +// with possible delay shifts. +// For further description see commented code. +// +// Inputs: +// - candidate_delay : The delay to validate. +// - valley_depth_q14 : The cost function has a valley/minimum at the +// |candidate_delay| location. |valley_depth_q14| is the +// cost function difference between the minimum and +// maximum locations. The value is in the Q14 domain. +// - valley_level_q14 : Is the cost function value at the minimum, in Q14. +static void UpdateRobustValidationStatistics(BinaryDelayEstimator* self, + int candidate_delay, + int32_t valley_depth_q14, + int32_t valley_level_q14) { + const float valley_depth = valley_depth_q14 * kQ14Scaling; + float decrease_in_last_set = valley_depth; + const int max_hits_for_slow_change = (candidate_delay < self->last_delay) ? + kMaxHitsWhenPossiblyNonCausal : kMaxHitsWhenPossiblyCausal; + int i = 0; + + RTC_DCHECK_EQ(self->history_size, self->farend->history_size); + // Reset |candidate_hits| if we have a new candidate. + if (candidate_delay != self->last_candidate_delay) { + self->candidate_hits = 0; + self->last_candidate_delay = candidate_delay; + } + self->candidate_hits++; + + // The |histogram| is updated differently across the bins. + // 1. The |candidate_delay| histogram bin is increased with the + // |valley_depth|, which is a simple measure of how reliable the + // |candidate_delay| is. The histogram is not increased above + // |kHistogramMax|. 
+ self->histogram[candidate_delay] += valley_depth; + if (self->histogram[candidate_delay] > kHistogramMax) { + self->histogram[candidate_delay] = kHistogramMax; + } + // 2. The histogram bins in the neighborhood of |candidate_delay| are + // unaffected. The neighborhood is defined as x + {-2, -1, 0, 1}. + // 3. The histogram bins in the neighborhood of |last_delay| are decreased + // with |decrease_in_last_set|. This value equals the difference between + // the cost function values at the locations |candidate_delay| and + // |last_delay| until we reach |max_hits_for_slow_change| consecutive hits + // at the |candidate_delay|. If we exceed this amount of hits the + // |candidate_delay| is a "potential" candidate and we start decreasing + // these histogram bins more rapidly with |valley_depth|. + if (self->candidate_hits < max_hits_for_slow_change) { + decrease_in_last_set = (self->mean_bit_counts[self->compare_delay] - + valley_level_q14) * kQ14Scaling; + } + // 4. All other bins are decreased with |valley_depth|. + // TODO(bjornv): Investigate how to make this loop more efficient. Split up + // the loop? Remove parts that doesn't add too much. + for (i = 0; i < self->history_size; ++i) { + int is_in_last_set = (i >= self->last_delay - 2) && + (i <= self->last_delay + 1) && (i != candidate_delay); + int is_in_candidate_set = (i >= candidate_delay - 2) && + (i <= candidate_delay + 1); + self->histogram[i] -= decrease_in_last_set * is_in_last_set + + valley_depth * (!is_in_last_set && !is_in_candidate_set); + // 5. No histogram bin can go below 0. + if (self->histogram[i] < 0) { + self->histogram[i] = 0; + } + } +} + +// Validates the |candidate_delay|, estimated in WebRtc_ProcessBinarySpectrum(), +// based on a mix of counting concurring hits with a modified histogram +// of recent delay estimates. In brief a candidate is valid (returns 1) if it +// is the most likely according to the histogram. There are a couple of +// exceptions that are worth mentioning: +// 1. 
If the |candidate_delay| < |last_delay| it can be that we are in a +// non-causal state, breaking a possible echo control algorithm. Hence, we +// open up for a quicker change by allowing the change even if the +// |candidate_delay| is not the most likely one according to the histogram. +// 2. There's a minimum number of hits (kMinRequiredHits) and the histogram +// value has to reached a minimum (kMinHistogramThreshold) to be valid. +// 3. The action is also depending on the filter length used for echo control. +// If the delay difference is larger than what the filter can capture, we +// also move quicker towards a change. +// For further description see commented code. +// +// Input: +// - candidate_delay : The delay to validate. +// +// Return value: +// - is_histogram_valid : 1 - The |candidate_delay| is valid. +// 0 - Otherwise. +static int HistogramBasedValidation(const BinaryDelayEstimator* self, + int candidate_delay) { + float fraction = 1.f; + float histogram_threshold = self->histogram[self->compare_delay]; + const int delay_difference = candidate_delay - self->last_delay; + int is_histogram_valid = 0; + + // The histogram based validation of |candidate_delay| is done by comparing + // the |histogram| at bin |candidate_delay| with a |histogram_threshold|. + // This |histogram_threshold| equals a |fraction| of the |histogram| at bin + // |last_delay|. The |fraction| is a piecewise linear function of the + // |delay_difference| between the |candidate_delay| and the |last_delay| + // allowing for a quicker move if + // i) a potential echo control filter can not handle these large differences. + // ii) keeping |last_delay| instead of updating to |candidate_delay| could + // force an echo control into a non-causal state. + // We further require the histogram to have reached a minimum value of + // |kMinHistogramThreshold|. In addition, we also require the number of + // |candidate_hits| to be more than |kMinRequiredHits| to remove spurious + // values. 
+ + // Calculate a comparison histogram value (|histogram_threshold|) that is + // depending on the distance between the |candidate_delay| and |last_delay|. + // TODO(bjornv): How much can we gain by turning the fraction calculation + // into tables? + if (delay_difference > self->allowed_offset) { + fraction = 1.f - kFractionSlope * (delay_difference - self->allowed_offset); + fraction = (fraction > kMinFractionWhenPossiblyCausal ? fraction : + kMinFractionWhenPossiblyCausal); + } else if (delay_difference < 0) { + fraction = kMinFractionWhenPossiblyNonCausal - + kFractionSlope * delay_difference; + fraction = (fraction > 1.f ? 1.f : fraction); + } + histogram_threshold *= fraction; + histogram_threshold = (histogram_threshold > kMinHistogramThreshold ? + histogram_threshold : kMinHistogramThreshold); + + is_histogram_valid = + (self->histogram[candidate_delay] >= histogram_threshold) && + (self->candidate_hits > kMinRequiredHits); + + return is_histogram_valid; +} + +// Performs a robust validation of the |candidate_delay| estimated in +// WebRtc_ProcessBinarySpectrum(). The algorithm takes the +// |is_instantaneous_valid| and the |is_histogram_valid| and combines them +// into a robust validation. The HistogramBasedValidation() has to be called +// prior to this call. +// For further description on how the combination is done, see commented code. +// +// Inputs: +// - candidate_delay : The delay to validate. +// - is_instantaneous_valid : The instantaneous validation performed in +// WebRtc_ProcessBinarySpectrum(). +// - is_histogram_valid : The histogram based validation. +// +// Return value: +// - is_robust : 1 - The candidate_delay is valid according to a +// combination of the two inputs. +// : 0 - Otherwise. 
+static int RobustValidation(const BinaryDelayEstimator* self, + int candidate_delay, + int is_instantaneous_valid, + int is_histogram_valid) { + int is_robust = 0; + + // The final robust validation is based on the two algorithms; 1) the + // |is_instantaneous_valid| and 2) the histogram based with result stored in + // |is_histogram_valid|. + // i) Before we actually have a valid estimate (|last_delay| == -2), we say + // a candidate is valid if either algorithm states so + // (|is_instantaneous_valid| OR |is_histogram_valid|). + is_robust = (self->last_delay < 0) && + (is_instantaneous_valid || is_histogram_valid); + // ii) Otherwise, we need both algorithms to be certain + // (|is_instantaneous_valid| AND |is_histogram_valid|) + is_robust |= is_instantaneous_valid && is_histogram_valid; + // iii) With one exception, i.e., the histogram based algorithm can overrule + // the instantaneous one if |is_histogram_valid| = 1 and the histogram + // is significantly strong. + is_robust |= is_histogram_valid && + (self->histogram[candidate_delay] > self->last_delay_histogram); + + return is_robust; +} + +void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) { + + if (self == NULL) { + return; + } + + free(self->binary_far_history); + self->binary_far_history = NULL; + + free(self->far_bit_counts); + self->far_bit_counts = NULL; + + free(self); +} + +BinaryDelayEstimatorFarend* WebRtc_CreateBinaryDelayEstimatorFarend( + int history_size) { + BinaryDelayEstimatorFarend* self = NULL; + + if (history_size > 1) { + // Sanity conditions fulfilled. 
+ self = static_cast( + malloc(sizeof(BinaryDelayEstimatorFarend))); + } + if (self == NULL) { + return NULL; + } + + self->history_size = 0; + self->binary_far_history = NULL; + self->far_bit_counts = NULL; + if (WebRtc_AllocateFarendBufferMemory(self, history_size) == 0) { + WebRtc_FreeBinaryDelayEstimatorFarend(self); + self = NULL; + } + return self; +} + +int WebRtc_AllocateFarendBufferMemory(BinaryDelayEstimatorFarend* self, + int history_size) { + RTC_DCHECK(self); + // (Re-)Allocate memory for history buffers. + self->binary_far_history = static_cast( + realloc(self->binary_far_history, + history_size * sizeof(*self->binary_far_history))); + self->far_bit_counts = static_cast( + realloc(self->far_bit_counts, + history_size * sizeof(*self->far_bit_counts))); + if ((self->binary_far_history == NULL) || (self->far_bit_counts == NULL)) { + history_size = 0; + } + // Fill with zeros if we have expanded the buffers. + if (history_size > self->history_size) { + int size_diff = history_size - self->history_size; + memset(&self->binary_far_history[self->history_size], + 0, + sizeof(*self->binary_far_history) * size_diff); + memset(&self->far_bit_counts[self->history_size], + 0, + sizeof(*self->far_bit_counts) * size_diff); + } + self->history_size = history_size; + + return self->history_size; +} + +void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) { + RTC_DCHECK(self); + memset(self->binary_far_history, 0, sizeof(uint32_t) * self->history_size); + memset(self->far_bit_counts, 0, sizeof(int) * self->history_size); +} + +void WebRtc_SoftResetBinaryDelayEstimatorFarend( + BinaryDelayEstimatorFarend* self, int delay_shift) { + int abs_shift = abs(delay_shift); + int shift_size = 0; + int dest_index = 0; + int src_index = 0; + int padding_index = 0; + + RTC_DCHECK(self); + shift_size = self->history_size - abs_shift; + RTC_DCHECK_GT(shift_size, 0); + if (delay_shift == 0) { + return; + } else if (delay_shift > 0) { + dest_index = abs_shift; + 
} else if (delay_shift < 0) { + src_index = abs_shift; + padding_index = shift_size; + } + + // Shift and zero pad buffers. + memmove(&self->binary_far_history[dest_index], + &self->binary_far_history[src_index], + sizeof(*self->binary_far_history) * shift_size); + memset(&self->binary_far_history[padding_index], 0, + sizeof(*self->binary_far_history) * abs_shift); + memmove(&self->far_bit_counts[dest_index], + &self->far_bit_counts[src_index], + sizeof(*self->far_bit_counts) * shift_size); + memset(&self->far_bit_counts[padding_index], 0, + sizeof(*self->far_bit_counts) * abs_shift); +} + +void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* handle, + uint32_t binary_far_spectrum) { + RTC_DCHECK(handle); + // Shift binary spectrum history and insert current |binary_far_spectrum|. + memmove(&(handle->binary_far_history[1]), &(handle->binary_far_history[0]), + (handle->history_size - 1) * sizeof(uint32_t)); + handle->binary_far_history[0] = binary_far_spectrum; + + // Shift history of far-end binary spectrum bit counts and insert bit count + // of current |binary_far_spectrum|. + memmove(&(handle->far_bit_counts[1]), &(handle->far_bit_counts[0]), + (handle->history_size - 1) * sizeof(int)); + handle->far_bit_counts[0] = BitCount(binary_far_spectrum); +} + +void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self) { + + if (self == NULL) { + return; + } + + free(self->mean_bit_counts); + self->mean_bit_counts = NULL; + + free(self->bit_counts); + self->bit_counts = NULL; + + free(self->binary_near_history); + self->binary_near_history = NULL; + + free(self->histogram); + self->histogram = NULL; + + // BinaryDelayEstimator does not have ownership of |farend|, hence we do not + // free the memory here. That should be handled separately by the user. 
+ self->farend = NULL; + + free(self); +} + +BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator( + BinaryDelayEstimatorFarend* farend, int max_lookahead) { + BinaryDelayEstimator* self = NULL; + + if ((farend != NULL) && (max_lookahead >= 0)) { + // Sanity conditions fulfilled. + self = static_cast( + malloc(sizeof(BinaryDelayEstimator))); + } + if (self == NULL) { + return NULL; + } + + self->farend = farend; + self->near_history_size = max_lookahead + 1; + self->history_size = 0; + self->robust_validation_enabled = 0; // Disabled by default. + self->allowed_offset = 0; + + self->lookahead = max_lookahead; + + // Allocate memory for spectrum and history buffers. + self->mean_bit_counts = NULL; + self->bit_counts = NULL; + self->histogram = NULL; + self->binary_near_history = static_cast( + malloc((max_lookahead + 1) * sizeof(*self->binary_near_history))); + if (self->binary_near_history == NULL || + WebRtc_AllocateHistoryBufferMemory(self, farend->history_size) == 0) { + WebRtc_FreeBinaryDelayEstimator(self); + self = NULL; + } + + return self; +} + +int WebRtc_AllocateHistoryBufferMemory(BinaryDelayEstimator* self, + int history_size) { + BinaryDelayEstimatorFarend* far = self->farend; + // (Re-)Allocate memory for spectrum and history buffers. + if (history_size != far->history_size) { + // Only update far-end buffers if we need. + history_size = WebRtc_AllocateFarendBufferMemory(far, history_size); + } + // The extra array element in |mean_bit_counts| and |histogram| is a dummy + // element only used while |last_delay| == -2, i.e., before we have a valid + // estimate. 
+ self->mean_bit_counts = static_cast( + realloc(self->mean_bit_counts, + (history_size + 1) * sizeof(*self->mean_bit_counts))); + self->bit_counts = static_cast( + realloc(self->bit_counts, history_size * sizeof(*self->bit_counts))); + self->histogram = static_cast( + realloc(self->histogram, (history_size + 1) * sizeof(*self->histogram))); + + if ((self->mean_bit_counts == NULL) || + (self->bit_counts == NULL) || + (self->histogram == NULL)) { + history_size = 0; + } + // Fill with zeros if we have expanded the buffers. + if (history_size > self->history_size) { + int size_diff = history_size - self->history_size; + memset(&self->mean_bit_counts[self->history_size], + 0, + sizeof(*self->mean_bit_counts) * size_diff); + memset(&self->bit_counts[self->history_size], + 0, + sizeof(*self->bit_counts) * size_diff); + memset(&self->histogram[self->history_size], + 0, + sizeof(*self->histogram) * size_diff); + } + self->history_size = history_size; + + return self->history_size; +} + +void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self) { + int i = 0; + RTC_DCHECK(self); + + memset(self->bit_counts, 0, sizeof(int32_t) * self->history_size); + memset(self->binary_near_history, + 0, + sizeof(uint32_t) * self->near_history_size); + for (i = 0; i <= self->history_size; ++i) { + self->mean_bit_counts[i] = (20 << 9); // 20 in Q9. + self->histogram[i] = 0.f; + } + self->minimum_probability = kMaxBitCountsQ9; // 32 in Q9. + self->last_delay_probability = (int) kMaxBitCountsQ9; // 32 in Q9. + + // Default return value if we're unable to estimate. -1 is used for errors. 
+ self->last_delay = -2; + + self->last_candidate_delay = -2; + self->compare_delay = self->history_size; + self->candidate_hits = 0; + self->last_delay_histogram = 0.f; +} + +int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self, + int delay_shift) { + int lookahead = 0; + RTC_DCHECK(self); + lookahead = self->lookahead; + self->lookahead -= delay_shift; + if (self->lookahead < 0) { + self->lookahead = 0; + } + if (self->lookahead > self->near_history_size - 1) { + self->lookahead = self->near_history_size - 1; + } + return lookahead - self->lookahead; +} + +int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self, + uint32_t binary_near_spectrum) { + int i = 0; + int candidate_delay = -1; + int valid_candidate = 0; + + int32_t value_best_candidate = kMaxBitCountsQ9; + int32_t value_worst_candidate = 0; + int32_t valley_depth = 0; + + RTC_DCHECK(self); + if (self->farend->history_size != self->history_size) { + // Non matching history sizes. + return -1; + } + if (self->near_history_size > 1) { + // If we apply lookahead, shift near-end binary spectrum history. Insert + // current |binary_near_spectrum| and pull out the delayed one. + memmove(&(self->binary_near_history[1]), &(self->binary_near_history[0]), + (self->near_history_size - 1) * sizeof(uint32_t)); + self->binary_near_history[0] = binary_near_spectrum; + binary_near_spectrum = self->binary_near_history[self->lookahead]; + } + + // Compare with delayed spectra and store the |bit_counts| for each delay. + BitCountComparison(binary_near_spectrum, self->farend->binary_far_history, + self->history_size, self->bit_counts); + + // Update |mean_bit_counts|, which is the smoothed version of |bit_counts|. + for (i = 0; i < self->history_size; i++) { + // |bit_counts| is constrained to [0, 32], meaning we can smooth with a + // factor up to 2^26. We use Q9. + int32_t bit_count = (self->bit_counts[i] << 9); // Q9. 
+ + // Update |mean_bit_counts| only when far-end signal has something to + // contribute. If |far_bit_counts| is zero the far-end signal is weak and + // we likely have a poor echo condition, hence don't update. + if (self->farend->far_bit_counts[i] > 0) { + // Make number of right shifts piecewise linear w.r.t. |far_bit_counts|. + int shifts = kShiftsAtZero; + shifts -= (kShiftsLinearSlope * self->farend->far_bit_counts[i]) >> 4; + WebRtc_MeanEstimatorFix(bit_count, shifts, &(self->mean_bit_counts[i])); + } + } + + // Find |candidate_delay|, |value_best_candidate| and |value_worst_candidate| + // of |mean_bit_counts|. + for (i = 0; i < self->history_size; i++) { + if (self->mean_bit_counts[i] < value_best_candidate) { + value_best_candidate = self->mean_bit_counts[i]; + candidate_delay = i; + } + if (self->mean_bit_counts[i] > value_worst_candidate) { + value_worst_candidate = self->mean_bit_counts[i]; + } + } + valley_depth = value_worst_candidate - value_best_candidate; + + // The |value_best_candidate| is a good indicator on the probability of + // |candidate_delay| being an accurate delay (a small |value_best_candidate| + // means a good binary match). In the following sections we make a decision + // whether to update |last_delay| or not. + // 1) If the difference bit counts between the best and the worst delay + // candidates is too small we consider the situation to be unreliable and + // don't update |last_delay|. + // 2) If the situation is reliable we update |last_delay| if the value of the + // best candidate delay has a value less than + // i) an adaptive threshold |minimum_probability|, or + // ii) this corresponding value |last_delay_probability|, but updated at + // this time instant. + + // Update |minimum_probability|. + if ((self->minimum_probability > kProbabilityLowerLimit) && + (valley_depth > kProbabilityMinSpread)) { + // The "hard" threshold can't be lower than 17 (in Q9). 
+ // The valley in the curve also has to be distinct, i.e., the + // difference between |value_worst_candidate| and |value_best_candidate| has + // to be large enough. + int32_t threshold = value_best_candidate + kProbabilityOffset; + if (threshold < kProbabilityLowerLimit) { + threshold = kProbabilityLowerLimit; + } + if (self->minimum_probability > threshold) { + self->minimum_probability = threshold; + } + } + // Update |last_delay_probability|. + // We use a Markov type model, i.e., a slowly increasing level over time. + self->last_delay_probability++; + // Validate |candidate_delay|. We have a reliable instantaneous delay + // estimate if + // 1) The valley is distinct enough (|valley_depth| > |kProbabilityOffset|) + // and + // 2) The depth of the valley is deep enough + // (|value_best_candidate| < |minimum_probability|) + // and deeper than the best estimate so far + // (|value_best_candidate| < |last_delay_probability|) + valid_candidate = ((valley_depth > kProbabilityOffset) && + ((value_best_candidate < self->minimum_probability) || + (value_best_candidate < self->last_delay_probability))); + + // Check for nonstationary farend signal. + const bool non_stationary_farend = + std::any_of(self->farend->far_bit_counts, + self->farend->far_bit_counts + self->history_size, + [](int a) { return a > 0; }); + + if (non_stationary_farend) { + // Only update the validation statistics when the farend is nonstationary + // as the underlying estimates are otherwise frozen. + UpdateRobustValidationStatistics(self, candidate_delay, valley_depth, + value_best_candidate); + } + + if (self->robust_validation_enabled) { + int is_histogram_valid = HistogramBasedValidation(self, candidate_delay); + valid_candidate = RobustValidation(self, candidate_delay, valid_candidate, + is_histogram_valid); + + } + + // Only update the delay estimate when the farend is nonstationary and when + // a valid delay candidate is available. 
+ if (non_stationary_farend && valid_candidate) { + if (candidate_delay != self->last_delay) { + self->last_delay_histogram = + (self->histogram[candidate_delay] > kLastHistogramMax ? + kLastHistogramMax : self->histogram[candidate_delay]); + // Adjust the histogram if we made a change to |last_delay|, though it was + // not the most likely one according to the histogram. + if (self->histogram[candidate_delay] < + self->histogram[self->compare_delay]) { + self->histogram[self->compare_delay] = self->histogram[candidate_delay]; + } + } + self->last_delay = candidate_delay; + if (value_best_candidate < self->last_delay_probability) { + self->last_delay_probability = value_best_candidate; + } + self->compare_delay = self->last_delay; + } + + return self->last_delay; +} + +int WebRtc_binary_last_delay(BinaryDelayEstimator* self) { + RTC_DCHECK(self); + return self->last_delay; +} + +float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self) { + float quality = 0; + RTC_DCHECK(self); + + if (self->robust_validation_enabled) { + // Simply a linear function of the histogram height at delay estimate. + quality = self->histogram[self->compare_delay] / kHistogramMax; + } else { + // Note that |last_delay_probability| states how deep the minimum of the + // cost function is, so it is rather an error probability. 
+ quality = (float) (kMaxBitCountsQ9 - self->last_delay_probability) / + kMaxBitCountsQ9; + if (quality < 0) { + quality = 0; + } + } + return quality; +} + +void WebRtc_MeanEstimatorFix(int32_t new_value, + int factor, + int32_t* mean_value) { + int32_t diff = new_value - *mean_value; + + // mean_new = mean_value + ((new_value - mean_value) >> factor); + if (diff < 0) { + diff = -((-diff) >> factor); + } else { + diff = (diff >> factor); + } + *mean_value += diff; +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.h new file mode 100644 index 000000000..65c3f034b --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator.h @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Performs delay estimation on binary converted spectra. +// The return value is 0 - OK and -1 - Error, unless otherwise stated. + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_ + +#include "webrtc/typedefs.h" + +static const int32_t kMaxBitCountsQ9 = (32 << 9); // 32 matching bits in Q9. + +typedef struct { + // Pointer to bit counts. + int* far_bit_counts; + // Binary history variables. + uint32_t* binary_far_history; + int history_size; +} BinaryDelayEstimatorFarend; + +typedef struct { + // Pointer to bit counts. 
+ int32_t* mean_bit_counts; + // Array only used locally in ProcessBinarySpectrum() but whose size is + // determined at run-time. + int32_t* bit_counts; + + // Binary history variables. + uint32_t* binary_near_history; + int near_history_size; + int history_size; + + // Delay estimation variables. + int32_t minimum_probability; + int last_delay_probability; + + // Delay memory. + int last_delay; + + // Robust validation + int robust_validation_enabled; + int allowed_offset; + int last_candidate_delay; + int compare_delay; + int candidate_hits; + float* histogram; + float last_delay_histogram; + + // For dynamically changing the lookahead when using SoftReset...(). + int lookahead; + + // Far-end binary spectrum history buffer etc. + BinaryDelayEstimatorFarend* farend; +} BinaryDelayEstimator; + +// Releases the memory allocated by +// WebRtc_CreateBinaryDelayEstimatorFarend(...). +// Input: +// - self : Pointer to the binary delay estimation far-end +// instance which is the return value of +// WebRtc_CreateBinaryDelayEstimatorFarend(). +// +void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self); + +// Allocates the memory needed by the far-end part of the binary delay +// estimation. The memory needs to be initialized separately through +// WebRtc_InitBinaryDelayEstimatorFarend(...). +// +// Inputs: +// - history_size : Size of the far-end binary spectrum history. +// +// Return value: +// - BinaryDelayEstimatorFarend* +// : Created |handle|. If the memory can't be allocated +// or if any of the input parameters are invalid NULL +// is returned. +// +BinaryDelayEstimatorFarend* WebRtc_CreateBinaryDelayEstimatorFarend( + int history_size); + +// Re-allocates the buffers. +// +// Inputs: +// - self : Pointer to the binary estimation far-end instance +// which is the return value of +// WebRtc_CreateBinaryDelayEstimatorFarend(). +// - history_size : Size of the far-end binary spectrum history. 
+// +// Return value: +// - history_size : The history size allocated. +int WebRtc_AllocateFarendBufferMemory(BinaryDelayEstimatorFarend* self, + int history_size); + +// Initializes the delay estimation far-end instance created with +// WebRtc_CreateBinaryDelayEstimatorFarend(...). +// +// Input: +// - self : Pointer to the delay estimation far-end instance. +// +// Output: +// - self : Initialized far-end instance. +// +void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self); + +// Soft resets the delay estimation far-end instance created with +// WebRtc_CreateBinaryDelayEstimatorFarend(...). +// +// Input: +// - delay_shift : The amount of blocks to shift history buffers. +// +void WebRtc_SoftResetBinaryDelayEstimatorFarend( + BinaryDelayEstimatorFarend* self, int delay_shift); + +// Adds the binary far-end spectrum to the internal far-end history buffer. This +// spectrum is used as reference when calculating the delay using +// WebRtc_ProcessBinarySpectrum(). +// +// Inputs: +// - self : Pointer to the delay estimation far-end +// instance. +// - binary_far_spectrum : Far-end binary spectrum. +// +// Output: +// - self : Updated far-end instance. +// +void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* self, + uint32_t binary_far_spectrum); + +// Releases the memory allocated by WebRtc_CreateBinaryDelayEstimator(...). +// +// Note that BinaryDelayEstimator utilizes BinaryDelayEstimatorFarend, but does +// not take ownership of it, hence the BinaryDelayEstimator has to be torn down +// before the far-end. +// +// Input: +// - self : Pointer to the binary delay estimation instance +// which is the return value of +// WebRtc_CreateBinaryDelayEstimator(). +// +void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self); + +// Allocates the memory needed by the binary delay estimation. The memory needs +// to be initialized separately through WebRtc_InitBinaryDelayEstimator(...). +// +// See WebRtc_CreateDelayEstimator(..) 
in delay_estimator_wrapper.c for detailed +// description. +BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator( + BinaryDelayEstimatorFarend* farend, int max_lookahead); + +// Re-allocates |history_size| dependent buffers. The far-end buffers will be +// updated at the same time if needed. +// +// Input: +// - self : Pointer to the binary estimation instance which is +// the return value of +// WebRtc_CreateBinaryDelayEstimator(). +// - history_size : Size of the history buffers. +// +// Return value: +// - history_size : The history size allocated. +int WebRtc_AllocateHistoryBufferMemory(BinaryDelayEstimator* self, + int history_size); + +// Initializes the delay estimation instance created with +// WebRtc_CreateBinaryDelayEstimator(...). +// +// Input: +// - self : Pointer to the delay estimation instance. +// +// Output: +// - self : Initialized instance. +// +void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self); + +// Soft resets the delay estimation instance created with +// WebRtc_CreateBinaryDelayEstimator(...). +// +// Input: +// - delay_shift : The amount of blocks to shift history buffers. +// +// Return value: +// - actual_shifts : The actual number of shifts performed. +// +int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self, + int delay_shift); + +// Estimates and returns the delay between the binary far-end and binary near- +// end spectra. It is assumed the binary far-end spectrum has been added using +// WebRtc_AddBinaryFarSpectrum() prior to this call. The value will be offset by +// the lookahead (i.e. the lookahead should be subtracted from the returned +// value). +// +// Inputs: +// - self : Pointer to the delay estimation instance. +// - binary_near_spectrum : Near-end binary spectrum of the current block. +// +// Output: +// - self : Updated instance. +// +// Return value: +// - delay : >= 0 - Calculated delay value. +// -2 - Insufficient data for estimation. 
+// +int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self, + uint32_t binary_near_spectrum); + +// Returns the last calculated delay updated by the function +// WebRtc_ProcessBinarySpectrum(...). +// +// Input: +// - self : Pointer to the delay estimation instance. +// +// Return value: +// - delay : >= 0 - Last calculated delay value +// -2 - Insufficient data for estimation. +// +int WebRtc_binary_last_delay(BinaryDelayEstimator* self); + +// Returns the estimation quality of the last calculated delay updated by the +// function WebRtc_ProcessBinarySpectrum(...). The estimation quality is a value +// in the interval [0, 1]. The higher the value, the better the quality. +// +// Return value: +// - delay_quality : >= 0 - Estimation quality of last calculated +// delay value. +float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self); + +// Updates the |mean_value| recursively with a step size of 2^-|factor|. This +// function is used internally in the Binary Delay Estimator as well as the +// Fixed point wrapper. +// +// Inputs: +// - new_value : The new value the mean should be updated with. +// - factor : The step size, in number of right shifts. +// +// Input/Output: +// - mean_value : Pointer to the mean value. +// +void WebRtc_MeanEstimatorFix(int32_t new_value, + int factor, + int32_t* mean_value); + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_internal.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_internal.h new file mode 100644 index 000000000..fd11028fd --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_internal.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Header file including the delay estimator handle used for testing. + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_ + +#include "webrtc/modules/audio_processing/utility/delay_estimator.h" +#include "webrtc/typedefs.h" + +typedef union { + float float_; + int32_t int32_; +} SpectrumType; + +typedef struct { + // Pointers to mean values of spectrum. + SpectrumType* mean_far_spectrum; + // |mean_far_spectrum| initialization indicator. + int far_spectrum_initialized; + + int spectrum_size; + + // Far-end part of binary spectrum based delay estimation. + BinaryDelayEstimatorFarend* binary_farend; +} DelayEstimatorFarend; + +typedef struct { + // Pointers to mean values of spectrum. + SpectrumType* mean_near_spectrum; + // |mean_near_spectrum| initialization indicator. + int near_spectrum_initialized; + + int spectrum_size; + + // Binary spectrum based delay estimator + BinaryDelayEstimator* binary_handle; +} DelayEstimator; + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc new file mode 100644 index 000000000..2dd092ce2 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.cc @@ -0,0 +1,486 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h" + +#include +#include + +#include "webrtc/base/checks.h" +#include "webrtc/modules/audio_processing/utility/delay_estimator.h" +#include "webrtc/modules/audio_processing/utility/delay_estimator_internal.h" + +// Only bit |kBandFirst| through bit |kBandLast| are processed and +// |kBandFirst| - |kBandLast| must be < 32. +enum { kBandFirst = 12 }; +enum { kBandLast = 43 }; + +static __inline uint32_t SetBit(uint32_t in, int pos) { + uint32_t mask = (1 << pos); + uint32_t out = (in | mask); + + return out; +} + +// Calculates the mean recursively. Same version as WebRtc_MeanEstimatorFix(), +// but for float. +// +// Inputs: +// - new_value : New additional value. +// - scale : Scale for smoothing (should be less than 1.0). +// +// Input/Output: +// - mean_value : Pointer to the mean value for updating. +// +static void MeanEstimatorFloat(float new_value, + float scale, + float* mean_value) { + RTC_DCHECK_LT(scale, 1.0f); + *mean_value += (new_value - *mean_value) * scale; +} + +// Computes the binary spectrum by comparing the input |spectrum| with a +// |threshold_spectrum|. Float and fixed point versions. +// +// Inputs: +// - spectrum : Spectrum of which the binary spectrum should be +// calculated. +// - threshold_spectrum : Threshold spectrum with which the input +// spectrum is compared. +// Return: +// - out : Binary spectrum. 
+// +static uint32_t BinarySpectrumFix(const uint16_t* spectrum, + SpectrumType* threshold_spectrum, + int q_domain, + int* threshold_initialized) { + int i = kBandFirst; + uint32_t out = 0; + + RTC_DCHECK_LT(q_domain, 16); + + if (!(*threshold_initialized)) { + // Set the |threshold_spectrum| to half the input |spectrum| as starting + // value. This speeds up the convergence. + for (i = kBandFirst; i <= kBandLast; i++) { + if (spectrum[i] > 0) { + // Convert input spectrum from Q(|q_domain|) to Q15. + int32_t spectrum_q15 = ((int32_t) spectrum[i]) << (15 - q_domain); + threshold_spectrum[i].int32_ = (spectrum_q15 >> 1); + *threshold_initialized = 1; + } + } + } + for (i = kBandFirst; i <= kBandLast; i++) { + // Convert input spectrum from Q(|q_domain|) to Q15. + int32_t spectrum_q15 = ((int32_t) spectrum[i]) << (15 - q_domain); + // Update the |threshold_spectrum|. + WebRtc_MeanEstimatorFix(spectrum_q15, 6, &(threshold_spectrum[i].int32_)); + // Convert |spectrum| at current frequency bin to a binary value. + if (spectrum_q15 > threshold_spectrum[i].int32_) { + out = SetBit(out, i - kBandFirst); + } + } + + return out; +} + +static uint32_t BinarySpectrumFloat(const float* spectrum, + SpectrumType* threshold_spectrum, + int* threshold_initialized) { + int i = kBandFirst; + uint32_t out = 0; + const float kScale = 1 / 64.0; + + if (!(*threshold_initialized)) { + // Set the |threshold_spectrum| to half the input |spectrum| as starting + // value. This speeds up the convergence. + for (i = kBandFirst; i <= kBandLast; i++) { + if (spectrum[i] > 0.0f) { + threshold_spectrum[i].float_ = (spectrum[i] / 2); + *threshold_initialized = 1; + } + } + } + + for (i = kBandFirst; i <= kBandLast; i++) { + // Update the |threshold_spectrum|. + MeanEstimatorFloat(spectrum[i], kScale, &(threshold_spectrum[i].float_)); + // Convert |spectrum| at current frequency bin to a binary value. 
+ if (spectrum[i] > threshold_spectrum[i].float_) { + out = SetBit(out, i - kBandFirst); + } + } + + return out; +} + +void WebRtc_FreeDelayEstimatorFarend(void* handle) { + DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle; + + if (handle == NULL) { + return; + } + + free(self->mean_far_spectrum); + self->mean_far_spectrum = NULL; + + WebRtc_FreeBinaryDelayEstimatorFarend(self->binary_farend); + self->binary_farend = NULL; + + free(self); +} + +void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size) { + DelayEstimatorFarend* self = NULL; + + // Check if the sub band used in the delay estimation is small enough to fit + // the binary spectra in a uint32_t. + static_assert(kBandLast - kBandFirst < 32, ""); + + if (spectrum_size >= kBandLast) { + self = static_cast( + malloc(sizeof(DelayEstimatorFarend))); + } + + if (self != NULL) { + int memory_fail = 0; + + // Allocate memory for the binary far-end spectrum handling. + self->binary_farend = WebRtc_CreateBinaryDelayEstimatorFarend(history_size); + memory_fail |= (self->binary_farend == NULL); + + // Allocate memory for spectrum buffers. + self->mean_far_spectrum = + static_cast(malloc(spectrum_size * sizeof(SpectrumType))); + memory_fail |= (self->mean_far_spectrum == NULL); + + self->spectrum_size = spectrum_size; + + if (memory_fail) { + WebRtc_FreeDelayEstimatorFarend(self); + self = NULL; + } + } + + return self; +} + +int WebRtc_InitDelayEstimatorFarend(void* handle) { + DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle; + + if (self == NULL) { + return -1; + } + + // Initialize far-end part of binary delay estimator. + WebRtc_InitBinaryDelayEstimatorFarend(self->binary_farend); + + // Set averaged far and near end spectra to zero. + memset(self->mean_far_spectrum, 0, + sizeof(SpectrumType) * self->spectrum_size); + // Reset initialization indicators. 
+ self->far_spectrum_initialized = 0; + + return 0; +} + +void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift) { + DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle; + RTC_DCHECK(self); + WebRtc_SoftResetBinaryDelayEstimatorFarend(self->binary_farend, delay_shift); +} + +int WebRtc_AddFarSpectrumFix(void* handle, + const uint16_t* far_spectrum, + int spectrum_size, + int far_q) { + DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle; + uint32_t binary_spectrum = 0; + + if (self == NULL) { + return -1; + } + if (far_spectrum == NULL) { + // Empty far end spectrum. + return -1; + } + if (spectrum_size != self->spectrum_size) { + // Data sizes don't match. + return -1; + } + if (far_q > 15) { + // If |far_q| is larger than 15 we cannot guarantee no wrap around. + return -1; + } + + // Get binary spectrum. + binary_spectrum = BinarySpectrumFix(far_spectrum, self->mean_far_spectrum, + far_q, &(self->far_spectrum_initialized)); + WebRtc_AddBinaryFarSpectrum(self->binary_farend, binary_spectrum); + + return 0; +} + +int WebRtc_AddFarSpectrumFloat(void* handle, + const float* far_spectrum, + int spectrum_size) { + DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle; + uint32_t binary_spectrum = 0; + + if (self == NULL) { + return -1; + } + if (far_spectrum == NULL) { + // Empty far end spectrum. + return -1; + } + if (spectrum_size != self->spectrum_size) { + // Data sizes don't match. + return -1; + } + + // Get binary spectrum. 
+ binary_spectrum = BinarySpectrumFloat(far_spectrum, self->mean_far_spectrum, + &(self->far_spectrum_initialized)); + WebRtc_AddBinaryFarSpectrum(self->binary_farend, binary_spectrum); + + return 0; +} + +void WebRtc_FreeDelayEstimator(void* handle) { + DelayEstimator* self = (DelayEstimator*) handle; + + if (handle == NULL) { + return; + } + + free(self->mean_near_spectrum); + self->mean_near_spectrum = NULL; + + WebRtc_FreeBinaryDelayEstimator(self->binary_handle); + self->binary_handle = NULL; + + free(self); +} + +void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead) { + DelayEstimator* self = NULL; + DelayEstimatorFarend* farend = (DelayEstimatorFarend*) farend_handle; + + if (farend_handle != NULL) { + self = static_cast(malloc(sizeof(DelayEstimator))); + } + + if (self != NULL) { + int memory_fail = 0; + + // Allocate memory for the farend spectrum handling. + self->binary_handle = + WebRtc_CreateBinaryDelayEstimator(farend->binary_farend, max_lookahead); + memory_fail |= (self->binary_handle == NULL); + + // Allocate memory for spectrum buffers. + self->mean_near_spectrum = static_cast( + malloc(farend->spectrum_size * sizeof(SpectrumType))); + memory_fail |= (self->mean_near_spectrum == NULL); + + self->spectrum_size = farend->spectrum_size; + + if (memory_fail) { + WebRtc_FreeDelayEstimator(self); + self = NULL; + } + } + + return self; +} + +int WebRtc_InitDelayEstimator(void* handle) { + DelayEstimator* self = (DelayEstimator*) handle; + + if (self == NULL) { + return -1; + } + + // Initialize binary delay estimator. + WebRtc_InitBinaryDelayEstimator(self->binary_handle); + + // Set averaged far and near end spectra to zero. + memset(self->mean_near_spectrum, 0, + sizeof(SpectrumType) * self->spectrum_size); + // Reset initialization indicators. 
+ self->near_spectrum_initialized = 0; + + return 0; +} + +int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift) { + DelayEstimator* self = (DelayEstimator*) handle; + RTC_DCHECK(self); + return WebRtc_SoftResetBinaryDelayEstimator(self->binary_handle, delay_shift); +} + +int WebRtc_set_history_size(void* handle, int history_size) { + DelayEstimator* self = static_cast(handle); + + if ((self == NULL) || (history_size <= 1)) { + return -1; + } + return WebRtc_AllocateHistoryBufferMemory(self->binary_handle, history_size); +} + +int WebRtc_history_size(const void* handle) { + const DelayEstimator* self = static_cast(handle); + + if (self == NULL) { + return -1; + } + if (self->binary_handle->farend->history_size != + self->binary_handle->history_size) { + // Non matching history sizes. + return -1; + } + return self->binary_handle->history_size; +} + +int WebRtc_set_lookahead(void* handle, int lookahead) { + DelayEstimator* self = (DelayEstimator*) handle; + RTC_DCHECK(self); + RTC_DCHECK(self->binary_handle); + if ((lookahead > self->binary_handle->near_history_size - 1) || + (lookahead < 0)) { + return -1; + } + self->binary_handle->lookahead = lookahead; + return self->binary_handle->lookahead; +} + +int WebRtc_lookahead(void* handle) { + DelayEstimator* self = (DelayEstimator*) handle; + RTC_DCHECK(self); + RTC_DCHECK(self->binary_handle); + return self->binary_handle->lookahead; +} + +int WebRtc_set_allowed_offset(void* handle, int allowed_offset) { + DelayEstimator* self = (DelayEstimator*) handle; + + if ((self == NULL) || (allowed_offset < 0)) { + return -1; + } + self->binary_handle->allowed_offset = allowed_offset; + return 0; +} + +int WebRtc_get_allowed_offset(const void* handle) { + const DelayEstimator* self = (const DelayEstimator*) handle; + + if (self == NULL) { + return -1; + } + return self->binary_handle->allowed_offset; +} + +int WebRtc_enable_robust_validation(void* handle, int enable) { + DelayEstimator* self = (DelayEstimator*) 
handle; + + if (self == NULL) { + return -1; + } + if ((enable < 0) || (enable > 1)) { + return -1; + } + RTC_DCHECK(self->binary_handle); + self->binary_handle->robust_validation_enabled = enable; + return 0; +} + +int WebRtc_is_robust_validation_enabled(const void* handle) { + const DelayEstimator* self = (const DelayEstimator*) handle; + + if (self == NULL) { + return -1; + } + return self->binary_handle->robust_validation_enabled; +} + +int WebRtc_DelayEstimatorProcessFix(void* handle, + const uint16_t* near_spectrum, + int spectrum_size, + int near_q) { + DelayEstimator* self = (DelayEstimator*) handle; + uint32_t binary_spectrum = 0; + + if (self == NULL) { + return -1; + } + if (near_spectrum == NULL) { + // Empty near end spectrum. + return -1; + } + if (spectrum_size != self->spectrum_size) { + // Data sizes don't match. + return -1; + } + if (near_q > 15) { + // If |near_q| is larger than 15 we cannot guarantee no wrap around. + return -1; + } + + // Get binary spectra. + binary_spectrum = BinarySpectrumFix(near_spectrum, + self->mean_near_spectrum, + near_q, + &(self->near_spectrum_initialized)); + + return WebRtc_ProcessBinarySpectrum(self->binary_handle, binary_spectrum); +} + +int WebRtc_DelayEstimatorProcessFloat(void* handle, + const float* near_spectrum, + int spectrum_size) { + DelayEstimator* self = (DelayEstimator*) handle; + uint32_t binary_spectrum = 0; + + if (self == NULL) { + return -1; + } + if (near_spectrum == NULL) { + // Empty near end spectrum. + return -1; + } + if (spectrum_size != self->spectrum_size) { + // Data sizes don't match. + return -1; + } + + // Get binary spectrum. 
+ binary_spectrum = BinarySpectrumFloat(near_spectrum, self->mean_near_spectrum, + &(self->near_spectrum_initialized)); + + return WebRtc_ProcessBinarySpectrum(self->binary_handle, binary_spectrum); +} + +int WebRtc_last_delay(void* handle) { + DelayEstimator* self = (DelayEstimator*) handle; + + if (self == NULL) { + return -1; + } + + return WebRtc_binary_last_delay(self->binary_handle); +} + +float WebRtc_last_delay_quality(void* handle) { + DelayEstimator* self = (DelayEstimator*) handle; + RTC_DCHECK(self); + return WebRtc_binary_last_delay_quality(self->binary_handle); +} diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h new file mode 100644 index 000000000..fdadebeb3 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Performs delay estimation on block by block basis. +// The return value is 0 - OK and -1 - Error, unless otherwise stated. + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_ + +#include "webrtc/typedefs.h" + +// Releases the memory allocated by WebRtc_CreateDelayEstimatorFarend(...) +void WebRtc_FreeDelayEstimatorFarend(void* handle); + +// Allocates the memory needed by the far-end part of the delay estimation. 
The +// memory needs to be initialized separately through +// WebRtc_InitDelayEstimatorFarend(...). +// +// Inputs: +// - spectrum_size : Size of the spectrum used both in far-end and +// near-end. Used to allocate memory for spectrum +// specific buffers. +// - history_size : The far-end history buffer size. A change in buffer +// size can be forced with WebRtc_set_history_size(). +// Note that the maximum delay which can be estimated is +// determined together with WebRtc_set_lookahead(). +// +// Return value: +// - void* : Created |handle|. If the memory can't be allocated or +// if any of the input parameters are invalid NULL is +// returned. +void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size); + +// Initializes the far-end part of the delay estimation instance returned by +// WebRtc_CreateDelayEstimatorFarend(...) +int WebRtc_InitDelayEstimatorFarend(void* handle); + +// Soft resets the far-end part of the delay estimation instance returned by +// WebRtc_CreateDelayEstimatorFarend(...). +// Input: +// - delay_shift : The amount of blocks to shift history buffers. +void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift); + +// Adds the far-end spectrum to the far-end history buffer. This spectrum is +// used as reference when calculating the delay using +// WebRtc_ProcessSpectrum(). +// +// Inputs: +// - far_spectrum : Far-end spectrum. +// - spectrum_size : The size of the data arrays (same for both far- and +// near-end). +// - far_q : The Q-domain of the far-end data. +// +// Output: +// - handle : Updated far-end instance. +// +int WebRtc_AddFarSpectrumFix(void* handle, + const uint16_t* far_spectrum, + int spectrum_size, + int far_q); + +// See WebRtc_AddFarSpectrumFix() for description. +int WebRtc_AddFarSpectrumFloat(void* handle, + const float* far_spectrum, + int spectrum_size); + +// Releases the memory allocated by WebRtc_CreateDelayEstimator(...) 
+void WebRtc_FreeDelayEstimator(void* handle); + +// Allocates the memory needed by the delay estimation. The memory needs to be +// initialized separately through WebRtc_InitDelayEstimator(...). +// +// Inputs: +// - farend_handle : Pointer to the far-end part of the delay estimation +// instance created prior to this call using +// WebRtc_CreateDelayEstimatorFarend(). +// +// Note that WebRtc_CreateDelayEstimator does not take +// ownership of |farend_handle|, which has to be torn +// down properly after this instance. +// +// - max_lookahead : Maximum amount of non-causal lookahead allowed. The +// actual amount of lookahead used can be controlled by +// WebRtc_set_lookahead(...). The default |lookahead| is +// set to |max_lookahead| at create time. Use +// WebRtc_set_lookahead(...) before start if a different +// value is desired. +// +// Using lookahead can detect cases in which a near-end +// signal occurs before the corresponding far-end signal. +// It will delay the estimate for the current block by an +// equal amount, and the returned values will be offset +// by it. +// +// A value of zero is the typical no-lookahead case. +// This also represents the minimum delay which can be +// estimated. +// +// Note that the effective range of delay estimates is +// [-|lookahead|,... ,|history_size|-|lookahead|) +// where |history_size| is set through +// WebRtc_set_history_size(). +// +// Return value: +// - void* : Created |handle|. If the memory can't be allocated or +// if any of the input parameters are invalid NULL is +// returned. +void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead); + +// Initializes the delay estimation instance returned by +// WebRtc_CreateDelayEstimator(...) +int WebRtc_InitDelayEstimator(void* handle); + +// Soft resets the delay estimation instance returned by +// WebRtc_CreateDelayEstimator(...) +// Input: +// - delay_shift : The amount of blocks to shift history buffers. 
+// +// Return value: +// - actual_shifts : The actual number of shifts performed. +int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift); + +// Sets the effective |history_size| used. Valid values from 2. We simply need +// at least two delays to compare to perform an estimate. If |history_size| is +// changed, buffers are reallocated filling in with zeros if necessary. +// Note that changing the |history_size| affects both buffers in far-end and +// near-end. Hence it is important to change all DelayEstimators that use the +// same reference far-end, to the same |history_size| value. +// Inputs: +// - handle : Pointer to the delay estimation instance. +// - history_size : Effective history size to be used. +// Return value: +// - new_history_size : The new history size used. If the memory was not able +// to be allocated 0 is returned. +int WebRtc_set_history_size(void* handle, int history_size); + +// Returns the history_size currently used. +// Input: +// - handle : Pointer to the delay estimation instance. +int WebRtc_history_size(const void* handle); + +// Sets the amount of |lookahead| to use. Valid values are [0, max_lookahead] +// where |max_lookahead| was set at create time through +// WebRtc_CreateDelayEstimator(...). +// +// Input: +// - handle : Pointer to the delay estimation instance. +// - lookahead : The amount of lookahead to be used. +// +// Return value: +// - new_lookahead : The actual amount of lookahead set, unless |handle| is +// a NULL pointer or |lookahead| is invalid, for which an +// error is returned. +int WebRtc_set_lookahead(void* handle, int lookahead); + +// Returns the amount of lookahead we currently use. +// Input: +// - handle : Pointer to the delay estimation instance. +int WebRtc_lookahead(void* handle); + +// Sets the |allowed_offset| used in the robust validation scheme. If the +// delay estimator is used in an echo control component, this parameter is +// related to the filter length. 
In principle |allowed_offset| should be set to +// the echo control filter length minus the expected echo duration, i.e., the +// delay offset the echo control can handle without quality regression. The +// default value, used if not set manually, is zero. Note that |allowed_offset| +// has to be non-negative. +// Inputs: +// - handle : Pointer to the delay estimation instance. +// - allowed_offset : The amount of delay offset, measured in partitions, +// the echo control filter can handle. +int WebRtc_set_allowed_offset(void* handle, int allowed_offset); + +// Returns the |allowed_offset| in number of partitions. +int WebRtc_get_allowed_offset(const void* handle); + +// Enables/Disables a robust validation functionality in the delay estimation. +// This is by default set to disabled at create time. The state is preserved +// over a reset. +// Inputs: +// - handle : Pointer to the delay estimation instance. +// - enable : Enable (1) or disable (0) this feature. +int WebRtc_enable_robust_validation(void* handle, int enable); + +// Returns 1 if robust validation is enabled and 0 if disabled. +int WebRtc_is_robust_validation_enabled(const void* handle); + +// Estimates and returns the delay between the far-end and near-end blocks. The +// value will be offset by the lookahead (i.e. the lookahead should be +// subtracted from the returned value). +// Inputs: +// - handle : Pointer to the delay estimation instance. +// - near_spectrum : Pointer to the near-end spectrum data of the current +// block. +// - spectrum_size : The size of the data arrays (same for both far- and +// near-end). +// - near_q : The Q-domain of the near-end data. +// +// Output: +// - handle : Updated instance. +// +// Return value: +// - delay : >= 0 - Calculated delay value. +// -1 - Error. +// -2 - Insufficient data for estimation. 
+int WebRtc_DelayEstimatorProcessFix(void* handle, + const uint16_t* near_spectrum, + int spectrum_size, + int near_q); + +// See WebRtc_DelayEstimatorProcessFix() for description. +int WebRtc_DelayEstimatorProcessFloat(void* handle, + const float* near_spectrum, + int spectrum_size); + +// Returns the last calculated delay updated by the function +// WebRtc_DelayEstimatorProcess(...). +// +// Input: +// - handle : Pointer to the delay estimation instance. +// +// Return value: +// - delay : >= 0 - Last calculated delay value. +// -1 - Error. +// -2 - Insufficient data for estimation. +int WebRtc_last_delay(void* handle); + +// Returns the estimation quality/probability of the last calculated delay +// updated by the function WebRtc_DelayEstimatorProcess(...). The estimation +// quality is a value in the interval [0, 1]. The higher the value, the better +// the quality. +// +// Return value: +// - delay_quality : >= 0 - Estimation quality of last calculated delay. +float WebRtc_last_delay_quality(void* handle); + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.cc new file mode 100644 index 000000000..30f203480 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.cc @@ -0,0 +1,543 @@ +/* + * http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html + * Copyright Takuya OOURA, 1996-2001 + * + * You may use, copy, modify and distribute this code for any purpose (include + * commercial use) and without fee. Please refer to this package when you modify + * this code. + * + * Changes by the WebRTC authors: + * - Trivial type modifications. + * - Minimal code subset to do rdft of length 128. + * - Optimizations because of known length. 
+ * - Removed the global variables by moving the code in to a class in order + * to make it thread safe. + * + * All changes are covered by the WebRTC license and IP grant: + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "webrtc/modules/audio_processing//utility/ooura_fft.h" + +#include + +#include "webrtc/modules/audio_processing/utility/ooura_fft_tables_common.h" +#include "webrtc/system_wrappers/include/cpu_features_wrapper.h" +#include "webrtc/typedefs.h" + +namespace webrtc { + +namespace { + +#if !(defined(MIPS_FPU_LE) || defined(WEBRTC_HAS_NEON)) +static void cft1st_128_C(float* a) { + const int n = 128; + int j, k1, k2; + float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + // The processing of the first set of elements was simplified in C to avoid + // some operations (multiplication by zero or one, addition of two elements + // multiplied by the same weight, ...). 
+ x0r = a[0] + a[2]; + x0i = a[1] + a[3]; + x1r = a[0] - a[2]; + x1i = a[1] - a[3]; + x2r = a[4] + a[6]; + x2i = a[5] + a[7]; + x3r = a[4] - a[6]; + x3i = a[5] - a[7]; + a[0] = x0r + x2r; + a[1] = x0i + x2i; + a[4] = x0r - x2r; + a[5] = x0i - x2i; + a[2] = x1r - x3i; + a[3] = x1i + x3r; + a[6] = x1r + x3i; + a[7] = x1i - x3r; + wk1r = rdft_w[2]; + x0r = a[8] + a[10]; + x0i = a[9] + a[11]; + x1r = a[8] - a[10]; + x1i = a[9] - a[11]; + x2r = a[12] + a[14]; + x2i = a[13] + a[15]; + x3r = a[12] - a[14]; + x3i = a[13] - a[15]; + a[8] = x0r + x2r; + a[9] = x0i + x2i; + a[12] = x2i - x0i; + a[13] = x0r - x2r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[10] = wk1r * (x0r - x0i); + a[11] = wk1r * (x0r + x0i); + x0r = x3i + x1r; + x0i = x3r - x1i; + a[14] = wk1r * (x0i - x0r); + a[15] = wk1r * (x0i + x0r); + k1 = 0; + for (j = 16; j < n; j += 16) { + k1 += 2; + k2 = 2 * k1; + wk2r = rdft_w[k1 + 0]; + wk2i = rdft_w[k1 + 1]; + wk1r = rdft_w[k2 + 0]; + wk1i = rdft_w[k2 + 1]; + wk3r = rdft_wk3ri_first[k1 + 0]; + wk3i = rdft_wk3ri_first[k1 + 1]; + x0r = a[j + 0] + a[j + 2]; + x0i = a[j + 1] + a[j + 3]; + x1r = a[j + 0] - a[j + 2]; + x1i = a[j + 1] - a[j + 3]; + x2r = a[j + 4] + a[j + 6]; + x2i = a[j + 5] + a[j + 7]; + x3r = a[j + 4] - a[j + 6]; + x3i = a[j + 5] - a[j + 7]; + a[j + 0] = x0r + x2r; + a[j + 1] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j + 4] = wk2r * x0r - wk2i * x0i; + a[j + 5] = wk2r * x0i + wk2i * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j + 2] = wk1r * x0r - wk1i * x0i; + a[j + 3] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j + 6] = wk3r * x0r - wk3i * x0i; + a[j + 7] = wk3r * x0i + wk3i * x0r; + wk1r = rdft_w[k2 + 2]; + wk1i = rdft_w[k2 + 3]; + wk3r = rdft_wk3ri_second[k1 + 0]; + wk3i = rdft_wk3ri_second[k1 + 1]; + x0r = a[j + 8] + a[j + 10]; + x0i = a[j + 9] + a[j + 11]; + x1r = a[j + 8] - a[j + 10]; + x1i = a[j + 9] - a[j + 11]; + x2r = a[j + 12] + a[j + 14]; + x2i = a[j + 13] + a[j + 15]; + x3r = a[j + 12] - a[j + 14]; + x3i = 
a[j + 13] - a[j + 15]; + a[j + 8] = x0r + x2r; + a[j + 9] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j + 12] = -wk2i * x0r - wk2r * x0i; + a[j + 13] = -wk2i * x0i + wk2r * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j + 10] = wk1r * x0r - wk1i * x0i; + a[j + 11] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j + 14] = wk3r * x0r - wk3i * x0i; + a[j + 15] = wk3r * x0i + wk3i * x0r; + } +} + +static void cftmdl_128_C(float* a) { + const int l = 8; + const int n = 128; + const int m = 32; + int j0, j1, j2, j3, k, k1, k2, m2; + float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + for (j0 = 0; j0 < l; j0 += 2) { + j1 = j0 + 8; + j2 = j0 + 16; + j3 = j0 + 24; + x0r = a[j0 + 0] + a[j1 + 0]; + x0i = a[j0 + 1] + a[j1 + 1]; + x1r = a[j0 + 0] - a[j1 + 0]; + x1i = a[j0 + 1] - a[j1 + 1]; + x2r = a[j2 + 0] + a[j3 + 0]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2 + 0] - a[j3 + 0]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j0 + 0] = x0r + x2r; + a[j0 + 1] = x0i + x2i; + a[j2 + 0] = x0r - x2r; + a[j2 + 1] = x0i - x2i; + a[j1 + 0] = x1r - x3i; + a[j1 + 1] = x1i + x3r; + a[j3 + 0] = x1r + x3i; + a[j3 + 1] = x1i - x3r; + } + wk1r = rdft_w[2]; + for (j0 = m; j0 < l + m; j0 += 2) { + j1 = j0 + 8; + j2 = j0 + 16; + j3 = j0 + 24; + x0r = a[j0 + 0] + a[j1 + 0]; + x0i = a[j0 + 1] + a[j1 + 1]; + x1r = a[j0 + 0] - a[j1 + 0]; + x1i = a[j0 + 1] - a[j1 + 1]; + x2r = a[j2 + 0] + a[j3 + 0]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2 + 0] - a[j3 + 0]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j0 + 0] = x0r + x2r; + a[j0 + 1] = x0i + x2i; + a[j2 + 0] = x2i - x0i; + a[j2 + 1] = x0r - x2r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j1 + 0] = wk1r * (x0r - x0i); + a[j1 + 1] = wk1r * (x0r + x0i); + x0r = x3i + x1r; + x0i = x3r - x1i; + a[j3 + 0] = wk1r * (x0i - x0r); + a[j3 + 1] = wk1r * (x0i + x0r); + } + k1 = 0; + m2 = 2 * m; + for (k = m2; k < n; k += m2) { + k1 += 2; + k2 = 2 * k1; + wk2r = rdft_w[k1 + 0]; + wk2i = rdft_w[k1 + 1]; + wk1r = 
rdft_w[k2 + 0]; + wk1i = rdft_w[k2 + 1]; + wk3r = rdft_wk3ri_first[k1 + 0]; + wk3i = rdft_wk3ri_first[k1 + 1]; + for (j0 = k; j0 < l + k; j0 += 2) { + j1 = j0 + 8; + j2 = j0 + 16; + j3 = j0 + 24; + x0r = a[j0 + 0] + a[j1 + 0]; + x0i = a[j0 + 1] + a[j1 + 1]; + x1r = a[j0 + 0] - a[j1 + 0]; + x1i = a[j0 + 1] - a[j1 + 1]; + x2r = a[j2 + 0] + a[j3 + 0]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2 + 0] - a[j3 + 0]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j0 + 0] = x0r + x2r; + a[j0 + 1] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j2 + 0] = wk2r * x0r - wk2i * x0i; + a[j2 + 1] = wk2r * x0i + wk2i * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j1 + 0] = wk1r * x0r - wk1i * x0i; + a[j1 + 1] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j3 + 0] = wk3r * x0r - wk3i * x0i; + a[j3 + 1] = wk3r * x0i + wk3i * x0r; + } + wk1r = rdft_w[k2 + 2]; + wk1i = rdft_w[k2 + 3]; + wk3r = rdft_wk3ri_second[k1 + 0]; + wk3i = rdft_wk3ri_second[k1 + 1]; + for (j0 = k + m; j0 < l + (k + m); j0 += 2) { + j1 = j0 + 8; + j2 = j0 + 16; + j3 = j0 + 24; + x0r = a[j0 + 0] + a[j1 + 0]; + x0i = a[j0 + 1] + a[j1 + 1]; + x1r = a[j0 + 0] - a[j1 + 0]; + x1i = a[j0 + 1] - a[j1 + 1]; + x2r = a[j2 + 0] + a[j3 + 0]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2 + 0] - a[j3 + 0]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j0 + 0] = x0r + x2r; + a[j0 + 1] = x0i + x2i; + x0r -= x2r; + x0i -= x2i; + a[j2 + 0] = -wk2i * x0r - wk2r * x0i; + a[j2 + 1] = -wk2i * x0i + wk2r * x0r; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[j1 + 0] = wk1r * x0r - wk1i * x0i; + a[j1 + 1] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[j3 + 0] = wk3r * x0r - wk3i * x0i; + a[j3 + 1] = wk3r * x0i + wk3i * x0r; + } + } +} + +static void rftfsub_128_C(float* a) { + const float* c = rdft_w + 32; + int j1, j2, k1, k2; + float wkr, wki, xr, xi, yr, yi; + + for (j1 = 1, j2 = 2; j2 < 64; j1 += 1, j2 += 2) { + k2 = 128 - j2; + k1 = 32 - j1; + wkr = 0.5f - c[k1]; + wki = c[j1]; + xr = a[j2 + 0] - a[k2 + 0]; + xi = a[j2 + 
1] + a[k2 + 1]; + yr = wkr * xr - wki * xi; + yi = wkr * xi + wki * xr; + a[j2 + 0] -= yr; + a[j2 + 1] -= yi; + a[k2 + 0] += yr; + a[k2 + 1] -= yi; + } +} + +static void rftbsub_128_C(float* a) { + const float* c = rdft_w + 32; + int j1, j2, k1, k2; + float wkr, wki, xr, xi, yr, yi; + + a[1] = -a[1]; + for (j1 = 1, j2 = 2; j2 < 64; j1 += 1, j2 += 2) { + k2 = 128 - j2; + k1 = 32 - j1; + wkr = 0.5f - c[k1]; + wki = c[j1]; + xr = a[j2 + 0] - a[k2 + 0]; + xi = a[j2 + 1] + a[k2 + 1]; + yr = wkr * xr + wki * xi; + yi = wkr * xi - wki * xr; + a[j2 + 0] = a[j2 + 0] - yr; + a[j2 + 1] = yi - a[j2 + 1]; + a[k2 + 0] = yr + a[k2 + 0]; + a[k2 + 1] = yi - a[k2 + 1]; + } + a[65] = -a[65]; +} +#endif + + +} // namespace + +OouraFft::OouraFft() { +#if defined(WEBRTC_ARCH_X86_FAMILY) + use_sse2_ = (WebRtc_GetCPUInfo(kSSE2) != 0); +#else + use_sse2_ = false; +#endif +} + +OouraFft::~OouraFft() = default; + +void OouraFft::Fft(float* a) const { + float xi; + bitrv2_128(a); + cftfsub_128(a); + rftfsub_128(a); + xi = a[0] - a[1]; + a[0] += a[1]; + a[1] = xi; +} +void OouraFft::InverseFft(float* a) const { + a[1] = 0.5f * (a[0] - a[1]); + a[0] -= a[1]; + rftbsub_128(a); + bitrv2_128(a); + cftbsub_128(a); +} + +void OouraFft::cft1st_128(float* a) const { +#if defined(MIPS_FPU_LE) + cft1st_128_mips(a); +#elif defined(WEBRTC_HAS_NEON) + cft1st_128_neon(a); +#elif defined(WEBRTC_ARCH_X86_FAMILY) + if (use_sse2_) { + cft1st_128_SSE2(a); + } else { + cft1st_128_C(a); + } +#else + cft1st_128_C(a); +#endif +} +void OouraFft::cftmdl_128(float* a) const { +#if defined(MIPS_FPU_LE) + cftmdl_128_mips(a); +#elif defined(WEBRTC_HAS_NEON) + cftmdl_128_neon(a); +#elif defined(WEBRTC_ARCH_X86_FAMILY) + if (use_sse2_) { + cftmdl_128_SSE2(a); + } else { + cftmdl_128_C(a); + } +#else + cftmdl_128_C(a); +#endif +} +void OouraFft::rftfsub_128(float* a) const { +#if defined(MIPS_FPU_LE) + rftfsub_128_mips(a); +#elif defined(WEBRTC_HAS_NEON) + rftfsub_128_neon(a); +#elif defined(WEBRTC_ARCH_X86_FAMILY) + if 
(use_sse2_) { + rftfsub_128_SSE2(a); + } else { + rftfsub_128_C(a); + } +#else + rftfsub_128_C(a); +#endif +} + +void OouraFft::rftbsub_128(float* a) const { +#if defined(MIPS_FPU_LE) + rftbsub_128_mips(a); +#elif defined(WEBRTC_HAS_NEON) + rftbsub_128_neon(a); +#elif defined(WEBRTC_ARCH_X86_FAMILY) + if (use_sse2_) { + rftbsub_128_SSE2(a); + } else { + rftbsub_128_C(a); + } +#else + rftbsub_128_C(a); +#endif +} + +void OouraFft::cftbsub_128(float* a) const { + int j, j1, j2, j3, l; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + cft1st_128(a); + cftmdl_128(a); + l = 32; + + for (j = 0; j < l; j += 2) { + j1 = j + l; + j2 = j1 + l; + j3 = j2 + l; + x0r = a[j] + a[j1]; + x0i = -a[j + 1] - a[j1 + 1]; + x1r = a[j] - a[j1]; + x1i = -a[j + 1] + a[j1 + 1]; + x2r = a[j2] + a[j3]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2] - a[j3]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j] = x0r + x2r; + a[j + 1] = x0i - x2i; + a[j2] = x0r - x2r; + a[j2 + 1] = x0i + x2i; + a[j1] = x1r - x3i; + a[j1 + 1] = x1i - x3r; + a[j3] = x1r + x3i; + a[j3 + 1] = x1i + x3r; + } +} + +void OouraFft::cftfsub_128(float* a) const { + int j, j1, j2, j3, l; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + cft1st_128(a); + cftmdl_128(a); + l = 32; + for (j = 0; j < l; j += 2) { + j1 = j + l; + j2 = j1 + l; + j3 = j2 + l; + x0r = a[j] + a[j1]; + x0i = a[j + 1] + a[j1 + 1]; + x1r = a[j] - a[j1]; + x1i = a[j + 1] - a[j1 + 1]; + x2r = a[j2] + a[j3]; + x2i = a[j2 + 1] + a[j3 + 1]; + x3r = a[j2] - a[j3]; + x3i = a[j2 + 1] - a[j3 + 1]; + a[j] = x0r + x2r; + a[j + 1] = x0i + x2i; + a[j2] = x0r - x2r; + a[j2 + 1] = x0i - x2i; + a[j1] = x1r - x3i; + a[j1 + 1] = x1i + x3r; + a[j3] = x1r + x3i; + a[j3 + 1] = x1i - x3r; + } +} + +void OouraFft::bitrv2_128(float* a) const { + /* + Following things have been attempted but are no faster: + (a) Storing the swap indexes in a LUT (index calculations are done + for 'free' while waiting on memory/L1). 
+ (b) Consolidate the load/store of two consecutive floats by a 64 bit + integer (execution is memory/L1 bound). + (c) Do a mix of floats and 64 bit integer to maximize register + utilization (execution is memory/L1 bound). + (d) Replacing ip[i] by ((k<<31)>>25) + ((k >> 1)<<5). + (e) Hard-coding of the offsets to completely eliminates index + calculations. + */ + + unsigned int j, j1, k, k1; + float xr, xi, yr, yi; + + const int ip[4] = {0, 64, 32, 96}; + for (k = 0; k < 4; k++) { + for (j = 0; j < k; j++) { + j1 = 2 * j + ip[k]; + k1 = 2 * k + ip[j]; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + j1 += 8; + k1 += 16; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + j1 += 8; + k1 -= 8; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + j1 += 8; + k1 += 16; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + } + j1 = 2 * k + 8 + ip[k]; + k1 = j1 + 8; + xr = a[j1 + 0]; + xi = a[j1 + 1]; + yr = a[k1 + 0]; + yi = a[k1 + 1]; + a[j1 + 0] = yr; + a[j1 + 1] = yi; + a[k1 + 0] = xr; + a[k1 + 1] = xi; + } +} + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.h new file mode 100644 index 000000000..a1b9f04f5 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_H_ + +#include "webrtc/typedefs.h" + +namespace webrtc { + +#if defined(WEBRTC_ARCH_X86_FAMILY) +void cft1st_128_SSE2(float* a); +void cftmdl_128_SSE2(float* a); +void rftfsub_128_SSE2(float* a); +void rftbsub_128_SSE2(float* a); +#endif + +#if defined(MIPS_FPU_LE) +void cft1st_128_mips(float* a); +void cftmdl_128_mips(float* a); +void rftfsub_128_mips(float* a); +void rftbsub_128_mips(float* a); +#endif + +#if defined(WEBRTC_HAS_NEON) +void cft1st_128_neon(float* a); +void cftmdl_128_neon(float* a); +void rftfsub_128_neon(float* a); +void rftbsub_128_neon(float* a); +#endif + +class OouraFft { + public: + OouraFft(); + ~OouraFft(); + void Fft(float* a) const; + void InverseFft(float* a) const; + + private: + void cft1st_128(float* a) const; + void cftmdl_128(float* a) const; + void rftfsub_128(float* a) const; + void rftbsub_128(float* a) const; + + void cftfsub_128(float* a) const; + void cftbsub_128(float* a) const; + void bitrv2_128(float* a) const; + bool use_sse2_; +}; + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_neon.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_neon.cc new file mode 100644 index 000000000..2e4567d83 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_neon.cc @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2014 The WebRTC project authors. 
 All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The rdft AEC algorithm, neon version of speed-critical functions.
+ *
+ * Based on the sse2 version.
+ */
+
+#include "webrtc/modules/audio_processing/utility/ooura_fft.h"
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+
+#include "webrtc/modules/audio_processing/utility/ooura_fft_tables_common.h"
+#include "webrtc/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_HAS_NEON)
+void cft1st_128_neon(float* a) {
+  const float32x4_t vec_swap_sign = vld1q_f32((float32_t*)k_swap_sign);
+  int j, k2;
+
+  for (k2 = 0, j = 0; j < 128; j += 16, k2 += 4) {
+    float32x4_t a00v = vld1q_f32(&a[j + 0]);
+    float32x4_t a04v = vld1q_f32(&a[j + 4]);
+    float32x4_t a08v = vld1q_f32(&a[j + 8]);
+    float32x4_t a12v = vld1q_f32(&a[j + 12]);
+    float32x4_t a01v = vcombine_f32(vget_low_f32(a00v), vget_low_f32(a08v));
+    float32x4_t a23v = vcombine_f32(vget_high_f32(a00v), vget_high_f32(a08v));
+    float32x4_t a45v = vcombine_f32(vget_low_f32(a04v), vget_low_f32(a12v));
+    float32x4_t a67v = vcombine_f32(vget_high_f32(a04v), vget_high_f32(a12v));
+    const float32x4_t wk1rv = vld1q_f32(&rdft_wk1r[k2]);
+    const float32x4_t wk1iv = vld1q_f32(&rdft_wk1i[k2]);
+    const float32x4_t wk2rv = vld1q_f32(&rdft_wk2r[k2]);
+    const float32x4_t wk2iv = vld1q_f32(&rdft_wk2i[k2]);
+    const float32x4_t wk3rv = vld1q_f32(&rdft_wk3r[k2]);
+    const float32x4_t wk3iv = vld1q_f32(&rdft_wk3i[k2]);
+    float32x4_t x0v = vaddq_f32(a01v, a23v);
+    const float32x4_t x1v = vsubq_f32(a01v, a23v);
+    const float32x4_t x2v = vaddq_f32(a45v, a67v);
+    const float32x4_t x3v = vsubq_f32(a45v, a67v);
+    const float32x4_t x3w
= vrev64q_f32(x3v); + float32x4_t x0w; + a01v = vaddq_f32(x0v, x2v); + x0v = vsubq_f32(x0v, x2v); + x0w = vrev64q_f32(x0v); + a45v = vmulq_f32(wk2rv, x0v); + a45v = vmlaq_f32(a45v, wk2iv, x0w); + x0v = vmlaq_f32(x1v, x3w, vec_swap_sign); + x0w = vrev64q_f32(x0v); + a23v = vmulq_f32(wk1rv, x0v); + a23v = vmlaq_f32(a23v, wk1iv, x0w); + x0v = vmlsq_f32(x1v, x3w, vec_swap_sign); + x0w = vrev64q_f32(x0v); + a67v = vmulq_f32(wk3rv, x0v); + a67v = vmlaq_f32(a67v, wk3iv, x0w); + a00v = vcombine_f32(vget_low_f32(a01v), vget_low_f32(a23v)); + a04v = vcombine_f32(vget_low_f32(a45v), vget_low_f32(a67v)); + a08v = vcombine_f32(vget_high_f32(a01v), vget_high_f32(a23v)); + a12v = vcombine_f32(vget_high_f32(a45v), vget_high_f32(a67v)); + vst1q_f32(&a[j + 0], a00v); + vst1q_f32(&a[j + 4], a04v); + vst1q_f32(&a[j + 8], a08v); + vst1q_f32(&a[j + 12], a12v); + } +} + +void cftmdl_128_neon(float* a) { + int j; + const int l = 8; + const float32x4_t vec_swap_sign = vld1q_f32((float32_t*)k_swap_sign); + float32x4_t wk1rv = vld1q_f32(cftmdl_wk1r); + + for (j = 0; j < l; j += 2) { + const float32x2_t a_00 = vld1_f32(&a[j + 0]); + const float32x2_t a_08 = vld1_f32(&a[j + 8]); + const float32x2_t a_32 = vld1_f32(&a[j + 32]); + const float32x2_t a_40 = vld1_f32(&a[j + 40]); + const float32x4_t a_00_32 = vcombine_f32(a_00, a_32); + const float32x4_t a_08_40 = vcombine_f32(a_08, a_40); + const float32x4_t x0r0_0i0_0r1_x0i1 = vaddq_f32(a_00_32, a_08_40); + const float32x4_t x1r0_1i0_1r1_x1i1 = vsubq_f32(a_00_32, a_08_40); + const float32x2_t a_16 = vld1_f32(&a[j + 16]); + const float32x2_t a_24 = vld1_f32(&a[j + 24]); + const float32x2_t a_48 = vld1_f32(&a[j + 48]); + const float32x2_t a_56 = vld1_f32(&a[j + 56]); + const float32x4_t a_16_48 = vcombine_f32(a_16, a_48); + const float32x4_t a_24_56 = vcombine_f32(a_24, a_56); + const float32x4_t x2r0_2i0_2r1_x2i1 = vaddq_f32(a_16_48, a_24_56); + const float32x4_t x3r0_3i0_3r1_x3i1 = vsubq_f32(a_16_48, a_24_56); + const float32x4_t xx0 = 
vaddq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const float32x4_t xx1 = vsubq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const float32x4_t x3i0_3r0_3i1_x3r1 = vrev64q_f32(x3r0_3i0_3r1_x3i1); + const float32x4_t x1_x3_add = + vmlaq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1); + const float32x4_t x1_x3_sub = + vmlsq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1); + const float32x2_t yy0_a = vdup_lane_f32(vget_high_f32(x1_x3_add), 0); + const float32x2_t yy0_s = vdup_lane_f32(vget_high_f32(x1_x3_sub), 0); + const float32x4_t yy0_as = vcombine_f32(yy0_a, yy0_s); + const float32x2_t yy1_a = vdup_lane_f32(vget_high_f32(x1_x3_add), 1); + const float32x2_t yy1_s = vdup_lane_f32(vget_high_f32(x1_x3_sub), 1); + const float32x4_t yy1_as = vcombine_f32(yy1_a, yy1_s); + const float32x4_t yy0 = vmlaq_f32(yy0_as, vec_swap_sign, yy1_as); + const float32x4_t yy4 = vmulq_f32(wk1rv, yy0); + const float32x4_t xx1_rev = vrev64q_f32(xx1); + const float32x4_t yy4_rev = vrev64q_f32(yy4); + + vst1_f32(&a[j + 0], vget_low_f32(xx0)); + vst1_f32(&a[j + 32], vget_high_f32(xx0)); + vst1_f32(&a[j + 16], vget_low_f32(xx1)); + vst1_f32(&a[j + 48], vget_high_f32(xx1_rev)); + + a[j + 48] = -a[j + 48]; + + vst1_f32(&a[j + 8], vget_low_f32(x1_x3_add)); + vst1_f32(&a[j + 24], vget_low_f32(x1_x3_sub)); + vst1_f32(&a[j + 40], vget_low_f32(yy4)); + vst1_f32(&a[j + 56], vget_high_f32(yy4_rev)); + } + + { + const int k = 64; + const int k1 = 2; + const int k2 = 2 * k1; + const float32x4_t wk2rv = vld1q_f32(&rdft_wk2r[k2 + 0]); + const float32x4_t wk2iv = vld1q_f32(&rdft_wk2i[k2 + 0]); + const float32x4_t wk1iv = vld1q_f32(&rdft_wk1i[k2 + 0]); + const float32x4_t wk3rv = vld1q_f32(&rdft_wk3r[k2 + 0]); + const float32x4_t wk3iv = vld1q_f32(&rdft_wk3i[k2 + 0]); + wk1rv = vld1q_f32(&rdft_wk1r[k2 + 0]); + for (j = k; j < l + k; j += 2) { + const float32x2_t a_00 = vld1_f32(&a[j + 0]); + const float32x2_t a_08 = vld1_f32(&a[j + 8]); + const float32x2_t a_32 = vld1_f32(&a[j + 32]); + 
const float32x2_t a_40 = vld1_f32(&a[j + 40]); + const float32x4_t a_00_32 = vcombine_f32(a_00, a_32); + const float32x4_t a_08_40 = vcombine_f32(a_08, a_40); + const float32x4_t x0r0_0i0_0r1_x0i1 = vaddq_f32(a_00_32, a_08_40); + const float32x4_t x1r0_1i0_1r1_x1i1 = vsubq_f32(a_00_32, a_08_40); + const float32x2_t a_16 = vld1_f32(&a[j + 16]); + const float32x2_t a_24 = vld1_f32(&a[j + 24]); + const float32x2_t a_48 = vld1_f32(&a[j + 48]); + const float32x2_t a_56 = vld1_f32(&a[j + 56]); + const float32x4_t a_16_48 = vcombine_f32(a_16, a_48); + const float32x4_t a_24_56 = vcombine_f32(a_24, a_56); + const float32x4_t x2r0_2i0_2r1_x2i1 = vaddq_f32(a_16_48, a_24_56); + const float32x4_t x3r0_3i0_3r1_x3i1 = vsubq_f32(a_16_48, a_24_56); + const float32x4_t xx = vaddq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const float32x4_t xx1 = vsubq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const float32x4_t x3i0_3r0_3i1_x3r1 = vrev64q_f32(x3r0_3i0_3r1_x3i1); + const float32x4_t x1_x3_add = + vmlaq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1); + const float32x4_t x1_x3_sub = + vmlsq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1); + float32x4_t xx4 = vmulq_f32(wk2rv, xx1); + float32x4_t xx12 = vmulq_f32(wk1rv, x1_x3_add); + float32x4_t xx22 = vmulq_f32(wk3rv, x1_x3_sub); + xx4 = vmlaq_f32(xx4, wk2iv, vrev64q_f32(xx1)); + xx12 = vmlaq_f32(xx12, wk1iv, vrev64q_f32(x1_x3_add)); + xx22 = vmlaq_f32(xx22, wk3iv, vrev64q_f32(x1_x3_sub)); + + vst1_f32(&a[j + 0], vget_low_f32(xx)); + vst1_f32(&a[j + 32], vget_high_f32(xx)); + vst1_f32(&a[j + 16], vget_low_f32(xx4)); + vst1_f32(&a[j + 48], vget_high_f32(xx4)); + vst1_f32(&a[j + 8], vget_low_f32(xx12)); + vst1_f32(&a[j + 40], vget_high_f32(xx12)); + vst1_f32(&a[j + 24], vget_low_f32(xx22)); + vst1_f32(&a[j + 56], vget_high_f32(xx22)); + } + } +} + +__inline static float32x4_t reverse_order_f32x4(float32x4_t in) { + // A B C D -> C D A B + const float32x4_t rev = vcombine_f32(vget_high_f32(in), vget_low_f32(in)); + 
// C D A B -> D C B A + return vrev64q_f32(rev); +} + +void rftfsub_128_neon(float* a) { + const float* c = rdft_w + 32; + int j1, j2; + const float32x4_t mm_half = vdupq_n_f32(0.5f); + + // Vectorized code (four at once). + // Note: commented number are indexes for the first iteration of the loop. + for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) { + // Load 'wk'. + const float32x4_t c_j1 = vld1q_f32(&c[j1]); // 1, 2, 3, 4, + const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]); // 28, 29, 30, 31, + const float32x4_t wkrt = vsubq_f32(mm_half, c_k1); // 28, 29, 30, 31, + const float32x4_t wkr_ = reverse_order_f32x4(wkrt); // 31, 30, 29, 28, + const float32x4_t wki_ = c_j1; // 1, 2, 3, 4, + // Load and shuffle 'a'. + // 2, 4, 6, 8, 3, 5, 7, 9 + float32x4x2_t a_j2_p = vld2q_f32(&a[0 + j2]); + // 120, 122, 124, 126, 121, 123, 125, 127, + const float32x4x2_t k2_0_4 = vld2q_f32(&a[122 - j2]); + // 126, 124, 122, 120 + const float32x4_t a_k2_p0 = reverse_order_f32x4(k2_0_4.val[0]); + // 127, 125, 123, 121 + const float32x4_t a_k2_p1 = reverse_order_f32x4(k2_0_4.val[1]); + // Calculate 'x'. + const float32x4_t xr_ = vsubq_f32(a_j2_p.val[0], a_k2_p0); + // 2-126, 4-124, 6-122, 8-120, + const float32x4_t xi_ = vaddq_f32(a_j2_p.val[1], a_k2_p1); + // 3-127, 5-125, 7-123, 9-121, + // Calculate product into 'y'. + // yr = wkr * xr - wki * xi; + // yi = wkr * xi + wki * xr; + const float32x4_t a_ = vmulq_f32(wkr_, xr_); + const float32x4_t b_ = vmulq_f32(wki_, xi_); + const float32x4_t c_ = vmulq_f32(wkr_, xi_); + const float32x4_t d_ = vmulq_f32(wki_, xr_); + const float32x4_t yr_ = vsubq_f32(a_, b_); // 2-126, 4-124, 6-122, 8-120, + const float32x4_t yi_ = vaddq_f32(c_, d_); // 3-127, 5-125, 7-123, 9-121, + // Update 'a'. 
+ // a[j2 + 0] -= yr; + // a[j2 + 1] -= yi; + // a[k2 + 0] += yr; + // a[k2 + 1] -= yi; + // 126, 124, 122, 120, + const float32x4_t a_k2_p0n = vaddq_f32(a_k2_p0, yr_); + // 127, 125, 123, 121, + const float32x4_t a_k2_p1n = vsubq_f32(a_k2_p1, yi_); + // Shuffle in right order and store. + const float32x4_t a_k2_p0nr = vrev64q_f32(a_k2_p0n); + const float32x4_t a_k2_p1nr = vrev64q_f32(a_k2_p1n); + // 124, 125, 126, 127, 120, 121, 122, 123 + const float32x4x2_t a_k2_n = vzipq_f32(a_k2_p0nr, a_k2_p1nr); + // 2, 4, 6, 8, + a_j2_p.val[0] = vsubq_f32(a_j2_p.val[0], yr_); + // 3, 5, 7, 9, + a_j2_p.val[1] = vsubq_f32(a_j2_p.val[1], yi_); + // 2, 3, 4, 5, 6, 7, 8, 9, + vst2q_f32(&a[0 + j2], a_j2_p); + + vst1q_f32(&a[122 - j2], a_k2_n.val[1]); + vst1q_f32(&a[126 - j2], a_k2_n.val[0]); + } + + // Scalar code for the remaining items. + for (; j2 < 64; j1 += 1, j2 += 2) { + const int k2 = 128 - j2; + const int k1 = 32 - j1; + const float wkr = 0.5f - c[k1]; + const float wki = c[j1]; + const float xr = a[j2 + 0] - a[k2 + 0]; + const float xi = a[j2 + 1] + a[k2 + 1]; + const float yr = wkr * xr - wki * xi; + const float yi = wkr * xi + wki * xr; + a[j2 + 0] -= yr; + a[j2 + 1] -= yi; + a[k2 + 0] += yr; + a[k2 + 1] -= yi; + } +} + +void rftbsub_128_neon(float* a) { + const float* c = rdft_w + 32; + int j1, j2; + const float32x4_t mm_half = vdupq_n_f32(0.5f); + + a[1] = -a[1]; + // Vectorized code (four at once). + // Note: commented number are indexes for the first iteration of the loop. + for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) { + // Load 'wk'. + const float32x4_t c_j1 = vld1q_f32(&c[j1]); // 1, 2, 3, 4, + const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]); // 28, 29, 30, 31, + const float32x4_t wkrt = vsubq_f32(mm_half, c_k1); // 28, 29, 30, 31, + const float32x4_t wkr_ = reverse_order_f32x4(wkrt); // 31, 30, 29, 28, + const float32x4_t wki_ = c_j1; // 1, 2, 3, 4, + // Load and shuffle 'a'. 
+ // 2, 4, 6, 8, 3, 5, 7, 9 + float32x4x2_t a_j2_p = vld2q_f32(&a[0 + j2]); + // 120, 122, 124, 126, 121, 123, 125, 127, + const float32x4x2_t k2_0_4 = vld2q_f32(&a[122 - j2]); + // 126, 124, 122, 120 + const float32x4_t a_k2_p0 = reverse_order_f32x4(k2_0_4.val[0]); + // 127, 125, 123, 121 + const float32x4_t a_k2_p1 = reverse_order_f32x4(k2_0_4.val[1]); + // Calculate 'x'. + const float32x4_t xr_ = vsubq_f32(a_j2_p.val[0], a_k2_p0); + // 2-126, 4-124, 6-122, 8-120, + const float32x4_t xi_ = vaddq_f32(a_j2_p.val[1], a_k2_p1); + // 3-127, 5-125, 7-123, 9-121, + // Calculate product into 'y'. + // yr = wkr * xr - wki * xi; + // yi = wkr * xi + wki * xr; + const float32x4_t a_ = vmulq_f32(wkr_, xr_); + const float32x4_t b_ = vmulq_f32(wki_, xi_); + const float32x4_t c_ = vmulq_f32(wkr_, xi_); + const float32x4_t d_ = vmulq_f32(wki_, xr_); + const float32x4_t yr_ = vaddq_f32(a_, b_); // 2-126, 4-124, 6-122, 8-120, + const float32x4_t yi_ = vsubq_f32(c_, d_); // 3-127, 5-125, 7-123, 9-121, + // Update 'a'. + // a[j2 + 0] -= yr; + // a[j2 + 1] -= yi; + // a[k2 + 0] += yr; + // a[k2 + 1] -= yi; + // 126, 124, 122, 120, + const float32x4_t a_k2_p0n = vaddq_f32(a_k2_p0, yr_); + // 127, 125, 123, 121, + const float32x4_t a_k2_p1n = vsubq_f32(yi_, a_k2_p1); + // Shuffle in right order and store. + // 2, 3, 4, 5, 6, 7, 8, 9, + const float32x4_t a_k2_p0nr = vrev64q_f32(a_k2_p0n); + const float32x4_t a_k2_p1nr = vrev64q_f32(a_k2_p1n); + // 124, 125, 126, 127, 120, 121, 122, 123 + const float32x4x2_t a_k2_n = vzipq_f32(a_k2_p0nr, a_k2_p1nr); + // 2, 4, 6, 8, + a_j2_p.val[0] = vsubq_f32(a_j2_p.val[0], yr_); + // 3, 5, 7, 9, + a_j2_p.val[1] = vsubq_f32(yi_, a_j2_p.val[1]); + // 2, 3, 4, 5, 6, 7, 8, 9, + vst2q_f32(&a[0 + j2], a_j2_p); + + vst1q_f32(&a[122 - j2], a_k2_n.val[1]); + vst1q_f32(&a[126 - j2], a_k2_n.val[0]); + } + + // Scalar code for the remaining items. 
+  for (; j2 < 64; j1 += 1, j2 += 2) {
+    const int k2 = 128 - j2;
+    const int k1 = 32 - j1;
+    const float wkr = 0.5f - c[k1];
+    const float wki = c[j1];
+    const float xr = a[j2 + 0] - a[k2 + 0];
+    const float xi = a[j2 + 1] + a[k2 + 1];
+    const float yr = wkr * xr + wki * xi;
+    const float yi = wkr * xi - wki * xr;
+    a[j2 + 0] = a[j2 + 0] - yr;
+    a[j2 + 1] = yi - a[j2 + 1];
+    a[k2 + 0] = yr + a[k2 + 0];
+    a[k2 + 1] = yi - a[k2 + 1];
+  }
+  a[65] = -a[65];
+}
+#endif
+
+} // namespace webrtc
diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_sse2.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_sse2.cc
new file mode 100644
index 000000000..42ee804c6
--- /dev/null
+++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_sse2.cc
@@ -0,0 +1,440 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing//utility/ooura_fft.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+
+#include "webrtc/modules/audio_processing/utility/ooura_fft_tables_common.h"
+#include "webrtc/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+namespace {
+// These intrinsics were unavailable before VS 2008.
+// TODO(andrew): move to a common file.
+#if defined(_MSC_VER) && _MSC_VER < 1500 +static __inline __m128 _mm_castsi128_ps(__m128i a) { + return *(__m128*)&a; +} +static __inline __m128i _mm_castps_si128(__m128 a) { + return *(__m128i*)&a; +} +#endif + +} // namespace + +void cft1st_128_SSE2(float* a) { + const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign); + int j, k2; + + for (k2 = 0, j = 0; j < 128; j += 16, k2 += 4) { + __m128 a00v = _mm_loadu_ps(&a[j + 0]); + __m128 a04v = _mm_loadu_ps(&a[j + 4]); + __m128 a08v = _mm_loadu_ps(&a[j + 8]); + __m128 a12v = _mm_loadu_ps(&a[j + 12]); + __m128 a01v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(1, 0, 1, 0)); + __m128 a23v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(3, 2, 3, 2)); + __m128 a45v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(1, 0, 1, 0)); + __m128 a67v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(3, 2, 3, 2)); + + const __m128 wk1rv = _mm_load_ps(&rdft_wk1r[k2]); + const __m128 wk1iv = _mm_load_ps(&rdft_wk1i[k2]); + const __m128 wk2rv = _mm_load_ps(&rdft_wk2r[k2]); + const __m128 wk2iv = _mm_load_ps(&rdft_wk2i[k2]); + const __m128 wk3rv = _mm_load_ps(&rdft_wk3r[k2]); + const __m128 wk3iv = _mm_load_ps(&rdft_wk3i[k2]); + __m128 x0v = _mm_add_ps(a01v, a23v); + const __m128 x1v = _mm_sub_ps(a01v, a23v); + const __m128 x2v = _mm_add_ps(a45v, a67v); + const __m128 x3v = _mm_sub_ps(a45v, a67v); + __m128 x0w; + a01v = _mm_add_ps(x0v, x2v); + x0v = _mm_sub_ps(x0v, x2v); + x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1)); + { + const __m128 a45_0v = _mm_mul_ps(wk2rv, x0v); + const __m128 a45_1v = _mm_mul_ps(wk2iv, x0w); + a45v = _mm_add_ps(a45_0v, a45_1v); + } + { + __m128 a23_0v, a23_1v; + const __m128 x3w = _mm_shuffle_ps(x3v, x3v, _MM_SHUFFLE(2, 3, 0, 1)); + const __m128 x3s = _mm_mul_ps(mm_swap_sign, x3w); + x0v = _mm_add_ps(x1v, x3s); + x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1)); + a23_0v = _mm_mul_ps(wk1rv, x0v); + a23_1v = _mm_mul_ps(wk1iv, x0w); + a23v = _mm_add_ps(a23_0v, a23_1v); + + x0v = _mm_sub_ps(x1v, x3s); + x0w = _mm_shuffle_ps(x0v, 
x0v, _MM_SHUFFLE(2, 3, 0, 1)); + } + { + const __m128 a67_0v = _mm_mul_ps(wk3rv, x0v); + const __m128 a67_1v = _mm_mul_ps(wk3iv, x0w); + a67v = _mm_add_ps(a67_0v, a67_1v); + } + + a00v = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(1, 0, 1, 0)); + a04v = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(1, 0, 1, 0)); + a08v = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(3, 2, 3, 2)); + a12v = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(3, 2, 3, 2)); + _mm_storeu_ps(&a[j + 0], a00v); + _mm_storeu_ps(&a[j + 4], a04v); + _mm_storeu_ps(&a[j + 8], a08v); + _mm_storeu_ps(&a[j + 12], a12v); + } +} + +void cftmdl_128_SSE2(float* a) { + const int l = 8; + const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign); + int j0; + + __m128 wk1rv = _mm_load_ps(cftmdl_wk1r); + for (j0 = 0; j0 < l; j0 += 2) { + const __m128i a_00 = _mm_loadl_epi64((__m128i*)&a[j0 + 0]); + const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 + 8]); + const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]); + const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]); + const __m128 a_00_32 = + _mm_shuffle_ps(_mm_castsi128_ps(a_00), _mm_castsi128_ps(a_32), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 a_08_40 = + _mm_shuffle_ps(_mm_castsi128_ps(a_08), _mm_castsi128_ps(a_40), + _MM_SHUFFLE(1, 0, 1, 0)); + __m128 x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40); + const __m128 x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40); + + const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]); + const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]); + const __m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]); + const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]); + const __m128 a_16_48 = + _mm_shuffle_ps(_mm_castsi128_ps(a_16), _mm_castsi128_ps(a_48), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 a_24_56 = + _mm_shuffle_ps(_mm_castsi128_ps(a_24), _mm_castsi128_ps(a_56), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56); + const __m128 x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56); 
+ + const __m128 xx0 = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const __m128 xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + + const __m128 x3i0_3r0_3i1_x3r1 = _mm_castsi128_ps(_mm_shuffle_epi32( + _mm_castps_si128(x3r0_3i0_3r1_x3i1), _MM_SHUFFLE(2, 3, 0, 1))); + const __m128 x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1); + const __m128 x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped); + const __m128 x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped); + + const __m128 yy0 = + _mm_shuffle_ps(x1_x3_add, x1_x3_sub, _MM_SHUFFLE(2, 2, 2, 2)); + const __m128 yy1 = + _mm_shuffle_ps(x1_x3_add, x1_x3_sub, _MM_SHUFFLE(3, 3, 3, 3)); + const __m128 yy2 = _mm_mul_ps(mm_swap_sign, yy1); + const __m128 yy3 = _mm_add_ps(yy0, yy2); + const __m128 yy4 = _mm_mul_ps(wk1rv, yy3); + + _mm_storel_epi64((__m128i*)&a[j0 + 0], _mm_castps_si128(xx0)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 32], + _mm_shuffle_epi32(_mm_castps_si128(xx0), _MM_SHUFFLE(3, 2, 3, 2))); + + _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx1)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 48], + _mm_shuffle_epi32(_mm_castps_si128(xx1), _MM_SHUFFLE(2, 3, 2, 3))); + a[j0 + 48] = -a[j0 + 48]; + + _mm_storel_epi64((__m128i*)&a[j0 + 8], _mm_castps_si128(x1_x3_add)); + _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(x1_x3_sub)); + + _mm_storel_epi64((__m128i*)&a[j0 + 40], _mm_castps_si128(yy4)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 56], + _mm_shuffle_epi32(_mm_castps_si128(yy4), _MM_SHUFFLE(2, 3, 2, 3))); + } + + { + int k = 64; + int k1 = 2; + int k2 = 2 * k1; + const __m128 wk2rv = _mm_load_ps(&rdft_wk2r[k2 + 0]); + const __m128 wk2iv = _mm_load_ps(&rdft_wk2i[k2 + 0]); + const __m128 wk1iv = _mm_load_ps(&rdft_wk1i[k2 + 0]); + const __m128 wk3rv = _mm_load_ps(&rdft_wk3r[k2 + 0]); + const __m128 wk3iv = _mm_load_ps(&rdft_wk3i[k2 + 0]); + wk1rv = _mm_load_ps(&rdft_wk1r[k2 + 0]); + for (j0 = k; j0 < l + k; j0 += 2) { + const __m128i a_00 = 
_mm_loadl_epi64((__m128i*)&a[j0 + 0]); + const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 + 8]); + const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]); + const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]); + const __m128 a_00_32 = + _mm_shuffle_ps(_mm_castsi128_ps(a_00), _mm_castsi128_ps(a_32), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 a_08_40 = + _mm_shuffle_ps(_mm_castsi128_ps(a_08), _mm_castsi128_ps(a_40), + _MM_SHUFFLE(1, 0, 1, 0)); + __m128 x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40); + const __m128 x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40); + + const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]); + const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]); + const __m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]); + const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]); + const __m128 a_16_48 = + _mm_shuffle_ps(_mm_castsi128_ps(a_16), _mm_castsi128_ps(a_48), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 a_24_56 = + _mm_shuffle_ps(_mm_castsi128_ps(a_24), _mm_castsi128_ps(a_56), + _MM_SHUFFLE(1, 0, 1, 0)); + const __m128 x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56); + const __m128 x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56); + + const __m128 xx = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const __m128 xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1); + const __m128 xx2 = _mm_mul_ps(xx1, wk2rv); + const __m128 xx3 = _mm_mul_ps( + wk2iv, _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xx1), + _MM_SHUFFLE(2, 3, 0, 1)))); + const __m128 xx4 = _mm_add_ps(xx2, xx3); + + const __m128 x3i0_3r0_3i1_x3r1 = _mm_castsi128_ps(_mm_shuffle_epi32( + _mm_castps_si128(x3r0_3i0_3r1_x3i1), _MM_SHUFFLE(2, 3, 0, 1))); + const __m128 x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1); + const __m128 x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped); + const __m128 x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped); + + const __m128 xx10 = _mm_mul_ps(x1_x3_add, wk1rv); + const __m128 xx11 = 
_mm_mul_ps( + wk1iv, _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_add), + _MM_SHUFFLE(2, 3, 0, 1)))); + const __m128 xx12 = _mm_add_ps(xx10, xx11); + + const __m128 xx20 = _mm_mul_ps(x1_x3_sub, wk3rv); + const __m128 xx21 = _mm_mul_ps( + wk3iv, _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_sub), + _MM_SHUFFLE(2, 3, 0, 1)))); + const __m128 xx22 = _mm_add_ps(xx20, xx21); + + _mm_storel_epi64((__m128i*)&a[j0 + 0], _mm_castps_si128(xx)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 32], + _mm_shuffle_epi32(_mm_castps_si128(xx), _MM_SHUFFLE(3, 2, 3, 2))); + + _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx4)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 48], + _mm_shuffle_epi32(_mm_castps_si128(xx4), _MM_SHUFFLE(3, 2, 3, 2))); + + _mm_storel_epi64((__m128i*)&a[j0 + 8], _mm_castps_si128(xx12)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 40], + _mm_shuffle_epi32(_mm_castps_si128(xx12), _MM_SHUFFLE(3, 2, 3, 2))); + + _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(xx22)); + _mm_storel_epi64( + (__m128i*)&a[j0 + 56], + _mm_shuffle_epi32(_mm_castps_si128(xx22), _MM_SHUFFLE(3, 2, 3, 2))); + } + } +} + +void rftfsub_128_SSE2(float* a) { + const float* c = rdft_w + 32; + int j1, j2, k1, k2; + float wkr, wki, xr, xi, yr, yi; + + static const ALIGN16_BEG float ALIGN16_END k_half[4] = {0.5f, 0.5f, 0.5f, + 0.5f}; + const __m128 mm_half = _mm_load_ps(k_half); + + // Vectorized code (four at once). + // Note: commented number are indexes for the first iteration of the loop. + for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) { + // Load 'wk'. + const __m128 c_j1 = _mm_loadu_ps(&c[j1]); // 1, 2, 3, 4, + const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]); // 28, 29, 30, 31, + const __m128 wkrt = _mm_sub_ps(mm_half, c_k1); // 28, 29, 30, 31, + const __m128 wkr_ = + _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3)); // 31, 30, 29, 28, + const __m128 wki_ = c_j1; // 1, 2, 3, 4, + // Load and shuffle 'a'. 
+ const __m128 a_j2_0 = _mm_loadu_ps(&a[0 + j2]); // 2, 3, 4, 5, + const __m128 a_j2_4 = _mm_loadu_ps(&a[4 + j2]); // 6, 7, 8, 9, + const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]); // 120, 121, 122, 123, + const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]); // 124, 125, 126, 127, + const __m128 a_j2_p0 = _mm_shuffle_ps( + a_j2_0, a_j2_4, _MM_SHUFFLE(2, 0, 2, 0)); // 2, 4, 6, 8, + const __m128 a_j2_p1 = _mm_shuffle_ps( + a_j2_0, a_j2_4, _MM_SHUFFLE(3, 1, 3, 1)); // 3, 5, 7, 9, + const __m128 a_k2_p0 = _mm_shuffle_ps( + a_k2_4, a_k2_0, _MM_SHUFFLE(0, 2, 0, 2)); // 126, 124, 122, 120, + const __m128 a_k2_p1 = _mm_shuffle_ps( + a_k2_4, a_k2_0, _MM_SHUFFLE(1, 3, 1, 3)); // 127, 125, 123, 121, + // Calculate 'x'. + const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0); + // 2-126, 4-124, 6-122, 8-120, + const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1); + // 3-127, 5-125, 7-123, 9-121, + // Calculate product into 'y'. + // yr = wkr * xr - wki * xi; + // yi = wkr * xi + wki * xr; + const __m128 a_ = _mm_mul_ps(wkr_, xr_); + const __m128 b_ = _mm_mul_ps(wki_, xi_); + const __m128 c_ = _mm_mul_ps(wkr_, xi_); + const __m128 d_ = _mm_mul_ps(wki_, xr_); + const __m128 yr_ = _mm_sub_ps(a_, b_); // 2-126, 4-124, 6-122, 8-120, + const __m128 yi_ = _mm_add_ps(c_, d_); // 3-127, 5-125, 7-123, 9-121, + // Update 'a'. + // a[j2 + 0] -= yr; + // a[j2 + 1] -= yi; + // a[k2 + 0] += yr; + // a[k2 + 1] -= yi; + const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_); // 2, 4, 6, 8, + const __m128 a_j2_p1n = _mm_sub_ps(a_j2_p1, yi_); // 3, 5, 7, 9, + const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_); // 126, 124, 122, 120, + const __m128 a_k2_p1n = _mm_sub_ps(a_k2_p1, yi_); // 127, 125, 123, 121, + // Shuffle in right order and store. 
+ const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n); + // 2, 3, 4, 5, + const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n); + // 6, 7, 8, 9, + const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n); + // 122, 123, 120, 121, + const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n); + // 126, 127, 124, 125, + const __m128 a_k2_0n = _mm_shuffle_ps( + a_k2_0nt, a_k2_0nt, _MM_SHUFFLE(1, 0, 3, 2)); // 120, 121, 122, 123, + const __m128 a_k2_4n = _mm_shuffle_ps( + a_k2_4nt, a_k2_4nt, _MM_SHUFFLE(1, 0, 3, 2)); // 124, 125, 126, 127, + _mm_storeu_ps(&a[0 + j2], a_j2_0n); + _mm_storeu_ps(&a[4 + j2], a_j2_4n); + _mm_storeu_ps(&a[122 - j2], a_k2_0n); + _mm_storeu_ps(&a[126 - j2], a_k2_4n); + } + // Scalar code for the remaining items. + for (; j2 < 64; j1 += 1, j2 += 2) { + k2 = 128 - j2; + k1 = 32 - j1; + wkr = 0.5f - c[k1]; + wki = c[j1]; + xr = a[j2 + 0] - a[k2 + 0]; + xi = a[j2 + 1] + a[k2 + 1]; + yr = wkr * xr - wki * xi; + yi = wkr * xi + wki * xr; + a[j2 + 0] -= yr; + a[j2 + 1] -= yi; + a[k2 + 0] += yr; + a[k2 + 1] -= yi; + } +} + +void rftbsub_128_SSE2(float* a) { + const float* c = rdft_w + 32; + int j1, j2, k1, k2; + float wkr, wki, xr, xi, yr, yi; + + static const ALIGN16_BEG float ALIGN16_END k_half[4] = {0.5f, 0.5f, 0.5f, + 0.5f}; + const __m128 mm_half = _mm_load_ps(k_half); + + a[1] = -a[1]; + // Vectorized code (four at once). + // Note: commented number are indexes for the first iteration of the loop. + for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) { + // Load 'wk'. + const __m128 c_j1 = _mm_loadu_ps(&c[j1]); // 1, 2, 3, 4, + const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]); // 28, 29, 30, 31, + const __m128 wkrt = _mm_sub_ps(mm_half, c_k1); // 28, 29, 30, 31, + const __m128 wkr_ = + _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3)); // 31, 30, 29, 28, + const __m128 wki_ = c_j1; // 1, 2, 3, 4, + // Load and shuffle 'a'. 
+ const __m128 a_j2_0 = _mm_loadu_ps(&a[0 + j2]); // 2, 3, 4, 5, + const __m128 a_j2_4 = _mm_loadu_ps(&a[4 + j2]); // 6, 7, 8, 9, + const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]); // 120, 121, 122, 123, + const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]); // 124, 125, 126, 127, + const __m128 a_j2_p0 = _mm_shuffle_ps( + a_j2_0, a_j2_4, _MM_SHUFFLE(2, 0, 2, 0)); // 2, 4, 6, 8, + const __m128 a_j2_p1 = _mm_shuffle_ps( + a_j2_0, a_j2_4, _MM_SHUFFLE(3, 1, 3, 1)); // 3, 5, 7, 9, + const __m128 a_k2_p0 = _mm_shuffle_ps( + a_k2_4, a_k2_0, _MM_SHUFFLE(0, 2, 0, 2)); // 126, 124, 122, 120, + const __m128 a_k2_p1 = _mm_shuffle_ps( + a_k2_4, a_k2_0, _MM_SHUFFLE(1, 3, 1, 3)); // 127, 125, 123, 121, + // Calculate 'x'. + const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0); + // 2-126, 4-124, 6-122, 8-120, + const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1); + // 3-127, 5-125, 7-123, 9-121, + // Calculate product into 'y'. + // yr = wkr * xr + wki * xi; + // yi = wkr * xi - wki * xr; + const __m128 a_ = _mm_mul_ps(wkr_, xr_); + const __m128 b_ = _mm_mul_ps(wki_, xi_); + const __m128 c_ = _mm_mul_ps(wkr_, xi_); + const __m128 d_ = _mm_mul_ps(wki_, xr_); + const __m128 yr_ = _mm_add_ps(a_, b_); // 2-126, 4-124, 6-122, 8-120, + const __m128 yi_ = _mm_sub_ps(c_, d_); // 3-127, 5-125, 7-123, 9-121, + // Update 'a'. + // a[j2 + 0] = a[j2 + 0] - yr; + // a[j2 + 1] = yi - a[j2 + 1]; + // a[k2 + 0] = yr + a[k2 + 0]; + // a[k2 + 1] = yi - a[k2 + 1]; + const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_); // 2, 4, 6, 8, + const __m128 a_j2_p1n = _mm_sub_ps(yi_, a_j2_p1); // 3, 5, 7, 9, + const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_); // 126, 124, 122, 120, + const __m128 a_k2_p1n = _mm_sub_ps(yi_, a_k2_p1); // 127, 125, 123, 121, + // Shuffle in right order and store. 
+ const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n); + // 2, 3, 4, 5, + const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n); + // 6, 7, 8, 9, + const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n); + // 122, 123, 120, 121, + const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n); + // 126, 127, 124, 125, + const __m128 a_k2_0n = _mm_shuffle_ps( + a_k2_0nt, a_k2_0nt, _MM_SHUFFLE(1, 0, 3, 2)); // 120, 121, 122, 123, + const __m128 a_k2_4n = _mm_shuffle_ps( + a_k2_4nt, a_k2_4nt, _MM_SHUFFLE(1, 0, 3, 2)); // 124, 125, 126, 127, + _mm_storeu_ps(&a[0 + j2], a_j2_0n); + _mm_storeu_ps(&a[4 + j2], a_j2_4n); + _mm_storeu_ps(&a[122 - j2], a_k2_0n); + _mm_storeu_ps(&a[126 - j2], a_k2_4n); + } + // Scalar code for the remaining items. + for (; j2 < 64; j1 += 1, j2 += 2) { + k2 = 128 - j2; + k1 = 32 - j1; + wkr = 0.5f - c[k1]; + wki = c[j1]; + xr = a[j2 + 0] - a[k2 + 0]; + xi = a[j2 + 1] + a[k2 + 1]; + yr = wkr * xr + wki * xi; + yi = wkr * xi - wki * xr; + a[j2 + 0] = a[j2 + 0] - yr; + a[j2 + 1] = yi - a[j2 + 1]; + a[k2 + 0] = yr + a[k2 + 0]; + a[k2 + 1] = yi - a[k2 + 1]; + } + a[65] = -a[65]; +} +#endif + +} // namespace webrtc diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_common.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_common.h new file mode 100644 index 000000000..548027cf2 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_common.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_COMMON_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_COMMON_H_ + +#include "webrtc/modules/audio_processing/utility/ooura_fft.h" + +namespace webrtc { + +// This tables used to be computed at run-time. For example, refer to: +// https://code.google.com/p/webrtc/source/browse/trunk/webrtc/modules/audio_processing/utility/apm_rdft.c?r=6564 +// to see the initialization code. +// Constants shared by all paths (C, SSE2, NEON). +const float rdft_w[64] = { + 1.0000000000f, 0.0000000000f, 0.7071067691f, 0.7071067691f, 0.9238795638f, + 0.3826834559f, 0.3826834559f, 0.9238795638f, 0.9807852507f, 0.1950903237f, + 0.5555702448f, 0.8314695954f, 0.8314695954f, 0.5555702448f, 0.1950903237f, + 0.9807852507f, 0.9951847196f, 0.0980171412f, 0.6343933344f, 0.7730104327f, + 0.8819212914f, 0.4713967443f, 0.2902846634f, 0.9569403529f, 0.9569403529f, + 0.2902846634f, 0.4713967443f, 0.8819212914f, 0.7730104327f, 0.6343933344f, + 0.0980171412f, 0.9951847196f, 0.7071067691f, 0.4993977249f, 0.4975923598f, + 0.4945882559f, 0.4903926253f, 0.4850156307f, 0.4784701765f, 0.4707720280f, + 0.4619397819f, 0.4519946277f, 0.4409606457f, 0.4288643003f, 0.4157347977f, + 0.4016037583f, 0.3865052164f, 0.3704755902f, 0.3535533845f, 0.3357794881f, + 0.3171966672f, 0.2978496552f, 0.2777851224f, 0.2570513785f, 0.2356983721f, + 0.2137775421f, 0.1913417280f, 0.1684449315f, 0.1451423317f, 0.1214900985f, + 0.0975451618f, 0.0733652338f, 0.0490085706f, 0.0245338380f, +}; + +// Constants used by the C and MIPS paths. 
+const float rdft_wk3ri_first[16] = { + 1.000000000f, 0.000000000f, 0.382683456f, 0.923879564f, + 0.831469536f, 0.555570245f, -0.195090353f, 0.980785251f, + 0.956940353f, 0.290284693f, 0.098017156f, 0.995184720f, + 0.634393334f, 0.773010492f, -0.471396863f, 0.881921172f, +}; +const float rdft_wk3ri_second[16] = { + -0.707106769f, 0.707106769f, -0.923879564f, -0.382683456f, + -0.980785251f, 0.195090353f, -0.555570245f, -0.831469536f, + -0.881921172f, 0.471396863f, -0.773010492f, -0.634393334f, + -0.995184720f, -0.098017156f, -0.290284693f, -0.956940353f, +}; + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_COMMON_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h new file mode 100644 index 000000000..1ed646d6f --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_NEON_SSE2_H_ +#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_NEON_SSE2_H_ + +#include "webrtc/modules/audio_processing/utility/ooura_fft.h" + +#ifdef _MSC_VER /* visual c++ */ +#define ALIGN16_BEG __declspec(align(16)) +#define ALIGN16_END +#else /* gcc or icc */ +#define ALIGN16_BEG +#define ALIGN16_END __attribute__((aligned(16))) +#endif + +namespace webrtc { + +// These tables used to be computed at run-time. For example, refer to: +// https://code.google.com/p/webrtc/source/browse/trunk/webrtc/modules/audio_processing/utility/apm_rdft.c?r=6564 +// to see the initialization code. +#if defined(WEBRTC_ARCH_X86_FAMILY) || defined(WEBRTC_HAS_NEON) +// Constants used by SSE2 and NEON but initialized in the C path. +const ALIGN16_BEG float ALIGN16_END k_swap_sign[4] = {-1.f, 1.f, -1.f, 1.f}; + +ALIGN16_BEG const float ALIGN16_END rdft_wk1r[32] = { + 1.000000000f, 1.000000000f, 0.707106769f, 0.707106769f, 0.923879564f, + 0.923879564f, 0.382683456f, 0.382683456f, 0.980785251f, 0.980785251f, + 0.555570245f, 0.555570245f, 0.831469595f, 0.831469595f, 0.195090324f, + 0.195090324f, 0.995184720f, 0.995184720f, 0.634393334f, 0.634393334f, + 0.881921291f, 0.881921291f, 0.290284663f, 0.290284663f, 0.956940353f, + 0.956940353f, 0.471396744f, 0.471396744f, 0.773010433f, 0.773010433f, + 0.098017141f, 0.098017141f, +}; +ALIGN16_BEG const float ALIGN16_END rdft_wk2r[32] = { + 1.000000000f, 1.000000000f, -0.000000000f, -0.000000000f, 0.707106769f, + 0.707106769f, -0.707106769f, -0.707106769f, 0.923879564f, 0.923879564f, + -0.382683456f, -0.382683456f, 0.382683456f, 0.382683456f, -0.923879564f, + -0.923879564f, 0.980785251f, 0.980785251f, -0.195090324f, -0.195090324f, + 0.555570245f, 0.555570245f, -0.831469595f, -0.831469595f, 0.831469595f, + 0.831469595f, -0.555570245f, -0.555570245f, 0.195090324f, 0.195090324f, + -0.980785251f, -0.980785251f, +}; +ALIGN16_BEG const float ALIGN16_END 
rdft_wk3r[32] = { + 1.000000000f, 1.000000000f, -0.707106769f, -0.707106769f, 0.382683456f, + 0.382683456f, -0.923879564f, -0.923879564f, 0.831469536f, 0.831469536f, + -0.980785251f, -0.980785251f, -0.195090353f, -0.195090353f, -0.555570245f, + -0.555570245f, 0.956940353f, 0.956940353f, -0.881921172f, -0.881921172f, + 0.098017156f, 0.098017156f, -0.773010492f, -0.773010492f, 0.634393334f, + 0.634393334f, -0.995184720f, -0.995184720f, -0.471396863f, -0.471396863f, + -0.290284693f, -0.290284693f, +}; +ALIGN16_BEG const float ALIGN16_END rdft_wk1i[32] = { + -0.000000000f, 0.000000000f, -0.707106769f, 0.707106769f, -0.382683456f, + 0.382683456f, -0.923879564f, 0.923879564f, -0.195090324f, 0.195090324f, + -0.831469595f, 0.831469595f, -0.555570245f, 0.555570245f, -0.980785251f, + 0.980785251f, -0.098017141f, 0.098017141f, -0.773010433f, 0.773010433f, + -0.471396744f, 0.471396744f, -0.956940353f, 0.956940353f, -0.290284663f, + 0.290284663f, -0.881921291f, 0.881921291f, -0.634393334f, 0.634393334f, + -0.995184720f, 0.995184720f, +}; +ALIGN16_BEG const float ALIGN16_END rdft_wk2i[32] = { + -0.000000000f, 0.000000000f, -1.000000000f, 1.000000000f, -0.707106769f, + 0.707106769f, -0.707106769f, 0.707106769f, -0.382683456f, 0.382683456f, + -0.923879564f, 0.923879564f, -0.923879564f, 0.923879564f, -0.382683456f, + 0.382683456f, -0.195090324f, 0.195090324f, -0.980785251f, 0.980785251f, + -0.831469595f, 0.831469595f, -0.555570245f, 0.555570245f, -0.555570245f, + 0.555570245f, -0.831469595f, 0.831469595f, -0.980785251f, 0.980785251f, + -0.195090324f, 0.195090324f, +}; +ALIGN16_BEG const float ALIGN16_END rdft_wk3i[32] = { + -0.000000000f, 0.000000000f, -0.707106769f, 0.707106769f, -0.923879564f, + 0.923879564f, 0.382683456f, -0.382683456f, -0.555570245f, 0.555570245f, + -0.195090353f, 0.195090353f, -0.980785251f, 0.980785251f, 0.831469536f, + -0.831469536f, -0.290284693f, 0.290284693f, -0.471396863f, 0.471396863f, + -0.995184720f, 0.995184720f, 0.634393334f, -0.634393334f, 
-0.773010492f, + 0.773010492f, 0.098017156f, -0.098017156f, -0.881921172f, 0.881921172f, + 0.956940353f, -0.956940353f, +}; +ALIGN16_BEG const float ALIGN16_END cftmdl_wk1r[4] = { + 0.707106769f, 0.707106769f, 0.707106769f, -0.707106769f, +}; +#endif + +} // namespace webrtc + +#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_NEON_SSE2_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/asm_defines.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/asm_defines.h new file mode 100644 index 000000000..fe4c05eff --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/asm_defines.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_SYSTEM_WRAPPERS_INCLUDE_ASM_DEFINES_H_ +#define WEBRTC_SYSTEM_WRAPPERS_INCLUDE_ASM_DEFINES_H_ + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",%progbits +#endif + +// Define the macros used in ARM assembly code, so that for Mac or iOS builds +// we add leading underscores for the function names. 
+#ifdef __APPLE__ +.macro GLOBAL_FUNCTION name +.global _\name +.private_extern _\name +.endm +.macro DEFINE_FUNCTION name +_\name: +.endm +.macro CALL_FUNCTION name +bl _\name +.endm +.macro GLOBAL_LABEL name +.global _\name +.private_extern _\name +.endm +#else +.macro GLOBAL_FUNCTION name +.global \name +.hidden \name +.endm +.macro DEFINE_FUNCTION name +#if defined(__linux__) && defined(__ELF__) +.type \name,%function +#endif +\name: +.endm +.macro CALL_FUNCTION name +bl \name +.endm +.macro GLOBAL_LABEL name +.global \name +.hidden \name +.endm +#endif + +// With Apple's clang compiler, for instructions ldrb, strh, etc., +// the condition code is after the width specifier. Here we define +// only the ones that are actually used in the assembly files. +#if (defined __llvm__) && (defined __APPLE__) +.macro streqh reg1, reg2, num +strheq \reg1, \reg2, \num +.endm +#endif + +.text + +#endif // WEBRTC_SYSTEM_WRAPPERS_INCLUDE_ASM_DEFINES_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/compile_assert_c.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/compile_assert_c.h new file mode 100644 index 000000000..00f6306e9 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/compile_assert_c.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_SYSTEM_WRAPPERS_INCLUDE_COMPILE_ASSERT_H_ +#define WEBRTC_SYSTEM_WRAPPERS_INCLUDE_COMPILE_ASSERT_H_ + +// Use this macro to verify at compile time that certain restrictions are met. +// The argument is the boolean expression to evaluate. 
+// Example: +// COMPILE_ASSERT(sizeof(foo) < 128); +// Note: In C++, use static_assert instead! +#define COMPILE_ASSERT(expression) switch (0) {case 0: case expression:;} + +#endif // WEBRTC_SYSTEM_WRAPPERS_INCLUDE_COMPILE_ASSERT_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/cpu_features_wrapper.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/cpu_features_wrapper.h new file mode 100644 index 000000000..9838d94e5 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/cpu_features_wrapper.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_SYSTEM_WRAPPERS_INCLUDE_CPU_FEATURES_WRAPPER_H_ +#define WEBRTC_SYSTEM_WRAPPERS_INCLUDE_CPU_FEATURES_WRAPPER_H_ + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +#include "webrtc/typedefs.h" + +// List of features in x86. +typedef enum { + kSSE2, + kSSE3 +} CPUFeature; + +// List of features in ARM. +enum { + kCPUFeatureARMv7 = (1 << 0), + kCPUFeatureVFPv3 = (1 << 1), + kCPUFeatureNEON = (1 << 2), + kCPUFeatureLDREXSTREX = (1 << 3) +}; + +typedef int (*WebRtc_CPUInfo)(CPUFeature feature); + +// Returns true if the CPU supports the feature. +extern WebRtc_CPUInfo WebRtc_GetCPUInfo; + +// No CPU feature is available => straight C path. +extern WebRtc_CPUInfo WebRtc_GetCPUInfoNoASM; + +// Return the features in an ARM device. +// It detects the features in the hardware platform, and returns supported +// values in the above enum definition as a bitmask. 
+extern uint64_t WebRtc_GetCPUFeaturesARM(void); + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif + +#endif // WEBRTC_SYSTEM_WRAPPERS_INCLUDE_CPU_FEATURES_WRAPPER_H_ diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/metrics.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/metrics.h new file mode 100644 index 000000000..e84e8ecd4 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/include/metrics.h @@ -0,0 +1,245 @@ +// +// Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// + +#ifndef WEBRTC_SYSTEM_WRAPPERS_INCLUDE_METRICS_H_ +#define WEBRTC_SYSTEM_WRAPPERS_INCLUDE_METRICS_H_ + +#include <string> + +#include "webrtc/base/atomicops.h" +#include "webrtc/base/checks.h" +#include "webrtc/common_types.h" +#include "webrtc/system_wrappers/include/logging.h" + +// Macros for allowing WebRTC clients (e.g. Chrome) to gather and aggregate +// statistics. +// +// Histogram for counters. +// RTC_HISTOGRAM_COUNTS(name, sample, min, max, bucket_count); +// +// Histogram for enumerators. +// The boundary should be above the max enumerator sample. +// RTC_HISTOGRAM_ENUMERATION(name, sample, boundary); +// +// +// The macros use the methods HistogramFactoryGetCounts, +// HistogramFactoryGetEnumeration and HistogramAdd.
+// +// Therefore, WebRTC clients must either: +// +// - provide implementations of +// Histogram* webrtc::metrics::HistogramFactoryGetCounts( +// const std::string& name, int sample, int min, int max, +// int bucket_count); +// Histogram* webrtc::metrics::HistogramFactoryGetEnumeration( +// const std::string& name, int sample, int boundary); +// void webrtc::metrics::HistogramAdd( +// Histogram* histogram_pointer, const std::string& name, int sample); +// +// - or link with the default implementations (i.e. +// system_wrappers/system_wrappers.gyp:metrics_default). +// +// +// Example usage: +// +// RTC_HISTOGRAM_COUNTS("WebRTC.Video.NacksSent", nacks_sent, 1, 100000, 100); +// +// enum Types { +// kTypeX, +// kTypeY, +// kBoundary, +// }; +// +// RTC_HISTOGRAM_ENUMERATION("WebRTC.Types", kTypeX, kBoundary); +// +// NOTE: It is recommended to do the Chromium review for modifications to +// histograms.xml before new metrics are committed to WebRTC. + + +// Macros for adding samples to a named histogram. + +// Histogram for counters (exponentially spaced buckets). 
+#define RTC_HISTOGRAM_COUNTS_100(name, sample) \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 100, 50) + +#define RTC_HISTOGRAM_COUNTS_200(name, sample) \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 200, 50) + +#define RTC_HISTOGRAM_COUNTS_500(name, sample) \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 500, 50) + +#define RTC_HISTOGRAM_COUNTS_1000(name, sample) \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 1000, 50) + +#define RTC_HISTOGRAM_COUNTS_10000(name, sample) \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 10000, 50) + +#define RTC_HISTOGRAM_COUNTS_100000(name, sample) \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 100000, 50) + +#define RTC_HISTOGRAM_COUNTS(name, sample, min, max, bucket_count) \ + RTC_HISTOGRAM_COMMON_BLOCK(name, sample, \ + webrtc::metrics::HistogramFactoryGetCounts(name, min, max, bucket_count)) + +#define RTC_HISTOGRAM_COUNTS_LINEAR(name, sample, min, max, bucket_count) \ + RTC_HISTOGRAM_COMMON_BLOCK(name, sample, \ + webrtc::metrics::HistogramFactoryGetCountsLinear( \ + name, min, max, bucket_count)) + +// Deprecated. +// TODO(asapersson): Remove. +#define RTC_HISTOGRAM_COUNTS_SPARSE_100(name, sample) \ + RTC_HISTOGRAM_COUNTS_SPARSE(name, sample, 1, 100, 50) + +#define RTC_HISTOGRAM_COUNTS_SPARSE(name, sample, min, max, bucket_count) \ + RTC_HISTOGRAM_COMMON_BLOCK_SLOW(name, sample, \ + webrtc::metrics::HistogramFactoryGetCounts(name, min, max, bucket_count)) + +// Histogram for percentage (evenly spaced buckets). +#define RTC_HISTOGRAM_PERCENTAGE(name, sample) \ + RTC_HISTOGRAM_ENUMERATION(name, sample, 101) + +// Histogram for booleans. +#define RTC_HISTOGRAM_BOOLEAN(name, sample) \ + RTC_HISTOGRAM_ENUMERATION(name, sample, 2) + +// Histogram for enumerators (evenly spaced buckets). +// |boundary| should be above the max enumerator sample. +#define RTC_HISTOGRAM_ENUMERATION(name, sample, boundary) \ + RTC_HISTOGRAM_COMMON_BLOCK(name, sample, \ + webrtc::metrics::HistogramFactoryGetEnumeration(name, boundary)) + +// The name of the histogram should not vary. 
+// TODO(asapersson): Consider changing string to const char*. +#define RTC_HISTOGRAM_COMMON_BLOCK(constant_name, sample, \ + factory_get_invocation) \ + do { \ + static webrtc::metrics::Histogram* atomic_histogram_pointer = nullptr; \ + webrtc::metrics::Histogram* histogram_pointer = \ + rtc::AtomicOps::AcquireLoadPtr(&atomic_histogram_pointer); \ + if (!histogram_pointer) { \ + histogram_pointer = factory_get_invocation; \ + webrtc::metrics::Histogram* prev_pointer = \ + rtc::AtomicOps::CompareAndSwapPtr( \ + &atomic_histogram_pointer, \ + static_cast<webrtc::metrics::Histogram*>(nullptr), \ + histogram_pointer); \ + RTC_DCHECK(prev_pointer == nullptr || \ + prev_pointer == histogram_pointer); \ + } \ + if (histogram_pointer) { \ + RTC_DCHECK_EQ(constant_name, \ + webrtc::metrics::GetHistogramName(histogram_pointer)) \ + << "The name should not vary."; \ + webrtc::metrics::HistogramAdd(histogram_pointer, sample); \ + } \ + } while (0) + +// Deprecated. +// The histogram is constructed/found for each call. +// May be used for histograms with infrequent updates. +#define RTC_HISTOGRAM_COMMON_BLOCK_SLOW(name, sample, factory_get_invocation) \ + do { \ + webrtc::metrics::Histogram* histogram_pointer = factory_get_invocation; \ + if (histogram_pointer) { \ + webrtc::metrics::HistogramAdd(histogram_pointer, sample); \ + } \ + } while (0) + +// Helper macros. +// Macros for calling a histogram with varying name (e.g. when using a metric +// in different modes such as real-time vs screenshare).
+#define RTC_HISTOGRAMS_COUNTS_100(index, name, sample) \ + RTC_HISTOGRAMS_COMMON(index, name, sample, \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 100, 50)) + +#define RTC_HISTOGRAMS_COUNTS_200(index, name, sample) \ + RTC_HISTOGRAMS_COMMON(index, name, sample, \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 200, 50)) + +#define RTC_HISTOGRAMS_COUNTS_500(index, name, sample) \ + RTC_HISTOGRAMS_COMMON(index, name, sample, \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 500, 50)) + +#define RTC_HISTOGRAMS_COUNTS_1000(index, name, sample) \ + RTC_HISTOGRAMS_COMMON(index, name, sample, \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 1000, 50)) + +#define RTC_HISTOGRAMS_COUNTS_10000(index, name, sample) \ + RTC_HISTOGRAMS_COMMON(index, name, sample, \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 10000, 50)) + +#define RTC_HISTOGRAMS_COUNTS_100000(index, name, sample) \ + RTC_HISTOGRAMS_COMMON(index, name, sample, \ + RTC_HISTOGRAM_COUNTS(name, sample, 1, 100000, 50)) + +#define RTC_HISTOGRAMS_ENUMERATION(index, name, sample, boundary) \ + RTC_HISTOGRAMS_COMMON(index, name, sample, \ + RTC_HISTOGRAM_ENUMERATION(name, sample, boundary)) + +#define RTC_HISTOGRAMS_PERCENTAGE(index, name, sample) \ + RTC_HISTOGRAMS_COMMON(index, name, sample, \ + RTC_HISTOGRAM_PERCENTAGE(name, sample)) + +#define RTC_HISTOGRAMS_COMMON(index, name, sample, macro_invocation) \ + do { \ + switch (index) { \ + case 0: \ + macro_invocation; \ + break; \ + case 1: \ + macro_invocation; \ + break; \ + case 2: \ + macro_invocation; \ + break; \ + default: \ + RTC_NOTREACHED(); \ + } \ + } while (0) + + +namespace webrtc { +namespace metrics { + +// Time that should have elapsed for stats that are gathered once per call. +enum { kMinRunTimeInSeconds = 10 }; + +class Histogram; + +// Functions for getting pointer to histogram (constructs or finds the named +// histogram). + +// Get histogram for counters. 
+Histogram* HistogramFactoryGetCounts( + const std::string& name, int min, int max, int bucket_count); + +// Get histogram for counters with linear bucket spacing. +Histogram* HistogramFactoryGetCountsLinear(const std::string& name, + int min, + int max, + int bucket_count); + +// Get histogram for enumerators. +// |boundary| should be above the max enumerator sample. +Histogram* HistogramFactoryGetEnumeration( + const std::string& name, int boundary); + +// Returns name of the histogram. +const std::string& GetHistogramName(Histogram* histogram_pointer); + +// Function for adding a |sample| to a histogram. +void HistogramAdd(Histogram* histogram_pointer, int sample); + +} // namespace metrics +} // namespace webrtc + +#endif // WEBRTC_SYSTEM_WRAPPERS_INCLUDE_METRICS_H_ + diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/source/cpu_features.cc b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/source/cpu_features.cc new file mode 100644 index 000000000..49840eb90 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/system_wrappers/source/cpu_features.cc @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// Parts of this file derived from Chromium's base/cpu.cc. + +#include "webrtc/system_wrappers/include/cpu_features_wrapper.h" + +#if defined(WEBRTC_ARCH_X86_FAMILY) && defined(_MSC_VER) +#include <intrin.h> +#endif + +#include "webrtc/typedefs.h" + +// No CPU feature is available => straight C path.
+int GetCPUInfoNoASM(CPUFeature feature) { + (void)feature; + return 0; +} + +#if defined(WEBRTC_ARCH_X86_FAMILY) +#ifndef _MSC_VER +// Intrinsic for "cpuid". +#if defined(__pic__) && defined(__i386__) +static inline void __cpuid(int cpu_info[4], int info_type) { + __asm__ volatile( + "mov %%ebx, %%edi\n" + "cpuid\n" + "xchg %%edi, %%ebx\n" + : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) + : "a"(info_type)); +} +#else +static inline void __cpuid(int cpu_info[4], int info_type) { + __asm__ volatile( + "cpuid\n" + : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) + : "a"(info_type)); +} +#endif +#endif // _MSC_VER +#endif // WEBRTC_ARCH_X86_FAMILY + +#if defined(WEBRTC_ARCH_X86_FAMILY) +// Actual feature detection for x86. +static int GetCPUInfo(CPUFeature feature) { + int cpu_info[4]; + __cpuid(cpu_info, 1); + if (feature == kSSE2) { + return 0 != (cpu_info[3] & 0x04000000); + } + if (feature == kSSE3) { + return 0 != (cpu_info[2] & 0x00000001); + } + return 0; +} +#else +// Default to straight C for other platforms. +static int GetCPUInfo(CPUFeature feature) { + (void)feature; + return 0; +} +#endif + +WebRtc_CPUInfo WebRtc_GetCPUInfo = GetCPUInfo; +WebRtc_CPUInfo WebRtc_GetCPUInfoNoASM = GetCPUInfoNoASM; diff --git a/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/typedefs.h b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/typedefs.h new file mode 100644 index 000000000..c960d95a4 --- /dev/null +++ b/Telegram/ThirdParty/libtgvoip/webrtc_dsp/webrtc/typedefs.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +// This file contains platform-specific typedefs and defines. +// Much of it is derived from Chromium's build/build_config.h. + +#ifndef WEBRTC_TYPEDEFS_H_ +#define WEBRTC_TYPEDEFS_H_ + +// Processor architecture detection. For more info on what's defined, see: +// http://msdn.microsoft.com/en-us/library/b0084kay.aspx +// http://www.agner.org/optimize/calling_conventions.pdf +// or with gcc, run: "echo | gcc -E -dM -" +#if defined(_M_X64) || defined(__x86_64__) +#define WEBRTC_ARCH_X86_FAMILY +#define WEBRTC_ARCH_X86_64 +#define WEBRTC_ARCH_64_BITS +#define WEBRTC_ARCH_LITTLE_ENDIAN +#elif defined(__aarch64__) +#define WEBRTC_ARCH_ARM_FAMILY +#define WEBRTC_ARCH_64_BITS +#define WEBRTC_ARCH_LITTLE_ENDIAN +#elif defined(_M_IX86) || defined(__i386__) +#define WEBRTC_ARCH_X86_FAMILY +#define WEBRTC_ARCH_X86 +#define WEBRTC_ARCH_32_BITS +#define WEBRTC_ARCH_LITTLE_ENDIAN +#elif defined(__ARMEL__) || defined(_M_ARM) +#define WEBRTC_ARCH_ARM_FAMILY +#define WEBRTC_ARCH_32_BITS +#define WEBRTC_ARCH_LITTLE_ENDIAN +#elif defined(__MIPSEL__) +#define WEBRTC_ARCH_MIPS_FAMILY +#if defined(__LP64__) +#define WEBRTC_ARCH_64_BITS +#else +#define WEBRTC_ARCH_32_BITS +#endif +#define WEBRTC_ARCH_LITTLE_ENDIAN +#elif defined(__pnacl__) +#define WEBRTC_ARCH_32_BITS +#define WEBRTC_ARCH_LITTLE_ENDIAN +#else +#error Please add support for your architecture in typedefs.h +#endif + +#if !(defined(WEBRTC_ARCH_LITTLE_ENDIAN) ^ defined(WEBRTC_ARCH_BIG_ENDIAN)) +#error Define either WEBRTC_ARCH_LITTLE_ENDIAN or WEBRTC_ARCH_BIG_ENDIAN +#endif + +// TODO(zhongwei.yao): WEBRTC_CPU_DETECTION is only used in one place; we should +// probably just remove it. +#if (defined(WEBRTC_ARCH_X86_FAMILY) && !defined(__SSE2__)) +#define WEBRTC_CPU_DETECTION +#endif + +// TODO(pbos): Use webrtc/base/basictypes.h instead to include fixed-size ints. 
+#include <stdint.h> + +#if defined(_MSC_VER) && _MSC_VER<=1800 && !defined(__cplusplus) +#define inline __inline +#endif + +// Annotate a function indicating the caller must examine the return value. +// Use like: +// int foo() WARN_UNUSED_RESULT; +// To explicitly ignore a result, see |ignore_result()| in <base/macros.h>. +// TODO(ajm): Hack to avoid multiple definitions until the base/ of webrtc and +// libjingle are merged. +#if !defined(WARN_UNUSED_RESULT) +#if defined(__GNUC__) || defined(__clang__) +#define WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) +#else +#define WARN_UNUSED_RESULT +#endif +#endif // WARN_UNUSED_RESULT + +// Put after a variable that might not be used, to prevent compiler warnings: +// int result ATTRIBUTE_UNUSED = DoSomething(); +// assert(result == 17); +#ifndef ATTRIBUTE_UNUSED +#if defined(__GNUC__) || defined(__clang__) +#define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) +#else +#define ATTRIBUTE_UNUSED +#endif +#endif + +// Macro to be used for switch-case fallthrough (required for enabling +// -Wimplicit-fallthrough warning on Clang). +#ifndef FALLTHROUGH +#if defined(__clang__) +#define FALLTHROUGH() [[clang::fallthrough]] +#else +#define FALLTHROUGH() do { } while (0) +#endif +#endif + +// Annotate a function that will not return control flow to the caller. +#if defined(_MSC_VER) +#define NO_RETURN __declspec(noreturn) +#elif defined(__GNUC__) +#define NO_RETURN __attribute__ ((__noreturn__)) +#else +#define NO_RETURN +#endif + +#endif // WEBRTC_TYPEDEFS_H_