Tests that all available audio devices can be selected and used by the ADM.
New tests are:
- AudioDeviceTest.StartStopPlayoutWithRealDevice
- AudioDeviceTest.StartStopRecordingWithRealDevice
(The notes below only affect ADM2 on Windows.)
While adding these tests it was found that we could hit the same known issue
as in https://bugs.chromium.org/p/chromium/issues/detail?id=803056, and the
corresponding solution was therefore ported from Chrome to WebRTC.
Hence, this change also adds core_audio_utility::WaveFormatWrapper, which
handles devices that can return a format where only the WAVEFORMATEX part is
initialized. The old version would only DCHECK for these devices, which could
lead to unpredictable behavior.
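For reference, a minimal sketch of the checks the wrapper performs (the helper
names here are illustrative; the actual implementation is WaveFormatWrapper in
core_audio_utility_win.cc below):

  // Assumes the Windows wave-format definitions from <mmreg.h>.
  bool IsExtensibleFormat(const WAVEFORMATEX* f) {
    // The extension is only valid when the tag says so and cbSize covers
    // the extra 22 bytes of WAVEFORMATEXTENSIBLE.
    return f->wFormatTag == WAVE_FORMAT_EXTENSIBLE && f->cbSize >= 22;
  }
  size_t FormatSize(const WAVEFORMATEX* f) {
    // Copy sizeof(WAVEFORMATEX) + cbSize bytes instead of assuming
    // sizeof(WAVEFORMATEXTENSIBLE); the devices hit by crbug/803056
    // report cbSize == 0.
    return sizeof(*f) + f->cbSize;
  }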
Tbr: minyue
Bug: webrtc:11093
Change-Id: Icb238c5475100f251ce4e55e39a03653da04dbda
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/159982
Reviewed-by: Henrik Andreasson <henrika@webrtc.org>
Commit-Queue: Henrik Andreasson <henrika@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#29824}
diff --git a/modules/audio_device/audio_device_unittest.cc b/modules/audio_device/audio_device_unittest.cc
index 1b970d5..0479a0b 100644
--- a/modules/audio_device/audio_device_unittest.cc
+++ b/modules/audio_device/audio_device_unittest.cc
@@ -23,6 +23,7 @@
#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/audio_device_impl.h"
#include "modules/audio_device/include/mock_audio_transport.h"
+#include "rtc_base/arraysize.h"
#include "rtc_base/buffer.h"
#include "rtc_base/critical_section.h"
#include "rtc_base/event.h"
@@ -808,6 +809,60 @@
StopRecording();
}
+// Tests Start/Stop playout for all available output devices to ensure that
+// the selected device can be created and used as intended.
+TEST_P(MAYBE_AudioDeviceTest, StartStopPlayoutWithRealDevice) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ int num_devices = audio_device()->PlayoutDevices();
+ if (NewWindowsAudioDeviceModuleIsUsed()) {
+ num_devices += 2;
+ }
+ EXPECT_GT(num_devices, 0);
+ // Verify that all available playout devices can be set and used.
+ for (int i = 0; i < num_devices; ++i) {
+ EXPECT_EQ(0, audio_device()->SetPlayoutDevice(i));
+ StartPlayout();
+ StopPlayout();
+ }
+#ifdef WEBRTC_WIN
+ AudioDeviceModule::WindowsDeviceType device_role[] = {
+ AudioDeviceModule::kDefaultDevice,
+ AudioDeviceModule::kDefaultCommunicationDevice};
+ for (size_t i = 0; i < arraysize(device_role); ++i) {
+ EXPECT_EQ(0, audio_device()->SetPlayoutDevice(device_role[i]));
+ StartPlayout();
+ StopPlayout();
+ }
+#endif
+}
+
+// Tests Start/Stop recording for all available input devices to ensure that
+// the selected device can be created and used as intended.
+TEST_P(MAYBE_AudioDeviceTest, StartStopRecordingWithRealDevice) {
+ SKIP_TEST_IF_NOT(requirements_satisfied());
+ int num_devices = audio_device()->RecordingDevices();
+ if (NewWindowsAudioDeviceModuleIsUsed()) {
+ num_devices += 2;
+ }
+ EXPECT_GT(num_devices, 0);
+ // Verify that all available recording devices can be set and used.
+ for (int i = 0; i < num_devices; ++i) {
+ EXPECT_EQ(0, audio_device()->SetRecordingDevice(i));
+ StartRecording();
+ StopRecording();
+ }
+#ifdef WEBRTC_WIN
+ AudioDeviceModule::WindowsDeviceType device_role[] = {
+ AudioDeviceModule::kDefaultDevice,
+ AudioDeviceModule::kDefaultCommunicationDevice};
+ for (size_t i = 0; i < arraysize(device_role); ++i) {
+ EXPECT_EQ(0, audio_device()->SetRecordingDevice(device_role[i]));
+ StartRecording();
+ StopRecording();
+ }
+#endif
+}
+
// Tests Init/Stop/Init recording without any registered audio callback.
// See https://bugs.chromium.org/p/webrtc/issues/detail?id=8041 for details
// on why this test is useful.
diff --git a/modules/audio_device/win/core_audio_base_win.cc b/modules/audio_device/win/core_audio_base_win.cc
index 56abe85..c7887ca 100644
--- a/modules/audio_device/win/core_audio_base_win.cc
+++ b/modules/audio_device/win/core_audio_base_win.cc
@@ -391,7 +391,7 @@
format_.dwChannelMask =
format->nChannels == 1 ? KSAUDIO_SPEAKER_MONO : KSAUDIO_SPEAKER_STEREO;
format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
- RTC_DLOG(INFO) << core_audio_utility::WaveFormatExToString(&format_);
+ RTC_DLOG(INFO) << core_audio_utility::WaveFormatToString(&format_);
// Verify that the format is supported but exclude the test if the default
// sample rate has been overridden. If so, the WASAPI audio engine will do
diff --git a/modules/audio_device/win/core_audio_utility_win.cc b/modules/audio_device/win/core_audio_utility_win.cc
index 85234ac..1f60e76 100644
--- a/modules/audio_device/win/core_audio_utility_win.cc
+++ b/modules/audio_device/win/core_audio_utility_win.cc
@@ -157,50 +157,20 @@
#define AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000
#endif
-// Converts from channel mask to DirectSound speaker configuration.
-// The values below are copied from ksmedia.h.
-// Example: KSAUDIO_SPEAKER_STEREO = (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT).
-const char* DirectSoundConfigToString(DWORD channel_mask) {
- switch (channel_mask) {
- case KSAUDIO_SPEAKER_DIRECTOUT:
- return "KSAUDIO_DIRECTOUT";
- case KSAUDIO_SPEAKER_MONO:
- // Front center (C)
- return "KSAUDIO_MONO";
- case KSAUDIO_SPEAKER_1POINT1:
- return "KSAUDIO_1POINT1";
- case KSAUDIO_SPEAKER_STEREO:
- // Front left (L), front right (R).
- return "KSAUDIO_STEREO";
- case KSAUDIO_SPEAKER_2POINT1:
- return "KSAUDIO_2POINT1";
- case KSAUDIO_SPEAKER_3POINT0:
- return "KSAUDIO_3POINT0";
- case KSAUDIO_SPEAKER_3POINT1:
- return "KSAUDIO_3POINT1";
- case KSAUDIO_SPEAKER_QUAD:
- // L, R, back left (Lb), back right (Rb).
- return "KSAUDIO_QUAD";
- case KSAUDIO_SPEAKER_SURROUND:
- // L, R, front center (C), back center (Cb).
- return "KSAUDIO_SURROUND";
- case KSAUDIO_SPEAKER_5POINT0:
- return "KSAUDIO_5POINT0";
- case KSAUDIO_SPEAKER_5POINT1:
- return "KSAUDIO_5POINT1";
- case KSAUDIO_SPEAKER_7POINT0:
- return "KSAUDIO_7POINT0";
- case KSAUDIO_SPEAKER_7POINT1:
- // L, R, C, Lb, Rb, front left-of-center, front right-of-center, LFE.
- return "KSAUDIO_7POINT1";
- case KSAUDIO_SPEAKER_5POINT1_SURROUND:
- // L, R, C, side left (Ls), side right (Rs), LFE.
- return "KSAUDIO_5POINT1_SURROUND";
- case KSAUDIO_SPEAKER_7POINT1_SURROUND:
- // L, R, C, Lb, Rb, Ls, Rs, LFE.
- return "KSAUDIO_7POINT1_SURROUND";
+// Converts the most common format tags defined in mmreg.h into string
+// equivalents. Mainly intended for log messages.
+const char* WaveFormatTagToString(WORD format_tag) {
+ switch (format_tag) {
+ case WAVE_FORMAT_UNKNOWN:
+ return "WAVE_FORMAT_UNKNOWN";
+ case WAVE_FORMAT_PCM:
+ return "WAVE_FORMAT_PCM";
+ case WAVE_FORMAT_IEEE_FLOAT:
+ return "WAVE_FORMAT_IEEE_FLOAT";
+ case WAVE_FORMAT_EXTENSIBLE:
+ return "WAVE_FORMAT_EXTENSIBLE";
default:
- return "KSAUDIO_INVALID";
+ return "UNKNOWN";
}
}
@@ -589,6 +559,31 @@
namespace core_audio_utility {
+// core_audio_utility::WaveFormatWrapper implementation.
+WAVEFORMATEXTENSIBLE* WaveFormatWrapper::GetExtensible() const {
+ RTC_CHECK(IsExtensible());
+ return reinterpret_cast<WAVEFORMATEXTENSIBLE*>(ptr_);
+}
+
+bool WaveFormatWrapper::IsExtensible() const {
+ return ptr_->wFormatTag == WAVE_FORMAT_EXTENSIBLE && ptr_->cbSize >= 22;
+}
+
+bool WaveFormatWrapper::IsPcm() const {
+ return IsExtensible() ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_PCM
+ : ptr_->wFormatTag == WAVE_FORMAT_PCM;
+}
+
+bool WaveFormatWrapper::IsFloat() const {
+ return IsExtensible()
+ ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT
+ : ptr_->wFormatTag == WAVE_FORMAT_IEEE_FLOAT;
+}
+
+size_t WaveFormatWrapper::size() const {
+ return sizeof(*ptr_) + ptr_->cbSize;
+}
+
bool IsSupported() {
RTC_DLOG(INFO) << "IsSupported";
static bool g_is_supported = IsSupportedInternal();
@@ -904,19 +899,52 @@
WAVEFORMATEXTENSIBLE* format) {
RTC_DLOG(INFO) << "GetSharedModeMixFormat";
RTC_DCHECK(client);
- ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex;
+
+ // The GetMixFormat method retrieves the stream format that the audio engine
+ // uses for its internal processing of shared-mode streams. The method
+ // allocates the storage for the structure and this memory will be released
+ // when |mix_format| goes out of scope. The GetMixFormat method retrieves a
+ // format descriptor that is in the form of a WAVEFORMATEXTENSIBLE structure
+ // instead of a standalone WAVEFORMATEX structure. The method outputs a
+ // pointer to the WAVEFORMATEX structure that is embedded at the start of
+ // this WAVEFORMATEXTENSIBLE structure.
+ // Note that crbug/803056 indicates that some devices can return a format
+ // where only the WAVEFORMATEX part is initialized and we must be able to
+ // account for that.
+ ScopedCoMem<WAVEFORMATEXTENSIBLE> mix_format;
_com_error error =
- client->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&format_ex));
+ client->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&mix_format));
if (FAILED(error.Error())) {
RTC_LOG(LS_ERROR) << "IAudioClient::GetMixFormat failed: "
<< ErrorToString(error);
return error.Error();
}
- size_t bytes = sizeof(WAVEFORMATEX) + format_ex->Format.cbSize;
- RTC_DCHECK_EQ(bytes, sizeof(WAVEFORMATEXTENSIBLE));
- memcpy(format, format_ex, bytes);
- RTC_DLOG(INFO) << WaveFormatExToString(format);
+ // Use a wave format wrapper to make things simpler.
+ WaveFormatWrapper wrapped_format(mix_format.Get());
+
+ // Verify that the reported format can be mixed by the audio engine in
+ // shared mode.
+ if (!wrapped_format.IsPcm() && !wrapped_format.IsFloat()) {
+ RTC_DLOG(LS_ERROR)
+ << "Only pure PCM or float audio streams can be mixed in shared mode";
+ return AUDCLNT_E_UNSUPPORTED_FORMAT;
+ }
+
+ // Log a warning for the rare case where |mix_format| only contains a
+ // stand-alone WAVEFORMATEX structure, but do not treat it as an error.
+ if (!wrapped_format.IsExtensible()) {
+ RTC_DLOG(WARNING)
+ << "The returned format contains no extended information. "
+ "The size is "
+ << wrapped_format.size() << " bytes.";
+ }
+
+ // Copy the correct number of bytes into |*format|, taking into account
+ // whether the returned structure is correctly extended or not.
+ RTC_CHECK_LE(wrapped_format.size(), sizeof(WAVEFORMATEXTENSIBLE));
+ memcpy(format, wrapped_format.get(), wrapped_format.size());
+ RTC_DLOG(INFO) << WaveFormatToString(format);
return error.Error();
}
@@ -926,7 +954,7 @@
const WAVEFORMATEXTENSIBLE* format) {
RTC_DLOG(INFO) << "IsFormatSupported";
RTC_DCHECK(client);
- ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
+ ScopedCoMem<WAVEFORMATEX> closest_match;
// This method provides a way for a client to determine, before calling
// IAudioClient::Initialize, whether the audio engine supports a particular
// stream format or not. In shared mode, the audio engine always supports
@@ -934,7 +962,9 @@
// TODO(henrika): verify support for exclusive mode as well?
_com_error error = client->IsFormatSupported(
share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
- reinterpret_cast<WAVEFORMATEX**>(&closest_match));
+ &closest_match);
+ RTC_LOG(INFO) << WaveFormatToString(
+ const_cast<WAVEFORMATEXTENSIBLE*>(format));
if ((error.Error() == S_OK) && (closest_match == nullptr)) {
RTC_DLOG(INFO)
<< "The audio endpoint device supports the specified stream format";
@@ -943,7 +973,7 @@
// only be triggered for shared mode.
RTC_LOG(LS_WARNING)
<< "Exact format is not supported, but a closest match exists";
- RTC_LOG(INFO) << WaveFormatExToString(closest_match);
+ RTC_LOG(INFO) << WaveFormatToString(closest_match.Get());
} else if ((error.Error() == AUDCLNT_E_UNSUPPORTED_FORMAT) &&
(closest_match == nullptr)) {
// The audio engine does not support the caller-specified format or any
@@ -1381,31 +1411,34 @@
return true;
}
-std::string WaveFormatExToString(const WAVEFORMATEXTENSIBLE* format) {
- RTC_DCHECK_EQ(format->Format.wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+std::string WaveFormatToString(const WaveFormatWrapper format) {
char ss_buf[1024];
rtc::SimpleStringBuilder ss(ss_buf);
- ss.AppendFormat("wFormatTag: WAVE_FORMAT_EXTENSIBLE");
- ss.AppendFormat(", nChannels: %d", format->Format.nChannels);
- ss.AppendFormat(", nSamplesPerSec: %d", format->Format.nSamplesPerSec);
- ss.AppendFormat(", nAvgBytesPerSec: %d", format->Format.nAvgBytesPerSec);
- ss.AppendFormat(", nBlockAlign: %d", format->Format.nBlockAlign);
- ss.AppendFormat(", wBitsPerSample: %d", format->Format.wBitsPerSample);
- ss.AppendFormat(", cbSize: %d", format->Format.cbSize);
- ss.AppendFormat(", wValidBitsPerSample: %d",
- format->Samples.wValidBitsPerSample);
- ss.AppendFormat(", dwChannelMask: 0x%X", format->dwChannelMask);
- if (format->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) {
- ss << ", SubFormat: KSDATAFORMAT_SUBTYPE_PCM";
- } else if (format->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
- ss << ", SubFormat: KSDATAFORMAT_SUBTYPE_IEEE_FLOAT";
+ // Start with the WAVEFORMATEX part (which always exists).
+ ss.AppendFormat("wFormatTag: %s (0x%X)",
+ WaveFormatTagToString(format->wFormatTag),
+ format->wFormatTag);
+ ss.AppendFormat(", nChannels: %d", format->nChannels);
+ ss.AppendFormat(", nSamplesPerSec: %d", format->nSamplesPerSec);
+ ss.AppendFormat(", nAvgBytesPerSec: %d", format->nAvgBytesPerSec);
+ ss.AppendFormat(", nBlockAlign: %d", format->nBlockAlign);
+ ss.AppendFormat(", wBitsPerSample: %d", format->wBitsPerSample);
+ ss.AppendFormat(", cbSize: %d", format->cbSize);
+ if (!format.IsExtensible())
+ return ss.str();
+
+ // Append the WAVEFORMATEXTENSIBLE part (which we know exists).
+ ss.AppendFormat(
+ " [+] wValidBitsPerSample: %d, dwChannelMask: %s",
+ format.GetExtensible()->Samples.wValidBitsPerSample,
+ ChannelMaskToString(format.GetExtensible()->dwChannelMask).c_str());
+ if (format.IsPcm()) {
+ ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_PCM");
+ } else if (format.IsFloat()) {
+ ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_IEEE_FLOAT");
} else {
- ss << ", SubFormat: NOT_SUPPORTED";
+ ss.AppendFormat("%s", ", SubFormat: NOT_SUPPORTED");
}
- ss.AppendFormat("\nChannel configuration: %s",
- ChannelMaskToString(format->dwChannelMask).c_str());
- ss.AppendFormat("\nDirectSound configuration : %s",
- DirectSoundConfigToString(format->dwChannelMask));
return ss.str();
}
diff --git a/modules/audio_device/win/core_audio_utility_win.h b/modules/audio_device/win/core_audio_utility_win.h
index 6e2c85b..5a27edb 100644
--- a/modules/audio_device/win/core_audio_utility_win.h
+++ b/modules/audio_device/win/core_audio_utility_win.h
@@ -327,6 +327,32 @@
// These methods are based on media::CoreAudioUtil in Chrome.
namespace core_audio_utility {
+// Helper class which automates casting between WAVEFORMATEX and
+// WAVEFORMATEXTENSIBLE raw pointers using implicit constructors and
+// operator overloading. Note that no memory is allocated by this utility
+// class; it only serves as a handle (or wrapper) for the structure
+// provided to it at construction.
+class WaveFormatWrapper {
+ public:
+ WaveFormatWrapper(WAVEFORMATEXTENSIBLE* p)
+ : ptr_(reinterpret_cast<WAVEFORMATEX*>(p)) {}
+ WaveFormatWrapper(WAVEFORMATEX* p) : ptr_(p) {}
+ ~WaveFormatWrapper() = default;
+
+ operator WAVEFORMATEX*() const { return ptr_; }
+ WAVEFORMATEX* operator->() const { return ptr_; }
+ WAVEFORMATEX* get() const { return ptr_; }
+ WAVEFORMATEXTENSIBLE* GetExtensible() const;
+
+ bool IsExtensible() const;
+ bool IsPcm() const;
+ bool IsFloat() const;
+ size_t size() const;
+
+ private:
+ WAVEFORMATEX* ptr_;
+};
+
// Returns true if Windows Core Audio is supported.
// Always verify that this method returns true before using any of the
// other methods in this class.
@@ -576,8 +602,10 @@
// given by |render_client|.
bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
IAudioRenderClient* render_client);
-// Transforms a WAVEFORMATEXTENSIBLE struct to a human-readable string.
-std::string WaveFormatExToString(const WAVEFORMATEXTENSIBLE* format);
+
+// Returns a string containing all fields of the wave format structure in
+// |format|. Also supports the extended version (WAVEFORMATEXTENSIBLE).
+std::string WaveFormatToString(const WaveFormatWrapper format);
// Converts Windows internal REFERENCE_TIME (100 nanosecond units) into
// generic webrtc::TimeDelta which then can be converted to any time unit.
diff --git a/modules/audio_device/win/core_audio_utility_win_unittest.cc b/modules/audio_device/win/core_audio_utility_win_unittest.cc
index 2d423fe..52b647d 100644
--- a/modules/audio_device/win/core_audio_utility_win_unittest.cc
+++ b/modules/audio_device/win/core_audio_utility_win_unittest.cc
@@ -81,6 +81,68 @@
ScopedCOMInitializer com_init_;
};
+TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapper) {
+ // Use default constructor for WAVEFORMATEX and verify its size.
+ WAVEFORMATEX format = {};
+ core_audio_utility::WaveFormatWrapper wave_format(&format);
+ EXPECT_FALSE(wave_format.IsExtensible());
+ EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format->cbSize, 0);
+
+ // Ensure that the stand-alone WAVEFORMATEX structure has a valid format tag
+ // and that all accessors work.
+ format.wFormatTag = WAVE_FORMAT_PCM;
+ EXPECT_FALSE(wave_format.IsExtensible());
+ EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format.get()->wFormatTag, WAVE_FORMAT_PCM);
+ EXPECT_EQ(wave_format->wFormatTag, WAVE_FORMAT_PCM);
+
+ // Next, ensure that the size is valid. Stand-alone is not extended.
+ EXPECT_EQ(wave_format.size(), sizeof(WAVEFORMATEX));
+
+ // Verify format types for the stand-alone version.
+ EXPECT_TRUE(wave_format.IsPcm());
+ EXPECT_FALSE(wave_format.IsFloat());
+ format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
+ EXPECT_TRUE(wave_format.IsFloat());
+}
+
+TEST_F(CoreAudioUtilityWinTest, WaveFormatWrapperExtended) {
+ // Use default constructor for WAVEFORMATEXTENSIBLE and verify that it
+ // results in the same size as for WAVEFORMATEX even if the size of |format_ex|
+ // equals the size of WAVEFORMATEXTENSIBLE.
+ WAVEFORMATEXTENSIBLE format_ex = {};
+ core_audio_utility::WaveFormatWrapper wave_format_ex(&format_ex);
+ EXPECT_FALSE(wave_format_ex.IsExtensible());
+ EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format_ex->cbSize, 0);
+
+ // Ensure that the extended structure has a valid format tag and that all
+ // accessors work.
+ format_ex.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ EXPECT_FALSE(wave_format_ex.IsExtensible());
+ EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEX));
+ EXPECT_EQ(wave_format_ex->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ EXPECT_EQ(wave_format_ex.get()->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+
+ // Next, ensure that the size is valid (sum of stand-alone and extended).
+ // Now the structure qualifies as extended.
+ format_ex.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+ EXPECT_TRUE(wave_format_ex.IsExtensible());
+ EXPECT_EQ(wave_format_ex.size(), sizeof(WAVEFORMATEXTENSIBLE));
+ EXPECT_TRUE(wave_format_ex.GetExtensible());
+ EXPECT_EQ(wave_format_ex.GetExtensible()->Format.wFormatTag,
+ WAVE_FORMAT_EXTENSIBLE);
+
+ // Verify format types for the extended version.
+ EXPECT_FALSE(wave_format_ex.IsPcm());
+ format_ex.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ EXPECT_TRUE(wave_format_ex.IsPcm());
+ EXPECT_FALSE(wave_format_ex.IsFloat());
+ format_ex.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ EXPECT_TRUE(wave_format_ex.IsFloat());
+}
+
TEST_F(CoreAudioUtilityWinTest, NumberOfActiveDevices) {
ABORT_TEST_IF_NOT(DevicesAvailable());
int render_devices = core_audio_utility::NumberOfActiveDevices(eRender);
@@ -438,14 +500,20 @@
EXPECT_TRUE(client.Get());
// Perform a simple sanity test of the acquired format structure.
- WAVEFORMATPCMEX format;
+ WAVEFORMATEXTENSIBLE format;
EXPECT_TRUE(SUCCEEDED(
core_audio_utility::GetSharedModeMixFormat(client.Get(), &format)));
- EXPECT_GE(format.Format.nChannels, 1);
- EXPECT_GE(format.Format.nSamplesPerSec, 8000u);
- EXPECT_GE(format.Format.wBitsPerSample, 16);
- EXPECT_GE(format.Samples.wValidBitsPerSample, 16);
- EXPECT_EQ(format.Format.wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ core_audio_utility::WaveFormatWrapper wformat(&format);
+ EXPECT_GE(wformat->nChannels, 1);
+ EXPECT_GE(wformat->nSamplesPerSec, 8000u);
+ EXPECT_GE(wformat->wBitsPerSample, 16);
+ if (wformat.IsExtensible()) {
+ EXPECT_EQ(wformat->wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+ EXPECT_GE(wformat->cbSize, 22);
+ EXPECT_GE(wformat.GetExtensible()->Samples.wValidBitsPerSample, 16);
+ } else {
+ EXPECT_EQ(wformat->cbSize, 0);
+ }
}
TEST_F(CoreAudioUtilityWinTest, IsFormatSupported) {