WASAPI: recording the microphone and recording the speaker (loopback)

The code is not original. This first part captures the microphone; the key points are marked in the listing, and the sources are linked below:

https://blog.csdn.net/su_vast/article/details/78318999?utm_source=blogxgwz3
https://blog.csdn.net/lcalqf/article/details/52982550
-------------------------------------------------------------------------------
#include "stdafx.h" #include <MMDeviceAPI.h> #include <AudioClient.h> #include <AudioPolicy.h> #define MAX_AUDIO_FRAME_SIZE 192000 template <class T> void SafeRelease(T **ppT) { if (*ppT) { (*ppT)->Release(); *ppT = NULL; } } int _tmain(int argc, _TCHAR* argv[]) { IAudioClient * _AudioClient; IAudioCaptureClient *_CaptureClient; IMMDevice * _Device; IMMDeviceEnumerator *deviceEnumerator = NULL; HANDLE _AudioSamplesReadyEvent=NULL; HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED); hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&deviceEnumerator)); if (FAILED(hr)) { printf("Unable to retrieve CoCreateInstance %x ", hr); goto Exit; } //这里可以调用EnumAudioEndpoints选择使用其它设备 hr = deviceEnumerator->GetDefaultAudioEndpoint(eCapture,eMultimedia,&_Device); if (FAILED(hr)) { printf("Unable to retrieve device %x ", hr); goto Exit; } SafeRelease(&deviceEnumerator); _Device->AddRef(); // Since we're holding a copy of the endpoint, take a reference to it. It'll be released in Shutdown(); _AudioSamplesReadyEvent = CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE); if (_AudioSamplesReadyEvent == NULL) { printf("Unable to create samples ready event: %d. ", GetLastError()); return false; } hr = _Device->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, reinterpret_cast<void **>(&_AudioClient)); if (FAILED(hr)) { printf("Unable to activate audio client: %x. ", hr); return false; } WAVEFORMATEX * _MixFormat; UINT32 _BufferSize; hr = _AudioClient->GetMixFormat(&_MixFormat); if (FAILED(hr)) { printf("Unable to get mix format on audio client: %x. ", hr); return false; } size_t _FrameSize = (_MixFormat->wBitsPerSample / 8) * _MixFormat->nChannels; //InitializeAudioEngine hr = _AudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST, 20*10000, 0, _MixFormat, NULL); if (FAILED(hr)) { printf("Unable to initialize audio client: %x. ", hr); return false; } // // Retrieve the buffer size for the audio client. // hr = _AudioClient->GetBufferSize(&_BufferSize); if(FAILED(hr)) { printf("Unable to get audio client buffer: %x. ", hr); return false; } hr = _AudioClient->SetEventHandle(_AudioSamplesReadyEvent); if (FAILED(hr)) { printf("Unable to set ready event: %x. ", hr); return false; } hr = _AudioClient->GetService(IID_PPV_ARGS(&_CaptureClient)); if (FAILED(hr)) { printf("Unable to get new capture client: %x. ", hr); return false; } //开始采集 hr = _AudioClient->Start(); if (FAILED(hr)) { printf("Unable to get new capture client: %x. ", hr); return false; } bool stillPlaying = true; while (stillPlaying) { DWORD waitResult = WaitForSingleObject( _AudioSamplesReadyEvent, INFINITE); BYTE *pData,*pBuffer; INT nBufferLenght; UINT32 framesAvailable; DWORD flags; pBuffer=new BYTE[MAX_AUDIO_FRAME_SIZE]; hr = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, NULL); if (SUCCEEDED(hr)) { if (framesAvailable!=0) { if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { // // Fill 0s from the capture buffer to the output buffer. // } else { // // Copy data from the audio engine buffer to the output buffer. // CopyMemory(pBuffer,pData,framesAvailable*_FrameSize); printf("get capture frames: %d! ", framesAvailable); } } delete[] pBuffer; hr = _CaptureClient->ReleaseBuffer(framesAvailable); if (FAILED(hr)) { printf("Unable to release capture buffer: %x! ", hr); } } } Exit: return 0; }
Capturing the speaker (internal / loopback recording). Compared with the microphone version, the key changes are opening the default eRender endpoint and passing AUDCLNT_STREAMFLAGS_LOOPBACK to Initialize:
#include "stdafx.h" #include <MMDeviceAPI.h> #include <AudioClient.h> #include <AudioPolicy.h> #define MAX_AUDIO_FRAME_SIZE 192000 template <class T> void SafeRelease(T **ppT) { if (*ppT) { (*ppT)->Release(); *ppT = NULL; } } int _tmain(int argc, _TCHAR* argv[]) { IAudioClient * _AudioClient; IAudioCaptureClient *_CaptureClient; IMMDevice * _Device; IMMDeviceEnumerator *deviceEnumerator = NULL; HANDLE _AudioSamplesReadyEvent=NULL; HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED); hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, IID_PPV_ARGS(&deviceEnumerator)); if (FAILED(hr)) { printf("Unable to retrieve CoCreateInstance %x ", hr); goto Exit; } //这里可以调用EnumAudioEndpoints选择使用其它设备 hr = deviceEnumerator->GetDefaultAudioEndpoint(eRENDER,eMultimedia,&_Device); if (FAILED(hr)) { printf("Unable to retrieve device %x ", hr); goto Exit; } SafeRelease(&deviceEnumerator); _Device->AddRef(); // Since we're holding a copy of the endpoint, take a reference to it. It'll be released in Shutdown(); _AudioSamplesReadyEvent = CreateEventEx(NULL, NULL, 0, EVENT_MODIFY_STATE | SYNCHRONIZE); if (_AudioSamplesReadyEvent == NULL) { printf("Unable to create samples ready event: %d. ", GetLastError()); return false; } hr = _Device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, reinterpret_cast<void **>(&_AudioClient)); if (FAILED(hr)) { printf("Unable to activate audio client: %x. ", hr); return false; } WAVEFORMATEX * _MixFormat; UINT32 _BufferSize; hr = _AudioClient->GetMixFormat(&_MixFormat); if (FAILED(hr)) { printf("Unable to get mix format on audio client: %x. ", hr); return false; } size_t _FrameSize = (_MixFormat->wBitsPerSample / 8) * _MixFormat->nChannels; //InitializeAudioEngine hr = _AudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 20*10000, 0, _MixFormat, NULL); if (FAILED(hr)) { printf("Unable to initialize audio client: %x. ", hr); return false; } // // Retrieve the buffer size for the audio client. // hr = _AudioClient->GetBufferSize(&_BufferSize); if(FAILED(hr)) { printf("Unable to get audio client buffer: %x. ", hr); return false; }
/*不需要 hr
= _AudioClient->SetEventHandle(_AudioSamplesReadyEvent); if (FAILED(hr)) { printf("Unable to set ready event: %x. ", hr); return false; }*/ hr = _AudioClient->GetService(IID_PPV_ARGS(&_CaptureClient)); if (FAILED(hr)) { printf("Unable to get new capture client: %x. ", hr); return false; } //开始采集 hr = _AudioClient->Start(); if (FAILED(hr)) { printf("Unable to get new capture client: %x. ", hr); return false; } bool stillPlaying = true; while (stillPlaying) { // 内部录制不需要 DWORD waitResult = WaitForSingleObject( _AudioSamplesReadyEvent, INFINITE); //注意存在一次拿不干净的情况,尤其是上头有阻塞。
//以及某些机器,没有内部声音的时候,拿到的是全0 ,但有些机器是拿不到任何数据的 BYTE
*pData,*pBuffer; INT nBufferLenght; UINT32 framesAvailable; DWORD flags; pBuffer=new BYTE[MAX_AUDIO_FRAME_SIZE]; hr = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, NULL); if (SUCCEEDED(hr)) { if (framesAvailable!=0) { if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { // // Fill 0s from the capture buffer to the output buffer. // } else { // // Copy data from the audio engine buffer to the output buffer. // CopyMemory(pBuffer,pData,framesAvailable*_FrameSize); printf("get capture frames: %d! ", framesAvailable); } } delete[] pBuffer; hr = _CaptureClient->ReleaseBuffer(framesAvailable); if (FAILED(hr)) { printf("Unable to release capture buffer: %x! ", hr); } } } Exit: return 0; }
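The note about one GetBuffer call not draining everything can be handled with IAudioCaptureClient::GetNextPacketSize: keep reading packets until the engine buffer reports empty, then sleep and poll again. A hedged sketch reusing _CaptureClient and _FrameSize from the listing above (the consume step is left as a placeholder):

UINT32 packetFrames = 0;
HRESULT hr2 = _CaptureClient->GetNextPacketSize(&packetFrames);
while (SUCCEEDED(hr2) && packetFrames != 0)
{
    BYTE  *pData = NULL;
    UINT32 framesAvailable = 0;
    DWORD  flags = 0;
    hr2 = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, NULL);
    if (FAILED(hr2))
        break;
    if (!(flags & AUDCLNT_BUFFERFLAGS_SILENT))
    {
        // consume framesAvailable * _FrameSize bytes starting at pData here
    }
    _CaptureClient->ReleaseBuffer(framesAvailable);
    hr2 = _CaptureClient->GetNextPacketSize(&packetFrames);
}
Sleep(10);    // engine buffer drained, wait a little before polling again

This also smooths over the per-machine difference mentioned above: when nothing is playing, machines that deliver no data simply report zero packets, while machines that deliver silence return packets of zeros (possibly flagged with AUDCLNT_BUFFERFLAGS_SILENT).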
Original post: https://www.cnblogs.com/8335IT/p/15269670.html