win32

For reference only.

Documentation: Rendering a Stream

Code example:

#include <cstdio>
#include <Windows.h>

// Windows multimedia device
#include <Mmdeviceapi.h>
#include <Functiondiscoverykeys_devpkey.h>

// WASAPI
#include <Audiopolicy.h>
#include <Audioclient.h>

#include <random>
#include <intrin.h> // __rdtsc(), used below to seed the random engine


class Noise_Gen {
public:
    // Seed the PRNG with the CPU time-stamp counter; float samples are drawn from [-1, 1).
    Noise_Gen() : format(), engine(__rdtsc()), float_dist(-1.f, 1.f) {}

    // Record the endpoint's mix format. A plain WAVEFORMATEX is wrapped in a
    // WAVEFORMATEXTENSIBLE so that FillBuffer can always inspect the sub-format.
    // (INIT_WAVEFORMATEX_GUID and EXTRACT_WAVEFORMATEX_ID come from ksmedia.h.)
    void SetFormat(WAVEFORMATEX* wfex) {
        if (wfex->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
            format = *reinterpret_cast<WAVEFORMATEXTENSIBLE*>(wfex);
        }
        else {
            format.Format = *wfex;
            format.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
            INIT_WAVEFORMATEX_GUID(&format.SubFormat, wfex->wFormatTag);
            format.Samples.wValidBitsPerSample = format.Format.wBitsPerSample;
            format.dwChannelMask = 0;
        }
    }

    // Fill bufferFrameCount frames of the endpoint buffer with noise.
    // (One audio frame = nChannels samples = nBlockAlign = nChannels * wBitsPerSample / 8 bytes.)
    void FillBuffer(UINT32 bufferFrameCount, BYTE* pData, DWORD* flags) {
        const UINT16 formatTag = EXTRACT_WAVEFORMATEX_ID(&format.SubFormat);
        if (formatTag == WAVE_FORMAT_IEEE_FLOAT) {
            float* fData = (float*)pData;
            for (UINT32 i = 0; i < format.Format.nChannels * bufferFrameCount; i++) {
                fData[i] = float_dist(engine);
            }
        }
        else if (formatTag == WAVE_FORMAT_PCM) {
            using rndT = decltype(engine)::result_type;
            UINT32 iterations = format.Format.nBlockAlign * bufferFrameCount / sizeof(rndT);
            UINT32 leftoverBytes = format.Format.nBlockAlign * bufferFrameCount % sizeof(rndT);
            rndT* iData = (rndT*)pData;
            UINT32 i = 0;
            for (; i < iterations; i++) {
                iData[i] = engine();
            }
            if (leftoverBytes != 0) {
                rndT lastRnd = engine();
                BYTE* pLastBytes = pData + i * sizeof(rndT);
                for (UINT32 j = 0; j < leftoverBytes; ++j) {
                    pLastBytes[j] = lastRnd >> (j * 8) & 0xFF;
                }
            }
        }
        else {
            //memset(pData, 0, wfex.Format.nBlockAlign * bufferFrameCount);
            *flags = AUDCLNT_BUFFERFLAGS_SILENT;
        }
    }

private:
    WAVEFORMATEXTENSIBLE format;

    std::mt19937_64 engine;
    std::uniform_real_distribution<float> float_dist;
};

// REFERENCE_TIME time units per second and per millisecond
#define REFTIMES_PER_SEC  10000000ll
#define REFTIMES_PER_MILLISEC  10000

#define EXIT_ON_ERROR(hres)  \
              if (FAILED(hres)) { goto Exit; }
#define SAFE_RELEASE(punk)  \
              if ((punk) != NULL)  \
                { (punk)->Release(); (punk) = NULL; }

HRESULT PlayAudioStream(Noise_Gen* pMySource) {
    HRESULT hr;
    REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC;
    REFERENCE_TIME hnsActualDuration;
    IMMDeviceEnumerator* pEnumerator = NULL;
    IPropertyStore* pPropertyStore = NULL;
    IMMDevice* pDevice = NULL;
    IAudioClient* pAudioClient = NULL;
    IAudioRenderClient* pRenderClient = NULL;
    WAVEFORMATEX* pwfx = NULL;
    UINT32 bufferFrameCount;
    BYTE* pData;
    DWORD flags = 0;
    PROPVARIANT name;

    hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL,
        CLSCTX_ALL, IID_PPV_ARGS(&pEnumerator));
    EXIT_ON_ERROR(hr);
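    // Get the default audio render endpoint for the console role.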
    hr = pEnumerator->GetDefaultAudioEndpoint(
        eRender, eConsole, &pDevice);
    EXIT_ON_ERROR(hr);

    hr = pDevice->OpenPropertyStore(STGM_READ, &pPropertyStore);
    EXIT_ON_ERROR(hr);
    PropVariantInit(&name);
    hr = pPropertyStore->GetValue(PKEY_Device_FriendlyName, &name);
    EXIT_ON_ERROR(hr);
    printf("%S", name.pwszVal);
    printf("
");
    hr = pDevice->Activate(__uuidof(pAudioClient), CLSCTX_ALL,
        NULL, (void**)&pAudioClient);
    EXIT_ON_ERROR(hr);
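    // Query the engine's shared-mode mix format; the buffer must be filled in this format.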
    hr = pAudioClient->GetMixFormat(&pwfx);
    EXIT_ON_ERROR(hr);

    hr = pAudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED,
        0, hnsRequestedDuration,
        0, pwfx, NULL);
    EXIT_ON_ERROR(hr);
    // Tell the audio source which format to use.
    pMySource->SetFormat(pwfx);
    // Get the actual size of the allocated buffer.
    hr = pAudioClient->GetBufferSize(&bufferFrameCount);
    EXIT_ON_ERROR(hr);
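    // Get the render client used to write into the endpoint buffer.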
    hr = pAudioClient->GetService(IID_PPV_ARGS(&pRenderClient));
    EXIT_ON_ERROR(hr);
    // Grab the entire buffer for the initial fill operation.
    hr = pRenderClient->GetBuffer(bufferFrameCount, &pData);
    EXIT_ON_ERROR(hr);

    // Load the initial data into the shared buffer.
    pMySource->FillBuffer(bufferFrameCount, pData, &flags);

    hr = pRenderClient->ReleaseBuffer(bufferFrameCount, flags);
    EXIT_ON_ERROR(hr);
    // Calculate the actual duration of the allocated buffer.
    hnsActualDuration = REFTIMES_PER_SEC * bufferFrameCount / pwfx->nSamplesPerSec;
    hr = pAudioClient->Start();  // Start playing.
    EXIT_ON_ERROR(hr);
    // Each loop fills about half of the shared buffer. Noise_Gen only reports
    // AUDCLNT_BUFFERFLAGS_SILENT for unsupported formats, so with a float or PCM
    // mix format this loop keeps playing noise until the process is terminated.
    DWORD sleepTime;
    while (flags != AUDCLNT_BUFFERFLAGS_SILENT) {
        // Sleep for half the buffer duration.
        sleepTime = (DWORD)(hnsActualDuration / REFTIMES_PER_MILLISEC / 2);
        if (sleepTime != 0)
            Sleep(sleepTime);
        // See how much buffer space is available.
        UINT32 numFramesPadding;
        hr = pAudioClient->GetCurrentPadding(&numFramesPadding);
        EXIT_ON_ERROR(hr);

        UINT32 numFramesAvailable = bufferFrameCount - numFramesPadding;
        // Grab all the available space in the shared buffer.
        hr = pRenderClient->GetBuffer(numFramesAvailable, &pData);
        EXIT_ON_ERROR(hr);

        // Get next 1/2-second of data from the audio source.
        pMySource->FillBuffer(numFramesAvailable, pData, &flags);

        hr = pRenderClient->ReleaseBuffer(numFramesAvailable, flags);
        EXIT_ON_ERROR(hr);
    }
    // Wait for last data in buffer to play before stopping.
    sleepTime = (DWORD)(hnsActualDuration / REFTIMES_PER_MILLISEC / 2);
    if (sleepTime != 0)
        Sleep(sleepTime);
    hr = pAudioClient->Stop();  // Stop playing.
    EXIT_ON_ERROR(hr);

Exit:
    CoTaskMemFree(pwfx);
    SAFE_RELEASE(pRenderClient);
    SAFE_RELEASE(pAudioClient);
    SAFE_RELEASE(pDevice);
    SAFE_RELEASE(pPropertyStore); // don't forget to release the property store
    SAFE_RELEASE(pEnumerator);
    return hr;
}

int main() {
    HRESULT hr = CoInitialize(nullptr);
    if (FAILED(hr)) { return hr; }

    Noise_Gen ng;
    PlayAudioStream(&ng);

    CoUninitialize();
}

Result: noise is played on the default render device.
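
As a side note, the Noise_Gen helper can be exercised on its own, without opening an audio device. The sketch below is an addition for illustration only; the function name and the WAVEFORMATEX values are assumptions, not part of the original sample:

#include <vector>

// Fill a small local buffer with the same noise the WASAPI loop would receive.
void FillBufferSmokeTest() {
    WAVEFORMATEX wfex = {};
    wfex.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
    wfex.nChannels = 2;
    wfex.nSamplesPerSec = 48000;
    wfex.wBitsPerSample = 32;
    wfex.nBlockAlign = (WORD)(wfex.nChannels * wfex.wBitsPerSample / 8);
    wfex.nAvgBytesPerSec = wfex.nSamplesPerSec * wfex.nBlockAlign;

    Noise_Gen ng;
    ng.SetFormat(&wfex);                            // stored internally as WAVEFORMATEXTENSIBLE

    const UINT32 frames = 480;                      // 10 ms at 48 kHz
    std::vector<BYTE> buffer(frames * wfex.nBlockAlign);
    DWORD flags = 0;
    ng.FillBuffer(frames, buffer.data(), &flags);   // interleaved float samples in [-1, 1)
}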

External link: Filling audio endpoint buffer provided by WASAPI not playing sound

Original post: https://www.cnblogs.com/strive-sun/p/14178722.html