AvSetMmThreadCharacteristicsW for UWP

I'm developing a WASAPI UWP audio application with C++/WinRT; it needs to capture audio from an input device and, after processing, send it to an output device.

I'd like to set my audio thread characteristics with AvSetMmThreadCharacteristicsW(L"Pro Audio", &taskIndex), but I just noticed that this function (and most of avrt.h) is restricted to WINAPI_PARTITION_DESKTOP and WINAPI_PARTITION_GAMES.
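
For reference, the desktop-only pattern I mean looks roughly like this (a sketch of the avrt.h calls; it needs Avrt.lib and only builds when targeting the desktop/games partitions):

#include <avrt.h>   // desktop/games partitions only

void PromoteToProAudio()
{
    DWORD taskIndex = 0;

    // Register the current thread with MMCSS under the "Pro Audio" task.
    HANDLE mmcss = AvSetMmThreadCharacteristicsW(L"Pro Audio", &taskIndex);
    if (!mmcss)
    {
        // A null handle means failure; GetLastError() has the reason.
    }

    // ... real-time audio work ...

    if (mmcss)
    {
        AvRevertMmThreadCharacteristics(mmcss);
    }
}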

I think I need this because, when my code is integrated into my UWP app, the audio input is full of discontinuities, while my test code using avrt has no such issue.

Is there another way to configure my audio processing thread?
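
The only partition-friendly knob I can think of is plain thread priority, something like the sketch below, but I don't know whether it is an adequate replacement for MMCSS:

#include <windows.h>

void RaiseAudioThreadPriority()
{
    // SetThreadPriority is not restricted to the desktop/games partitions.
    if (!SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL))
    {
        // GetLastError() has the reason; THREAD_PRIORITY_HIGHEST would be a fallback.
    }
}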


Edit: here is my test program: https://github.com/loics2/test-wasapi. The interesting part happens in the AudioStream class. I can't share my UWP app, but I can copy these classes into a Windows Runtime component.


Edit 2: here is the audio thread code:

void AudioStream::StreamWorker()
    {
        WAVEFORMATEX* captureFormat = nullptr;
        WAVEFORMATEX* renderFormat = nullptr;

        RingBuffer<float> captureBuffer;
        RingBuffer<float> renderBuffer;

        BYTE* streamBuffer = nullptr;
        unsigned int streamBufferSize = 0;
        unsigned int bufferFrameCount = 0;
        unsigned int numFramesPadding = 0;
        unsigned int inputBufferSize = 0;
        unsigned int outputBufferSize = 0;
        DWORD captureFlags = 0;

        winrt::hresult hr = S_OK;

        // m_inputClient is a winrt::com_ptr<IAudioClient3>
        if (m_inputClient) {

            hr = m_inputClient->GetMixFormat(&captureFormat);
            
            // m_audioCaptureClient is a winrt::com_ptr<IAudioCaptureClient>
            if (!m_audioCaptureClient) {
                hr = m_inputClient->Initialize(
                    AUDCLNT_SHAREMODE_SHARED,
                    AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                    0, 
                    0,
                    captureFormat,
                    nullptr);
               
                hr = m_inputClient->GetService(__uuidof(IAudioCaptureClient), m_audioCaptureClient.put_void());
                hr = m_inputClient->SetEventHandle(m_inputReadyEvent.get());
                hr = m_inputClient->Reset();
                hr = m_inputClient->Start();
            }
        }

        hr = m_inputClient->GetBufferSize(&inputBufferSize);

        // multiplying the buffer size by the number of channels
        inputBufferSize *= 2;

        // m_outputClient is a winrt::com_ptr<IAudioClient3>
        if (m_outputClient) {
            hr = m_outputClient->GetMixFormat(&renderFormat);

            // m_audioRenderClient is a winrt::com_ptr<IAudioRenderClient>
            if (!m_audioRenderClient) {
                hr = m_outputClient->Initialize(
                    AUDCLNT_SHAREMODE_SHARED,
                    AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                    0,
                    0,
                    captureFormat,
                    nullptr);
                hr = m_outputClient->GetService(__uuidof(IAudioRenderClient), m_audioRenderClient.put_void());
                hr = m_outputClient->SetEventHandle(m_outputReadyEvent.get());

                hr = m_outputClient->Reset();
                hr = m_outputClient->Start();
            }
        }

        hr = m_outputClient->GetBufferSize(&outputBufferSize);

        // multiplying the buffer size by the number of channels
        outputBufferSize *= 2;

        while (m_isRunning)
        {
            // ===== INPUT =====

            // waiting for the capture event
            WaitForSingleObject(m_inputReadyEvent.get(), INFINITE);

            // getting the input buffer data
            hr = m_audioCaptureClient->GetNextPacketSize(&bufferFrameCount);

            while (SUCCEEDED(hr) && bufferFrameCount > 0) {
                m_audioCaptureClient->GetBuffer(&streamBuffer, &bufferFrameCount, &captureFlags, nullptr, nullptr);
                if (bufferFrameCount != 0) {
                    captureBuffer.write(reinterpret_cast<float*>(streamBuffer), bufferFrameCount * 2);

                    hr = m_audioCaptureClient->ReleaseBuffer(bufferFrameCount);
                    if (FAILED(hr)) {
                        m_audioCaptureClient->ReleaseBuffer(0);
                    }
                }
                else
                {
                    m_audioCaptureClient->ReleaseBuffer(0);
                }

                hr = m_audioCaptureClient->GetNextPacketSize(&bufferFrameCount);
            }

            // ===== CALLBACK =====

            auto size = captureBuffer.size();
            float* userInputData = (float*)calloc(size, sizeof(float));
            float* userOutputData = (float*)calloc(size, sizeof(float));
            captureBuffer.read(userInputData, size);

            OnData(userInputData, userOutputData, size / 2, 2, 48000);

            renderBuffer.write(userOutputData, size);

            free(userInputData);
            free(userOutputData);

            // ===== OUTPUT =====

            // waiting for the render event
            WaitForSingleObject(m_outputReadyEvent.get(), INFINITE);

            // getting information about the output buffer
            hr = m_outputClient->GetBufferSize(&bufferFrameCount);
            hr = m_outputClient->GetCurrentPadding(&numFramesPadding);

            // adjust the frame count with the padding
            bufferFrameCount -= numFramesPadding;

            if (bufferFrameCount != 0) {
                hr = m_audioRenderClient->GetBuffer(bufferFrameCount, &streamBuffer);

                auto count = (bufferFrameCount * 2);
                if (renderBuffer.read(reinterpret_cast<float*>(streamBuffer), count) < count) {
                    // renderBuffer is not full enough, we should fill the remainder with 0
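                    // e.g. (sketch, using the float count that read() returned):
                    //   auto written = renderBuffer.read(reinterpret_cast<float*>(streamBuffer), count);
                    //   std::fill(reinterpret_cast<float*>(streamBuffer) + written,
                    //             reinterpret_cast<float*>(streamBuffer) + count, 0.0f);   // needs <algorithm>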
                }

                hr = m_audioRenderClient->ReleaseBuffer(bufferFrameCount, 0);
                if (FAILED(hr)) {
                    m_audioRenderClient->ReleaseBuffer(0, 0);
                }
            }
            else
            {
                m_audioRenderClient->ReleaseBuffer(0, 0);
            }
        }

    exit:
        // Cleanup code

    }

For clarity, I removed the error handling code, most of which is:

if (FAILED(hr)) 
    goto exit;

@IInspectable was right, something was wrong with my code: the audio processing is done by a library, which then calls a callback function with some results.

In my callback, I try to raise a winrt::event, but it sometimes takes more than 50 ms. When that happens, it blocks the audio thread and creates the discontinuities...
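
One way to keep that work off the audio thread is to have the callback only push its results into a queue and raise the winrt::event from a separate worker thread. A rough sketch of that idea (the ResultQueue helper below is made up for illustration, it is not part of my code):

#include <condition_variable>
#include <mutex>
#include <queue>
#include <utility>
#include <vector>

// Illustrative helper: the audio callback calls Push(), a worker thread calls Pop()
// and raises the winrt::event, so a slow event handler never stalls StreamWorker().
class ResultQueue
{
public:
    void Push(std::vector<float> block)
    {
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_blocks.push(std::move(block));
        }
        m_cv.notify_one();   // only a brief lock on the audio thread, no handlers run here
    }

    std::vector<float> Pop()
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_cv.wait(lock, [this] { return !m_blocks.empty(); });
        auto block = std::move(m_blocks.front());
        m_blocks.pop();
        return block;
    }

private:
    std::mutex m_mutex;
    std::condition_variable m_cv;
    std::queue<std::vector<float>> m_blocks;
};

The worker loop would then raise the event with whatever Pop() returns, so even a 50 ms handler runs there instead of inside the WASAPI loop.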