UE4: Capturing System Audio with WASAPI
Multi-threading + WASAPI to capture the system audio output:
#pragma once
#include "CoreMinimal.h"
//MultiThread
#include "Runnable.h"
#include "RunnableThread.h"
#include "ThreadSafeCounter.h"
#include "ThreadSafeBool.h"
struct IAudioClient;
struct IAudioCaptureClient;
typedef LONGLONG REFERENCE_TIME;
typedef struct tWAVEFORMATEX WAVEFORMATEX;
class TESTJIGOU_API AudioStreamThread : public FRunnable
{
public:
AudioStreamThread();
~AudioStreamThread();
// Pause the thread
void PauseThread();
// Resume the thread
void ContinueThread();
// Stop the thread and wait for it to finish
void StopThread();
bool IsThreadPaused();
bool IsThreadKilled();
// Hand captured audio data to Zego: copies dataLen bytes and removes them from the buffer
void CopyData2Zego(unsigned char* data, int dataLen);
public:
// Mix format of the render endpoint (forced to 16-bit PCM in SetWaveFormat)
WAVEFORMATEX* m_Format;
// Captured audio bytes, shared between the capture thread and the consumer
TArray<uint8> m_audioData;
private:
FRunnableThread* Thread;
FThreadSafeCounter StopTaskCounter;
FCriticalSection m_mutex;
private:
int curIndex;
//WASAPI Variable
IAudioClient* m_Client;
IAudioCaptureClient* m_Capture;
// Device period reported by WASAPI, in 100-nanosecond REFERENCE_TIME units
REFERENCE_TIME m_WaitTime;
//WASAPI Function
void SetWaveFormat(WAVEFORMATEX& format);
bool InitLoopBack();
void AudioStreamLoop();
void StopAudioStream();
private:
//override function
virtual bool Init() override;
virtual uint32 Run() override;
virtual void Stop() override;
virtual void Exit() override;
private:
FThreadSafeBool m_Kill;
FThreadSafeBool m_Pause;
};
#include "AudioStreamThread.h"
#include "WindowsPlatformProcess.h"
//WASAPI
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <ks.h>
#include <ksmedia.h> // KSDATAFORMAT_SUBTYPE_* GUIDs used in SetWaveFormat()
AudioStreamThread::AudioStreamThread()
{
m_Kill = false;
m_Pause = false;
m_Client = nullptr;
m_Capture = nullptr;
m_Format = nullptr;
curIndex = 0;
Thread = FRunnableThread::Create(this, TEXT("AudioStreamThread"), 0, TPri_Normal);
}
AudioStreamThread::~AudioStreamThread()
{
if (Thread)
{
delete Thread;
Thread = nullptr;
}
}
void AudioStreamThread::PauseThread()
{
m_Pause = true;
}
void AudioStreamThread::ContinueThread()
{
m_Pause = false;
}
void AudioStreamThread::StopThread()
{
Stop();
if (Thread)
{
Thread->WaitForCompletion();
}
}
bool AudioStreamThread::IsThreadPaused()
{
return (bool)m_Pause;
}
bool AudioStreamThread::IsThreadKilled()
{
return (bool)m_Kill;
}
void AudioStreamThread::CopyData2Zego(unsigned char* data, int dataLen)
{
m_mutex.Lock();
if (m_audioData.Num() >= dataLen)
{
memcpy(data, m_audioData.GetData(), dataLen);
//curIndex += dataLen;
m_audioData.RemoveAt(0, dataLen);
}
m_mutex.Unlock();
}
void AudioStreamThread::SetWaveFormat(WAVEFORMATEX& format)
{
// The shared-mode mix format is usually 32-bit float; convert it to 16-bit PCM
if (format.wFormatTag == WAVE_FORMAT_IEEE_FLOAT)
{
format.wFormatTag = WAVE_FORMAT_PCM;
}
else if (format.wFormatTag == WAVE_FORMAT_EXTENSIBLE)
{
PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(&format);
if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
{
pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
pEx->Samples.wValidBitsPerSample = 16;
}
}
format.wBitsPerSample = 16;
format.nBlockAlign = format.nChannels * format.wBitsPerSample / 8;
format.nAvgBytesPerSec = format.nBlockAlign * format.nSamplesPerSec;
}
bool AudioStreamThread::InitLoopBack()
{
// Create the device enumerator and get the default render (playback) endpoint
IMMDeviceEnumerator* enumerator;
HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, __uuidof(IMMDeviceEnumerator), reinterpret_cast<void**>(&enumerator));
IMMDevice* device;
hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
hr = device->Activate(__uuidof(IAudioClient), CLSCTX_SERVER, NULL, reinterpret_cast<void**>(&m_Client));
hr = m_Client->GetMixFormat(&m_Format);
SetWaveFormat(*m_Format);
// Shared-mode loopback: capture whatever is being rendered to this endpoint
hr = m_Client->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, m_Format, NULL);
hr = m_Client->GetService(__uuidof(IAudioCaptureClient), reinterpret_cast<void**>(&m_Capture));
hr = m_Client->GetDevicePeriod(&m_WaitTime, NULL);
hr = m_Client->Start();
if (enumerator != nullptr)
{
enumerator->Release();
}
if (device != nullptr)
{
device->Release();
}
return SUCCEEDED(hr);
}
void AudioStreamThread::AudioStreamLoop()
{
BYTE* data = nullptr;
UINT32 size;
DWORD flags;
UINT64 device, performance;
HRESULT hr = m_Capture->GetNextPacketSize(&size);
hr = m_Capture->GetBuffer(&data, &size, &flags, &device, &performance);
// GetBuffer reports the packet size in frames; convert to bytes before copying
int byteWrite = size * m_Format->nBlockAlign;
if (data != nullptr)
{
m_audioData.Append(data, byteWrite);
}
uint8_t* formatData = (uint8_t*)data;
hr = m_Capture->ReleaseBuffer(size);
}
void AudioStreamThread::StopAudioStream()
{
// Release the WASAPI interfaces acquired in InitLoopBack()
if (m_Capture) { m_Capture->Release(); m_Capture = nullptr; }
if (m_Client) { m_Client->Stop(); m_Client->Release(); m_Client = nullptr; }
}
bool AudioStreamThread::Init()
{
return InitLoopBack();
}
uint32 AudioStreamThread::Run()
{
FPlatformProcess::Sleep(0.03);
while (StopTaskCounter.GetValue() == 0 && !m_Kill)
{
if (m_Pause)
{
if (m_Kill)
{
return 0;
}
// Don't busy-wait while paused
FPlatformProcess::Sleep(0.01f);
}
else
{
m_mutex.Lock();
// Work that must run under the lock: pull one packet from the capture buffer into m_audioData
//AudioStreamLoop();
BYTE* data = nullptr;
UINT32 size;
DWORD flags;
UINT64 device, performance;
HRESULT hr = m_Capture->GetNextPacketSize(&size);
hr = m_Capture->GetBuffer(&data, &size, &flags, &device, &performance);
// size is reported in frames; convert to bytes using the block alignment
int byteWrite = size * m_Format->nBlockAlign;
if (data != nullptr)
{
m_audioData.Append(data, byteWrite);
}
uint8_t* formatData = (uint8_t*)data;
hr = m_Capture->ReleaseBuffer(size);
m_mutex.Unlock();
// m_WaitTime is in 100-ns units; sleep for half the device period (float division, in seconds)
FPlatformProcess::Sleep(m_WaitTime / 2.0f / (10 * 1000 * 1000));
}
}
return 0;
}
void AudioStreamThread::Stop()
{
StopTaskCounter.Increment();
m_Kill = true;
m_Pause = false;
}
void AudioStreamThread::Exit()
{
// Called on the worker thread after Run() returns; release the WASAPI resources
StopAudioStream();
}
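For context, here is a minimal sketch of how the class might be driven from game code. Everything below (the AAudioTestActor class, the 3200-byte pull size, the tick-based polling) is an assumption for illustration and is not part of the original post:

// AudioTestActor.h -- hypothetical consumer of AudioStreamThread (sketch only)
#pragma once
#include "CoreMinimal.h"
#include "GameFramework/Actor.h"
#include "AudioTestActor.generated.h"

class AudioStreamThread;

UCLASS()
class TESTJIGOU_API AAudioTestActor : public AActor
{
	GENERATED_BODY()
public:
	AAudioTestActor();
	virtual void BeginPlay() override;
	virtual void Tick(float DeltaSeconds) override;
	virtual void EndPlay(const EEndPlayReason::Type EndPlayReason) override;
private:
	AudioStreamThread* StreamThread = nullptr;
};

// AudioTestActor.cpp
#include "AudioTestActor.h"
#include "AudioStreamThread.h"

AAudioTestActor::AAudioTestActor()
{
	PrimaryActorTick.bCanEverTick = true;
}

void AAudioTestActor::BeginPlay()
{
	Super::BeginPlay();
	// The AudioStreamThread constructor spawns the worker and starts the loopback capture
	StreamThread = new AudioStreamThread();
}

void AAudioTestActor::Tick(float DeltaSeconds)
{
	Super::Tick(DeltaSeconds);
	// Pull a fixed-size chunk each frame; CopyData2Zego only copies once enough bytes are buffered
	uint8 Chunk[3200] = { 0 };
	StreamThread->CopyData2Zego(Chunk, 3200);
	// ...hand Chunk to the Zego SDK (or any other consumer) here...
}

void AAudioTestActor::EndPlay(const EEndPlayReason::Type EndPlayReason)
{
	// Make Run() exit and wait for the worker before deleting the object
	StreamThread->StopThread();
	delete StreamThread;
	StreamThread = nullptr;
	Super::EndPlay(EndPlayReason);
}

StopThread() blocks on WaitForCompletion(), so the worker has finished Run() before the object is deleted.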
The captured audio is stored in m_audioData. Because capture happens in real time, once a chunk of the requested length has been copied out it is removed from the buffer; to implement recording instead, simply stop deleting the data that has been read.
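One possible way to do that (a sketch under the assumption above, not from the original post; the PeekData name is hypothetical) is to reuse the otherwise unused curIndex member as a read cursor, so callers copy data without consuming it and the whole session stays in m_audioData:

// Hypothetical companion to CopyData2Zego: non-destructive read using curIndex as a cursor.
// It would be declared next to CopyData2Zego in AudioStreamThread.h.
void AudioStreamThread::PeekData(unsigned char* data, int dataLen)
{
	m_mutex.Lock();
	if (m_audioData.Num() - curIndex >= dataLen)
	{
		memcpy(data, m_audioData.GetData() + curIndex, dataLen);
		curIndex += dataLen; // advance the cursor instead of calling RemoveAt
	}
	m_mutex.Unlock();
}

At the end of the session m_audioData then holds the full 16-bit PCM stream described by m_Format and can be written out, for example as a WAV file.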
Original article: https://www.cnblogs.com/litmin/p/8371274.html