Overview
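The class below is a thin wrapper around WebRTC's AEC3 (webrtc::EchoCanceller3). The constructor creates the echo controller through an EchoCanceller3Factory, a high-pass filter, and the three webrtc::AudioBuffer instances that AEC3 operates on: the reference (render) signal, the capture signal, and the optional linear-filter output. Process() then cancels the echo in one 10 ms microphone frame against the matching far-end reference frame.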
CEchoCanceller3::CEchoCanceller3(int ch, int sample_rate, int framelen)
{
    m_ch = ch;
    m_sample_rate = sample_rate;
    m_bytes_per_frame = framelen;
    // Ask AEC3 to also export the linear-filter output alongside the
    // regular (suppressed) capture signal.
    m_aec_config.filter.export_linear_aec_output = true;
    m_aec_factory = std::make_unique<webrtc::EchoCanceller3Factory>(m_aec_config);
    // Create(sample_rate_hz, num_render_channels, num_capture_channels).
    m_echo_controler = m_aec_factory->Create(m_sample_rate, m_ch, m_ch);
    m_hp_filter = std::make_unique<webrtc::HighPassFilter>(m_sample_rate, ch);
    m_config = webrtc::StreamConfig(m_sample_rate, m_ch, false);
    // AudioBuffer(input_rate, input_channels, buffer_rate, buffer_channels,
    //             output_rate, output_channels); all kept identical here.
    m_ref_audio = std::make_unique<webrtc::AudioBuffer>(
        m_config.sample_rate_hz(), m_config.num_channels(),
        m_config.sample_rate_hz(), m_config.num_channels(),
        m_config.sample_rate_hz(), m_config.num_channels());
    m_aec_audio = std::make_unique<webrtc::AudioBuffer>(
        m_config.sample_rate_hz(), m_config.num_channels(),
        m_config.sample_rate_hz(), m_config.num_channels(),
        m_config.sample_rate_hz(), m_config.num_channels());
    // Note: WebRTC's own AudioProcessing allocates the linear AEC output
    // buffer at 16 kHz, so using m_sample_rate here effectively assumes
    // 16 kHz operation.
    int kLinearOutputRateHz = m_sample_rate;
    m_aec_linear_audio = std::make_unique<webrtc::AudioBuffer>(
        kLinearOutputRateHz, m_config.num_channels(),
        kLinearOutputRateHz, m_config.num_channels(),
        kLinearOutputRateHz, m_config.num_channels());
    // 16-bit PCM: two bytes per sample. One frame must hold 10 ms of audio,
    // i.e. m_samples_per_frame == m_sample_rate / 100.
    m_samples_per_frame = m_bytes_per_frame / 2;
}
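As a usage sketch (the concrete values are an assumption, not from the original post): a mono 16 kHz stream with 10 ms frames of 16-bit PCM gives 160 samples, i.e. 320 bytes, per frame:

    // Hypothetical parameters: 1 channel, 16 kHz, 10 ms frames.
    // 16000 / 100 = 160 samples; 160 samples * 2 bytes = 320 bytes.
    CEchoCanceller3 aec(1, 16000, 320);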
CEchoCanceller3::~CEchoCanceller3()
{
}
CObjPtr<CObjNetIOBuffer> CEchoCanceller3::Process(CObjNetIOBuffer *mic, CObjNetIOBuffer *ref)
{
    // Without a far-end reference signal there is nothing to cancel,
    // so pass the mic frame through unchanged.
    if (ref == NULL)
    {
        return CObjPtr<CObjNetIOBuffer>(mic);
    }
    // AEC3 works on fixed-size 10 ms frames; mic and reference must match.
    if (mic->m_nDataLen != m_bytes_per_frame || mic->m_nDataLen != ref->m_nDataLen)
    {
        return CObjPtr<CObjNetIOBuffer>(mic);
    }
    CObjPtr<CObjNetIOBuffer> out(CObjNetIOBufferSharedMemory::CreateObj(NULL, mic->m_nBufLen), TRUE);
    unsigned char* ref_tmp = (unsigned char*)ref->m_pBuf;
    unsigned char* aec_tmp = (unsigned char*)mic->m_pBuf;
    webrtc::AudioFrame ref_frame, aec_frame;
    ref_frame.UpdateFrame(0, reinterpret_cast<int16_t*>(ref_tmp), m_samples_per_frame, m_sample_rate,
                          webrtc::AudioFrame::kNormalSpeech, webrtc::AudioFrame::kVadActive, 1);
    aec_frame.UpdateFrame(0, reinterpret_cast<int16_t*>(aec_tmp), m_samples_per_frame, m_sample_rate,
                          webrtc::AudioFrame::kNormalSpeech, webrtc::AudioFrame::kVadActive, 1);
    m_ref_audio->CopyFrom(&ref_frame);
    m_aec_audio->CopyFrom(&aec_frame);
    // Feed the far-end (render) signal to the echo controller; AnalyzeRender
    // expects the buffer split into frequency bands.
    m_ref_audio->SplitIntoFrequencyBands();
    m_echo_controler->AnalyzeRender(m_ref_audio.get());
    m_ref_audio->MergeFrequencyBands();
    // Analyze the full-band capture signal, then split it for processing.
    m_echo_controler->AnalyzeCapture(m_aec_audio.get());
    m_aec_audio->SplitIntoFrequencyBands();
    m_hp_filter->Process(m_aec_audio.get(), true);
    // No device latency is reported here; with real hardware this should be
    // the measured render-to-capture delay.
    m_echo_controler->SetAudioBufferDelay(0);
    m_echo_controler->ProcessCapture(m_aec_audio.get(), m_aec_linear_audio.get(), false);
    m_aec_audio->MergeFrequencyBands();
    // To take the regular (suppressed) output instead of the linear one:
    // m_aec_audio->CopyTo(&aec_frame);
    // memcpy(out->m_pBuf, aec_frame.data(), m_bytes_per_frame);
    // Reset the frame to silence, then copy the linear AEC output into it.
    aec_frame.UpdateFrame(0, nullptr, m_samples_per_frame, m_sample_rate,
                          webrtc::AudioFrame::kNormalSpeech, webrtc::AudioFrame::kVadActive, 1);
    m_aec_linear_audio->CopyTo(&aec_frame);
    memcpy(out->m_pBuf, aec_frame.data(), m_bytes_per_frame);
    out->m_nDataLen = m_bytes_per_frame;
    return out;
}
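A minimal driving loop might look like the sketch below. read_mic_frame(), read_ref_frame(), and play_or_send() are hypothetical helpers standing in for whatever capture and playback path the surrounding application uses; only the call into CEchoCanceller3 is taken from the code above.

    // Sketch only: feed the canceller one matched 10 ms frame pair at a time.
    CEchoCanceller3 aec(1, 16000, 320);          // hypothetical parameters, see above
    for (;;) {
        CObjNetIOBuffer* mic = read_mic_frame(); // hypothetical: one 10 ms capture frame
        CObjNetIOBuffer* ref = read_ref_frame(); // hypothetical: the matching render frame
        if (mic == NULL)
            break;
        CObjPtr<CObjNetIOBuffer> clean = aec.Process(mic, ref);
        play_or_send(clean);                     // hypothetical sink for the cleaned PCM
    }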