// Copyright (C) 2003 Dolphin Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official SVN repository and contact information can be found at
// http://code.google.com/p/dolphin-emu/

#include "AudioCommon.h"
#include "XAudio2Stream.h"

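// Streaming setup: the source voice cycles through a small ring of buffers,
// each holding SAMPLES_PER_BUFFER stereo s16 frames; the sizes below follow
// from those two choices.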
const int NUM_BUFFERS = 3;
const int SAMPLES_PER_BUFFER = 96;

const int NUM_CHANNELS = 2;
const int BUFFER_SIZE = SAMPLES_PER_BUFFER * NUM_CHANNELS;
const int BUFFER_SIZE_BYTES = BUFFER_SIZE * sizeof(s16);

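// Queue one BUFFER_SIZE_BYTES block on the source voice. The buffer pointer is
// also stored as the packet's pContext so OnBufferEnd() can refill that same
// block once it has finished playing.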
void StreamingVoiceContext::SubmitBuffer(PBYTE buf_data)
{
	XAUDIO2_BUFFER buf = {};
	buf.AudioBytes = BUFFER_SIZE_BYTES;
	buf.pContext = buf_data;
	buf.pAudioData = buf_data;

	m_source_voice->SubmitSourceBuffer(&buf);
}

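// Set up a 16-bit stereo PCM source voice at the mixer's sample rate and prime
// it with NUM_BUFFERS buffers of silence so the OnBufferEnd() callbacks start
// driving the stream.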
StreamingVoiceContext::StreamingVoiceContext(IXAudio2 *pXAudio2, CMixer *pMixer, Common::Event& pSyncEvent)
	: m_mixer(pMixer)
	, m_sound_sync_event(pSyncEvent)
	, xaudio_buffer(new BYTE[NUM_BUFFERS * BUFFER_SIZE_BYTES]())
{
	WAVEFORMATEXTENSIBLE wfx = {};

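	// Describe the stream to XAudio2: 2-channel, 16-bit PCM at whatever rate
	// the mixer is producing.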
	wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
	wfx.Format.nSamplesPerSec = m_mixer->GetSampleRate();
	wfx.Format.nChannels = 2;
	wfx.Format.wBitsPerSample = 16;
	wfx.Format.nBlockAlign = wfx.Format.nChannels * wfx.Format.wBitsPerSample / 8;
	wfx.Format.nAvgBytesPerSec = wfx.Format.nSamplesPerSec * wfx.Format.nBlockAlign;
	wfx.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
	wfx.Samples.wValidBitsPerSample = 16;
	wfx.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;
	wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

	// create source voice
	HRESULT hr;
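	// XAUDIO2_VOICE_NOSRC disables the engine's sample rate conversion for this
	// voice, so it must run at the same rate as the mastering voice created in
	// XAudio2::Start().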
	if (FAILED(hr = pXAudio2->CreateSourceVoice(&m_source_voice, &wfx.Format, XAUDIO2_VOICE_NOSRC, 1.0f, this)))
	{
		PanicAlertT("XAudio2 CreateSourceVoice failed: %#X", hr);
		return;
	}

	m_source_voice->Start();

	// start buffers with silence
	for (int i = 0; i != NUM_BUFFERS; ++i)
		SubmitBuffer(xaudio_buffer.get() + (i * BUFFER_SIZE_BYTES));
}

StreamingVoiceContext::~StreamingVoiceContext()
{
	if (m_source_voice)
	{
		m_source_voice->Stop();
		m_source_voice->DestroyVoice();
	}
}

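// Pause the voice. Queued buffers are kept, so playback can resume from the
// same point via Play().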
void StreamingVoiceContext::Stop()
{
	if (m_source_voice)
		m_source_voice->Stop();
}

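// Resume a voice previously paused by Stop().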
void StreamingVoiceContext::Play()
{
	if (m_source_voice)
		m_source_voice->Start();
}

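// IXAudio2VoiceCallback: runs on XAudio2's worker thread each time a submitted
// buffer finishes playing, so keep it short and non-blocking.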
void StreamingVoiceContext::OnBufferEnd(void* context)
{
	// buffer end callback; gets SAMPLES_PER_BUFFER samples for a new buffer

	if (!m_source_voice || !context)
		return;

	//m_sound_sync_event->Wait(); // sync
	//m_sound_sync_event->Spin(); // or tight sync

	m_mixer->Mix(static_cast<short*>(context), SAMPLES_PER_BUFFER);
	SubmitBuffer(static_cast<BYTE*>(context));
}

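// Bring up the XAudio2 engine, a stereo mastering voice at the mixer's sample
// rate, and the streaming voice context that feeds it.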
bool XAudio2::Start()
{
	HRESULT hr;

	// The callback doesn't seem to run on a specific CPU anyway.
	IXAudio2* xaudptr;
	if (FAILED(hr = XAudio2Create(&xaudptr, 0, XAUDIO2_DEFAULT_PROCESSOR)))
	{
		PanicAlertT("XAudio2 init failed: %#X", hr);
		Stop();
		return false;
	}
	m_xaudio2 = std::unique_ptr<IXAudio2, Releaser>(xaudptr);

	// XAudio2 master voice
	// XAUDIO2_DEFAULT_CHANNELS instead of 2 for expansion?
	if (FAILED(hr = m_xaudio2->CreateMasteringVoice(&m_mastering_voice, 2, m_mixer->GetSampleRate())))
	{
		PanicAlertT("XAudio2 master voice creation failed: %#X", hr);
		Stop();
		return false;
	}

	// Volume
	m_mastering_voice->SetVolume(m_volume);

	m_voice_context = std::unique_ptr<StreamingVoiceContext>
		(new StreamingVoiceContext(m_xaudio2.get(), m_mixer, m_sound_sync_event));

	return true;
}

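// The volume is cached in m_volume so it can be re-applied when the mastering
// voice is (re)created in Start().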
void XAudio2::SetVolume(int volume)
{
	// linear scale: the 0-100 setting maps to an amplitude of 0.0-1.0
	m_volume = (float)volume / 100.f;

	if (m_mastering_voice)
		m_mastering_voice->SetVolume(m_volume);
}

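// Nothing to do per update; OnBufferEnd() on the XAudio2 thread keeps the
// stream fed. The disabled code below logs engine latency/glitch statistics.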
void XAudio2::Update()
{
	//m_sound_sync_event.Set();

	//static int xi = 0;
	//if (100000 == ++xi)
	//{
	//	xi = 0;
	//	XAUDIO2_PERFORMANCE_DATA perfData;
	//	pXAudio2->GetPerformanceData(&perfData);
	//	NOTICE_LOG(DSPHLE, "XAudio2 latency (samples): %i", perfData.CurrentLatencyInSamples);
	//	NOTICE_LOG(DSPHLE, "XAudio2 total glitches: %i", perfData.GlitchesSinceEngineStarted);
	//}
}

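// "Clear" here means mute/unmute: pause the streaming voice while muted and
// resume it afterwards.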
void XAudio2::Clear(bool mute)
{
	m_muted = mute;

	if (m_voice_context)
	{
		if (m_muted)
			m_voice_context->Stop();
		else
			m_voice_context->Play();
	}
}

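// Tear everything down in reverse order of creation: the streaming voice
// context first, then the mastering voice, then the engine itself.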
void XAudio2::Stop()
{
	//m_sound_sync_event.Set();

	m_voice_context.reset();

	if (m_mastering_voice)
	{
		m_mastering_voice->DestroyVoice();
		m_mastering_voice = nullptr;
	}

	m_xaudio2.reset(); // release interface
}