How can I load a sound file into memory using NAudio and use it later?
I play the same sound files about 5 times per second (picking one of them at random), and I load them from disk every time, so the program uses a lot of memory. How can I load the sound files into memory once and then play them from there? I'm using NAudio. Current code:
var sound = "sounds/test.mp3";
using (var audioFile = new AudioFileReader(sound))
using (var outputDevice = new WaveOutEvent())
{
    outputDevice.Init(audioFile);
    outputDevice.Play();
    while (outputDevice.PlaybackState == PlaybackState.Playing)
    {
        Thread.Sleep(1000);
    }
    threadStop();
}
If you remove the using blocks, audioFile and outputDevice will not be disposed. You can then keep them in memory and use the same references every time you play the audio.
With the using blocks, you are repeatedly instantiating NAudio objects whose memory may not be released right away.
var sound = "sounds/test.mp3";
var audioFile = new AudioFileReader(sound);
var outputDevice = new WaveOutEvent();
outputDevice.Init(audioFile);
outputDevice.Play();
while (outputDevice.PlaybackState == PlaybackState.Playing)
{
    Thread.Sleep(1000);
}
threadStop();
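To make that reuse concrete, here is a minimal sketch of the idea (not part of the answer above): the NAudio objects are created once, kept as fields, and the reader is rewound via its Position property before each playback. The class and member names are my own, and threadStop from the question is omitted.

using System;
using NAudio.Wave;

class ReusablePlayer : IDisposable
{
    private readonly AudioFileReader audioFile;
    private readonly WaveOutEvent outputDevice;

    public ReusablePlayer(string path)
    {
        audioFile = new AudioFileReader(path);   // opened once, kept alive
        outputDevice = new WaveOutEvent();
        outputDevice.Init(audioFile);            // Init only once
    }

    public void Play()
    {
        outputDevice.Stop();      // stop any playback still in progress
        audioFile.Position = 0;   // rewind to the start of the file
        outputDevice.Play();
    }

    public void Dispose()
    {
        outputDevice.Dispose();
        audioFile.Dispose();
    }
}

This avoids re-reading the file header and re-opening the output device on every call, but a single WaveOutEvent can only play one sound at a time; for overlapping playback see the mixer-based approach below.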
I solved the whole problem using the code from the article.
It uses a MixingSampleProvider. I load the sounds into a custom class called CachedSound and play them through another class, AudioPlaybackEngine, which owns the mixer; a CachedSoundSampleProvider class reads the cached sounds.
The code looks like this:
using System;
using System.Collections.Generic;
using System.Linq;
using NAudio.Wave;
using NAudio.Wave.SampleProviders;

class AudioPlaybackEngine : IDisposable
{
    private readonly IWavePlayer outputDevice;
    private readonly MixingSampleProvider mixer;

    public AudioPlaybackEngine(int sampleRate = 44100, int channelCount = 2)
    {
        outputDevice = new WaveOutEvent();
        mixer = new MixingSampleProvider(WaveFormat.CreateIeeeFloatWaveFormat(sampleRate, channelCount));
        mixer.ReadFully = true;
        outputDevice.Init(mixer);
        outputDevice.Play();
    }

    public void PlaySound(string fileName)
    {
        var input = new AudioFileReader(fileName);
        AddMixerInput(new AutoDisposeFileReader(input));
    }

    private ISampleProvider ConvertToRightChannelCount(ISampleProvider input)
    {
        if (input.WaveFormat.Channels == mixer.WaveFormat.Channels)
        {
            return input;
        }
        if (input.WaveFormat.Channels == 1 && mixer.WaveFormat.Channels == 2)
        {
            return new MonoToStereoSampleProvider(input);
        }
        throw new NotImplementedException("Not yet implemented this channel count conversion");
    }

    public void PlaySound(CachedSound sound)
    {
        AddMixerInput(new CachedSoundSampleProvider(sound));
    }

    private void AddMixerInput(ISampleProvider input)
    {
        mixer.AddMixerInput(ConvertToRightChannelCount(input));
    }

    public void Dispose()
    {
        outputDevice.Dispose();
    }

    public static readonly AudioPlaybackEngine Instance = new AudioPlaybackEngine(44100, 2);
}

class CachedSound
{
    public float[] AudioData { get; private set; }
    public WaveFormat WaveFormat { get; private set; }

    public CachedSound(string audioFileName)
    {
        using (var audioFileReader = new AudioFileReader(audioFileName))
        {
            // TODO: could add resampling in here if required
            WaveFormat = audioFileReader.WaveFormat;
            var wholeFile = new List<float>((int)(audioFileReader.Length / 4));
            var readBuffer = new float[audioFileReader.WaveFormat.SampleRate * audioFileReader.WaveFormat.Channels];
            int samplesRead;
            while ((samplesRead = audioFileReader.Read(readBuffer, 0, readBuffer.Length)) > 0)
            {
                wholeFile.AddRange(readBuffer.Take(samplesRead));
            }
            AudioData = wholeFile.ToArray();
        }
    }
}

class CachedSoundSampleProvider : ISampleProvider
{
    private readonly CachedSound cachedSound;
    private long position;

    public CachedSoundSampleProvider(CachedSound cachedSound)
    {
        this.cachedSound = cachedSound;
    }

    public int Read(float[] buffer, int offset, int count)
    {
        var availableSamples = cachedSound.AudioData.Length - position;
        var samplesToCopy = Math.Min(availableSamples, count);
        Array.Copy(cachedSound.AudioData, position, buffer, offset, samplesToCopy);
        position += samplesToCopy;
        return (int)samplesToCopy;
    }

    public WaveFormat WaveFormat { get { return cachedSound.WaveFormat; } }
}

// This class automatically disposes the file reader that it contains.
class AutoDisposeFileReader : ISampleProvider
{
    private readonly AudioFileReader reader;
    private bool isDisposed;

    public AutoDisposeFileReader(AudioFileReader reader)
    {
        this.reader = reader;
        this.WaveFormat = reader.WaveFormat;
    }

    public int Read(float[] buffer, int offset, int count)
    {
        if (isDisposed)
            return 0;
        int read = reader.Read(buffer, offset, count);
        if (read == 0)
        {
            reader.Dispose();
            isDisposed = true;
        }
        return read;
    }

    public WaveFormat WaveFormat { get; private set; }
}
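Usage then looks roughly like this (a sketch following the article's fire-and-forget pattern; the file names are placeholders):

// On startup: load each short sound fully into memory once.
var zap = new CachedSound("sounds/zap.wav");
var boom = new CachedSound("sounds/boom.wav");

// Later, as often as needed (even several times per second): fire-and-forget playback.
// The cached float[] data is reused, so there is no file I/O per play.
AudioPlaybackEngine.Instance.PlaySound(zap);
AudioPlaybackEngine.Instance.PlaySound(boom);

// Longer files can still be streamed from disk instead of cached:
AudioPlaybackEngine.Instance.PlaySound("sounds/music.mp3");

// On application exit:
AudioPlaybackEngine.Instance.Dispose();

Note that CachedSound keeps each file's original WaveFormat, so sounds whose sample rate differs from the mixer's 44.1 kHz would need resampling first (per the TODO in the CachedSound constructor); mono sources are converted by MonoToStereoSampleProvider.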