在 java 中使用 FFT 从 .wav 创建频谱图
Creating spectrogram from .wav using FFT in java
经过研究和大量的反复试验,我得出了一个观点,我可以构建一个频谱图,我认为它具有 对与错 的元素。
1.首先,我将.wav文件读入一个字节数组,只提取数据部分。
2. 我将字节数组转换为双精度数组,取左右声道的平均值。我还注意到 1 个通道的 1 个样本由 2 个字节组成。所以,4 个字节变成 1 个双字节。
3. 对于某个 window 大小的 2 次方,我从 here 应用 FFT 并获得频域中的振幅。这是频谱图图像的垂直条带。
4. 我用相同的 window 大小和整个数据的重叠重复执行此操作并获得频谱图。
下面是将.wav读入double数组的代码
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
public class readWAV2Array {
private byte[] entireFileData;
//SR = sampling rate
public double getSR(){
ByteBuffer wrapped = ByteBuffer.wrap(Arrays.copyOfRange(entireFileData, 24, 28)); // big-endian by default
double SR = wrapped.order(java.nio.ByteOrder.LITTLE_ENDIAN).getInt();
return SR;
}
public readWAV2Array(String filepath, boolean print_info) throws IOException{
Path path = Paths.get(filepath);
this.entireFileData = Files.readAllBytes(path);
if (print_info){
//extract format
String format = new String(Arrays.copyOfRange(entireFileData, 8, 12), "UTF-8");
//extract number of channels
int noOfChannels = entireFileData[22];
String noOfChannels_str;
if (noOfChannels == 2)
noOfChannels_str = "2 (stereo)";
else if (noOfChannels == 1)
noOfChannels_str = "1 (mono)";
else
noOfChannels_str = noOfChannels + "(more than 2 channels)";
//extract sampling rate (SR)
int SR = (int) this.getSR();
//extract Bit Per Second (BPS/Bit depth)
int BPS = entireFileData[34];
System.out.println("---------------------------------------------------");
System.out.println("File path: " + filepath);
System.out.println("File format: " + format);
System.out.println("Number of channels: " + noOfChannels_str);
System.out.println("Sampling rate: " + SR);
System.out.println("Bit depth: " + BPS);
System.out.println("---------------------------------------------------");
}
}
public double[] getByteArray (){
byte[] data_raw = Arrays.copyOfRange(entireFileData, 44, entireFileData.length);
int totalLength = data_raw.length;
//declare double array for mono
int new_length = totalLength/4;
double[] data_mono = new double[new_length];
double left, right;
for (int i = 0; i < new_length; i++){
left = ((data_raw[i] & 0xff) << 8) | (data_raw[i+1] & 0xff);
right = ((data_raw[i+2] & 0xff) << 8) | (data_raw[i+3] & 0xff);
data_mono[i] = (left+right)/2.0;
}
return data_mono;
}
}
下面的代码是运行
的主程序
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import javax.imageio.ImageIO;
public class App {
public static Color getColor(double power) {
double H = power * 0.4; // Hue (note 0.4 = Green, see huge chart below)
double S = 1.0; // Saturation
double B = 1.0; // Brightness
return Color.getHSBColor((float)H, (float)S, (float)B);
}
public static void main(String[] args) {
// TODO Auto-generated method stub
String filepath = "audio_work/Sine_Sweep_Full_Spectrum_20_Hz_20_kHz_audiocheck.wav";
try {
//get raw double array containing .WAV data
readWAV2Array audioTest = new readWAV2Array(filepath, true);
double[] rawData = audioTest.getByteArray();
int length = rawData.length;
//initialize parameters for FFT
int WS = 2048; //WS = window size
int OF = 8; //OF = overlap factor
int windowStep = WS/OF;
//calculate FFT parameters
double SR = audioTest.getSR();
double time_resolution = WS/SR;
double frequency_resolution = SR/WS;
double highest_detectable_frequency = SR/2.0;
double lowest_detectable_frequency = 5.0*SR/WS;
System.out.println("time_resolution: " + time_resolution*1000 + " ms");
System.out.println("frequency_resolution: " + frequency_resolution + " Hz");
System.out.println("highest_detectable_frequency: " + highest_detectable_frequency + " Hz");
System.out.println("lowest_detectable_frequency: " + lowest_detectable_frequency + " Hz");
//initialize plotData array
int nX = (length-WS)/windowStep;
int nY = WS;
double[][] plotData = new double[nX][nY];
//apply FFT and find MAX and MIN amplitudes
double maxAmp = Double.MIN_VALUE;
double minAmp = Double.MAX_VALUE;
double amp_square;
double[] inputImag = new double[length];
for (int i = 0; i < nX; i++){
Arrays.fill(inputImag, 0.0);
double[] WS_array = FFT.fft(Arrays.copyOfRange(rawData, i*windowStep, i*windowStep+WS), inputImag, true);
for (int j = 0; j < nY; j++){
amp_square = (WS_array[2*j]*WS_array[2*j]) + (WS_array[2*j+1]*WS_array[2*j+1]);
if (amp_square == 0.0){
plotData[i][j] = amp_square;
}
else{
plotData[i][j] = 10 * Math.log10(amp_square);
}
//find MAX and MIN amplitude
if (plotData[i][j] > maxAmp)
maxAmp = plotData[i][j];
else if (plotData[i][j] < minAmp)
minAmp = plotData[i][j];
}
}
System.out.println("---------------------------------------------------");
System.out.println("Maximum amplitude: " + maxAmp);
System.out.println("Minimum amplitude: " + minAmp);
System.out.println("---------------------------------------------------");
//Normalization
double diff = maxAmp - minAmp;
for (int i = 0; i < nX; i++){
for (int j = 0; j < nY; j++){
plotData[i][j] = (plotData[i][j]-minAmp)/diff;
}
}
//plot image
BufferedImage theImage = new BufferedImage(nX, nY, BufferedImage.TYPE_INT_RGB);
double ratio;
for(int x = 0; x<nX; x++){
for(int y = 0; y<nY; y++){
ratio = plotData[x][y];
//theImage.setRGB(x, y, new Color(red, green, 0).getRGB());
Color newColor = getColor(1.0-ratio);
theImage.setRGB(x, y, newColor.getRGB());
}
}
File outputfile = new File("saved.png");
ImageIO.write(theImage, "png", outputfile);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
但是,我从.wav中播放20-20kHz扫频声音得到的图像是这样的:
颜色表示声音强度 红色(高)-->绿色(低)
没错,应该是下图这样的:
如果我能在我的项目中得到任何 correct/improvement/suggest,我将不胜感激。预先感谢您对我的问题发表评论。
幸运的是,你的权利似乎多于错误。
导致额外红线的第一个也是主要问题是您如何解码 readWAV2Array.getByteArray
中的数据。由于样本跨越 4 个字节,因此您必须以 4 的倍数进行索引(例如,样本 0 的字节 0、1、2、3,样本 1 的字节 4、5、6、7)否则您将读取 4 字节的重叠块(例如,样本 0 的字节 0、1、2、3,样本 1 的字节 1、2、3、4)。此转换的另一件事是,您必须将结果显式转换为带符号的 short
类型,然后才能将其分配给 left
和 right
(类型为 double
) 以便从无符号字节中获得带符号的 16 位结果。这应该给你一个转换循环,看起来像:
for (int i = 0; 4*i+3 < totalLength; i++){
left = (short)((data_raw[4*i+1] & 0xff) << 8) | (data_raw[4*i] & 0xff);
right = (short)((data_raw[4*i+3] & 0xff) << 8) | (data_raw[4*i+2] & 0xff);
data_mono[i] = (left+right)/2.0;
}
在这一点上,你应该开始得到一个具有强线代表你的 20Hz-20kHz 线性调频的图:
但是你应该注意到你实际上得到了 2 行。这是因为对于实值信号,频谱具有厄密对称性。因此,高于奈奎斯特频率的频谱幅度(采样率的一半,在本例中为 44100Hz/2)是低于奈奎斯特频率的频谱的冗余反射。通过将main
中的nY
的定义改为:
,可以只绘制奈奎斯特频率以下的非冗余部分
int nY = WS/2 + 1;
并且会给你:
几乎就是我们要找的东西,但是随着频率的增加扫描会生成一个图形,其中有一条直线在减小。那是因为您的索引使 0Hz 频率位于图顶部的索引 0 处,而 22050Hz 频率位于图底部的索引 nY-1
处。要翻转图形并在底部获得更常见的 0Hz,在顶部获得 22050Hz,您可以更改索引以使用:
plotData[i][nY-j-1] = 10 * Math.log10(amp_square);
现在你应该有一个看起来像你期待的情节(虽然有不同的颜色图):
最后一点:虽然我理解您打算避免在转换为分贝时取 0 的对数,但在这种特定情况下将输出设置为线性刻度振幅可能会产生意想不到的结果。相反,我会 select 保护的截止阈值振幅:
// select threshold based on the expected spectrum amplitudes
// e.g. 80dB below your signal's spectrum peak amplitude
double threshold = 1.0;
// limit values and convert to dB
plotData[i][nY-j-1] = 10 * Math.log10(Math.max(amp_square,threshold));
经过研究和大量的反复试验,我得出了一个观点,我可以构建一个频谱图,我认为它具有 对与错 的元素。
1.首先,我将.wav文件读入一个字节数组,只提取数据部分。
2. 我将字节数组转换为双精度数组,取左右声道的平均值。我还注意到 1 个通道的 1 个样本由 2 个字节组成。所以,4 个字节变成 1 个双字节。
3. 对于某个 window 大小的 2 次方,我从 here 应用 FFT 并获得频域中的振幅。这是频谱图图像的垂直条带。
4. 我用相同的 window 大小和整个数据的重叠重复执行此操作并获得频谱图。
下面是将.wav读入double数组的代码
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
public class readWAV2Array {
private byte[] entireFileData;
//SR = sampling rate
public double getSR(){
ByteBuffer wrapped = ByteBuffer.wrap(Arrays.copyOfRange(entireFileData, 24, 28)); // big-endian by default
double SR = wrapped.order(java.nio.ByteOrder.LITTLE_ENDIAN).getInt();
return SR;
}
public readWAV2Array(String filepath, boolean print_info) throws IOException{
Path path = Paths.get(filepath);
this.entireFileData = Files.readAllBytes(path);
if (print_info){
//extract format
String format = new String(Arrays.copyOfRange(entireFileData, 8, 12), "UTF-8");
//extract number of channels
int noOfChannels = entireFileData[22];
String noOfChannels_str;
if (noOfChannels == 2)
noOfChannels_str = "2 (stereo)";
else if (noOfChannels == 1)
noOfChannels_str = "1 (mono)";
else
noOfChannels_str = noOfChannels + "(more than 2 channels)";
//extract sampling rate (SR)
int SR = (int) this.getSR();
//extract Bit Per Second (BPS/Bit depth)
int BPS = entireFileData[34];
System.out.println("---------------------------------------------------");
System.out.println("File path: " + filepath);
System.out.println("File format: " + format);
System.out.println("Number of channels: " + noOfChannels_str);
System.out.println("Sampling rate: " + SR);
System.out.println("Bit depth: " + BPS);
System.out.println("---------------------------------------------------");
}
}
public double[] getByteArray (){
byte[] data_raw = Arrays.copyOfRange(entireFileData, 44, entireFileData.length);
int totalLength = data_raw.length;
//declare double array for mono
int new_length = totalLength/4;
double[] data_mono = new double[new_length];
double left, right;
for (int i = 0; i < new_length; i++){
left = ((data_raw[i] & 0xff) << 8) | (data_raw[i+1] & 0xff);
right = ((data_raw[i+2] & 0xff) << 8) | (data_raw[i+3] & 0xff);
data_mono[i] = (left+right)/2.0;
}
return data_mono;
}
}
下面的代码是运行
的主程序import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import javax.imageio.ImageIO;
public class App {
public static Color getColor(double power) {
double H = power * 0.4; // Hue (note 0.4 = Green, see huge chart below)
double S = 1.0; // Saturation
double B = 1.0; // Brightness
return Color.getHSBColor((float)H, (float)S, (float)B);
}
public static void main(String[] args) {
// TODO Auto-generated method stub
String filepath = "audio_work/Sine_Sweep_Full_Spectrum_20_Hz_20_kHz_audiocheck.wav";
try {
//get raw double array containing .WAV data
readWAV2Array audioTest = new readWAV2Array(filepath, true);
double[] rawData = audioTest.getByteArray();
int length = rawData.length;
//initialize parameters for FFT
int WS = 2048; //WS = window size
int OF = 8; //OF = overlap factor
int windowStep = WS/OF;
//calculate FFT parameters
double SR = audioTest.getSR();
double time_resolution = WS/SR;
double frequency_resolution = SR/WS;
double highest_detectable_frequency = SR/2.0;
double lowest_detectable_frequency = 5.0*SR/WS;
System.out.println("time_resolution: " + time_resolution*1000 + " ms");
System.out.println("frequency_resolution: " + frequency_resolution + " Hz");
System.out.println("highest_detectable_frequency: " + highest_detectable_frequency + " Hz");
System.out.println("lowest_detectable_frequency: " + lowest_detectable_frequency + " Hz");
//initialize plotData array
int nX = (length-WS)/windowStep;
int nY = WS;
double[][] plotData = new double[nX][nY];
//apply FFT and find MAX and MIN amplitudes
double maxAmp = Double.MIN_VALUE;
double minAmp = Double.MAX_VALUE;
double amp_square;
double[] inputImag = new double[length];
for (int i = 0; i < nX; i++){
Arrays.fill(inputImag, 0.0);
double[] WS_array = FFT.fft(Arrays.copyOfRange(rawData, i*windowStep, i*windowStep+WS), inputImag, true);
for (int j = 0; j < nY; j++){
amp_square = (WS_array[2*j]*WS_array[2*j]) + (WS_array[2*j+1]*WS_array[2*j+1]);
if (amp_square == 0.0){
plotData[i][j] = amp_square;
}
else{
plotData[i][j] = 10 * Math.log10(amp_square);
}
//find MAX and MIN amplitude
if (plotData[i][j] > maxAmp)
maxAmp = plotData[i][j];
else if (plotData[i][j] < minAmp)
minAmp = plotData[i][j];
}
}
System.out.println("---------------------------------------------------");
System.out.println("Maximum amplitude: " + maxAmp);
System.out.println("Minimum amplitude: " + minAmp);
System.out.println("---------------------------------------------------");
//Normalization
double diff = maxAmp - minAmp;
for (int i = 0; i < nX; i++){
for (int j = 0; j < nY; j++){
plotData[i][j] = (plotData[i][j]-minAmp)/diff;
}
}
//plot image
BufferedImage theImage = new BufferedImage(nX, nY, BufferedImage.TYPE_INT_RGB);
double ratio;
for(int x = 0; x<nX; x++){
for(int y = 0; y<nY; y++){
ratio = plotData[x][y];
//theImage.setRGB(x, y, new Color(red, green, 0).getRGB());
Color newColor = getColor(1.0-ratio);
theImage.setRGB(x, y, newColor.getRGB());
}
}
File outputfile = new File("saved.png");
ImageIO.write(theImage, "png", outputfile);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
但是,我从.wav中播放20-20kHz扫频声音得到的图像是这样的:
颜色表示声音强度 红色(高)-->绿色(低)
没错,应该是下图这样的:
如果我能在我的项目中得到任何 correct/improvement/suggest,我将不胜感激。预先感谢您对我的问题发表评论。
幸运的是,你的权利似乎多于错误。
导致额外红线的第一个也是主要问题是您如何解码 readWAV2Array.getByteArray
中的数据。由于样本跨越 4 个字节,因此您必须以 4 的倍数进行索引(例如,样本 0 的字节 0、1、2、3,样本 1 的字节 4、5、6、7)否则您将读取 4 字节的重叠块(例如,样本 0 的字节 0、1、2、3,样本 1 的字节 1、2、3、4)。此转换的另一件事是,您必须将结果显式转换为带符号的 short
类型,然后才能将其分配给 left
和 right
(类型为 double
) 以便从无符号字节中获得带符号的 16 位结果。这应该给你一个转换循环,看起来像:
for (int i = 0; 4*i+3 < totalLength; i++){
left = (short)((data_raw[4*i+1] & 0xff) << 8) | (data_raw[4*i] & 0xff);
right = (short)((data_raw[4*i+3] & 0xff) << 8) | (data_raw[4*i+2] & 0xff);
data_mono[i] = (left+right)/2.0;
}
在这一点上,你应该开始得到一个具有强线代表你的 20Hz-20kHz 线性调频的图:
但是你应该注意到你实际上得到了 2 行。这是因为对于实值信号,频谱具有厄密对称性。因此,高于奈奎斯特频率的频谱幅度(采样率的一半,在本例中为 44100Hz/2)是低于奈奎斯特频率的频谱的冗余反射。通过将main
中的nY
的定义改为:
int nY = WS/2 + 1;
并且会给你:
几乎就是我们要找的东西,但是随着频率的增加扫描会生成一个图形,其中有一条直线在减小。那是因为您的索引使 0Hz 频率位于图顶部的索引 0 处,而 22050Hz 频率位于图底部的索引 nY-1
处。要翻转图形并在底部获得更常见的 0Hz,在顶部获得 22050Hz,您可以更改索引以使用:
plotData[i][nY-j-1] = 10 * Math.log10(amp_square);
现在你应该有一个看起来像你期待的情节(虽然有不同的颜色图):
最后一点:虽然我理解您打算避免在转换为分贝时取 0 的对数,但在这种特定情况下将输出设置为线性刻度振幅可能会产生意想不到的结果。相反,我会 select 保护的截止阈值振幅:
// select threshold based on the expected spectrum amplitudes
// e.g. 80dB below your signal's spectrum peak amplitude
double threshold = 1.0;
// limit values and convert to dB
plotData[i][nY-j-1] = 10 * Math.log10(Math.max(amp_square,threshold));