java UDP 声音流:为什么我有干扰?
java UDP sound streaming : why do I have interferences?
我正在尝试构建一个非常简单的带有源和接收器的音频流。但是当我在 "receiver" 中接收声音时,我有一些干扰。我正在使用 UDP 协议。有没有办法 "improve" 我的代码避免这些干扰?
这是我的音频服务器:
import java.io.File;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketException;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;
public class AudioPlayerServer implements Runnable {
private SourceDataLine sLine;
private AudioFormat audioFormat;
private AudioInputStream audioInputStream=null;
private String host="127.0.0.1";
private int port=8000;
private DatagramSocket server;
private DatagramPacket packet;
private long startTime;
private long endTime=System.nanoTime();;
private long elapsed=System.nanoTime();;
private double sleepTime;
private long sleepTimeMillis;
private int sleepTimeNanos, epsilon;
AudioPlayerServer(String host, int port) {
this.host=host;
this.port=port;
init();
}
public void init() {
File file = new File("test.wav");
try {
audioInputStream=AudioSystem.getAudioInputStream(file);
} catch (Exception e) {
e.printStackTrace();
}
audioFormat = new AudioFormat(44100, 16, 2, true, false);
DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat);
System.out.println(info);
try {
server = new DatagramSocket();
System.out.println("Server started");
} catch (SocketException e) {
e.printStackTrace();
}
}
public void run() {
try {
byte bytes[] = new byte[4096];
byte bytes2[] = new byte[1024];
int bytesRead=0;
//The sending rythm of the data have to be compatible with an audio streaming.
//So, I'll sleep the streaming thread for (1/SampleRate) seconds * (bytes.lenght/4) - epsilon
//=> bytes.lenght/4 because 4 values = 1 frame => For ex, in 1024 bits, there are 1024/4 = 256 frames
//epsilon because the instructions themselves takes time.
//The value have to be convert in milliseconds et nanoseconds.
sleepTime=(1024/audioFormat.getSampleRate());
epsilon=400000;
sleepTimeMillis=(long)(sleepTime*1000);
sleepTimeNanos=(int)((sleepTime*1000-sleepTimeMillis)*1000000);
System.out.println("Sleep time :"+sleepTimeMillis+" ms, "+sleepTimeNanos+" ns");
while ((bytesRead=audioInputStream.read(bytes, 0, bytes.length))!= -1) {
//getSignalLevel(bytes);
try {
//startTime=System.nanoTime();
packet = new DatagramPacket(bytes, bytes.length, InetAddress.getByName(host), port);
packet.setData(bytes);
server.send(packet);
packet.setLength(bytes.length);
//endTime=System.nanoTime();
//System.out.println(endTime-startTime);
Thread.sleep(sleepTimeMillis,sleepTimeNanos);
} catch (IOException e) {
e.printStackTrace();
}
}
System.out.println("No bytes anymore !");
} catch (Exception e) {
e.printStackTrace();
}
sLine.close();
System.out.println("Line closed");
}
}
这是客户:
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.UnknownHostException;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;
public class AudioReceiver implements Runnable{
private String host;
private int port;
private SourceDataLine sLine;
private AudioFormat audioFormat;
byte[] buffer=new byte[4096];
DatagramPacket packet;
AudioReceiver (String host, int port) {
this.host=host;
this.port=port;
init();
Thread t1=new Thread(new Reader());
t1.start();
}
public void init() {
audioFormat = new AudioFormat(44100, 16, 2, true, false);
DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat);
try {
System.out.println(info);
sLine=(SourceDataLine) AudioSystem.getLine(info);
System.out.println(sLine.getLineInfo() + " - sample rate : "+audioFormat.getSampleRate());
} catch (Exception e) {
e.printStackTrace();
}
}
public void run() {
System.out.println("Client started");
try {
sLine.open(audioFormat);
} catch (Exception e){
e.printStackTrace();
}
sLine.start();
System.out.println("Line started");
try {
DatagramSocket client = new DatagramSocket(port, InetAddress.getByName(host));
while (true) {
try {
packet = new DatagramPacket(buffer, buffer.length);
//System.out.println("Reception beggins for host "+host+" : "+port);
client.receive(packet);
//System.out.println("Reception ends");
buffer=packet.getData();
//sLine.write(packet.getData(), 0, buffer.length);
packet.setLength(buffer.length);
} catch (UnknownHostException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
} catch (SocketException e) {
e.printStackTrace();
} catch (UnknownHostException e1) {
e1.printStackTrace();
}
}
public class Reader implements Runnable {
public void run() {
while (true) {
if (packet!=null) {
sLine.write(packet.getData(), 0, buffer.length);
}
}
}
}
}
I'm using an UDP protocol. Is there a way to "improve" my code to avoid those interferences ?
有两种可能:
UDP 消息正在丢失。
客户端或服务器端的应用程序逻辑出现问题,导致音频流数据损坏。
假设问题是#1,最简单的选择是切换到 TCP。
UDP 天生就容易丢包,丢包会造成失真。 TCP 不是有损的,如果 "pipeline" 中有一些延迟(即 client-side 缓冲),您应该能够避免由于偶尔的数据包丢失和重传(在 TCP 级别)引起的抖动造成的失真).
我还注意到您当前的客户端/服务器逻辑正在尝试使用 sleep
控制服务器端的播放速率。您需要注意 sleep
不能保证在您期望的 准确 的时间点唤醒您的休眠线程。语义是 "sleep for at least the specified time"。这一点,以及客户端缺乏任何缓冲,也可能导致失真。
在创建UDP流媒体系统时,经常会用到RTP协议。 RTP 使用 UDP,它是一种无连接的不可靠协议。在传输层 (UDP),您需要处理丢失和无序到达。此外,网络层是突发的,数据不会以均匀的速率到达。相反,数据包将以不一致的到达间隔率到达。因此,您必须在本地缓冲数据以处理这种网络抖动。
涉及 java、UDP、RTP、网络抖动、缓冲、丢包。
处理损失也有不同的策略。您可以用沉默填充它或估计丢失的数据。此外,您的客户端播放样本的速度可能比您的服务器快,并最终 运行 输出数据。这是由于没有公共总线的两个系统之间时钟晶体的变化。
处理丢包和时钟漂移。
我正在尝试构建一个非常简单的带有源和接收器的音频流。但是当我在 "receiver" 中接收声音时,我有一些干扰。我正在使用 UDP 协议。有没有办法 "improve" 我的代码避免这些干扰?
这是我的音频服务器:
import java.io.File;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketException;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;
public class AudioPlayerServer implements Runnable {
private SourceDataLine sLine;
private AudioFormat audioFormat;
private AudioInputStream audioInputStream=null;
private String host="127.0.0.1";
private int port=8000;
private DatagramSocket server;
private DatagramPacket packet;
private long startTime;
private long endTime=System.nanoTime();;
private long elapsed=System.nanoTime();;
private double sleepTime;
private long sleepTimeMillis;
private int sleepTimeNanos, epsilon;
AudioPlayerServer(String host, int port) {
this.host=host;
this.port=port;
init();
}
public void init() {
File file = new File("test.wav");
try {
audioInputStream=AudioSystem.getAudioInputStream(file);
} catch (Exception e) {
e.printStackTrace();
}
audioFormat = new AudioFormat(44100, 16, 2, true, false);
DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat);
System.out.println(info);
try {
server = new DatagramSocket();
System.out.println("Server started");
} catch (SocketException e) {
e.printStackTrace();
}
}
public void run() {
try {
byte bytes[] = new byte[4096];
byte bytes2[] = new byte[1024];
int bytesRead=0;
//The sending rythm of the data have to be compatible with an audio streaming.
//So, I'll sleep the streaming thread for (1/SampleRate) seconds * (bytes.lenght/4) - epsilon
//=> bytes.lenght/4 because 4 values = 1 frame => For ex, in 1024 bits, there are 1024/4 = 256 frames
//epsilon because the instructions themselves takes time.
//The value have to be convert in milliseconds et nanoseconds.
sleepTime=(1024/audioFormat.getSampleRate());
epsilon=400000;
sleepTimeMillis=(long)(sleepTime*1000);
sleepTimeNanos=(int)((sleepTime*1000-sleepTimeMillis)*1000000);
System.out.println("Sleep time :"+sleepTimeMillis+" ms, "+sleepTimeNanos+" ns");
while ((bytesRead=audioInputStream.read(bytes, 0, bytes.length))!= -1) {
//getSignalLevel(bytes);
try {
//startTime=System.nanoTime();
packet = new DatagramPacket(bytes, bytes.length, InetAddress.getByName(host), port);
packet.setData(bytes);
server.send(packet);
packet.setLength(bytes.length);
//endTime=System.nanoTime();
//System.out.println(endTime-startTime);
Thread.sleep(sleepTimeMillis,sleepTimeNanos);
} catch (IOException e) {
e.printStackTrace();
}
}
System.out.println("No bytes anymore !");
} catch (Exception e) {
e.printStackTrace();
}
sLine.close();
System.out.println("Line closed");
}
}
这是客户:
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.UnknownHostException;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;
public class AudioReceiver implements Runnable{
private String host;
private int port;
private SourceDataLine sLine;
private AudioFormat audioFormat;
byte[] buffer=new byte[4096];
DatagramPacket packet;
AudioReceiver (String host, int port) {
this.host=host;
this.port=port;
init();
Thread t1=new Thread(new Reader());
t1.start();
}
public void init() {
audioFormat = new AudioFormat(44100, 16, 2, true, false);
DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat);
try {
System.out.println(info);
sLine=(SourceDataLine) AudioSystem.getLine(info);
System.out.println(sLine.getLineInfo() + " - sample rate : "+audioFormat.getSampleRate());
} catch (Exception e) {
e.printStackTrace();
}
}
public void run() {
System.out.println("Client started");
try {
sLine.open(audioFormat);
} catch (Exception e){
e.printStackTrace();
}
sLine.start();
System.out.println("Line started");
try {
DatagramSocket client = new DatagramSocket(port, InetAddress.getByName(host));
while (true) {
try {
packet = new DatagramPacket(buffer, buffer.length);
//System.out.println("Reception beggins for host "+host+" : "+port);
client.receive(packet);
//System.out.println("Reception ends");
buffer=packet.getData();
//sLine.write(packet.getData(), 0, buffer.length);
packet.setLength(buffer.length);
} catch (UnknownHostException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
} catch (SocketException e) {
e.printStackTrace();
} catch (UnknownHostException e1) {
e1.printStackTrace();
}
}
public class Reader implements Runnable {
public void run() {
while (true) {
if (packet!=null) {
sLine.write(packet.getData(), 0, buffer.length);
}
}
}
}
}
I'm using an UDP protocol. Is there a way to "improve" my code to avoid those interferences ?
有两种可能:
UDP 消息正在丢失。
客户端或服务器端的应用程序逻辑出现问题,导致音频流数据损坏。
假设问题是#1,最简单的选择是切换到 TCP。
UDP 天生就容易丢包,丢包会造成失真。 TCP 不是有损的,如果 "pipeline" 中有一些延迟(即 client-side 缓冲),您应该能够避免由于偶尔的数据包丢失和重传(在 TCP 级别)引起的抖动造成的失真).
我还注意到您当前的客户端/服务器逻辑正在尝试使用 sleep
控制服务器端的播放速率。您需要注意 sleep
不能保证在您期望的 准确 的时间点唤醒您的休眠线程。语义是 "sleep for at least the specified time"。这一点,以及客户端缺乏任何缓冲,也可能导致失真。
在创建UDP流媒体系统时,经常会用到RTP协议。 RTP 使用 UDP,它是一种无连接的不可靠协议。在传输层 (UDP),您需要处理丢失和无序到达。此外,网络层是突发的,数据不会以均匀的速率到达。相反,数据包将以不一致的到达间隔率到达。因此,您必须在本地缓冲数据以处理这种网络抖动。