On-the-fly stream decompression causes artifacts when the buffer is larger than 1 byte
I am currently testing several decompression libraries for a project I'm involved in, to decompress HTTP file streams on the fly. I have tried two very promising libraries and found an issue that seems to appear in both of them.
This is what I am doing:
- video.avi compressed into video.zip on an HTTP server: test.com/video.zip (~20 MB)
- HttpWebRequest reads the stream from the server
- The HttpWebRequest ResponseStream data is written into a MemoryStream
- The decompression library reads from the MemoryStream
- The decompressed file stream is read while the HttpWebRequest download is still in progress
The whole idea works fine; I am able to decompress the zipped video and stream it directly into VLC's stdin, and it renders just fine. However, I have to use a read buffer of one byte on the decompression library. Any buffer larger than one byte causes the uncompressed data stream to be cut off. For a test I wrote the decompressed stream to a file and compared it with the original video.avi: some data is simply skipped during decompression. When streaming this corrupted data to VLC, it causes a lot of video artifacts, and the playback speed is greatly reduced as well.
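A byte-wise comparison along the following lines is enough to see this; this is only a minimal sketch, and the paths are placeholders rather than the actual test setup:
using System;
using System.IO;

class CompareFiles
{
    static void Main()
    {
        // Placeholder paths: the original video and the decompressed output written to disk.
        byte[] a = File.ReadAllBytes(@"C:\temp\video.avi");
        byte[] b = File.ReadAllBytes(@"C:\temp\video-decompressed.avi");

        if (a.Length != b.Length)
            Console.WriteLine("Length differs: " + a.Length + " vs " + b.Length);

        // Report the first byte offset at which the two files diverge.
        int limit = Math.Min(a.Length, b.Length);
        for (int i = 0; i < limit; i++)
        {
            if (a[i] != b[i])
            {
                Console.WriteLine("First difference at offset " + i);
                return;
            }
        }
        Console.WriteLine("No differences within the common prefix");
    }
}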
If I knew the size of what is available to read, I could trim my buffer accordingly, but neither library makes this information public, so all I can do is read with a one-byte buffer. Maybe my approach is wrong, or maybe I am overlooking something?
Here is some sample code (VLC is required):
ICSharpCode.SharpZipLib (http://icsharpcode.github.io/SharpZipLib/)
using System;
using System.Diagnostics;
using System.IO;
using System.Net;
using ICSharpCode.SharpZipLib.Zip;

static void Main(string[] args)
{
    // Initialise VLC
    Process vlc = new Process()
    {
        StartInfo =
        {
            FileName = @"C:\Program Files\VideoLAN\vlc.exe", // Adjust as required to test the code
            RedirectStandardInput = true,
            UseShellExecute = false,
            Arguments = "-"
        }
    };
    vlc.Start();
    Stream outStream = vlc.StandardInput.BaseStream;

    // Get source stream
    HttpWebRequest stream = (HttpWebRequest)WebRequest.Create("http://codefreak.net/~daniel/apps/stream60s-large.zip");
    Stream compressedVideoStream = stream.GetResponse().GetResponseStream();

    // Create local decompression loop
    MemoryStream compressedLoopback = new MemoryStream();
    ZipInputStream zipStream = new ZipInputStream(compressedLoopback);
    ZipEntry currentEntry = null;

    byte[] videoStreamBuffer = new byte[8192]; // 8 KB read buffer
    int read = 0;
    long totalRead = 0;
    while ((read = compressedVideoStream.Read(videoStreamBuffer, 0, videoStreamBuffer.Length)) > 0)
    {
        // Write the compressed video stream into the loopback without affecting the current read position
        long previousPosition = compressedLoopback.Position;   // Store current read position
        compressedLoopback.Position = totalRead;               // Jump to last write position
        totalRead += read;                                     // Advance last write position by current read size
        compressedLoopback.Write(videoStreamBuffer, 0, read);  // Write data into loopback
        compressedLoopback.Position = previousPosition;        // Restore reading position

        // If not already there, move to the first entry
        if (currentEntry == null)
            currentEntry = zipStream.GetNextEntry();

        byte[] outputBuffer = new byte[1]; // Decompression read buffer, this is the bad one!
        int zipRead = 0;
        while ((zipRead = zipStream.Read(outputBuffer, 0, outputBuffer.Length)) > 0)
            outStream.Write(outputBuffer, 0, outputBuffer.Length); // Write directly to VLC stdin
    }
}
SharpCompress (https://github.com/adamhathcock/sharpcompress)
using System;
using System.Diagnostics;
using System.IO;
using System.Net;
using SharpCompress.Readers;     // Recent SharpCompress versions; older releases use SharpCompress.Reader / SharpCompress.Common
using SharpCompress.Readers.Zip; // Older releases: SharpCompress.Reader.Zip

static void Main(string[] args)
{
    // Initialise VLC
    Process vlc = new Process()
    {
        StartInfo =
        {
            FileName = @"C:\Program Files\VideoLAN\vlc.exe", // Adjust as required to test the code
            RedirectStandardInput = true,
            UseShellExecute = false,
            Arguments = "-"
        }
    };
    vlc.Start();
    Stream outStream = vlc.StandardInput.BaseStream;

    // Get source stream
    HttpWebRequest stream = (HttpWebRequest)WebRequest.Create("http://codefreak.net/~daniel/apps/stream60s-large.zip");
    Stream compressedVideoStream = stream.GetResponse().GetResponseStream();

    // Create local decompression loop
    MemoryStream compressedLoopback = new MemoryStream();
    ZipReader zipStream = null;
    EntryStream currentEntry = null;

    byte[] videoStreamBuffer = new byte[8192]; // 8 KB read buffer
    int read = 0;
    long totalRead = 0;
    while ((read = compressedVideoStream.Read(videoStreamBuffer, 0, videoStreamBuffer.Length)) > 0)
    {
        // Write the compressed video stream into the loopback without affecting the current read position
        long previousPosition = compressedLoopback.Position;   // Store current read position
        compressedLoopback.Position = totalRead;               // Jump to last write position
        totalRead += read;                                     // Advance last write position by current read size
        compressedLoopback.Write(videoStreamBuffer, 0, read);  // Write data into loopback
        compressedLoopback.Position = previousPosition;        // Restore reading position

        // Open the reader only after writing to the stream; otherwise it cannot identify the compression type
        if (zipStream == null)
            zipStream = (ZipReader)ReaderFactory.Open(compressedLoopback); // Cast to ZipReader, as we know the type

        // If not already there, move to the first entry
        if (currentEntry == null)
        {
            zipStream.MoveToNextEntry();
            currentEntry = zipStream.OpenEntryStream();
        }

        byte[] outputBuffer = new byte[1]; // Decompression read buffer, this is the bad one!
        int zipRead = 0;
        while ((zipRead = currentEntry.Read(outputBuffer, 0, outputBuffer.Length)) > 0)
            outStream.Write(outputBuffer, 0, outputBuffer.Length); // Write directly to VLC stdin
    }
}
To test this code, I suggest setting the output buffer of SharpZipLib to 2 bytes and that of SharpCompress to 8 bytes. You will see the artifacts, and you will also see that the video's playback speed is wrong: the seek time should always be aligned with the number counting up in the video.
I haven't really found any good explanation of why a larger outputBuffer for the reads from the decompression library causes these problems, nor any way to solve this other than keeping the buffer as small as possible.
So my question is: am I doing something wrong, or is this a general problem when reading compressed files from a stream? And how can I increase the outputBuffer while still reading the correct data?
Any help is greatly appreciated!
Best regards,
Gall
You only need to write the number of bytes you actually read. Writing the whole buffer size adds extra bytes (whatever happened to be in the buffer beforehand), because zipStream.Read is not required to read as many bytes as you requested.
while ((zipRead = zipStream.Read(outputBuffer, 0, outputBuffer.Length)) > 0)
    outStream.Write(outputBuffer, 0, zipRead); // Write directly to VLC stdin
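To make the Read contract concrete, here is a minimal, self-contained sketch (not from the original post) that round-trips a buffer through the BCL's DeflateStream with an 8 KB read buffer; the class and variable names are illustrative, and the essential point is that each Write uses the count returned by Read:
using System;
using System.IO;
using System.IO.Compression;
using System.Linq;

class ReadContractDemo
{
    static void Main()
    {
        // Build some sample data and deflate-compress it in memory.
        byte[] original = new byte[100000];
        new Random(42).NextBytes(original);

        MemoryStream compressed = new MemoryStream();
        using (DeflateStream deflate = new DeflateStream(compressed, CompressionMode.Compress, true))
            deflate.Write(original, 0, original.Length);
        compressed.Position = 0;

        // Decompress with a large buffer, honouring the return value of Read.
        MemoryStream restored = new MemoryStream();
        using (DeflateStream inflate = new DeflateStream(compressed, CompressionMode.Decompress))
        {
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = inflate.Read(buffer, 0, buffer.Length)) > 0)
                restored.Write(buffer, 0, bytesRead); // write bytesRead, NOT buffer.Length
        }

        // Writing buffer.Length instead of bytesRead above would make this comparison fail.
        Console.WriteLine(restored.ToArray().SequenceEqual(original) ? "Round-trip OK" : "Corrupted");
    }
}
Writing buffer.Length instead of bytesRead pads the output with stale bytes from the end of the buffer on every short read, which is exactly the kind of corruption that produces the artifacts described in the question.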