Read parquet data from ByteArrayOutputStream instead of file
I want to convert this code:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.example.data.simple.SimpleGroup;
import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.util.HadoopInputFile;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.RecordReader;
import org.apache.parquet.schema.MessageType;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class ParquetReaderUtils {
    public static Parquet getParquetData(String filePath) throws IOException {
        List<SimpleGroup> simpleGroups = new ArrayList<>();
        ParquetFileReader reader = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(filePath), new Configuration()));
        MessageType schema = reader.getFooter().getFileMetaData().getSchema();
        //List<Type> fields = schema.getFields();
        PageReadStore pages;
        while ((pages = reader.readNextRowGroup()) != null) {
            long rows = pages.getRowCount();
            MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
            RecordReader recordReader = columnIO.getRecordReader(pages, new GroupRecordConverter(schema));
            for (int i = 0; i < rows; i++) {
                SimpleGroup simpleGroup = (SimpleGroup) recordReader.read();
                simpleGroups.add(simpleGroup);
            }
        }
        reader.close();
        return new Parquet(simpleGroups, schema);
    }
}
(from https://www.arm64.ca/post/reading-parquet-files-java/)
so that it takes a ByteArrayOutputStream parameter instead of a file path.
Is this possible? I don't see a ParquetStreamReader in org.apache.parquet.hadoop.
Any help is appreciated. I'm trying to write a test app for Parquet data coming from Kafka, and writing each of the many messages to a file is rather slow.
So, without deeper testing, I would try to go with this class (the content of the output stream does have to be valid Parquet, of course). I put a streamId in there to make it easier to identify which byte array is being processed (ParquetFileReader prints instance.toString() if something goes wrong).
import org.apache.parquet.io.DelegatingSeekableInputStream;
import org.apache.parquet.io.InputFile;
import org.apache.parquet.io.SeekableInputStream;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class ParquetStream implements InputFile {
    private final String streamId;
    private final byte[] data;

    // ByteArrayInputStream tracks its position in the protected "pos" field,
    // so exposing that field is enough to make the stream seekable.
    private static class SeekableByteArrayInputStream extends ByteArrayInputStream {
        public SeekableByteArrayInputStream(byte[] buf) {
            super(buf);
        }

        public void setPos(int pos) {
            this.pos = pos;
        }

        public int getPos() {
            return this.pos;
        }
    }

    public ParquetStream(String streamId, ByteArrayOutputStream stream) {
        this.streamId = streamId;
        this.data = stream.toByteArray();
    }

    @Override
    public long getLength() throws IOException {
        return this.data.length;
    }

    @Override
    public SeekableInputStream newStream() throws IOException {
        return new DelegatingSeekableInputStream(new SeekableByteArrayInputStream(this.data)) {
            @Override
            public void seek(long newPos) throws IOException {
                ((SeekableByteArrayInputStream) this.getStream()).setPos((int) newPos);
            }

            @Override
            public long getPos() throws IOException {
                return ((SeekableByteArrayInputStream) this.getStream()).getPos();
            }
        };
    }

    @Override
    public String toString() {
        return "ParquetStream[" + streamId + "]";
    }
}
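For completeness, here is a rough, untested sketch of how the reader method from the question could then consume a ByteArrayOutputStream through this InputFile implementation. The Parquet holder class is the one from the linked blog post, and the streamId parameter is just the label passed through to ParquetStream:

import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.example.data.simple.SimpleGroup;
import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.RecordReader;
import org.apache.parquet.schema.MessageType;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class ParquetReaderUtils {
    public static Parquet getParquetData(String streamId, ByteArrayOutputStream stream) throws IOException {
        List<SimpleGroup> simpleGroups = new ArrayList<>();
        // ParquetFileReader.open(...) accepts any org.apache.parquet.io.InputFile,
        // so the in-memory ParquetStream replaces HadoopInputFile.fromPath(...)
        // and no Hadoop Path or Configuration is needed here.
        try (ParquetFileReader reader = ParquetFileReader.open(new ParquetStream(streamId, stream))) {
            MessageType schema = reader.getFooter().getFileMetaData().getSchema();
            PageReadStore pages;
            while ((pages = reader.readNextRowGroup()) != null) {
                long rows = pages.getRowCount();
                MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
                RecordReader recordReader = columnIO.getRecordReader(pages, new GroupRecordConverter(schema));
                for (long i = 0; i < rows; i++) {
                    simpleGroups.add((SimpleGroup) recordReader.read());
                }
            }
            return new Parquet(simpleGroups, schema);
        }
    }
}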