SQL concatenate many tsv files into single table in a database, while keeping track of file source (MonetDBLite)
I'm making a MonetDB database using the MonetDBLite R package. I can create database tables following the instructions here, with code like this:
library(DBI)
library(MonetDBLite)
# Write tsv file of mtcars
write.table(mtcars, "mtcars.tsv", row.names=FALSE, sep= "\t")
# Initialize MonetDB
dbdir <- "/Users/admin/my_directory"
con <- dbConnect(MonetDBLite::MonetDBLite(), dbdir)
# Write table
dbWriteTable(con, "test4", "mtcars.tsv", delim="\t")
The following query gives:
> dbGetQuery(con, "SELECT * FROM test4 LIMIT 3")
mpg cyl disp hp drat wt qsec vs am gear carb
1 21.0 6 160.0 110 3.90 2.620 16.46 0 1 4 4
2 21.0 6 160.0 110 3.90 2.875 17.02 0 1 4 4
3 22.8 4 108.0 93 3.85 2.320 18.61 1 1 4 1
So far so good. But now suppose I have another file, mtcars2, with different mpg values:
mtcars2 <- mtcars
mtcars2$mpg <- mtcars2$mpg + 5
write.table(mtcars2, "mtcars2.tsv", row.names= FALSE, sep = "\t")
I can load it into another table:
dbWriteTable(con, "test5", "mtcars2.tsv", delim = "\t")
> dbGetQuery(con, "SELECT * FROM test5 LIMIT 3")
mpg cyl disp hp drat wt qsec vs am gear carb
1 26.0 6 160 110 3.90 2.620 16.46 0 1 4 4
2 26.0 6 160 110 3.90 2.875 17.02 0 1 4 4
3 27.8 4 108 93 3.85 2.320 18.61 1 1 4 1
Also fine. But here's my problem: later I want to look up the mpg of every car with 6 cyl, and know which dataset it came from (mtcars or mtcars2). From what I understand of SQL indexing (which isn't much, basically what I've read here), I should have all the data in a single table for the most efficient searching. I tried loading the first tsv file and then adding another column with the SQL commands ALTER TABLE test4 ADD dataset TEXT and UPDATE test4 SET dataset = 1 -
dbSendQuery(con, "UPDATE test4 SET dataset = dataset1")
dbSendQuery(con, "UPDATE test4 SET dataset = 1")
> dbGetQuery(con, "SELECT * FROM test4 LIMIT 3")
mpg cyl disp hp drat wt qsec vs am gear carb dataset
1 21.0 6 160 110 3.90 2.620 16.46 0 1 4 4 1
2 21.0 6 160 110 3.90 2.875 17.02 0 1 4 4 1
3 22.8 4 108 93 3.85 2.320 18.61 1 1 4 1 1
But when I tried to append mtcars2 to the table, it failed because the two tables have a different number of columns (as I should have expected, duh). What's the best way to concatenate data from many tsv files with identical columns into a single table, while keeping track of which file each row came from?
EDIT: As you may have guessed, the real data isn't mtcars: it's flat tsv files millions of rows long, which means I want to avoid reading each whole file into memory and manipulating it in R.
You should be able to do what you want if you read the file in, create a new variable in the data.frame, and then do the dbWriteTable(). Something like:
library(DBI)
library(MonetDBLite)
library(data.table)
# Write tsv file of mtcars
tmp <- tempfile()
write.table(mtcars, tmp, row.names=FALSE, sep= "\t")
# Initialize MonetDB
dbdir <- "~/Desktop/temp"
con <- dbConnect(MonetDBLite::MonetDBLite(), dbdir)
test4df <- fread(tmp)              # read the whole tsv into memory
test4df$dataset <- 1               # tag the rows with their source
dbWriteTable(con, "test4", test4df)
dbReadTable(con, "test4")
test5df <- fread(tmp)
test5df$mpg <- test5df$mpg + 5     # simulate the second dataset
test5df$dataset <- 2
dbWriteTable(con, "test4", test5df, append = TRUE)
dbReadTable(con, "test4")
EDIT (a suggestion along the way that avoids reading the files into R)
If you want to work without reading the files into R, you can do something like the following to rewrite each file with an extra field appended. As written, this will only work on an OS with bash (i.e. Mac/Linux).
infile <- tmp
outfile <- tempfile()
# open connections
incon <- file(description = infile, open = "r")
outcon <- file(description = outfile, open = "w")
# count the number of lines (this will work only with Mac/Linux)
com <- paste("wc -l ", infile, " | awk '{ print $1 }'", sep="")
n <- as.integer(system(command=com, intern=TRUE))
# work with the first line
txt <- scan(file = incon, what = character(), nlines=1, quiet=TRUE)
txt <- c(txt, "dataset")
cat(paste(txt, collapse = "\t"), "\n", file = outcon, sep = "")
# work with the rest of the file
for(i in 2:n) {
  txt <- scan(file = incon, what = character(), nlines=1, quiet=TRUE)
  txt <- c(txt, "1")
  cat(paste(txt, collapse = "\t"), "\n", file = outcon, sep = "")
}
close(incon);close(outcon)
dbWriteTable(con, "test4", outfile, delim = "\t")
# do the similar for other files
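If several files need the same treatment, the rewrite above can be wrapped in a small helper and called once per file; this is only a sketch, and the function name append_dataset_column is mine, not part of the answer:
# hypothetical helper: copy infile to a temp file, appending a dataset column
append_dataset_column <- function(infile, label) {
  outfile <- tempfile()
  incon <- file(infile, open = "r")
  outcon <- file(outfile, open = "w")
  on.exit({ close(incon); close(outcon) }, add = TRUE)
  # header line gets the new column name
  header <- readLines(incon, n = 1)
  writeLines(paste(header, "dataset", sep = "\t"), outcon)
  # every data line gets the label appended
  repeat {
    line <- readLines(incon, n = 1)
    if (length(line) == 0) break
    writeLines(paste(line, label, sep = "\t"), outcon)
  }
  outfile
}
# e.g. dbWriteTable(con, "test4", append_dataset_column(tmp, "1"), delim = "\t")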
Following xQbert's suggestion, I solved the problem using only SQL commands (necessary, and faster than the bash approach, given that my data is 10 files, each millions of lines long).
library(DBI)
library(MonetDBLite)
# Write tsv file of mtcars
write.table(mtcars, "mtcars.tsv", row.names=FALSE, sep= "\t")
# Write tsv of second mtcars
mtcars2 <- mtcars
mtcars2$mpg <- mtcars2$mpg + 5
write.table(mtcars2, "mtcars2.tsv", row.names= FALSE, sep = "\t")
# Initialize MonetDB
dbdir <- "/Users/admin/"
con <- dbConnect(MonetDBLite::MonetDBLite(), dbdir)
# Write table
dbWriteTable(con, "test4", "mtcars.tsv", delim="\t")
# Add data source information
dbSendQuery(con, "ALTER TABLE test4 ADD source TEXT")
dbSendQuery(con, "UPDATE test4 SET source = 'dataset1'")
# Write second dataset to a temporary table
dbWriteTable(con, "temptable", "mtcars2.tsv", delim="\t")
# Add data source information
dbSendQuery(con, "ALTER TABLE temptable ADD source TEXT")
dbSendQuery(con, "UPDATE temptable SET source = 'dataset2'")
# Insert temp table into main table
dbSendQuery(con, "INSERT INTO test4 SELECT * FROM temptable")
# Drop temp table
dbSendQuery(con, "DROP TABLE temptable")
# Checking the data, truncated for clarity
> dbGetQuery(con, "SELECT * FROM test4")
mpg cyl disp hp drat wt qsec vs am gear carb source
1 21.0 6 160.0 110 3.90 2.620 16.46 0 1 4 4 dataset1
2 21.0 6 160.0 110 3.90 2.875 17.02 0 1 4 4 dataset1
3 22.8 4 108.0 93 3.85 2.320 18.61 1 1 4 1 dataset1
...
33 26.0 6 160.0 110 3.90 2.620 16.46 0 1 4 4 dataset2
34 26.0 6 160.0 110 3.90 2.875 17.02 0 1 4 4 dataset2
35 27.8 4 108.0 93 3.85 2.320 18.61 1 1 4 1 dataset2
...
64 26.4 4 121.0 109 4.11 2.780 18.60 1 1 4 2 dataset2
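With the source column in place, the lookup described in the question (the mpg of every 6-cylinder car, together with the dataset it came from) is a single query, for example:
# mpg of all 6-cylinder cars, labelled by source
dbGetQuery(con, "SELECT source, mpg FROM test4 WHERE cyl = 6")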
Sorry if I didn't make it clear in my question that my data is much bigger than mtcars; if you have moderately sized data, the data.table package is probably a better solution than a database.
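For completeness, a minimal in-memory sketch of that data.table route (reusing the tsv files written earlier; only sensible if everything fits in RAM):
library(data.table)
# read each file, stack them, and record the source in an id column
files <- c("mtcars.tsv", "mtcars2.tsv")
dt <- rbindlist(lapply(files, fread), idcol = "dataset")
# mpg of the 6-cylinder cars per source
dt[cyl == 6, .(dataset, mpg)]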
Here is what I would do, given a set of files with identical structure where you want the file name in the final table, which is otherwise just a combination of the data from all the files:
# say we have those files
write.table(mtcars, "mtcars1.tsv", row.names=FALSE, sep= "\t")
write.table(mtcars, "mtcars2.tsv", row.names=FALSE, sep= "\t")
# write them individually, and add a column that contains the file name
dbWriteTable(con, "mtcars1", "mtcars1.tsv", delim="\t")
dbSendQuery(con, "ALTER TABLE mtcars1 ADD COLUMN file STRING DEFAULT 'mtcars1.tsv';")
dbWriteTable(con, "mtcars2", "mtcars2.tsv", delim="\t")
dbSendQuery(con, "ALTER TABLE mtcars2 ADD COLUMN file STRING DEFAULT 'mtcars2.tsv';")
# now combine into a new table
dbSendQuery(con, "CREATE TABLE mtcars_mat AS SELECT * FROM mtcars1 UNION ALL SELECT * FROM mtcars2")
# or a view if you don't need to modify the data in the mtcars table (faster)
dbSendQuery(con, "CREATE view mtcars AS SELECT * FROM mtcars1 UNION ALL SELECT * FROM mtcars2")
# and here is the same as a loop with a filename glob and some added robustness (handy if you have 1000 files)
files <- Sys.glob("/some/path/mtcars*.tsv")
tables <- dbQuoteIdentifier(con, tools::file_path_sans_ext(basename(files)))
dbBegin(con)
for (i in 1:length(files)) {
  dbWriteTable(con, tables[i], files[i], delim="\t", transaction=FALSE)
  dbSendQuery(con, paste0("ALTER TABLE ", tables[i], " ADD COLUMN file STRING DEFAULT ", dbQuoteString(con, files[i]), ";"))
}
dbSendQuery(con, paste0("CREATE TABLE somefinalresult AS ", paste0("SELECT * FROM ",tables, collapse=" UNION ALL ")))
# remove the parts again, optional
dbSendQuery(con, paste0("DROP TABLE ", tables, ";", collapse=" "))
dbCommit(con)
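Whichever variant you use, the added file column identifies the source directly, so a query like the following (a hedged example against the mtcars_mat table created above) answers the original question:
# mpg per source file for the 6-cylinder cars
dbGetQuery(con, "SELECT file, mpg FROM mtcars_mat WHERE cyl = 6")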