批量备份 DolphinDB 数据库中所有表的任何有效方法?
Any efficient way for batch backup of all tables in a DolphinDB database?
我正在寻找一种更有效的方法来备份数据库中的数据。有没有推荐的函数或方法?
您可以尝试以下方式实现批量备份:
backupDir="E:/Tools/DB_backup"
// Back up every table of every DFS database on this server into backupDir.
// backupDir : directory that will hold the backup files.
// force=true: overwrite any existing backup of the same table.
def batchBackup(backupDir){
    // Fixed: original called `getDFSDatabase` (wrong name, never invoked);
    // the built-in that lists all DFS database paths is getDfsDatabases().
    for(db in getDfsDatabases()){
        tableList = getTables(database(db));
        for(tb in tableList){
            backup(backupDir, <select * from loadTable(db, tb)>, true);
        }
    }
}
batchBackup(backupDir);
如果您有兴趣,这里是完整的验证脚本。
login(`admin, `123456)
// Fixed: the closing quote of the path literal was missing.
backupDir="E:/Tools/DB_backup"
n = 10
// Two small in-memory sample tables used to populate the partitioned tables.
t1=table(rand(2012.12.01..2012.12.10, n) as date, rand(`AAPL`IBM`MSFT`GOOG, n) as sym, rand(1000.0, n) as price)
t2=table(rand(2012.12.01..2012.12.10, n) as date, rand(`AAPL`IBM`MSFT`GOOG, n) as sym, rand(1000, n) as qty)
// Fixed: db1 was created with path "dfs://db2", and db2 was used below but
// never defined. Create both value-partitioned DFS databases explicitly.
db1 = database("dfs://db1", VALUE, `AAPL`IBM`MSFT`GOOG)
db2 = database("dfs://db2", VALUE, `AAPL`IBM`MSFT`GOOG)
// Fixed: trades1/trades2 were backed up below but never created, and the
// method name was misspelled (createPartionedTable -> createPartitionedTable).
trades1 = db1.createPartitionedTable(t1, `trades1, `sym).append!(t1)
trades2 = db1.createPartitionedTable(t2, `trades2, `sym).append!(t2)
quotes1 = db2.createPartitionedTable(t1, `quotes1, `sym).append!(t1)
quotes2 = db2.createPartitionedTable(t2, `quotes2, `sym).append!(t2)
// one-by-one backup
backup(backupDir, <select * from trades1>, true)
backup(backupDir, <select * from trades2>, true)
backup(backupDir, <select * from quotes1>, true)
backup(backupDir, <select * from quotes2>, true)
dropDatabase("dfs://db1")
dropDatabase("dfs://db2")
// This query is expected to FAIL: the database was just dropped.
select * from loadTable("dfs://db1", `trades1);
// batch recovery
migrate(backupDir);
backupDir="E:/Tools/DB_backup"
// Back up every table of every DFS database on this server into backupDir.
// force=false: tables that already have a backup in backupDir are skipped.
def batchBackup(backupDir){
    // Fixed typo: getDFSDatabses() -> getDfsDatabases().
    for(db in getDfsDatabases()){
        tableList = getTables(database(db));
        for(tb in tableList){
            backup(backupDir, <select * from loadTable(db, tb)>, false);
        }
    }
}
// batch backup
batchBackup(backupDir);
// Verify recovery: after migrate() the table is readable again.
// Fixed typo: `trade1 -> `trades1.
select * from loadTable("dfs://db1", `trades1);
我正在寻找一种更有效的方法来备份数据库中的数据。有没有推荐的函数或方法?
您可以尝试以下方式实现批量备份:
backupDir="E:/Tools/DB_backup"
// Back up every table of every DFS database on this server into backupDir.
// backupDir : directory that will hold the backup files.
// force=true: overwrite any existing backup of the same table.
def batchBackup(backupDir){
    // Fixed: original called `getDFSDatabase` (wrong name, never invoked);
    // the built-in that lists all DFS database paths is getDfsDatabases().
    for(db in getDfsDatabases()){
        tableList = getTables(database(db));
        for(tb in tableList){
            backup(backupDir, <select * from loadTable(db, tb)>, true);
        }
    }
}
batchBackup(backupDir);
如果您有兴趣,这里是完整的验证脚本。
login(`admin, `123456)
// Fixed: the closing quote of the path literal was missing.
backupDir="E:/Tools/DB_backup"
n = 10
// Two small in-memory sample tables used to populate the partitioned tables.
t1=table(rand(2012.12.01..2012.12.10, n) as date, rand(`AAPL`IBM`MSFT`GOOG, n) as sym, rand(1000.0, n) as price)
t2=table(rand(2012.12.01..2012.12.10, n) as date, rand(`AAPL`IBM`MSFT`GOOG, n) as sym, rand(1000, n) as qty)
// Fixed: db1 was created with path "dfs://db2", and db2 was used below but
// never defined. Create both value-partitioned DFS databases explicitly.
db1 = database("dfs://db1", VALUE, `AAPL`IBM`MSFT`GOOG)
db2 = database("dfs://db2", VALUE, `AAPL`IBM`MSFT`GOOG)
// Fixed: trades1/trades2 were backed up below but never created, and the
// method name was misspelled (createPartionedTable -> createPartitionedTable).
trades1 = db1.createPartitionedTable(t1, `trades1, `sym).append!(t1)
trades2 = db1.createPartitionedTable(t2, `trades2, `sym).append!(t2)
quotes1 = db2.createPartitionedTable(t1, `quotes1, `sym).append!(t1)
quotes2 = db2.createPartitionedTable(t2, `quotes2, `sym).append!(t2)
// one-by-one backup
backup(backupDir, <select * from trades1>, true)
backup(backupDir, <select * from trades2>, true)
backup(backupDir, <select * from quotes1>, true)
backup(backupDir, <select * from quotes2>, true)
dropDatabase("dfs://db1")
dropDatabase("dfs://db2")
// This query is expected to FAIL: the database was just dropped.
select * from loadTable("dfs://db1", `trades1);
// batch recovery
migrate(backupDir);
backupDir="E:/Tools/DB_backup"
// Back up every table of every DFS database on this server into backupDir.
// force=false: tables that already have a backup in backupDir are skipped.
def batchBackup(backupDir){
    // Fixed typo: getDFSDatabses() -> getDfsDatabases().
    for(db in getDfsDatabases()){
        tableList = getTables(database(db));
        for(tb in tableList){
            backup(backupDir, <select * from loadTable(db, tb)>, false);
        }
    }
}
// batch backup
batchBackup(backupDir);
// Verify recovery: after migrate() the table is readable again.
// Fixed typo: `trade1 -> `trades1.
select * from loadTable("dfs://db1", `trades1);