How to speed up reading a DBF file into a pandas DataFrame in Python?

I am using the routine dbf2DF below (https://gist.github.com/ryan-hill/f90b1c68f60d12baea81) to read a .dbf file into a DataFrame.

import pysal as ps
import pandas as pd

def dbf2DF(dbfile, upper=True):
    '''
    Reads in a DBF file and returns a pandas DataFrame.

    Arguments
    ---------
    dbfile  : DBF file - input to be imported
    upper   : condition - if True, make column headers upper case
    '''
    db = ps.open(dbfile) #Pysal to open DBF
    d = {col: db.by_col(col) for col in db.header} #Convert dbf to dictionary
    #pandasDF = pd.DataFrame(db[:]) #Convert to Pandas DF
    pandasDF = pd.DataFrame(d) #Convert to Pandas DF
    if upper: #Make columns uppercase if wanted
        pandasDF.columns = [col.upper() for col in db.header]
    db.close()
    return pandasDF

While it does what I need, it is slow - 56 seconds for 1.7 million records.

Of that, 54 seconds are spent processing the following line:

d = {col: db.by_col(col) for col in db.header} #Convert dbf to dictionary

My question is - can we speed this line up by eliminating the 'for' loop?

You don't mention how many columns your file has. That makes a big difference to how long this takes. The code below took about 70 seconds on 1.3 million records with 33 columns; with the column count reduced to 10, it took about 25 seconds.

This code does not account for memo fields.

I am using Ethan Furman's dbf library (https://pypi.org/project/dbf/).

A .dbf file is a fixed-width format file with some metadata describing the columns, record count, and so on.
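As a point of reference, here is a minimal sketch of pulling that metadata straight out of the 32-byte DBF file header. The offsets follow the standard dBASE layout; read_dbf_header is a hypothetical helper written for this answer, not part of the dbf library:

import struct

def read_dbf_header(path):
    # the first 32 bytes of a dBASE file describe the table:
    # bytes 4-7: record count (uint32), bytes 8-9: header length,
    # bytes 10-11: record length (both uint16, all little-endian)
    with open(path, 'rb') as f:
        header = f.read(32)
    record_count, header_length, record_length = struct.unpack('<IHH', header[4:12])
    return record_count, header_length, record_length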

Pandas has a read_fwf function that can be used to read the data portion of a .dbf file by passing it the column widths and names.
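In isolation, the idea looks like this - a minimal sketch with made-up widths, names, and data:

import io
import pandas as pd

# two fixed-width columns: a 4-character id and a 10-character name
sample = '0001Alice     \n0002Bob       \n'
df = pd.read_fwf(io.StringIO(sample), widths=[4, 10], names=['ID', 'NAME'], header=None)
# df now has two rows: (1, 'Alice') and (2, 'Bob')

The full routine below applies the same idea to the data section of a real .dbf file: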

import datetime
import io
from collections import defaultdict

import dbf
import numpy as np
import pandas as pd
import regex

def format_dates(df, columns):
    # coerce date columns (read in as strings) to datetime64
    if columns:
        df[columns] = df[columns].astype(np.datetime64, errors='ignore')

def format_float(df, columns):
    # coerce numeric columns with decimals to float64
    if columns:
        df[columns] = df[columns].astype(np.float64, errors='ignore')

def format_int(df, columns):
    # coerce whole-number columns to int64
    if columns:
        df[columns] = df[columns].astype(np.int64, errors='ignore')

def format_bool(df, columns):
    # DBF logical fields hold 'T'/'F'; map them to booleans
    if columns:
        df[columns] = df[columns].apply(lambda x: np.where(x == 'T', True, False))

def get_dbf_field_meta(table, keep_delete_mark=False):
    # collect column names, python types, and field widths from an open dbf table
    column_names = []
    field_types = defaultdict(list)
    field_sizes = []
    if keep_delete_mark:
        # every DBF record is prefixed with a one-byte deletion flag
        column_names.append('dbf_delete')
        field_sizes.append(1)
    column_names.extend(table.field_names)
    for field in table.field_names:
        fld_info = table.field_info(field)
        field_sizes.append(fld_info[1])
        ftype = fld_info[3]
        if ftype == 'default':
            # numeric fields report 'default'; pick int or float from the type code
            ftype = ['float', 'int'][fld_info[0] == '78']
        field_types[ftype].append(field)

    return column_names, field_types, field_sizes

def get_fwf_dataframe_from_dbf(table_name):
    with dbf.Table(table_name).open(dbf.READ_ONLY) as table:
        column_names, field_types, field_sizes = get_dbf_field_meta(table, keep_delete_mark=True)

        # how far to read into the .dbf file to reach the data section
        skip_meta = len(table._meta.header._data)
        # skip the header bytes and drop the trailing EOF byte
        data = table._meta.dfd.read()[skip_meta:-1].decode('cp1252')

        # process at most 20,000 records at a time to limit memory use
        chunk_size = min(20000 * table.record_length, len(data))
        df = pd.DataFrame(columns=column_names)
        while data:
            chunk, data = data[:chunk_size], data[chunk_size:]
            # split the chunk into fixed-length records, one per line
            chunk = '\n'.join(regex.findall(f'.{{{table.record_length}}}', chunk)) + '\n'
            iodata = io.StringIO(chunk)
            incoming = pd.read_fwf(iodata, widths=field_sizes, names=column_names, header=None, keep_default_na=False, dtype=object, index_col=False)
            df = pd.concat([df, incoming])
            del iodata, incoming

        # typecast columns
        format_dates(df, field_types.get(datetime.date, []))
        format_float(df, field_types.get('float', []))
        format_int(df, field_types.get('int', []))
        format_bool(df, field_types.get(bool, []))
    return df

if __name__ == '__main__':
    table_name = r"<your table>"
    df = get_fwf_dataframe_from_dbf(table_name)