在多处理中不支持 'str' 和 'int' 的实例
not supported instances of 'str' and 'int' in multiprocessing
我正在尝试在一个接收数据帧和条件、并返回新数据帧的函数上运行多进程任务。
我的问题是，调用该函数时收到错误 "not supported instances of 'str' and 'int'"。
import multiprocessing
import pandas as pd
def operation(data, condition):
.
. No problem in the function since I tested it isolated
.
.
return pd.DataFrame(result_of_operation)
if __name__ == '__main__':
data = pd.read_csv(r"C:\Users\max\test.csv", index_col=False, encoding='UTF-8')
column_names = ["A", "B", "C"]
new_df = pd.DataFrame(columns = column_names)
condition = ['orange', 'banana', 'apple']
with multiprocessing.Pool(8) as p:
for x in range(0, len(condition)):
new_df.append(p.map(operation, data, condition[x]),ignore_index=True)
我认为问题出在我的 map 调用方式上，因为如果我直接调用该函数，它就能正常工作，例如：
for x in range(0, len(condition)):
new_df.append(operation(data, condition[x]),ignore_index=True)
您向 map() 传递参数的方式似乎是错误的。
试试这个:
from functools import partial
with multiprocessing.Pool() as p:
new_df = pd.concat(p.map(partial(operation, data), condition), ignore_index=True)
也许为了让事情更清楚,这里有一个完整的工作示例。请注意,append() 已被弃用,因此我在此处使用首选 concat():
from pandas import DataFrame, concat
from concurrent.futures import ProcessPoolExecutor
from functools import partial
def process(columns, data):
    """Wrap *data* in a new DataFrame labelled with *columns*."""
    frame = DataFrame(data=data, columns=columns)
    return frame
def main():
    """Fan row groups out to worker processes and print the combined frame."""
    row_groups = [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
    # Pre-bind the column labels so each worker call takes one row group.
    build_frame = partial(process, ['A'])
    with ProcessPoolExecutor(max_workers=3) as pool:
        frames = pool.map(build_frame, row_groups)
        newdf = concat(frames, ignore_index=True)
    print(newdf)
# Entry-point guard: mandatory with process pools — under the "spawn" start
# method (e.g. Windows) each worker re-imports this module, and the guard
# prevents main() from being re-executed in every child process.
if __name__ == '__main__':
    main()
输出:
A
0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
我正在尝试在一个接收数据帧和条件、并返回新数据帧的函数上运行多进程任务。
我的问题是，调用该函数时收到错误 "not supported instances of 'str' and 'int'"。
import multiprocessing
import pandas as pd
def operation(data, condition):
.
. No problem in the function since I tested it isolated
.
.
return pd.DataFrame(result_of_operation)
if __name__ == '__main__':
data = pd.read_csv(r"C:\Users\max\test.csv", index_col=False, encoding='UTF-8')
column_names = ["A", "B", "C"]
new_df = pd.DataFrame(columns = column_names)
condition = ['orange', 'banana', 'apple']
with multiprocessing.Pool(8) as p:
for x in range(0, len(condition)):
new_df.append(p.map(operation, data, condition[x]),ignore_index=True)
我认为问题出在我的 map 调用方式上，因为如果我直接调用该函数，它就能正常工作，例如：
for x in range(0, len(condition)):
new_df.append(operation(data, condition[x]),ignore_index=True)
您向 map() 传递参数的方式似乎是错误的。
试试这个:
from functools import partial
with multiprocessing.Pool() as p:
new_df = pd.concat(p.map(partial(operation, data), condition), ignore_index=True)
也许为了让事情更清楚,这里有一个完整的工作示例。请注意,append() 已被弃用,因此我在此处使用首选 concat():
from pandas import DataFrame, concat
from concurrent.futures import ProcessPoolExecutor
from functools import partial
def process(columns, data):
    """Wrap *data* in a new DataFrame labelled with *columns*."""
    frame = DataFrame(data=data, columns=columns)
    return frame
def main():
    """Fan row groups out to worker processes and print the combined frame."""
    row_groups = [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
    # Pre-bind the column labels so each worker call takes one row group.
    build_frame = partial(process, ['A'])
    with ProcessPoolExecutor(max_workers=3) as pool:
        frames = pool.map(build_frame, row_groups)
        newdf = concat(frames, ignore_index=True)
    print(newdf)
# Entry-point guard: mandatory with process pools — under the "spawn" start
# method (e.g. Windows) each worker re-imports this module, and the guard
# prevents main() from being re-executed in every child process.
if __name__ == '__main__':
    main()
输出:
A
0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9