Using shared list with pathos multiprocessing raises `digest sent was rejected` error
I am trying to use multiprocessing to generate complex, unpicklable objects, as per the following code snippet:
from multiprocessing import Manager
from pathos.multiprocessing import ProcessingPool


class Facility:

    def __init__(self):
        self.blocks = Manager().list()

    def __process_blocks(self, block):
        designer = block["designer"]
        apply_terrain = block["terrain"]
        block_type = self.__block_type_to_string(block["type"])
        block = designer.generate_block(block_id=block["id"],
                                        block_type=block_type,
                                        anchor=Point(float(block["anchor_x"]), float(block["anchor_y"]),
                                                     float(block["anchor_z"])),
                                        pcu_anchor=Point(float(block["pcu_x"]), float(block["pcu_y"]), 0),
                                        corridor_width=block["corridor"],
                                        jb_height=block["jb_connect_height"],
                                        min_boxes=block["min_boxes"],
                                        apply_terrain=apply_terrain)
        self.blocks.append(block)

    def design(self, apply_terrain=False):
        designer = FacilityBuilder(string_locator=self._string_locator, string_router=self._string_router,
                                   box_router=self._box_router, sorter=self._sorter,
                                   tracker_configurator=self._tracker_configurator, config=self._config)
        blocks = [block.to_dict() for index, block in self._store.get_blocks().iterrows()]
        for block in blocks:
            block["designer"] = designer
            block["terrain"] = apply_terrain
        with ProcessingPool() as pool:
            pool.map(self.__process_blocks, blocks)
(I am struggling to reproduce this with simpler code, so I am showing the actual code.)
I need to update a shareable variable, so I initialise a class-level variable with multiprocessing.Manager as follows:

self.blocks = Manager().list()

This leaves me with the following error (only part of the stack trace shown):
File "C:\Users\Paul.Nel\Documents\repos\autoPV\.autopv\lib\site-packages\dill\_dill.py", line 481, in load
obj = StockUnpickler.load(self)
File "C:\Users\Paul.Nel\AppData\Local\Programs\Python\Python39\lib\multiprocessing\managers.py", line 933, in RebuildProxy
return func(token, serializer, incref=incref, **kwds)
File "C:\Users\Paul.Nel\AppData\Local\Programs\Python\Python39\lib\multiprocessing\managers.py", line 783, in __init__
self._incref()
File "C:\Users\Paul.Nel\AppData\Local\Programs\Python\Python39\lib\multiprocessing\managers.py", line 837, in _incref
conn = self._Client(self._token.address, authkey=self._authkey)
File "C:\Users\Paul.Nel\AppData\Local\Programs\Python\Python39\lib\multiprocessing\connection.py", line 513, in Client
answer_challenge(c, authkey)
File "C:\Users\Paul.Nel\AppData\Local\Programs\Python\Python39\lib\multiprocessing\connection.py", line 764, in answer_challe
nge
raise AuthenticationError('digest sent was rejected')
multiprocessing.context.AuthenticationError: digest sent was rejected
As a last resort, I tried Python's standard ThreadPool implementation to try to get around the pickling problem, but that did not go well either. I have read many similar questions but have not yet found a solution to this particular problem. Is the problem with dill, or with the way pathos interacts with multiprocessing.Manager?
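For comparison, here is a minimal baseline sketch of the same shared-list idea using only the stock multiprocessing.Pool with a module-level worker function (illustration only, not code from my project; the worker and variable names are made up):

import os
from multiprocessing import Manager, Pool


def record_square(args):
    # Top-level function, so the standard pickler can ship it to the workers.
    shared, x = args
    shared.append(x * x)


if __name__ == '__main__':
    with Manager() as manager:
        shared = manager.list()
        with Pool() as pool:
            pool.map(record_square, [(shared, x) for x in range(10)])
        print(list(shared))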
EDIT: I have managed to reproduce the problem with sample code, as follows:
import os
import math
from multiprocessing import Manager
from pathos.multiprocessing import ProcessingPool


class MyComplex:

    def __init__(self, x):
        self._z = x * x

    def me(self):
        return math.sqrt(self._z)


class Starter:

    def __init__(self):
        manager = Manager()
        self.my_list = manager.list()

    def _f(self, value):
        print(f"{value.me()} on {os.getpid()}")
        self.my_list.append(value.me())

    def start(self):
        names = [MyComplex(x) for x in range(100)]
        with ProcessingPool() as pool:
            pool.map(self._f, names)


if __name__ == '__main__':
    starter = Starter()
    starter.start()
The error appears as soon as I add self.my_list = manager.list().
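As far as I can tell from the dill frame at the top of the trace, ProcessingPool serialises the bound method self._f with dill, which drags the whole Starter instance, including the Manager list proxy, into the payload each worker has to unpickle. A quick way to see that (my own illustration, not part of the repro):

import dill

if __name__ == '__main__':
    starter = Starter()
    # Serialising the bound method also serialises starter itself,
    # and with it the ListProxy stored in starter.my_list.
    payload = dill.dumps(starter._f)
    print(len(payload))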
So I have solved the problem, but I would still love it if someone like mmckerns, or someone who knows more about multiprocessing than I do, could comment on why this works. The problem seems to be that Manager().list() was declared in __init__. The following code works without any problems:
import os
import math
from multiprocessing import Manager
from pathos.multiprocessing import ProcessingPool


class MyComplex:

    def __init__(self, x):
        self._z = x * x

    def me(self):
        return math.sqrt(self._z)


class Starter:

    def _f(self, value):
        print(f"{value.me()} on {os.getpid()}")
        return value.me()

    def start(self):
        manager = Manager()
        my_list = manager.list()
        names = [MyComplex(x) for x in range(100)]
        with ProcessingPool() as pool:
            my_list.append(pool.map(self._f, names))
        print(my_list)


if __name__ == '__main__':
    starter = Starter()
    starter.start()
Here I declare the Manager list locally within the ProcessingPool operation. I can assign the result to a class-level variable afterwards if I want to.
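For instance, a minimal variation of the example above (my sketch; self.results is just an illustrative name) that skips the Manager entirely and stores the plain result list on the instance once the pool has finished:

import os
import math
from pathos.multiprocessing import ProcessingPool


class MyComplex:

    def __init__(self, x):
        self._z = x * x

    def me(self):
        return math.sqrt(self._z)


class Starter:

    def _f(self, value):
        print(f"{value.me()} on {os.getpid()}")
        return value.me()

    def start(self):
        names = [MyComplex(x) for x in range(100)]
        with ProcessingPool() as pool:
            results = pool.map(self._f, names)
        # No proxy ever crosses a process boundary; the plain result list
        # is attached to the instance only after the workers are done.
        self.results = results


if __name__ == '__main__':
    starter = Starter()
    starter.start()
    print(starter.results[:5])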