How to load a GPU-trained model onto the CPU?
I am using PyTorch. I want to run, on the CPU, a model that was already trained on multiple GPUs. How can I do this?
I tried it with Anaconda 3 and a CPU-only PyTorch build, since I don't have a GPU.
model = models.get_pose_net(config, is_train=False)
gpus = [int(i) for i in config.GPUS.split(',')]
model = torch.nn.DataParallel(model, device_ids=gpus).cuda()
print('Created model...')
print(model)
checkpoint = torch.load(config.MODEL.RESUME)
model.load_state_dict(checkpoint)
model.eval()
print('Loaded pretrained weights...')
The error I get is:
AssertionError Traceback (most recent call last)
<ipython-input-15-bbfcd201d332> in <module>()
2 model = models.get_pose_net(config, is_train=False)
3 gpus = [int(i) for i in config.GPUS.split(',')]
----> 4 model = torch.nn.DataParallel(model, device_ids=gpus).cuda()
5 print('Created model...')
6 print(model)
C:\Users\psl\Anaconda3\lib\site-packages\torch\nn\modules\module.py in cuda(self, device)
258 Module: self
259 """
--> 260 return self._apply(lambda t: t.cuda(device))
261
262 def cpu(self):
C:\Users\psl\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _apply(self, fn)
185 def _apply(self, fn):
186 for module in self.children():
--> 187 module._apply(fn)
188
189 for param in self._parameters.values():
C:\Users\psl\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _apply(self, fn)
185 def _apply(self, fn):
186 for module in self.children():
--> 187 module._apply(fn)
188
189 for param in self._parameters.values():
C:\Users\psl\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _apply(self, fn)
191 # Tensors stored in modules are graph leaves, and we don't
192 # want to create copy nodes, so we have to unpack the data.
--> 193 param.data = fn(param.data)
194 if param._grad is not None:
195 param._grad.data = fn(param._grad.data)
C:\Users\psl\Anaconda3\lib\site-packages\torch\nn\modules\module.py in <lambda>(t)
258 Module: self
259 """
--> 260 return self._apply(lambda t: t.cuda(device))
261
262 def cpu(self):
C:\Users\psl\Anaconda3\lib\site-packages\torch\cuda\__init__.py in _lazy_init()
159 raise RuntimeError(
160 "Cannot re-initialize CUDA in forked subprocess. " + msg)
--> 161 _check_driver()
162 torch._C._cuda_init()
163 _cudart = _load_cudart()
C:\Users\psl\Anaconda3\lib\site-packages\torch\cuda\__init__.py in _check_driver()
80 Found no NVIDIA driver on your system. Please check that you
81 have an NVIDIA GPU and installed a driver from
---> 82 http://www.nvidia.com/Download/index.aspx""")
83 else:
84 # TODO: directly link to the alternative bin that needs install
AssertionError:
Found no NVIDIA driver on your system. Please check that you
have an NVIDIA GPU and installed a driver from
http://www.nvidia.com/Download/index.aspx
To force a saved model to load onto the CPU, use the following:
torch.load('/path/to/saved/model', map_location='cpu')
In your case, change it to:
torch.load(config.MODEL.RESUME, map_location='cpu')
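Putting it together, here is a minimal CPU-only loading sketch. It assumes the checkpoint file holds a raw state_dict that was saved from the DataParallel-wrapped model, so the keys carry a module. prefix that has to be stripped before loading into the bare (unwrapped) model; if your checkpoint stores extra metadata, adjust accordingly.
import torch

# Build the model as before, but skip DataParallel/.cuda() since no GPU is available.
model = models.get_pose_net(config, is_train=False)

# map_location='cpu' remaps all CUDA tensors in the checkpoint to CPU tensors.
checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')

# Assumption: the checkpoint was saved from a DataParallel-wrapped model, so its
# keys look like 'module.layer.weight'; strip the prefix to match the bare model.
state_dict = {k.replace('module.', '', 1): v for k, v in checkpoint.items()}

model.load_state_dict(state_dict)
model.eval()
Alternatively, you can keep the torch.nn.DataParallel wrapper (it also works on CPU as long as you do not call .cuda()) and load the checkpoint unchanged.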