Hi, I am running inference on the Abdomen_CT 701 dataset. The predictions run smoothly for about 30 images, but after that the run is interrupted with the following error.
Traceback (most recent call last):
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/resource_sharer.py", line 138, in _serve
    with self._listener.accept() as conn:
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/connection.py", line 466, in accept
    answer_challenge(c, self._authkey)
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/connection.py", line 757, in answer_challenge
    response = connection.recv_bytes(256)        # reject large message
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/connection.py", line 216, in recv_bytes
    buf = self._recv_bytes(maxlength)
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/connection.py", line 414, in _recv_bytes
    buf = self._recv(4)
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/connection.py", line 379, in _recv
    chunk = read(handle, remaining)
ConnectionResetError: [Errno 104] Connection reset by peer
Traceback (most recent call last):
  File "/home/cvpr1234/miniconda3/envs/umamba/bin/nnUNetv2_predict", line 33, in <module>
    sys.exit(load_entry_point('nnunetv2', 'console_scripts', 'nnUNetv2_predict')())
  File "/mnt/e/Siladittya_JRF/CVPR2022-ECCV2022/lightlyenv/U-Mamba/umamba/nnunetv2/inference/predict_from_raw_data.py", line 837, in predict_entry_point
    predictor.predict_from_files(args.i, args.o, save_probabilities=args.save_probabilities,
  File "/mnt/e/Siladittya_JRF/CVPR2022-ECCV2022/lightlyenv/U-Mamba/umamba/nnunetv2/inference/predict_from_raw_data.py", line 256, in predict_from_files
    return self.predict_from_data_iterator(data_iterator, save_probabilities, num_processes_segmentation_export)
  File "/mnt/e/Siladittya_JRF/CVPR2022-ECCV2022/lightlyenv/U-Mamba/umamba/nnunetv2/inference/predict_from_raw_data.py", line 349, in predict_from_data_iterator
    for preprocessed in data_iterator:
  File "/mnt/e/Siladittya_JRF/CVPR2022-ECCV2022/lightlyenv/U-Mamba/umamba/nnunetv2/inference/data_iterators.py", line 115, in preprocessing_iterator_fromfiles
    [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)]
  File "/mnt/e/Siladittya_JRF/CVPR2022-ECCV2022/lightlyenv/U-Mamba/umamba/nnunetv2/inference/data_iterators.py", line 115, in <listcomp>
    [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)]
RuntimeError: CUDA error: out of memory
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1
Compile with TORCH_USE_CUDA_DSA to enable device-side assertions.
Process SpawnProcess-10:
Traceback (most recent call last):
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
    self.run()
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/mnt/e/Siladittya_JRF/CVPR2022-ECCV2022/lightlyenv/U-Mamba/umamba/nnunetv2/inference/data_iterators.py", line 57, in preprocess_fromfiles_save_to_queue
    raise e
  File "/mnt/e/Siladittya_JRF/CVPR2022-ECCV2022/lightlyenv/U-Mamba/umamba/nnunetv2/inference/data_iterators.py", line 50, in preprocess_fromfiles_save_to_queue
    target_queue.put(item, timeout=0.01)
  File "<string>", line 2, in put
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/managers.py", line 833, in _callmethod
    raise convert_to_error(kind, result)
multiprocessing.managers.RemoteError:
Traceback (most recent call last):
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/managers.py", line 260, in serve_client
    self.id_to_local_proxy_obj[ident]
KeyError: '7fe056998760'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/managers.py", line 262, in serve_client
    raise ke
  File "/home/cvpr1234/miniconda3/envs/umamba/lib/python3.10/multiprocessing/managers.py", line 256, in serve_client
    obj, exposed, gettypeid = id_to_obj[ident]
KeyError: '7fe056998760'
I allocated more memory to the GPU, but that did not fix it. Can anyone please suggest how this can be resolved?
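One mitigation worth trying, since the OOM is raised while the background preprocessing workers pin their output tensors: reduce the number of worker processes so fewer preprocessed cases sit in pinned memory at once. On the command line this should correspond to the -npp / -nps options of nnUNetv2_predict, assuming the U-Mamba fork exposes the same flags as upstream nnUNetv2. The sketch below does the same through the Python API that the traceback goes through; the model folder, trainer name, fold selection, and input/output paths are placeholders, and exact argument names may differ between nnUNetv2 versions.

```python
# Hedged sketch, not the official U-Mamba workflow: rerun prediction with
# fewer background workers so fewer tensors are pinned at any one time.
import torch
from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor

predictor = nnUNetPredictor(
    tile_step_size=0.5,
    use_gaussian=True,
    use_mirroring=True,
    device=torch.device('cuda', 0),
    verbose=False,
    allow_tqdm=True,
)

# Placeholder results folder and fold; point this at your trained U-Mamba model.
predictor.initialize_from_trained_model_folder(
    '/path/to/nnUNet_results/Dataset701_AbdomenCT/nnUNetTrainerUMambaBot__nnUNetPlans__3d_fullres',
    use_folds=(0,),
    checkpoint_name='checkpoint_final.pth',
)

predictor.predict_from_files(
    '/path/to/imagesTs',           # input folder (placeholder)
    '/path/to/predictions',        # output folder (placeholder)
    save_probabilities=False,
    overwrite=False,               # skip cases that were already predicted
    num_processes_preprocessing=1,           # fewer workers -> fewer pinned tensors queued
    num_processes_segmentation_export=1,
)
```

With overwrite=False the run should also resume after the ~30 cases that already finished rather than recomputing them, which makes it cheaper to experiment with the worker counts.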