diff --git a/elf/utils_elf.py b/elf/utils_elf.py
index d3e30027..9a80b8d7 100644
--- a/elf/utils_elf.py
+++ b/elf/utils_elf.py
@@ -188,13 +188,13 @@ def hist(self, s, key=None):
         else:
             return self[key][s]
 
-    def transfer_cpu2gpu(self, batch_gpu, async=True):
+    def transfer_cpu2gpu(self, batch_gpu, non_blocking=True):
         ''' transfer batch data to gpu '''
         # For each time step
         for k, v in self.batch.items():
-            batch_gpu[k].copy_(v, async=async)
+            batch_gpu[k].copy_(v, non_blocking=non_blocking)
 
-    def transfer_cpu2cpu(self, batch_dst, async=True):
+    def transfer_cpu2cpu(self, batch_dst, non_blocking=True):
         ''' transfer batch data to cpu '''
         # For each time step
diff --git a/elf_python/memory_receiver.py b/elf_python/memory_receiver.py
index 4de5ebb7..9e44655b 100644
--- a/elf_python/memory_receiver.py
+++ b/elf_python/memory_receiver.py
@@ -75,19 +75,19 @@ def _cpu2gpu(batch_cpu, batch_gpu, allow_incomplete_batch=False):
         if isinstance(batch_cpu_t[k], (torch.FloatTensor, torch.LongTensor)):
             if allow_incomplete_batch:
                 if len(batch_cpu_t[k].size()) == 1:
-                    batch_gpu_t[k] = batch_cpu_t[k][:batchsize].cuda(async=True)
+                    batch_gpu_t[k] = batch_cpu_t[k][:batchsize].cuda(non_blocking=True)
                 else:
-                    batch_gpu_t[k] = batch_cpu_t[k][:batchsize, :].cuda(async=True)
+                    batch_gpu_t[k] = batch_cpu_t[k][:batchsize, :].cuda(non_blocking=True)
             else:
                 if isinstance(batch_cpu_t[k], torch.FloatTensor):
                     if k not in batch_gpu_t:
                         batch_gpu_t[k] = torch.cuda.FloatTensor(batch_cpu_t[k].size())
-                    batch_gpu_t[k].copy_(batch_cpu_t[k], async=True)
+                    batch_gpu_t[k].copy_(batch_cpu_t[k], non_blocking=True)
                 elif isinstance(batch_cpu_t[k], torch.LongTensor):
                     if k not in batch_gpu_t:
                         batch_gpu_t[k] = torch.cuda.LongTensor(batch_cpu_t[k].size())
-                    batch_gpu_t[k].copy_(batch_cpu_t[k], async=True)
+                    batch_gpu_t[k].copy_(batch_cpu_t[k], non_blocking=True)
                 else:
                     batch_gpu_t[k] = batch_cpu_t[k]
diff --git a/rlpytorch/runner/parameter_server.py b/rlpytorch/runner/parameter_server.py
index 62aee8a2..9bf56d64 100644
--- a/rlpytorch/runner/parameter_server.py
+++ b/rlpytorch/runner/parameter_server.py
@@ -215,7 +215,7 @@ def process_main(self, i, gpu_id):
         while True:
             self.cvs_recv[i].wait()
-            utils_elf.transfer_cpu2gpu(batch, batch_gpu, async=True)
+            utils_elf.transfer_cpu2gpu(batch, batch_gpu)
             self.cvs_send[i].notify()
             self.cb_remote_batch_process(context, batch_gpu)