diff --git a/atari/benchmark-gym.py b/atari/benchmark-gym.py
index 2fc5ea52..5fc18460 100644
--- a/atari/benchmark-gym.py
+++ b/atari/benchmark-gym.py
@@ -44,7 +44,7 @@ def work():
     for p in procs:
         p.start()
 
-    for t in tqdm.trange(100000):
+    for _ in tqdm.trange(100000):
         Q.get()
 
 def bench_thread(ngame):
diff --git a/eval.py b/eval.py
index 30244c7b..b3f4faa2 100644
--- a/eval.py
+++ b/eval.py
@@ -45,7 +45,7 @@ def actor(batch):
 
     evaluator.episode_start(0)
 
-    for n in eval_iters.iters():
+    for _ in eval_iters.iters():
         GC.Run()
 
     GC.Stop()
diff --git a/eval_checkforward.py b/eval_checkforward.py
index f6854502..747f11c1 100644
--- a/eval_checkforward.py
+++ b/eval_checkforward.py
@@ -154,7 +154,7 @@ def actor(self, batch):
     GC.reg_callback("actor", forward_actor.actor)
     GC.Start()
 
-    for n in eval_iters.iters():
+    for _ in eval_iters.iters():
         GC.Run()
 
     GC.Stop()
diff --git a/eval_lstm.py b/eval_lstm.py
index 91f251ee..ba101ebe 100644
--- a/eval_lstm.py
+++ b/eval_lstm.py
@@ -39,7 +39,7 @@ def actor(batch):
 
     trainer.episode_start(0)
 
-    for n in eval_iters.iters():
+    for _ in eval_iters.iters():
         GC.Run()
 
     GC.Stop()
diff --git a/eval_reduced_mcts.py b/eval_reduced_mcts.py
index 3694bcea..f973e0d1 100644
--- a/eval_reduced_mcts.py
+++ b/eval_reduced_mcts.py
@@ -54,7 +54,7 @@ def actor(batch):
     evaluator.episode_start(0)
 
     GC.Start()
-    for n in eval_iters.iters():
+    for _ in eval_iters.iters():
         GC.Run()
 
     GC.Stop()
diff --git a/ex_elfpy.py b/ex_elfpy.py
index 4ca110b0..553c9ef6 100644
--- a/ex_elfpy.py
+++ b/ex_elfpy.py
@@ -87,7 +87,7 @@ def actor(self, cpu_batch, gpu_batch):
     wrapper.reg_callback("train", trainer.train)
     wrapper.reg_callback("actor", trainer.actor)
 
-    for i in tqdm.trange(1000):
+    for _ in tqdm.trange(1000):
         wrapper.Run()
 
     print("Done")
diff --git a/go/game.py b/go/game.py
index 485f674b..fcd9a74d 100644
--- a/go/game.py
+++ b/go/game.py
@@ -142,11 +142,9 @@ def train(batch):
     GC.Start()
 
     import tqdm
-    for k in tqdm.trange(args.num_iter):
+    for _ in tqdm.trange(args.num_iter):
         b = datetime.now()
-        # print("Before wait")
         GC.Run()
-        # print("wake up from wait")
         elapsed_wait_only += (datetime.now() - b).total_seconds() * 1000
 
     print(len(game_records_visited))
diff --git a/rlpytorch/runner/multi_process.py b/rlpytorch/runner/multi_process.py
index b64f0739..92dd553f 100644
--- a/rlpytorch/runner/multi_process.py
+++ b/rlpytorch/runner/multi_process.py
@@ -78,7 +78,7 @@ def run(self):
         if args.tqdm: iterator = tqdm.trange(args.num_minibatch, ncols=50)
         else: iterator = range(args.num_minibatch)
 
-        for i in iterator:
+        for _ in iterator:
             self.GC.Run()
 
         if self.episode_summary is not None:
diff --git a/rts/game_CF/game.py b/rts/game_CF/game.py
index 7b62616f..63db8b3d 100644
--- a/rts/game_CF/game.py
+++ b/rts/game_CF/game.py
@@ -57,7 +57,7 @@ def actor(batch):
     GC.Start()
 
     import tqdm
-    for k in tqdm.trange(nIter):
+    for _ in tqdm.trange(nIter):
         b = datetime.now()
         GC.Run()
         elapsed_wait_only += (datetime.now() - b).total_seconds() * 1000
diff --git a/rts/game_MC/game.py b/rts/game_MC/game.py
index 40c36437..bde8f0cb 100644
--- a/rts/game_MC/game.py
+++ b/rts/game_MC/game.py
@@ -137,7 +137,7 @@ def reduced_project(batch):
     GC.Start()
 
     import tqdm
-    for k in tqdm.trange(nIter):
+    for _ in tqdm.trange(nIter):
         b = datetime.now()
         GC.Run()
         elapsed_wait_only += (datetime.now() - b).total_seconds() * 1000
diff --git a/rts/game_TD/game.py b/rts/game_TD/game.py
index a46fb814..3e3ad247 100644
--- a/rts/game_TD/game.py
+++ b/rts/game_TD/game.py
@@ -57,7 +57,7 @@ def actor(batch):
     GC.Start()
 
     import tqdm
-    for k in tqdm.trange(nIter):
+    for _ in tqdm.trange(nIter):
         b = datetime.now()
         GC.Run()
         elapsed_wait_only += (datetime.now() - b).total_seconds() * 1000
diff --git a/train_lstm.py b/train_lstm.py
index a6f3f7db..493707cf 100644
--- a/train_lstm.py
+++ b/train_lstm.py
@@ -24,7 +24,7 @@
 
     model = env["model_loaders"][0].load_model(GC.params)
     mi = ModelInterface()
-    mi.add_model("model", model, optim_params={ "lr" : 0.001})
+    mi.add_model("model", model, params={ "lr" : 0.001})
     mi.add_model("actor", model, copy=True, cuda=all_args.gpu is not None, gpu_id=all_args.gpu)
 
     trainer.setup(sampler=env["sampler"], mi=mi, rl_method=env["method"])