From 55fa8f134996fc201f586eefbb0f8e4238ad4a5b Mon Sep 17 00:00:00 2001
From: lucidrains
Date: Mon, 3 Feb 2025 05:23:53 -0800
Subject: [PATCH] address
 https://github.com/lucidrains/vector-quantize-pytorch/issues/191

---
 pyproject.toml                             | 2 +-
 vector_quantize_pytorch/residual_sim_vq.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 3de550e..285596e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "vector-quantize-pytorch"
-version = "1.21.4"
+version = "1.21.5"
 description = "Vector Quantization - Pytorch"
 authors = [
     { name = "Phil Wang", email = "lucidrains@gmail.com" }
diff --git a/vector_quantize_pytorch/residual_sim_vq.py b/vector_quantize_pytorch/residual_sim_vq.py
index 2aea9ca..c899c2b 100644
--- a/vector_quantize_pytorch/residual_sim_vq.py
+++ b/vector_quantize_pytorch/residual_sim_vq.py
@@ -150,7 +150,7 @@ def forward(
         all_losses = []
         all_indices = []
 
-        should_quantize_dropout = self.training and self.quantize_dropout and not return_loss
+        should_quantize_dropout = self.training and self.quantize_dropout
 
         # sample a layer index at which to dropout further residual quantization
         # also prepare null indices and loss
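
Note on the residual_sim_vq.py change: the patch removes the `and not return_loss` term, so quantizer dropout is gated only on training mode and the `quantize_dropout` setting. Below is a minimal standalone sketch of that gating, not the library's actual forward pass; the helper name `pick_dropout_index` and parameters such as `num_quantizers` and `cutoff_index` are assumptions chosen for illustration.

    import random

    def pick_dropout_index(training, quantize_dropout, num_quantizers, cutoff_index = 0):
        # after this patch the gate no longer depends on return_loss,
        # so dropout can also fire when the caller requests a loss
        should_quantize_dropout = training and quantize_dropout

        if not should_quantize_dropout:
            return None

        # sample a layer index at which to drop out further residual quantization
        return random.randrange(cutoff_index, num_quantizers)

    # usage: quantizer layers at and beyond the sampled index would be skipped for this batch
    drop_index = pick_dropout_index(training = True, quantize_dropout = True, num_quantizers = 8)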