attempt to address lucidrains/audiolm-pytorch#279 again
lucidrains committed Nov 7, 2024
1 parent f97a37b commit ea13758
Showing 4 changed files with 42 additions and 38 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "vector-quantize-pytorch"
-version = "1.18.6"
+version = "1.18.7"
 description = "Vector Quantization - Pytorch"
 authors = [
     { name = "Phil Wang", email = "[email protected]" }
27 changes: 14 additions & 13 deletions vector_quantize_pytorch/residual_fsq.py
@@ -1,6 +1,6 @@
 import random
 from math import log2
-from functools import partial, cache
+from functools import partial

 from typing import List

@@ -33,10 +33,17 @@ def round_up_multiple(num, mult):

 # distributed helpers

-@cache
 def is_distributed():
     return dist.is_initialized() and dist.get_world_size() > 1

+def get_maybe_sync_seed(max_size = 10_000):
+    rand_int = torch.randint(0, max_size, ())
+
+    if is_distributed():
+        dist.all_reduce(rand_int)
+
+    return rand_int.item()

 # main class

 class ResidualFSQ(Module):
@@ -175,18 +182,12 @@ def forward(

         if should_quantize_dropout:

-            if exists(rand_quantize_dropout_fixed_seed):
-                # seed is manually passed in
-                rand = random.Random(rand_quantize_dropout_fixed_seed)
+            # check if seed is manually passed in

-            elif is_distributed():
-                # in distributed environment, synchronize a random seed value if not given
-                t = torch.tensor(random.randrange(10_000), device = device)
-                dropout_seed = dist.all_reduce(t).item()
-                rand = random.Random(dropout_seed)
+            if not exists(rand_quantize_dropout_fixed_seed):
+                rand_quantize_dropout_fixed_seed = get_maybe_sync_seed()

-            else:
-                rand = random
+            rand = random.Random(rand_quantize_dropout_fixed_seed)

             rand_quantize_dropout_index = rand.randrange(self.quantize_dropout_cutoff_index, num_quant)

@@ -304,7 +305,7 @@ def forward(

         forward_kwargs = dict(
             return_all_codes = return_all_codes,
-            rand_quantize_dropout_fixed_seed = random.randint(0, int(1e7))
+            rand_quantize_dropout_fixed_seed = get_maybe_sync_seed()
         )

         # invoke residual vq on each group
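
The get_maybe_sync_seed helper added above draws a random integer on each rank and, when a process group with a world size above one is initialized, all-reduces (sums) the draws so every rank ends up with the same seed. Below is a minimal standalone sketch, not part of this commit, that exercises the same helper under torch.distributed; the two-process gloo group, loopback address, and port are illustrative assumptions.

# standalone sketch (not from this commit): every rank should print the same seed
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def is_distributed():
    return dist.is_initialized() and dist.get_world_size() > 1

def get_maybe_sync_seed(max_size = 10_000):
    # each rank draws its own random integer ...
    rand_int = torch.randint(0, max_size, ())

    # ... then the draws are summed across ranks, so all ranks agree on one value
    if is_distributed():
        dist.all_reduce(rand_int)

    return rand_int.item()

def worker(rank, world_size):
    # assumed local two-process setup, for illustration only
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group('gloo', rank = rank, world_size = world_size)

    print(f'rank {rank} got seed {get_maybe_sync_seed()}')  # same value on both ranks

    dist.destroy_process_group()

if __name__ == '__main__':
    mp.spawn(worker, args = (2,), nprocs = 2)
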
25 changes: 13 additions & 12 deletions vector_quantize_pytorch/residual_lfq.py
@@ -28,10 +28,17 @@ def round_up_multiple(num, mult):

 # distributed helpers

-@cache
 def is_distributed():
     return dist.is_initialized() and dist.get_world_size() > 1

+def get_maybe_sync_seed(max_size = 10_000):
+    rand_int = torch.randint(0, max_size, ())
+
+    if is_distributed():
+        dist.all_reduce(rand_int)
+
+    return rand_int.item()

 # main class

 class ResidualLFQ(Module):
@@ -152,18 +159,12 @@ def forward(

         if should_quantize_dropout:

-            if exists(rand_quantize_dropout_fixed_seed):
-                # seed is manually passed in
-                rand = random.Random(rand_quantize_dropout_fixed_seed)
+            # check if seed is manually passed in

-            elif is_distributed():
-                # in distributed environment, synchronize a random seed value if not given
-                t = torch.tensor(random.randrange(10_000), device = device)
-                dropout_seed = dist.all_reduce(t).item()
-                rand = random.Random(dropout_seed)
+            if not exists(rand_quantize_dropout_fixed_seed):
+                rand_quantize_dropout_fixed_seed = get_maybe_sync_seed()

-            else:
-                rand = random
+            rand = random.Random(rand_quantize_dropout_fixed_seed)

             rand_quantize_dropout_index = rand.randrange(self.quantize_dropout_cutoff_index, num_quant)

@@ -271,7 +272,7 @@ def forward(
         forward_kwargs = dict(
             mask = mask,
             return_all_codes = return_all_codes,
-            rand_quantize_dropout_fixed_seed = random.randint(0, int(1e7))
+            rand_quantize_dropout_fixed_seed = get_maybe_sync_seed()
         )

         # invoke residual vq on each group
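
With the helper in place, the forward paths above no longer need a separate distributed branch: when no rand_quantize_dropout_fixed_seed is supplied, one is derived from get_maybe_sync_seed(), and a random.Random instance seeded with it picks the dropout index. A condensed sketch of that selection follows; the values of quantize_dropout_cutoff_index and num_quant, and the local randrange fallback, are illustrative assumptions rather than code from this diff.

# sketch of the post-change dropout-index selection (illustrative values, not from the diff)
import random

quantize_dropout_cutoff_index = 1   # assumed example value
num_quant = 8                       # assumed example value

def pick_dropout_index(rand_quantize_dropout_fixed_seed = None):
    # check if seed is manually passed in; otherwise derive one
    # (in the library this fallback is get_maybe_sync_seed(), synced across ranks)
    if rand_quantize_dropout_fixed_seed is None:
        rand_quantize_dropout_fixed_seed = random.randrange(10_000)

    rand = random.Random(rand_quantize_dropout_fixed_seed)

    # pick an index in [quantize_dropout_cutoff_index, num_quant), matching the diff's rand.randrange call
    return rand.randrange(quantize_dropout_cutoff_index, num_quant)

# the same seed always yields the same index, which is why syncing the seed
# keeps every rank on the same dropout depth
assert pick_dropout_index(42) == pick_dropout_index(42)
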
26 changes: 14 additions & 12 deletions vector_quantize_pytorch/residual_vq.py
@@ -33,10 +33,17 @@ def round_up_multiple(num, mult):

 # distributed helpers

-@cache
 def is_distributed():
     return dist.is_initialized() and dist.get_world_size() > 1

+def get_maybe_sync_seed(max_size = 10_000):
+    rand_int = torch.randint(0, max_size, ())
+
+    if is_distributed():
+        dist.all_reduce(rand_int)
+
+    return rand_int.item()

 # the mlp for generating the neural implicit codebook
 # from Huijben et al. https://arxiv.org/abs/2401.14732

@@ -286,18 +293,13 @@ def forward(

         if should_quantize_dropout:

-            if exists(rand_quantize_dropout_fixed_seed):
-                # seed is manually passed in
-                rand = random.Random(rand_quantize_dropout_fixed_seed)
+            # check if seed is manually passed in

+            if not exists(rand_quantize_dropout_fixed_seed):
+                rand_quantize_dropout_fixed_seed = get_maybe_sync_seed()

-            elif is_distributed():
-                # in distributed environment, synchronize a random seed value if not given
-                t = torch.tensor(random.randrange(10_000), device = device)
-                dropout_seed = dist.all_reduce(t).item()
-                rand = random.Random(dropout_seed)
+            rand = random.Random(rand_quantize_dropout_fixed_seed)

-            else:
-                rand = random

             rand_quantize_dropout_index = rand.randrange(self.quantize_dropout_cutoff_index, num_quant)

@@ -466,7 +468,7 @@ def forward(
             sample_codebook_temp = sample_codebook_temp,
             mask = mask,
             freeze_codebook = freeze_codebook,
-            rand_quantize_dropout_fixed_seed = random.randint(0, int(1e7))
+            rand_quantize_dropout_fixed_seed = get_maybe_sync_seed()
         )

         # invoke residual vq on each group
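
In the grouped forward above, forward_kwargs now hands a single get_maybe_sync_seed() value to every group's residual quantizer, so all groups drop the same number of quantizers in a given step. A rough usage sketch follows; the constructor arguments mirror the project's README-style examples and are assumptions rather than part of this diff, while rand_quantize_dropout_fixed_seed is the forward keyword seen in the hunks above.

# sketch: fixing the dropout seed explicitly (constructor args assumed, README-style)
import torch
from vector_quantize_pytorch import ResidualVQ

residual_vq = ResidualVQ(
    dim = 256,
    num_quantizers = 8,
    codebook_size = 1024,
    quantize_dropout = True,               # randomly drop trailing quantizers during training
    quantize_dropout_cutoff_index = 1      # always keep at least the first quantizer
)

x = torch.randn(1, 1024, 256)

# without the kwarg, forward derives a seed via get_maybe_sync_seed();
# passing it explicitly makes the number of dropped quantizers reproducible
quantized, indices, commit_loss = residual_vq(x, rand_quantize_dropout_fixed_seed = 1234)
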
