Skip to content

Commit

Permalink
Replace `with autocast(...)` with `with autocast("cuda", ...)`
Browse files Browse the repository at this point in the history
  • Loading branch information
Li Peng committed Nov 28, 2024
1 parent 2d9825a commit 30ba83a
Show file tree
Hide file tree
Showing 7 changed files with 21 additions and 21 deletions.
4 changes: 2 additions & 2 deletions egs/libritts/CODEC/encodec/encodec.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ def _forward_generator(
)

# calculate losses
with autocast(enabled=False):
with autocast("cuda", enabled=False):
gen_stft_adv_loss = self.generator_adversarial_loss(outputs=y_hat)

if self.multi_period_discriminator is not None:
Expand Down Expand Up @@ -272,7 +272,7 @@ def _forward_discriminator(
speech_hat.contiguous().detach(),
)
# calculate losses
with autocast(enabled=False):
with autocast("cuda", enabled=False):
(
disc_stft_real_adv_loss,
disc_stft_fake_adv_loss,
Expand Down
8 changes: 4 additions & 4 deletions egs/libritts/CODEC/encodec/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -466,7 +466,7 @@ def save_bad_model(suffix: str = ""):
loss_info["samples"] = batch_size

try:
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
d_weight = train_discriminator(
params.lambda_adv,
params.cur_epoch,
Expand Down Expand Up @@ -502,7 +502,7 @@ def save_bad_model(suffix: str = ""):
scaler.scale(disc_loss).backward()
scaler.step(optimizer_d)

with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
g_weight = train_discriminator(
params.lambda_adv,
params.cur_epoch,
Expand Down Expand Up @@ -846,7 +846,7 @@ def scan_pessimistic_batches_for_oom(
) = prepare_input(params, batch, device)
try:
# for discriminator
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
(
disc_stft_real_adv_loss,
disc_stft_fake_adv_loss,
Expand Down Expand Up @@ -876,7 +876,7 @@ def scan_pessimistic_batches_for_oom(
optimizer_d.zero_grad()
loss_d.backward()
# for generator
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
(
commit_loss,
gen_stft_adv_loss,
Expand Down
8 changes: 4 additions & 4 deletions egs/libritts/TTS/vits/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -456,7 +456,7 @@ def save_bad_model(suffix: str = ""):
loss_info["samples"] = batch_size

try:
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
# forward discriminator
loss_d, stats_d = model(
text=tokens,
Expand All @@ -475,7 +475,7 @@ def save_bad_model(suffix: str = ""):
scaler.scale(loss_d).backward()
scaler.step(optimizer_d)

with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
# forward generator
loss_g, stats_g = model(
text=tokens,
Expand Down Expand Up @@ -748,7 +748,7 @@ def scan_pessimistic_batches_for_oom(
) = prepare_input(batch, tokenizer, device, train_speaker_map)
try:
# for discriminator
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
loss_d, stats_d = model(
text=tokens,
text_lengths=tokens_lens,
Expand All @@ -762,7 +762,7 @@ def scan_pessimistic_batches_for_oom(
optimizer_d.zero_grad()
loss_d.backward()
# for generator
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
loss_g, stats_g = model(
text=tokens,
text_lengths=tokens_lens,
Expand Down
2 changes: 1 addition & 1 deletion egs/ljspeech/TTS/matcha/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -479,7 +479,7 @@ def save_bad_model(suffix: str = ""):
tokens_lens,
) = prepare_input(batch, tokenizer, device, params)
try:
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
losses = get_losses(
{
"x": tokens,
Expand Down
8 changes: 4 additions & 4 deletions egs/ljspeech/TTS/vits/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -396,7 +396,7 @@ def save_bad_model(suffix: str = ""):
loss_info["samples"] = batch_size

try:
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
# forward discriminator
loss_d, stats_d = model(
text=tokens,
Expand All @@ -414,7 +414,7 @@ def save_bad_model(suffix: str = ""):
scaler.scale(loss_d).backward()
scaler.step(optimizer_d)

with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
# forward generator
loss_g, stats_g = model(
text=tokens,
Expand Down Expand Up @@ -673,7 +673,7 @@ def scan_pessimistic_batches_for_oom(
)
try:
# for discriminator
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
loss_d, stats_d = model(
text=tokens,
text_lengths=tokens_lens,
Expand All @@ -686,7 +686,7 @@ def scan_pessimistic_batches_for_oom(
optimizer_d.zero_grad()
loss_d.backward()
# for generator
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
loss_g, stats_g = model(
text=tokens,
text_lengths=tokens_lens,
Expand Down
4 changes: 2 additions & 2 deletions egs/ljspeech/TTS/vits/vits.py
Original file line number Diff line number Diff line change
Expand Up @@ -410,7 +410,7 @@ def _forward_generator(
p = self.discriminator(speech_)

# calculate losses
with autocast(enabled=False):
with autocast("cuda", enabled=False):
if not return_sample:
mel_loss = self.mel_loss(speech_hat_, speech_)
else:
Expand Down Expand Up @@ -518,7 +518,7 @@ def _forward_discrminator(
p = self.discriminator(speech_)

# calculate losses
with autocast(enabled=False):
with autocast("cuda", enabled=False):
real_loss, fake_loss = self.discriminator_adv_loss(p_hat, p)
loss = real_loss + fake_loss

Expand Down
8 changes: 4 additions & 4 deletions egs/vctk/TTS/vits/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -448,7 +448,7 @@ def save_bad_model(suffix: str = ""):
loss_info["samples"] = batch_size

try:
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
# forward discriminator
loss_d, stats_d = model(
text=tokens,
Expand All @@ -467,7 +467,7 @@ def save_bad_model(suffix: str = ""):
scaler.scale(loss_d).backward()
scaler.step(optimizer_d)

with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
# forward generator
loss_g, stats_g = model(
text=tokens,
Expand Down Expand Up @@ -740,7 +740,7 @@ def scan_pessimistic_batches_for_oom(
) = prepare_input(batch, tokenizer, device, speaker_map)
try:
# for discriminator
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
loss_d, stats_d = model(
text=tokens,
text_lengths=tokens_lens,
Expand All @@ -754,7 +754,7 @@ def scan_pessimistic_batches_for_oom(
optimizer_d.zero_grad()
loss_d.backward()
# for generator
with autocast(enabled=params.use_fp16):
with autocast("cuda", enabled=params.use_fp16):
loss_g, stats_g = model(
text=tokens,
text_lengths=tokens_lens,
Expand Down

0 comments on commit 30ba83a

Please sign in to comment.