Replace deprecated pytorch methods #1814

Merged · 4 commits · Dec 16, 2024
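This PR updates the training scripts to PyTorch's device-generic AMP entry points: recent PyTorch releases deprecate torch.cuda.amp.autocast(...) and torch.cuda.amp.GradScaler(...) in favour of torch.amp.autocast("cuda", ...) and torch.amp.GradScaler("cuda", ...), which take the device type as their first argument. The snippet below is a minimal before/after sketch of the pattern applied throughout this diff, not code from the repository; use_fp16 stands in for params.use_fp16.

# Deprecated CUDA-specific API (emits deprecation warnings on recent PyTorch):
#   from torch.cuda.amp import GradScaler
#   scaler = GradScaler(enabled=use_fp16)
#   with torch.cuda.amp.autocast(enabled=use_fp16): ...

# Device-generic replacement:
import torch
from torch.amp import GradScaler, autocast

use_fp16 = True
scaler = GradScaler("cuda", enabled=use_fp16)

layer = torch.nn.Linear(16, 4).cuda()
x = torch.randn(8, 16, device="cuda")
with autocast("cuda", enabled=use_fp16):
    y = layer(x)  # matmuls run in float16 while autocast is enabled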
@@ -67,7 +67,7 @@
from model import Transducer
from optim import Eden, Eve
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter

@@ -638,7 +638,7 @@ def train_one_epoch(
params.batch_idx_train += 1
batch_size = len(batch["supervisions"]["text"])

-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
model=model,
@@ -843,7 +843,7 @@ def remove_short_and_long_utt(c: Cut):
params=params,
)

-scaler = GradScaler(enabled=params.use_fp16)
+scaler = GradScaler("cuda", enabled=params.use_fp16)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
@@ -912,7 +912,7 @@ def scan_pessimistic_batches_for_oom(
# warmup = 0.0 is so that the derivs for the pruned loss stay zero
# (i.e. are not remembered by the decaying-average in adam), because
# we want to avoid these params being subject to shrinkage in adam.
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, _ = compute_loss(
params=params,
model=model,
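Each of these train.py hunks touches the same two pieces of the mixed-precision setup: the GradScaler constructed in the run/setup code and the autocast context wrapping the loss computation in train_one_epoch and scan_pessimistic_batches_for_oom. For readers unfamiliar with the API, the sketch below shows how the two pieces are typically wired into a training step; it is a generic illustration under the new torch.amp API, not the repository's code (model, optimizer, and the synthetic batch are hypothetical stand-ins for the recipe's Transducer model and compute_loss).

import torch
from torch.amp import GradScaler, autocast

model = torch.nn.Linear(16, 4).cuda()                    # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # placeholder optimizer
use_fp16 = True

# Some recipes below also pass init_scale=1.0 to start from an unscaled loss.
scaler = GradScaler("cuda", enabled=use_fp16)

for step in range(10):
    x = torch.randn(32, 16, device="cuda")
    target = torch.randn(32, 4, device="cuda")

    optimizer.zero_grad()
    with autocast("cuda", enabled=use_fp16):
        loss = torch.nn.functional.mse_loss(model(x), target)

    # Scale the loss so small fp16 gradients do not underflow;
    # step() unscales before the optimizer update and update() adapts the scale.
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()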
8 changes: 4 additions & 4 deletions egs/aishell/ASR/pruned_transducer_stateless2/train.py
@@ -60,7 +60,7 @@
from model import Transducer
from optim import Eden, Eve
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter

@@ -688,7 +688,7 @@ def train_one_epoch(
batch_size = len(batch["supervisions"]["text"])

try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
model=model,
@@ -888,7 +888,7 @@ def run(rank, world_size, args):
params=params,
)

-scaler = GradScaler(enabled=params.use_fp16)
+scaler = GradScaler("cuda", enabled=params.use_fp16)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
@@ -989,7 +989,7 @@ def scan_pessimistic_batches_for_oom(
# warmup = 0.0 is so that the derivs for the pruned loss stay zero
# (i.e. are not remembered by the decaying-average in adam), because
# we want to avoid these params being subject to shrinkage in adam.
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, _ = compute_loss(
params=params,
model=model,
4 changes: 2 additions & 2 deletions egs/aishell/ASR/pruned_transducer_stateless3/model.py
@@ -184,7 +184,7 @@ def forward(
lm = simple_lm_proj(decoder_out)
am = simple_am_proj(encoder_out)

-with torch.cuda.amp.autocast(enabled=False):
+with torch.amp.autocast("cuda", enabled=False):
simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
lm=lm.float(),
am=am.float(),
@@ -219,7 +219,7 @@ def forward(
# prior to do_rnnt_pruning (this is an optimization for speed).
logits = joiner(am_pruned, lm_pruned, project_input=False)

-with torch.cuda.amp.autocast(enabled=False):
+with torch.amp.autocast("cuda", enabled=False):
pruned_loss = k2.rnnt_loss_pruned(
logits=logits.float(),
symbols=y_padded,
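The model.py hunks above preserve the existing numerical-stability pattern: the transducer losses are computed with autocast turned off (enabled=False) and with the inputs explicitly cast back to float32 via lm.float(), am.float(), and logits.float(). A minimal sketch of that pattern with the new API, using a hypothetical sensitive_loss in place of the k2.rnnt_loss_* calls:

import torch
from torch.amp import autocast

def sensitive_loss(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Stand-in for a loss that is numerically unstable in float16.
    return (a.exp() - b.exp()).pow(2).mean()

proj = torch.nn.Linear(16, 16).cuda()
x = torch.randn(8, 16, device="cuda")

with autocast("cuda", enabled=True):
    a = proj(x)          # runs in float16 under autocast
    b = proj(0.5 * x)

    with autocast("cuda", enabled=False):
        # Temporarily leave the autocast region and force float32 inputs,
        # mirroring the .float() casts around k2.rnnt_loss_* in the diff.
        loss = sensitive_loss(a.float(), b.float())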
8 changes: 4 additions & 4 deletions egs/aishell/ASR/pruned_transducer_stateless3/train.py
@@ -79,7 +79,7 @@
from model import Transducer
from optim import Eden, Eve
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter

@@ -797,7 +797,7 @@ def train_one_epoch(
aishell = is_aishell(batch["supervisions"]["cut"][0])

try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
model=model,
@@ -1096,7 +1096,7 @@ def run(rank, world_size, args):
params=params,
)

-scaler = GradScaler(enabled=params.use_fp16)
+scaler = GradScaler("cuda", enabled=params.use_fp16)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
@@ -1202,7 +1202,7 @@ def scan_pessimistic_batches_for_oom(
# warmup = 0.0 is so that the derivs for the pruned loss stay zero
# (i.e. are not remembered by the decaying-average in adam), because
# we want to avoid these params being subject to shrinkage in adam.
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, _ = compute_loss(
params=params,
model=model,
@@ -74,7 +74,7 @@
from model import Transducer
from optim import Eden, ScaledAdam
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from zipformer import Zipformer
@@ -812,7 +812,7 @@ def train_one_epoch(
batch_size = len(batch["supervisions"]["text"])

try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
model=model,
@@ -1107,7 +1107,7 @@ def remove_short_and_long_utt(c: Cut):
# params=params,
# )

-scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+scaler = GradScaler("cuda", enabled=params.use_fp16, init_scale=1.0)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
@@ -1206,7 +1206,7 @@ def scan_pessimistic_batches_for_oom(
for criterion, cuts in batches.items():
batch = train_dl.dataset[cuts]
try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, _ = compute_loss(
params=params,
model=model,
8 changes: 4 additions & 4 deletions egs/aishell/ASR/pruned_transducer_stateless7/train.py
@@ -70,7 +70,7 @@
from model import Transducer
from optim import Eden, ScaledAdam
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from zipformer import Zipformer
@@ -809,7 +809,7 @@ def train_one_epoch(
batch_size = len(batch["supervisions"]["text"])

try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
model=model,
@@ -1107,7 +1107,7 @@ def remove_short_and_long_utt(c: Cut):
# params=params,
# )

-scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+scaler = GradScaler("cuda", enabled=params.use_fp16, init_scale=1.0)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
@@ -1206,7 +1206,7 @@ def scan_pessimistic_batches_for_oom(
for criterion, cuts in batches.items():
batch = train_dl.dataset[cuts]
try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, _ = compute_loss(
params=params,
model=model,
8 changes: 4 additions & 4 deletions egs/aishell/ASR/pruned_transducer_stateless7_bbpe/train.py
@@ -64,7 +64,7 @@
from model import Transducer
from optim import Eden, ScaledAdam
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from zipformer import Zipformer
@@ -802,7 +802,7 @@ def train_one_epoch(
batch_size = len(batch["supervisions"]["text"])

try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
model=model,
@@ -1102,7 +1102,7 @@ def tokenize_and_encode_text(c: Cut):
params=params,
)

-scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+scaler = GradScaler("cuda", enabled=params.use_fp16, init_scale=1.0)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
@@ -1202,7 +1202,7 @@ def scan_pessimistic_batches_for_oom(
for criterion, cuts in batches.items():
batch = train_dl.dataset[cuts]
try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, _ = compute_loss(
params=params,
model=model,
@@ -63,7 +63,7 @@
from model import Transducer
from optim import Eden, ScaledAdam
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from zipformer_for_ncnn_export_only import Zipformer
@@ -813,7 +813,7 @@ def train_one_epoch(
batch_size = len(batch["supervisions"]["text"])

try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
model=model,
@@ -1105,7 +1105,7 @@ def remove_short_and_long_utt(c: Cut):
params=params,
)

-scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+scaler = GradScaler("cuda", enabled=params.use_fp16, init_scale=1.0)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
@@ -1205,7 +1205,7 @@ def scan_pessimistic_batches_for_oom(
for criterion, cuts in batches.items():
batch = train_dl.dataset[cuts]
try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, _ = compute_loss(
params=params,
model=model,
@@ -63,7 +63,7 @@
from model import Transducer
from optim import Eden, ScaledAdam
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from zipformer import Zipformer
@@ -812,7 +812,7 @@ def train_one_epoch(
batch_size = len(batch["supervisions"]["text"])

try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
model=model,
@@ -1104,7 +1104,7 @@ def remove_short_and_long_utt(c: Cut):
# params=params,
# )

-scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+scaler = GradScaler("cuda", enabled=params.use_fp16, init_scale=1.0)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
@@ -1202,7 +1202,7 @@ def scan_pessimistic_batches_for_oom(
for criterion, cuts in batches.items():
batch = train_dl.dataset[cuts]
try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, _ = compute_loss(
params=params,
model=model,
8 changes: 4 additions & 4 deletions egs/aishell/ASR/whisper/train.py
@@ -62,7 +62,7 @@
from lhotse.utils import fix_random_seed
from optim import Eden, ScaledAdam
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.functional import pad as pad_tensor
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
@@ -514,7 +514,7 @@ def compute_validation_loss(
tot_loss = MetricsTracker()

for batch_idx, batch in enumerate(valid_dl):
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
tokenizer=tokenizer,
@@ -608,7 +608,7 @@ def train_one_epoch(
)

try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
tokenizer=tokenizer,
@@ -812,7 +812,7 @@ def run(rank, world_size, args):
train_dl = aishell.train_dataloaders(aishell.train_cuts())
valid_dl = aishell.valid_dataloaders(aishell.valid_cuts())

-scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+scaler = GradScaler("cuda", enabled=params.use_fp16, init_scale=1.0)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
8 changes: 4 additions & 4 deletions egs/aishell/ASR/zipformer/train.py
@@ -71,7 +71,7 @@
from scaling import ScheduledFloat
from subsampling import Conv2dSubsampling
from torch import Tensor
-from torch.cuda.amp import GradScaler
+from torch.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from zipformer import Zipformer2
@@ -910,7 +910,7 @@ def save_bad_model(suffix: str = ""):
batch_size = len(batch["supervisions"]["text"])

try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, loss_info = compute_loss(
params=params,
model=model,
@@ -1201,7 +1201,7 @@ def remove_short_and_long_utt(c: Cut):
params=params,
)

-scaler = GradScaler(enabled=params.use_fp16, init_scale=1.0)
+scaler = GradScaler("cuda", enabled=params.use_fp16, init_scale=1.0)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
@@ -1302,7 +1302,7 @@ def scan_pessimistic_batches_for_oom(
for criterion, cuts in batches.items():
batch = train_dl.dataset[cuts]
try:
-with torch.cuda.amp.autocast(enabled=params.use_fp16):
+with torch.amp.autocast("cuda", enabled=params.use_fp16):
loss, _ = compute_loss(
params=params,
model=model,
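Several hunks above also restore the scaler from a checkpoint via scaler.load_state_dict(checkpoints["grad_scaler"]). torch.amp.GradScaler exposes the same state_dict()/load_state_dict() interface as the deprecated torch.cuda.amp.GradScaler, so previously saved checkpoints should continue to load. A rough sketch of the save/restore round trip (the "grad_scaler" key matches the diff; the file name and the rest are illustrative):

import torch
from torch.amp import GradScaler

# Saving: store the scaler state next to the model/optimizer state.
scaler = GradScaler("cuda", enabled=True)
torch.save({"grad_scaler": scaler.state_dict()}, "checkpoint.pt")

# Resuming: recreate the scaler, then restore its scale and growth tracker.
checkpoints = torch.load("checkpoint.pt", map_location="cpu")
scaler = GradScaler("cuda", enabled=True)
if checkpoints and "grad_scaler" in checkpoints:
    scaler.load_state_dict(checkpoints["grad_scaler"])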