-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathtrain.py
129 lines (105 loc) · 4.35 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
"""
train-val pipeline
"""
import hydra
from hydra.utils import instantiate
import wandb
from omegaconf import OmegaConf
import torch
from torch import nn
from torch.utils.data import DataLoader, random_split
import pandas as pd
from tqdm import tqdm
from sklearn.metrics import accuracy_score
import os
# from skorch import NeuralNetClassifier
# from skorch.helper import predefined_split
# from skorch.callbacks import Checkpoint
from utils.dataset import TrainDataset, get_transforms, collate_transforms
# Pick the compute device once at module import: prefer CUDA when present.
device = "cuda" if torch.cuda.is_available() else "cpu"
def get_outputs(model, dl, criterion=None):
    """Run *model* over every batch of *dl* in eval mode and collect outputs.

    Args:
        model: a callable module mapping an image batch to class logits.
        dl: dataloader yielding ``(images, label)`` batches.
        criterion: optional loss; when given, the per-batch loss is recorded.

    Returns:
        Tuple ``(scores, labels, losses)`` where ``scores`` are softmax
        probabilities and ``labels`` the targets, each concatenated along the
        batch dimension and moved to CPU, and ``losses`` is the list of
        per-batch loss tensors (empty when ``criterion`` is None).
    """
    all_scores = []
    all_labels = []
    batch_losses = []
    model = model.eval()
    with torch.no_grad():
        for batch_images, batch_targets in tqdm(dl):
            batch_images = batch_images.to(device)
            batch_targets = batch_targets.to(device)
            logits = model(batch_images)
            if criterion is not None:
                batch_losses.append(criterion(logits, batch_targets).cpu())
            all_scores.append(torch.softmax(logits, dim=1).cpu())
            all_labels.append(batch_targets.cpu())
    return torch.cat(all_scores, dim=0), torch.cat(all_labels, dim=0), batch_losses
@hydra.main(config_path="config/", config_name="config", version_base="1.1")
def main(cfg):
    """Train/validate an image classifier from a hydra config.

    Splits the dataset into train/val, trains for ``cfg.train.epochs`` epochs
    with Adam + class-weighted cross-entropy, validates each epoch, and
    optionally logs to wandb (``cfg.train.log``) and saves the final weights
    (``cfg.train.save_model``).
    """
    # Bind run up front so the finish/artifact code below can safely check it.
    # BUG FIX: run.finish() previously raised NameError when cfg.train.log was False.
    run = None
    if cfg.train.log:
        config = OmegaConf.to_container(
            cfg, resolve=True, throw_on_missing=True
        )
        run = wandb.init(project=cfg.wandb.project, name=cfg.wandb.name, save_code=True,
                         job_type=cfg.wandb.job_type, config=config)

    # Dataset and train/val split (val_split is a fraction of the full dataset).
    ds = TrainDataset(data_dir=cfg.train.data_dir, transform=None)
    val_len = int(cfg.train.val_split * len(ds))
    train_len = len(ds) - val_len
    train_ds, val_ds = random_split(ds, [train_len, val_len])
    print(f"train: {train_len}, val: {val_len}")

    # train_transform = get_transforms(augment=False)  # TODO: test augments
    # val_transform = get_transforms(augment=False)
    train_loader = DataLoader(train_ds, batch_size=cfg.train.batch_size, shuffle=True,
                              # collate_fn=lambda x: collate_transforms(x, train_transform),
                              num_workers=cfg.train.num_workers,
                              pin_memory=True)
    val_loader = DataLoader(val_ds, batch_size=cfg.train.batch_size, shuffle=False,
                            # collate_fn=lambda x: collate_transforms(x, val_transform),
                            num_workers=cfg.train.num_workers,
                            pin_memory=True)

    model = instantiate(cfg.model).to(device)
    criterion = nn.CrossEntropyLoss(weight=torch.tensor(cfg.train.class_weights, device=device))
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.train.lr)

    for epoch in range(cfg.train.epochs):
        print(f"epoch: {epoch}")
        model.train()
        total_loss = 0.0
        for images, labels in tqdm(train_loader):
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            # BUG FIX: was guarded by cfg.wandb.enabled, which crashes when
            # wandb.init was skipped (cfg.train.log False). Also log a Python
            # scalar rather than a live tensor, and use a distinct key so the
            # per-batch series does not collide with the per-epoch "train/loss".
            if cfg.train.log:
                wandb.log({"train/batch_loss": loss.item()})

        avg_train_loss = total_loss / len(train_loader)
        # Per-epoch validation pass; val_losses are per-batch mean losses.
        preds, val_labels, val_losses = get_outputs(model, val_loader, criterion)
        avg_val_loss = torch.stack(val_losses).mean()
        avg_val_acc = accuracy_score(val_labels.numpy(), preds.argmax(dim=1).numpy())
        print(f"train loss: {avg_train_loss:.4f}, val loss: {avg_val_loss:.4f}, val acc: {avg_val_acc:.4f}")
        if cfg.train.log:
            wandb.log({"train/loss": avg_train_loss, "val/loss": avg_val_loss, "val/acc": avg_val_acc})

    # Final checkpoint (last epoch's weights, not best-val — TODO confirm intent).
    if cfg.train.save_model:
        out_dir = os.path.join(cfg.train.out_dir, cfg.wandb.name)
        os.makedirs(out_dir, exist_ok=True)
        model_path = os.path.join(out_dir, "model.pt")
        torch.save(model.state_dict(), model_path)
        if run is not None:
            artifact = wandb.Artifact(f"{cfg.wandb.name}", type="model")
            artifact.add_file(model_path)
            run.log_artifact(artifact)
    if run is not None:
        run.finish()
# Script entry point: hydra parses CLI overrides and supplies the composed config.
if __name__ == "__main__":
    main()