simple_eg.py
import os

# Restrict the run to the first visible GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from torch import optim, nn, utils, Tensor
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor
import pytorch_lightning as pl

# Define the model: a fully connected autoencoder for flattened 32x32x3 CIFAR10 images.
class MyModel(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(32 * 32 * 3, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
        )
        self.decoder = nn.Sequential(
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 32 * 32 * 3),
        )

    def forward(self, x: Tensor) -> Tensor:
        z = self.encoder(x)      # encode to a 64-dimensional latent vector
        x_hat = self.decoder(z)  # decode back to the flattened image
        return x_hat

# Wrap the model in a LightningModule so PyTorch Lightning can drive training.
class LightningModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.model = MyModel()

    def training_step(self, batch, batch_idx):
        x, y = batch                   # labels are unused for reconstruction
        x = x.view(x.size(0), -1)      # flatten images to (batch, 32*32*3)
        x_hat = self.model(x)
        loss = nn.functional.mse_loss(x_hat, x)
        self.log("train_loss", loss, prog_bar=True)
        return loss

    def configure_optimizers(self):
        optimizer = optim.Adam(self.model.parameters(), lr=1e-4)
        return optimizer

# Train on CIFAR10 for a fixed number of steps on a single GPU.
lightning_model = LightningModel()
train_loader = utils.data.DataLoader(
    CIFAR10('data/', download=True, transform=ToTensor()),
    batch_size=320,
)
trainer = pl.Trainer(
    accelerator='gpu',
    devices=1,
    max_steps=100,
)
trainer.fit(model=lightning_model, train_dataloaders=train_loader)
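
# Optional usage sketch (not part of the original script): after training,
# reconstruct one batch with the trained autoencoder and report its MSE.
# Assumes the fit() call above completed and `train_loader` is still defined.
import torch

lightning_model.eval()
with torch.no_grad():
    x, _ = next(iter(train_loader))                        # one batch of CIFAR10 images
    x = x.view(x.size(0), -1).to(lightning_model.device)   # flatten to (batch, 32*32*3)
    x_hat = lightning_model.model(x)
    print("reconstruction MSE:", nn.functional.mse_loss(x_hat, x).item())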