Merge pull request #108 from mlverse/examples
Examples
dfalbel authored Oct 27, 2022
2 parents c74e1e8 + 3c3149f commit 5163f6b
Showing 11 changed files with 127 additions and 30 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -7,3 +7,5 @@ inst/doc
 docs
 doc
 Meta
+mnist
+pets
2 changes: 1 addition & 1 deletion R/metrics.R
@@ -206,7 +206,7 @@ luz_metric_loss_average <- luz_metric(
     self$values <- list()
   },
   update = function(preds, targets) {
-    if (length(ctx$loss) == 1)
+    if (length(ctx$loss) == 1 && is.list(ctx$loss))
      loss <- ctx$loss[[1]]
    else
      loss <- ctx$loss
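The extra `is.list()` check matters because a bare scalar tensor also reports length 1 in R torch (`length()` on a tensor counts elements), so the old condition would try to unwrap it with `[[1]]` even when there was nothing to unwrap. A minimal sketch of the guard's effect; `unwrap()` and the values are illustrative only, not luz internals:

```r
library(torch)

loss_as_list   <- list(torch_tensor(0.5)) # e.g. a multi-loss setup
loss_as_tensor <- torch_tensor(0.5)       # the common single-loss case

unwrap <- function(loss) {
  # unwrap only when the loss really is a one-element list; a scalar
  # tensor also reports length 1, so is.list() keeps it from being
  # indexed with [[1]]
  if (length(loss) == 1 && is.list(loss))
    loss[[1]]
  else
    loss
}

unwrap(loss_as_list)   # the inner tensor
unwrap(loss_as_tensor) # returned as-is
```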
2 changes: 2 additions & 0 deletions vignettes/examples/.gitignore
@@ -4,3 +4,5 @@ triplet.pt
 mnist-virtual-batch_size.pt
 mnist-autoencoder.pt
 model-dogs-and-cats.pt
+dogs-vs-cats
+mnist
7 changes: 3 additions & 4 deletions vignettes/examples/dogs-vs-cats-binary-classification.Rmd
@@ -16,12 +16,11 @@ torch_manual_seed(1)
 
 # Datasets and loaders ----------------------------------------------------
 
-dir <- "~/Downloads/dogs-vs-cats" # caching directory
-kaggle_token <- "~/Downloads/kaggle.json"
+dir <- "./dogs-vs-cats" # caching directory
 
 ds <- torchdatasets::dogs_vs_cats_dataset(
   dir,
-  token = kaggle_token,
   download = TRUE,
   transform = . %>%
     torchvision::transform_to_tensor() %>%
     torchvision::transform_resize(size = c(224, 224)) %>%
@@ -65,7 +64,7 @@ net <- torch::nn_module(
 fitted <- net %>%
   setup(
     loss = nn_bce_with_logits_loss(),
-    optimizer = madgrad::optim_madgrad,
+    optimizer = optim_adam,
     metrics = list(
       luz_metric_binary_accuracy_with_logits()
     )
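Swapping `madgrad::optim_madgrad` for torch's built-in `optim_adam` drops the extra {madgrad} dependency from the example. If the optimizer's defaults need tuning, luz can forward arguments via `set_opt_hparams()`. A sketch, not part of this commit, assuming the dataloaders from the full example:

```r
fitted <- net %>%
  setup(
    loss = nn_bce_with_logits_loss(),
    optimizer = optim_adam,
    metrics = list(luz_metric_binary_accuracy_with_logits())
  ) %>%
  # forwarded to optim_adam(); 1e-3 is already its default, shown for illustration
  set_opt_hparams(lr = 1e-3) %>%
  fit(train_dl, epochs = 3, valid_data = valid_dl)
```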
2 changes: 1 addition & 1 deletion vignettes/examples/mnist-autoencoder.Rmd
@@ -12,7 +12,7 @@ library(luz)
 
 # Datasets and loaders ----------------------------------------------------
 
-dir <- "~/Downloads/mnist" # caching directory
+dir <- "./mnist" # caching directory
 
 # Modify the MNIST dataset so the target is identical to the input.
 mnist_dataset2 <- torch::dataset(
30 changes: 20 additions & 10 deletions vignettes/examples/mnist-cnn-virtual-batch-size.Rmd
@@ -5,15 +5,14 @@ category: 'advanced'
 ---
 
 ```{r, eval = FALSE}
 # Packages ----------------------------------------------------------------
 library(torch)
 library(torchvision)
 library(luz)
 
 # Datasets and loaders ----------------------------------------------------
 
-dir <- "~/Downloads/mnist" # caching directory
+dir <- "./mnist" # caching directory
 
 train_ds <- mnist_dataset(
   dir,
@@ -38,8 +37,8 @@ net <- nn_module(
   initialize = function(accumulate_batches = 2) {
     self$conv1 <- nn_conv2d(1, 32, 3, 1)
     self$conv2 <- nn_conv2d(32, 64, 3, 1)
-    self$dropout1 <- nn_dropout2d(0.25)
-    self$dropout2 <- nn_dropout2d(0.5)
+    self$dropout1 <- nn_dropout(0.25)
+    self$dropout2 <- nn_dropout(0.5)
     self$fc1 <- nn_linear(9216, 128)
     self$fc2 <- nn_linear(128, 10)
@@ -60,14 +59,25 @@ net <- nn_module(
     x
   },
 
   step = function() {
-    ctx$pred <- ctx$model(ctx$input)
-    loss <- ctx$model$loss(ctx$pred, ctx$target)
+    # we implement a custom step method that runs for every
+    # batch in training and validation.
+
+    # calculate predictions. we save them in `ctx$pred` so other parts of luz
+    # can use them.
+    ctx$pred <- ctx$model(ctx$input)
+
+    # we now calculate the loss and also save it in `ctx$loss` so that,
+    # for example, it's correctly logged.
+    ctx$loss <- ctx$model$loss(ctx$pred, ctx$target)
 
+    # `ctx$training` is set automatically to `TRUE` during the training phase
     if (ctx$training) {
-      loss <- loss/self$accumulate_batches
-      loss$backward()
+      ctx$loss <- ctx$loss/self$accumulate_batches
+      ctx$loss$backward()
     }
 
+    # we only take an optimizer step every `accumulate_batches` iterations,
+    # so we effectively train with the virtual batch size.
     if (ctx$training && (ctx$iter %% self$accumulate_batches == 0)) {
       opt <- ctx$optimizers[[1]]
       opt$step()
@@ -87,7 +97,7 @@ fitted <- net %>%
       luz_metric_accuracy()
     )
   ) %>%
-  fit(train_dl, epochs = 10, valid_data = test_dl)
+  fit(train_dl, valid_data = test_dl, epochs = 10)
 
 # Serialization -----------------------------------------------------------
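The custom `step()` above is gradient accumulation: each physical batch contributes a scaled gradient, and the optimizer only steps every `accumulate_batches` iterations. A minimal standalone sketch of the same idea in plain torch, with illustrative shapes and no luz machinery:

```r
library(torch)

model <- nn_linear(10, 1)
opt <- optim_adam(model$parameters)
accumulate_batches <- 2

for (iter in 1:4) {
  input <- torch_randn(8, 10)  # physical batch size 8
  target <- torch_randn(8, 1)

  loss <- nnf_mse_loss(model(input), target)

  # scale so the accumulated gradient matches a single batch of 16
  (loss / accumulate_batches)$backward()

  if (iter %% accumulate_batches == 0) {
    opt$step()      # one update every 2 batches: virtual batch size 16
    opt$zero_grad() # clear the accumulated gradients before the next cycle
  }
}
```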
6 changes: 3 additions & 3 deletions vignettes/examples/mnist-cnn.Rmd
@@ -16,7 +16,7 @@ torch_manual_seed(1)
 
 # Datasets and loaders ----------------------------------------------------
 
-dir <- "~/Downloads/mnist" # caching directory
+dir <- "./mnist" # caching directory
 
 train_ds <- mnist_dataset(
   dir,
@@ -41,8 +41,8 @@ net <- nn_module(
   initialize = function() {
     self$conv1 <- nn_conv2d(1, 32, 3, 1)
     self$conv2 <- nn_conv2d(32, 64, 3, 1)
-    self$dropout1 <- nn_dropout2d(0.25)
-    self$dropout2 <- nn_dropout2d(0.5)
+    self$dropout1 <- nn_dropout(0.25)
+    self$dropout2 <- nn_dropout(0.5)
     self$fc1 <- nn_linear(9216, 128)
     self$fc2 <- nn_linear(128, 10)
   },
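For context on this change (which also appears in the virtual-batch-size and triplet examples): `nn_dropout2d` zeroes entire feature maps, while `nn_dropout` zeroes individual activations independently, the usual element-wise dropout behavior. A quick sketch of the difference, with made-up shapes:

```r
library(torch)

x <- torch_ones(1, 4, 2, 2) # one image, 4 channels of 2x2

# channel dropout: whole 2x2 feature maps become zero
nn_dropout2d(p = 0.5)(x)

# element dropout: scattered individual zeros instead
nn_dropout(p = 0.5)(x)
```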
6 changes: 2 additions & 4 deletions vignettes/examples/mnist-dcgan.Rmd
@@ -34,7 +34,7 @@ plot_callback <- luz_callback(
 
 # Datasets and loaders ----------------------------------------------------
 
-dir <- "~/Downloads/mnist" # caching directory
+dir <- "./mnist" # caching directory
 
 train_ds <- mnist_dataset(
   dir,
@@ -153,9 +153,8 @@ dcgan <- torch::nn_module(
   }
 )
 
-dcgan <- setup(dcgan)
-
 res <- dcgan %>%
+  setup() %>%
   set_hparams(latent_dim = 100, channels = 1) %>%
   fit(train_dl, epochs = 10, valid_data = test_dl, callbacks = list(plot_callback()))
@@ -172,6 +171,5 @@ plot(img)
 
 # Serialization ----------------------------------------------------
 luz_save(res, "mnist-dcgan.pt")
 ```
-

89 changes: 89 additions & 0 deletions vignettes/examples/mnist-mixup.Rmd
@@ -0,0 +1,89 @@
+---
+title: 'MixUp augmentation'
+desc: 'Demonstrates using the MixUp callback'
+category: 'intermediate'
+---
+
+```{r, eval = FALSE}
+# Packages ----------------------------------------------------------------
+library(torch)
+library(torchvision)
+library(luz)
+
+set.seed(1)
+torch_manual_seed(1)
+
+# Datasets and loaders ----------------------------------------------------
+dir <- "./mnist" # caching directory
+
+train_ds <- mnist_dataset(
+  dir,
+  download = TRUE,
+  transform = transform_to_tensor
+)
+
+test_ds <- mnist_dataset(
+  dir,
+  train = FALSE,
+  transform = transform_to_tensor
+)
+
+train_dl <- dataloader(train_ds, batch_size = 128, shuffle = TRUE)
+test_dl <- dataloader(test_ds, batch_size = 128)
+
+# Building the network ---------------------------------------------------
+net <- nn_module(
+  "Net",
+  initialize = function() {
+    self$conv1 <- nn_conv2d(1, 32, 3, 1)
+    self$conv2 <- nn_conv2d(32, 64, 3, 1)
+    self$dropout1 <- nn_dropout(0.25)
+    self$dropout2 <- nn_dropout(0.5)
+    self$fc1 <- nn_linear(9216, 128)
+    self$fc2 <- nn_linear(128, 10)
+  },
+  forward = function(x) {
+    x %>%
+      self$conv1() %>%
+      nnf_relu() %>%
+      self$conv2() %>%
+      nnf_relu() %>%
+      nnf_max_pool2d(2) %>%
+      self$dropout1() %>%
+      torch_flatten(start_dim = 2) %>%
+      self$fc1() %>%
+      nnf_relu() %>%
+      self$dropout2() %>%
+      self$fc2()
+  }
+)
+
+# Train -------------------------------------------------------------------
+fitted <- net %>%
+  setup(
+    loss = nn_mixup_loss(nn_cross_entropy_loss()),
+    optimizer = optim_adam
+  ) %>%
+  fit(
+    train_dl, epochs = 10, valid_data = test_dl,
+    callbacks = list(
+      luz_callback_mixup(alpha = 0.4)
+    )
+  )
+
+# Making predictions ------------------------------------------------------
+preds <- predict(fitted, test_dl)
+preds$shape
+
+# Serialization -----------------------------------------------------------
+luz_save(fitted, "mnist-mixup.pt")
+```
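MixUp trains on convex combinations of example pairs: with lambda drawn from Beta(alpha, alpha), each batch becomes x_mix = lambda * x_i + (1 - lambda) * x_j, and the loss becomes the matching mixture of the two targets' losses. A minimal sketch of the blend itself, illustrative only and not luz's internal implementation:

```r
library(torch)

alpha <- 0.4
lambda <- rbeta(1, alpha, alpha)

x1 <- torch_randn(1, 28, 28) # two example images
x2 <- torch_randn(1, 28, 28)

# blended input; the corresponding loss is
# lambda * loss(pred, y1) + (1 - lambda) * loss(pred, y2)
x_mix <- lambda * x1 + (1 - lambda) * x2
```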


7 changes: 3 additions & 4 deletions vignettes/examples/mnist-triplet.Rmd
@@ -12,7 +12,7 @@ library(luz)
 
 # Datasets and loaders ----------------------------------------------------
 
-dir <- "~/Downloads/mnist" # caching directory
+dir <- "./mnist" # caching directory
 
 triplet_mnist_dataset <- dataset(
   inherit = mnist_dataset,
@@ -56,8 +56,8 @@ net <- nn_module(
   initialize = function(embedding_dim) {
     self$conv1 <- nn_conv2d(1, 32, 3, 1)
     self$conv2 <- nn_conv2d(32, 64, 3, 1)
-    self$dropout1 <- nn_dropout2d(0.25)
-    self$dropout2 <- nn_dropout2d(0.5)
+    self$dropout1 <- nn_dropout(0.25)
+    self$dropout2 <- nn_dropout(0.5)
     self$fc1 <- nn_linear(9216, 512)
     self$fc2 <- nn_linear(512, embedding_dim)
   },
@@ -100,6 +100,5 @@ fitted <- triplet_model %>%
 
 # Serializing
 luz_save(fitted, "triplet.pt")
 ```
-

4 changes: 1 addition & 3 deletions vignettes/examples/pets-unet.Rmd
@@ -15,7 +15,7 @@ library(luz)
 
 # Datasets and loaders ----------------------------------------------------
 
-dir <- "~/Downloads/" #caching directory
+dir <- "./pets" #caching directory
 
 # A light wrapper around the `oxford_pet_dataset` that resizes and transforms
 # input images and masks to the specified `size` and introduces the `augmentation`
@@ -73,8 +73,6 @@ valid_ds <- pet_dataset(
 train_dl <- dataloader(train_ds, batch_size = 32, shuffle = TRUE)
 valid_dl <- dataloader(valid_ds, batch_size = 32)
 
-x <- coro::collect(train_dl, 1)
-
 # Define the network ------------------------------------------------------
 
 # We use a pre-trained mobile net encoder. We take intermediate layers to use
