
Commit 5a054c1

Azure Pipelines committed
Merge remote-tracking branch 'origin/main' into publication
2 parents d7b9c76 + 5cd053f

43 files changed: +148 −184 lines changed

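Every change in this merge follows the same pattern: the notebooks switch from the unified `lightning` package to the standalone `pytorch_lightning` distribution, and the per-notebook requirements drop the `lightning>=2.0.0` pin (presumably provided by the shared build environment). A minimal sketch of the rename — for everything these notebooks use, the two packages expose the same API under different import paths:

```python
# Before this commit (unified package):
# import lightning as L
# L.seed_everything(42)

# After this commit (standalone package):
import pytorch_lightning as pl

pl.seed_everything(42)  # seeds Python's `random`, NumPy, and PyTorch in one call
```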
Lines changed: 0 additions & 2 deletions

```diff
@@ -1,2 +0,0 @@
-matplotlib
-lightning>=2.0.0
```
Lines changed: 0 additions & 2 deletions

```diff
@@ -1,4 +1,2 @@
 torchvision
-matplotlib
 seaborn
-lightning>=2.0.0
```

course_UvA-DL/03-initialization-and-optimization/notebook.py

Lines changed: 3 additions & 3 deletions

```diff
@@ -13,12 +13,12 @@
 import urllib.request
 from urllib.error import HTTPError
 
-import lightning as L
 import matplotlib.pyplot as plt
 
 # %matplotlib inline
 import matplotlib_inline.backend_inline
 import numpy as np
+import pytorch_lightning as pl
 import seaborn as sns
 import torch
 import torch.nn as nn
@@ -33,7 +33,7 @@
 sns.set()
 
 # %% [markdown]
-# Instead of the `set_seed` function as in Tutorial 3, we can use Lightning's built-in function `L.seed_everything`.
+# Instead of the `set_seed` function as in Tutorial 3, we can use Lightning's built-in function `pl.seed_everything`.
 # We will reuse the path variables `DATASET_PATH` and `CHECKPOINT_PATH` as in Tutorial 3.
 # Adjust the paths if necessary.
 
@@ -44,7 +44,7 @@
 CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/InitOptim/")
 
 # Seed everything
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
 torch.backends.cudnn.deterministic = True
```
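The seeding line changed above sits next to the cuDNN determinism flag visible in the hunk. For reference, a sketch of the full reproducibility preamble these notebooks use; the `benchmark = False` line is an assumption based on the usual pairing and is not visible in this diff:

```python
import pytorch_lightning as pl
import torch

pl.seed_everything(42)  # one seed for Python, NumPy, and PyTorch RNGs

# Deterministic cuDNN kernels trade speed for bit-exact GPU reproducibility.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False  # assumed companion setting; not shown in the hunk
```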
Lines changed: 0 additions & 2 deletions

```diff
@@ -1,4 +1,2 @@
 torchvision
-matplotlib
 seaborn
-lightning>=2.0.0
```

course_UvA-DL/04-inception-resnet-densenet/notebook.py

Lines changed: 9 additions & 9 deletions

```diff
@@ -8,11 +8,11 @@
 from types import SimpleNamespace
 from urllib.error import HTTPError
 
-import lightning as L
 import matplotlib
 import matplotlib.pyplot as plt
 import matplotlib_inline.backend_inline
 import numpy as np
+import pytorch_lightning as pl
 import seaborn as sns
 import tabulate
 import torch
@@ -47,7 +47,7 @@
 
 
 # Function for setting the seed
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
 torch.backends.cudnn.deterministic = True
@@ -137,9 +137,9 @@
 # We need to do a little trick because the validation set should not use the augmentation.
 train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True)
 val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True)
-L.seed_everything(42)
+pl.seed_everything(42)
 train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000])
-L.seed_everything(42)
+pl.seed_everything(42)
 _, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000])
 
 # Loading the test set
```
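The double `pl.seed_everything(42)` in the hunk above is deliberate: re-seeding immediately before each `random_split` makes both calls draw the same permutation, so the augmented training subset and the clean validation subset partition the 50k images identically. A self-contained sketch, with toy tensors standing in for the two CIFAR10 instances:

```python
import pytorch_lightning as pl
import torch

# Toy stand-ins for the two CIFAR10 datasets (train_transform vs. test_transform).
aug_dataset = torch.arange(50000)
clean_dataset = torch.arange(50000)

pl.seed_everything(42)
train_set, _ = torch.utils.data.random_split(aug_dataset, [45000, 5000])
pl.seed_everything(42)  # same seed -> same permutation
_, val_set = torch.utils.data.random_split(clean_dataset, [45000, 5000])

# The index sets are complementary: no sample lands in both splits.
assert set(train_set.indices).isdisjoint(set(val_set.indices))
```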
```diff
@@ -193,12 +193,12 @@
 
 # %%
 # Setting the seed
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # %% [markdown]
 # Thus, in the future, we don't have to define our own `set_seed` function anymore.
 #
-# In PyTorch Lightning, we define `L.LightningModule`'s (inheriting from `Module`) that organize our code into 5 main sections:
+# In PyTorch Lightning, we define `pl.LightningModule`'s (inheriting from `Module`) that organize our code into 5 main sections:
 #
 # 1. Initialization (`__init__`), where we create all necessary parameters/models
 # 2. Optimizers (`configure_optimizers`) where we create the optimizers, learning rate scheduler, etc.
```
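The five sections named in the hunk (the diff context cuts off after the first two; the remaining three are presumably the training, validation, and test loops) map directly onto `pl.LightningModule` hooks. A minimal, hypothetical sketch of the shape such a module takes — the class and layer here are illustrative, not the notebook's `CIFARModule`:

```python
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F


class LitClassifier(pl.LightningModule):
    def __init__(self):
        super().__init__()  # 1. Initialization
        self.model = nn.Linear(32, 10)  # stand-in for a real backbone

    def configure_optimizers(self):  # 2. Optimizers
        return torch.optim.SGD(self.parameters(), lr=0.1)

    def training_step(self, batch, batch_idx):  # 3. Training loop
        x, y = batch
        loss = F.cross_entropy(self.model(x), y)
        self.log("train_loss", loss)
        return loss

    def validation_step(self, batch, batch_idx):  # 4. Validation loop
        x, y = batch
        self.log("val_acc", (self.model(x).argmax(-1) == y).float().mean())

    def test_step(self, batch, batch_idx):  # 5. Test loop
        x, y = batch
        self.log("test_acc", (self.model(x).argmax(-1) == y).float().mean())
```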
```diff
@@ -215,7 +215,7 @@
 
 
 # %%
-class CIFARModule(L.LightningModule):
+class CIFARModule(pl.LightningModule):
     def __init__(self, model_name, model_hparams, optimizer_name, optimizer_hparams):
         """CIFARModule.
 
@@ -350,7 +350,7 @@ def train_model(model_name, save_name=None, **kwargs):
         save_name = model_name
 
     # Create a PyTorch Lightning trainer with the generation callback
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=os.path.join(CHECKPOINT_PATH, save_name),  # Where to save models
         # We run on a single GPU (if possible)
         accelerator="auto",
@@ -374,7 +374,7 @@ def train_model(model_name, save_name=None, **kwargs):
         # Automatically loads the model with the saved hyperparameters
         model = CIFARModule.load_from_checkpoint(pretrained_filename)
     else:
-        L.seed_everything(42)  # To be reproducible
+        pl.seed_everything(42)  # To be reproducible
         model = CIFARModule(model_name=model_name, **kwargs)
         trainer.fit(model, train_loader, val_loader)
         model = CIFARModule.load_from_checkpoint(
```
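The last hunk ends mid-statement; the `train_model` logic it belongs to is the usual load-or-train pattern. A generalized sketch of that flow, written as a standalone helper under stated assumptions (the `best_model_path` reload matches how the notebook's truncated line presumably continues, but is not shown in this diff):

```python
import os

import pytorch_lightning as pl


def fit_or_load(module_cls, trainer, ckpt_path, train_loader, val_loader, **kwargs):
    """Reuse a pretrained checkpoint if one exists, else train and reload the best one."""
    if os.path.isfile(ckpt_path):
        # Automatically loads the model with the saved hyperparameters
        return module_cls.load_from_checkpoint(ckpt_path)
    pl.seed_everything(42)  # to be reproducible
    model = module_cls(**kwargs)
    trainer.fit(model, train_loader, val_loader)
    # Reload the best checkpoint found during training
    return module_cls.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
```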
Lines changed: 0 additions & 2 deletions

```diff
@@ -1,6 +1,4 @@
 torchvision
-matplotlib
 seaborn
 tabulate
-lightning>=2.0.0
 tensorboard
```

course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py

Lines changed: 9 additions & 9 deletions

```diff
@@ -22,14 +22,14 @@
 from functools import partial
 from urllib.error import HTTPError
 
-# PyTorch Lightning
-import lightning as L
-
 # Plotting
 import matplotlib
 import matplotlib.pyplot as plt
 import matplotlib_inline.backend_inline
 import numpy as np
+
+# PyTorch Lightning
+import pytorch_lightning as pl
 import seaborn as sns
 
 # PyTorch
@@ -58,7 +58,7 @@
 CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/Transformers/")
 
 # Setting the seed
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
 torch.backends.cudnn.deterministic = True
@@ -246,7 +246,7 @@ def scaled_dot_product(q, k, v, mask=None):
 
 # %%
 seq_len, d_k = 3, 2
-L.seed_everything(42)
+pl.seed_everything(42)
 q = torch.randn(seq_len, d_k)
 k = torch.randn(seq_len, d_k)
 v = torch.randn(seq_len, d_k)
```
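The hunk above only re-seeds the demo inputs; `scaled_dot_product` itself is unchanged and its body is not part of this diff. For reference, a sketch of the standard formulation such a function implements, assuming the conventional attention definition Attention(Q, K, V) = softmax(QKᵀ/√d_k)·V:

```python
import math

import torch
import torch.nn.functional as F


def scaled_dot_product(q, k, v, mask=None):
    # Scale logits by sqrt(d_k) to keep softmax gradients well-behaved.
    attn_logits = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(q.size(-1))
    if mask is not None:
        attn_logits = attn_logits.masked_fill(mask == 0, float("-inf"))
    attention = F.softmax(attn_logits, dim=-1)
    return torch.matmul(attention, v), attention
```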
```diff
@@ -748,7 +748,7 @@ def get_lr_factor(self, epoch):
 
 
 # %%
-class TransformerPredictor(L.LightningModule):
+class TransformerPredictor(pl.LightningModule):
     def __init__(
         self,
         input_dim,
@@ -965,7 +965,7 @@ def test_step(self, batch, batch_idx):
 
 # %% [markdown]
 # Finally, we can create a training function similar to the one we have seen in Tutorial 5 for PyTorch Lightning.
-# We create a `L.Trainer` object, running for $N$ epochs, logging in TensorBoard, and saving our best model based on the validation.
+# We create a `pl.Trainer` object, running for $N$ epochs, logging in TensorBoard, and saving our best model based on the validation.
 # Afterward, we test our models on the test set.
 # An additional parameter we pass to the trainer here is `gradient_clip_val`.
 # This clips the norm of the gradients for all parameters before taking an optimizer step and prevents the model
```
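As the hunk explains, `gradient_clip_val` caps the global gradient norm before each optimizer step. A minimal sketch of how it is passed to the trainer; the concrete values here are illustrative assumptions, since the diff context does not show them:

```python
import pytorch_lightning as pl

# gradient_clip_val rescales the gradient norm to at most the given value
# before every optimizer step, guarding against exploding gradients.
trainer = pl.Trainer(
    max_epochs=10,        # illustrative setting
    gradient_clip_val=5,  # assumed value; not visible in this hunk
    accelerator="auto",
    devices=1,
)
```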
```diff
@@ -983,7 +983,7 @@ def train_reverse(**kwargs):
     # Create a PyTorch Lightning trainer with the generation callback
     root_dir = os.path.join(CHECKPOINT_PATH, "ReverseTask")
     os.makedirs(root_dir, exist_ok=True)
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
         accelerator="auto",
@@ -1444,7 +1444,7 @@ def train_anomaly(**kwargs):
     # Create a PyTorch Lightning trainer with the generation callback
     root_dir = os.path.join(CHECKPOINT_PATH, "SetAnomalyTask")
     os.makedirs(root_dir, exist_ok=True)
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
         accelerator="auto",
```
Lines changed: 0 additions & 2 deletions

```diff
@@ -1,4 +1,2 @@
 torchvision
-matplotlib
 seaborn
-lightning>=2.0.0
```

course_UvA-DL/06-graph-neural-networks/overview.py

Lines changed: 10 additions & 10 deletions

```diff
@@ -11,7 +11,7 @@
 from urllib.error import HTTPError
 
 # PyTorch Lightning
-import lightning as L
+import pytorch_lightning as pl
 
 # PyTorch
 import torch
@@ -36,7 +36,7 @@
 CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/GNNs/")
 
 # Setting the seed
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
 torch.backends.cudnn.deterministic = True
@@ -592,7 +592,7 @@ def forward(self, x, *args, **kwargs):
 
 
 # %%
-class NodeLevelGNN(L.LightningModule):
+class NodeLevelGNN(pl.LightningModule):
     def __init__(self, model_name, **model_kwargs):
         super().__init__()
         # Saving hyperparameters
@@ -654,13 +654,13 @@ def test_step(self, batch, batch_idx):
 
 # %%
 def train_node_classifier(model_name, dataset, **model_kwargs):
-    L.seed_everything(42)
+    pl.seed_everything(42)
     node_data_loader = geom_data.DataLoader(dataset, batch_size=1)
 
     # Create a PyTorch Lightning trainer
     root_dir = os.path.join(CHECKPOINT_PATH, "NodeLevel" + model_name)
     os.makedirs(root_dir, exist_ok=True)
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
         accelerator="auto",
@@ -676,7 +676,7 @@ def train_node_classifier(model_name, dataset, **model_kwargs):
         print("Found pretrained model, loading...")
         model = NodeLevelGNN.load_from_checkpoint(pretrained_filename)
     else:
-        L.seed_everything()
+        pl.seed_everything()
         model = NodeLevelGNN(
             model_name=model_name, c_in=dataset.num_node_features, c_out=dataset.num_classes, **model_kwargs
         )
@@ -892,7 +892,7 @@ def forward(self, x, edge_index, batch_idx):
 
 
 # %%
-class GraphLevelGNN(L.LightningModule):
+class GraphLevelGNN(pl.LightningModule):
     def __init__(self, **model_kwargs):
         super().__init__()
         # Saving hyperparameters
@@ -941,12 +941,12 @@ def test_step(self, batch, batch_idx):
 
 # %%
 def train_graph_classifier(model_name, **model_kwargs):
-    L.seed_everything(42)
+    pl.seed_everything(42)
 
     # Create a PyTorch Lightning trainer with the generation callback
     root_dir = os.path.join(CHECKPOINT_PATH, "GraphLevel" + model_name)
     os.makedirs(root_dir, exist_ok=True)
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
         accelerator="cuda",
@@ -962,7 +962,7 @@ def train_graph_classifier(model_name, **model_kwargs):
         print("Found pretrained model, loading...")
         model = GraphLevelGNN.load_from_checkpoint(pretrained_filename)
     else:
-        L.seed_everything(42)
+        pl.seed_everything(42)
         model = GraphLevelGNN(
             c_in=tu_dataset.num_node_features,
             c_out=1 if tu_dataset.num_classes == 2 else tu_dataset.num_classes,
```
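Every trainer in this file wires in the same `ModelCheckpoint` callback. A short sketch of that pattern and how the best weights are recovered afterwards; the module class and `max_epochs` value here are placeholders:

```python
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint

# Keep only the weights that maximize validation accuracy.
checkpoint_cb = ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")
trainer = pl.Trainer(
    callbacks=[checkpoint_cb],
    accelerator="auto",
    devices=1,
    max_epochs=10,  # illustrative setting
)
# After trainer.fit(model, train_loader, val_loader), the best checkpoint is at
# checkpoint_cb.best_model_path, e.g.:
# best = NodeLevelGNN.load_from_checkpoint(checkpoint_cb.best_model_path)
```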

course_UvA-DL/06-graph-neural-networks/requirements.txt

Lines changed: 0 additions & 1 deletion

```diff
@@ -3,4 +3,3 @@ torch-sparse
 torch-cluster
 torch-spline-conv
 torch-geometric>=2.0.0,<2.5.0
-lightning>=2.0.0
```
Lines changed: 0 additions & 1 deletion

```diff
@@ -1,4 +1,3 @@
 torchvision
 matplotlib
 tensorboard
-pytorch-lightning>=2.0.0
```

course_UvA-DL/08-deep-autoencoders/notebook.py

Lines changed: 5 additions & 5 deletions

```diff
@@ -6,10 +6,10 @@
 import urllib.request
 from urllib.error import HTTPError
 
-import lightning as L
 import matplotlib
 import matplotlib.pyplot as plt
 import matplotlib_inline.backend_inline
+import pytorch_lightning as pl
 import seaborn as sns
 import torch
 import torch.nn as nn
@@ -38,7 +38,7 @@
 CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/tutorial9")
 
 # Setting the seed
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
 torch.backends.cudnn.deterministic = True
@@ -94,7 +94,7 @@
 
 # Loading the training dataset. We need to split it into a training and validation part
 train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=transform, download=True)
-L.seed_everything(42)
+pl.seed_everything(42)
 train_set, val_set = torch.utils.data.random_split(train_dataset, [45000, 5000])
 
 # Loading the test set
@@ -240,7 +240,7 @@ def forward(self, x):
 
 
 # %%
-class Autoencoder(L.LightningModule):
+class Autoencoder(pl.LightningModule):
     def __init__(
         self,
         base_channel_size: int,
@@ -387,7 +387,7 @@ def on_train_epoch_end(self, trainer, pl_module):
 # %%
 def train_cifar(latent_dim):
     # Create a PyTorch Lightning trainer with the generation callback
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=os.path.join(CHECKPOINT_PATH, "cifar10_%i" % latent_dim),
         accelerator="auto",
         devices=1,
```
Lines changed: 0 additions & 2 deletions

```diff
@@ -1,5 +1,3 @@
 torchvision
-matplotlib
 seaborn
-lightning>=2.0.0
 tensorboard
```
