
Commit 1cb71d5

Author: Azure Pipelines
Commit message: Merge remote-tracking branch 'origin/main' into publication
Parents: c19f155 + 8799474

File tree: 48 files changed, +190 −204 lines

Some content is hidden: large commits have some content hidden by default, so several file names below are not shown.

.github/workflows/ci_docs.yml

Lines changed: 2 additions & 2 deletions
@@ -90,7 +90,7 @@ jobs:
           done <<< $(cat changed-folders.txt)
           tree changed-notebooks
 
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         if: ${{ matrix.target == 'html' && env.NB_DIRS != 0 }}
         with:
           name: notebooks-${{ github.sha }}
@@ -102,7 +102,7 @@ jobs:
 
       - name: Upload built docs
         if: ${{ matrix.target == 'html' }}
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
        with:
          name: docs-html-${{ github.sha }}
          path: _docs/build/html/

.notebooks/README.md

Lines changed: 1 addition & 0 deletions
@@ -4,6 +4,7 @@ shadow folder for generated notebooks, no uploading here
 
 ```bash
 git checkout -b publication main
+export MAKEFLAGS="-j$(nproc)"
 make ipynb
 git commit -m "regenerate all notebooks"
 git push

README.md

Lines changed: 16 additions & 0 deletions
@@ -97,3 +97,19 @@ pytest -v notebook.ipynb --nbval
 # 3) generating notebooks outputs
 papermill in-notebook.ipynb out-notebook.ipynb
 ```
+
+## Development tips
+
+### Local docs build
+
+You may want to build the documentation locally without executing all notebooks.
+In that case you can convert all scripts to IPython notebooks as a dry run:
+
+```bash
+# skip notebook execution, just convert
+export DRY_RUN=1
+# generate notebooks from scripts
+make ipynb
+# build the documentation
+make docs
+```

_requirements/devel.txt

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ virtualenv>=20.10
 # pytest>=7.0, <9.0
 # testing with own fork with extended cell timeout
 # https://github.com/Borda/nbval/archive/refs/heads/timeout-limit.zip
-ipython[notebook]>=8.0.0, <8.17.0
+ipython[notebook]>=8.0.0, <8.24.0
 urllib3 # for ipython
 jupytext>=1.10, <1.15 # converting
 papermill>=2.3.4, <2.5.0 # rendering
Lines changed: 0 additions & 2 deletions

@@ -1,2 +0,0 @@
-matplotlib
-lightning>=2.0.0
Lines changed: 0 additions & 2 deletions

@@ -1,4 +1,2 @@
 torchvision
-matplotlib
 seaborn
-lightning>=2.0.0

course_UvA-DL/03-initialization-and-optimization/notebook.py

Lines changed: 3 additions & 3 deletions
@@ -13,12 +13,12 @@
 import urllib.request
 from urllib.error import HTTPError
 
-import lightning as L
 import matplotlib.pyplot as plt
 
 # %matplotlib inline
 import matplotlib_inline.backend_inline
 import numpy as np
+import pytorch_lightning as pl
 import seaborn as sns
 import torch
 import torch.nn as nn
@@ -33,7 +33,7 @@
 sns.set()
 
 # %% [markdown]
-# Instead of the `set_seed` function as in Tutorial 3, we can use Lightning's built-in function `L.seed_everything`.
+# Instead of the `set_seed` function as in Tutorial 3, we can use Lightning's built-in function `pl.seed_everything`.
 # We will reuse the path variables `DATASET_PATH` and `CHECKPOINT_PATH` as in Tutorial 3.
 # Adjust the paths if necessary.
 
@@ -44,7 +44,7 @@
 CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/InitOptim/")
 
 # Seed everything
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
 torch.backends.cudnn.deterministic = True
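
The notebook change above is mechanical: the `lightning as L` import becomes `pytorch_lightning as pl` and every `L.*` call becomes `pl.*`. A minimal sketch of the resulting reproducibility setup, assuming `pytorch_lightning` is installed (values mirror the diff; the `benchmark` flag is the usual companion setting, not shown in the hunk):

```python
import pytorch_lightning as pl
import torch

# Seed Python, NumPy and PyTorch RNGs in one call (replaces the former L.seed_everything)
pl.seed_everything(42)

# Make cuDNN deterministic so GPU runs are reproducible
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
```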
Lines changed: 0 additions & 2 deletions

@@ -1,4 +1,2 @@
 torchvision
-matplotlib
 seaborn
-lightning>=2.0.0

course_UvA-DL/04-inception-resnet-densenet/notebook.py

Lines changed: 10 additions & 10 deletions
@@ -8,11 +8,11 @@
 from types import SimpleNamespace
 from urllib.error import HTTPError
 
-import lightning as L
 import matplotlib
 import matplotlib.pyplot as plt
 import matplotlib_inline.backend_inline
 import numpy as np
+import pytorch_lightning as pl
 import seaborn as sns
 import tabulate
 import torch
@@ -23,7 +23,6 @@
 
 # %matplotlib inline
 from IPython.display import HTML, display
-from lightning.pytorch.callbacks import LearningRateMonitor, ModelCheckpoint
 from PIL import Image
 from torchvision import transforms
 from torchvision.datasets import CIFAR10
@@ -47,7 +46,7 @@
 
 
 # Function for setting the seed
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
 torch.backends.cudnn.deterministic = True
@@ -137,9 +136,9 @@
 # We need to do a little trick because the validation set should not use the augmentation.
 train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True)
 val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True)
-L.seed_everything(42)
+pl.seed_everything(42)
 train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000])
-L.seed_everything(42)
+pl.seed_everything(42)
 _, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000])
 
 # Loading the test set
@@ -193,12 +192,12 @@
 
 # %%
 # Setting the seed
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # %% [markdown]
 # Thus, in the future, we don't have to define our own `set_seed` function anymore.
 #
-# In PyTorch Lightning, we define `L.LightningModule`'s (inheriting from `Module`) that organize our code into 5 main sections:
+# In PyTorch Lightning, we define `pl.LightningModule`'s (inheriting from `Module`) that organize our code into 5 main sections:
 #
 # 1. Initialization (`__init__`), where we create all necessary parameters/models
 # 2. Optimizers (`configure_optimizers`) where we create the optimizers, learning rate scheduler, etc.
@@ -215,7 +214,7 @@
 
 
 # %%
-class CIFARModule(L.LightningModule):
+class CIFARModule(pl.LightningModule):
     def __init__(self, model_name, model_hparams, optimizer_name, optimizer_hparams):
         """CIFARModule.
 
@@ -296,6 +295,7 @@ def test_step(self, batch, batch_idx):
 
 # %%
 # Callbacks
+from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint # noqa: E402
 
 # %% [markdown]
 # To allow running multiple different models with the same Lightning module, we define a function below that maps a model name to the model class.
@@ -350,7 +350,7 @@ def train_model(model_name, save_name=None, **kwargs):
         save_name = model_name
 
     # Create a PyTorch Lightning trainer with the generation callback
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=os.path.join(CHECKPOINT_PATH, save_name), # Where to save models
         # We run on a single GPU (if possible)
         accelerator="auto",
@@ -374,7 +374,7 @@ def train_model(model_name, save_name=None, **kwargs):
         # Automatically loads the model with the saved hyperparameters
         model = CIFARModule.load_from_checkpoint(pretrained_filename)
     else:
-        L.seed_everything(42) # To be reproducible
+        pl.seed_everything(42) # To be reproducible
        model = CIFARModule(model_name=model_name, **kwargs)
        trainer.fit(model, train_loader, val_loader)
        model = CIFARModule.load_from_checkpoint(
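
As background for the `pl.LightningModule` refactor above, the five sections the tutorial text lists map onto a module skeleton roughly like the one below. This is a hedged sketch with an illustrative model and a hypothetical class name, not the tutorial's actual `CIFARModule`:

```python
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F


class LitClassifier(pl.LightningModule):
    # 1. Initialization: create all necessary parameters/models
    def __init__(self, num_classes: int = 10, lr: float = 1e-3):
        super().__init__()
        self.save_hyperparameters()
        self.model = nn.Sequential(nn.Flatten(), nn.Linear(32 * 32 * 3, num_classes))

    # 2. Optimizers: create optimizers, learning rate schedulers, etc.
    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)

    # 3. Training loop: one training step on a batch
    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = F.cross_entropy(self.model(x), y)
        self.log("train_loss", loss)
        return loss

    # 4. Validation loop: metrics on the validation set
    def validation_step(self, batch, batch_idx):
        x, y = batch
        acc = (self.model(x).argmax(dim=-1) == y).float().mean()
        self.log("val_acc", acc)

    # 5. Test loop: metrics on the test set
    def test_step(self, batch, batch_idx):
        x, y = batch
        acc = (self.model(x).argmax(dim=-1) == y).float().mean()
        self.log("test_acc", acc)
```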
Lines changed: 0 additions & 2 deletions

@@ -1,6 +1,4 @@
 torchvision
-matplotlib
 seaborn
 tabulate
-lightning>=2.0.0
 tensorboard

course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py

Lines changed: 10 additions & 10 deletions
@@ -22,14 +22,14 @@
 from functools import partial
 from urllib.error import HTTPError
 
-# PyTorch Lightning
-import lightning as L
-
 # Plotting
 import matplotlib
 import matplotlib.pyplot as plt
 import matplotlib_inline.backend_inline
 import numpy as np
+
+# PyTorch Lightning
+import pytorch_lightning as pl
 import seaborn as sns
 
 # PyTorch
@@ -41,7 +41,7 @@
 
 # Torchvision
 import torchvision
-from lightning.pytorch.callbacks import ModelCheckpoint
+from pytorch_lightning.callbacks import ModelCheckpoint
 from torchvision import transforms
 from torchvision.datasets import CIFAR100
 from tqdm.notebook import tqdm
@@ -58,7 +58,7 @@
 CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/Transformers/")
 
 # Setting the seed
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
 torch.backends.cudnn.deterministic = True
@@ -246,7 +246,7 @@ def scaled_dot_product(q, k, v, mask=None):
 
 # %%
 seq_len, d_k = 3, 2
-L.seed_everything(42)
+pl.seed_everything(42)
 q = torch.randn(seq_len, d_k)
 k = torch.randn(seq_len, d_k)
 v = torch.randn(seq_len, d_k)
@@ -748,7 +748,7 @@ def get_lr_factor(self, epoch):
 
 
 # %%
-class TransformerPredictor(L.LightningModule):
+class TransformerPredictor(pl.LightningModule):
     def __init__(
         self,
         input_dim,
@@ -965,7 +965,7 @@ def test_step(self, batch, batch_idx):
 
 # %% [markdown]
 # Finally, we can create a training function similar to the one we have seen in Tutorial 5 for PyTorch Lightning.
-# We create a `L.Trainer` object, running for $N$ epochs, logging in TensorBoard, and saving our best model based on the validation.
+# We create a `pl.Trainer` object, running for $N$ epochs, logging in TensorBoard, and saving our best model based on the validation.
 # Afterward, we test our models on the test set.
 # An additional parameter we pass to the trainer here is `gradient_clip_val`.
 # This clips the norm of the gradients for all parameters before taking an optimizer step and prevents the model
@@ -983,7 +983,7 @@ def train_reverse(**kwargs):
     # Create a PyTorch Lightning trainer with the generation callback
     root_dir = os.path.join(CHECKPOINT_PATH, "ReverseTask")
     os.makedirs(root_dir, exist_ok=True)
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
         accelerator="auto",
@@ -1444,7 +1444,7 @@ def train_anomaly(**kwargs):
     # Create a PyTorch Lightning trainer with the generation callback
     root_dir = os.path.join(CHECKPOINT_PATH, "SetAnomalyTask")
    os.makedirs(root_dir, exist_ok=True)
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
         accelerator="auto",
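
For reference, the `scaled_dot_product(q, k, v, mask=None)` helper named in one of the hunk headers above computes softmax(QK^T / sqrt(d_k)) V. A self-contained sketch of that textbook formulation, with a usage snippet mirroring the small seeded tensors in the diff (the exact tutorial body may differ slightly):

```python
import math

import torch
import torch.nn.functional as F


def scaled_dot_product(q, k, v, mask=None):
    d_k = q.size(-1)
    # Attention logits: QK^T scaled by sqrt(d_k)
    attn_logits = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Mask out disallowed positions before the softmax
        attn_logits = attn_logits.masked_fill(mask == 0, -9e15)
    attention = F.softmax(attn_logits, dim=-1)
    values = torch.matmul(attention, v)
    return values, attention


# Usage mirroring the diff: small random q, k, v with a fixed seed
torch.manual_seed(42)
seq_len, d_k = 3, 2
q, k, v = torch.randn(seq_len, d_k), torch.randn(seq_len, d_k), torch.randn(seq_len, d_k)
values, attention = scaled_dot_product(q, k, v)
```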
Lines changed: 0 additions & 2 deletions

@@ -1,4 +1,2 @@
 torchvision
-matplotlib
 seaborn
-lightning>=2.0.0

course_UvA-DL/06-graph-neural-networks/overview.py

Lines changed: 11 additions & 11 deletions
@@ -11,7 +11,7 @@
 from urllib.error import HTTPError
 
 # PyTorch Lightning
-import lightning as L
+import pytorch_lightning as pl
 
 # PyTorch
 import torch
@@ -25,7 +25,7 @@
 import torch_geometric.nn as geom_nn
 
 # PL callbacks
-from lightning.pytorch.callbacks import ModelCheckpoint
+from pytorch_lightning.callbacks import ModelCheckpoint
 from torch import Tensor
 
 AVAIL_GPUS = min(1, torch.cuda.device_count())
@@ -36,7 +36,7 @@
 CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/GNNs/")
 
 # Setting the seed
-L.seed_everything(42)
+pl.seed_everything(42)
 
 # Ensure that all operations are deterministic on GPU (if used) for reproducibility
 torch.backends.cudnn.deterministic = True
@@ -592,7 +592,7 @@ def forward(self, x, *args, **kwargs):
 
 
 # %%
-class NodeLevelGNN(L.LightningModule):
+class NodeLevelGNN(pl.LightningModule):
     def __init__(self, model_name, **model_kwargs):
         super().__init__()
         # Saving hyperparameters
@@ -654,13 +654,13 @@ def test_step(self, batch, batch_idx):
 
 # %%
 def train_node_classifier(model_name, dataset, **model_kwargs):
-    L.seed_everything(42)
+    pl.seed_everything(42)
     node_data_loader = geom_data.DataLoader(dataset, batch_size=1)
 
     # Create a PyTorch Lightning trainer
     root_dir = os.path.join(CHECKPOINT_PATH, "NodeLevel" + model_name)
     os.makedirs(root_dir, exist_ok=True)
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
         accelerator="auto",
@@ -676,7 +676,7 @@ def train_node_classifier(model_name, dataset, **model_kwargs):
         print("Found pretrained model, loading...")
         model = NodeLevelGNN.load_from_checkpoint(pretrained_filename)
     else:
-        L.seed_everything()
+        pl.seed_everything()
         model = NodeLevelGNN(
             model_name=model_name, c_in=dataset.num_node_features, c_out=dataset.num_classes, **model_kwargs
         )
@@ -892,7 +892,7 @@ def forward(self, x, edge_index, batch_idx):
 
 
 # %%
-class GraphLevelGNN(L.LightningModule):
+class GraphLevelGNN(pl.LightningModule):
     def __init__(self, **model_kwargs):
         super().__init__()
         # Saving hyperparameters
@@ -941,12 +941,12 @@ def test_step(self, batch, batch_idx):
 
 # %%
 def train_graph_classifier(model_name, **model_kwargs):
-    L.seed_everything(42)
+    pl.seed_everything(42)
 
     # Create a PyTorch Lightning trainer with the generation callback
     root_dir = os.path.join(CHECKPOINT_PATH, "GraphLevel" + model_name)
     os.makedirs(root_dir, exist_ok=True)
-    trainer = L.Trainer(
+    trainer = pl.Trainer(
         default_root_dir=root_dir,
         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
         accelerator="cuda",
@@ -962,7 +962,7 @@ def train_graph_classifier(model_name, **model_kwargs):
         print("Found pretrained model, loading...")
         model = GraphLevelGNN.load_from_checkpoint(pretrained_filename)
     else:
-        L.seed_everything(42)
+        pl.seed_everything(42)
         model = GraphLevelGNN(
             c_in=tu_dataset.num_node_features,
             c_out=1 if tu_dataset.num_classes == 2 else tu_dataset.num_classes,
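
All of the training helpers touched in this file follow the same load-or-train pattern around `pl.Trainer`. A condensed sketch of that pattern under stated assumptions (the function name, paths, and `max_epochs` value are illustrative; the callback, accelerator, and seeding mirror the diff):

```python
import os

import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint


def train_model(model_cls, root_dir, pretrained_filename, train_loader, val_loader, **model_kwargs):
    os.makedirs(root_dir, exist_ok=True)
    trainer = pl.Trainer(
        default_root_dir=root_dir,
        callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
        accelerator="auto",
        max_epochs=100,
    )
    if os.path.isfile(pretrained_filename):
        # Reuse a pretrained checkpoint if one is shipped with the tutorial
        model = model_cls.load_from_checkpoint(pretrained_filename)
    else:
        pl.seed_everything(42)  # reproducible weight init and data split
        model = model_cls(**model_kwargs)
        trainer.fit(model, train_loader, val_loader)
        model = model_cls.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
    return model
```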

course_UvA-DL/06-graph-neural-networks/requirements.txt

Lines changed: 0 additions & 1 deletion
@@ -3,4 +3,3 @@ torch-sparse
 torch-cluster
 torch-spline-conv
 torch-geometric>=2.0.0,<2.5.0
-lightning>=2.0.0
Lines changed: 0 additions & 1 deletion

@@ -1,4 +1,3 @@
 torchvision
 matplotlib
 tensorboard
-pytorch-lightning>=2.0.0
