Commit fcc24b3
Azure Pipelines committed
Merge remote-tracking branch 'origin/main' into publication
2 parents 5838577 + e24bec1

File tree: 8 files changed (+39, -26 lines)


.actions/assistant.py
Lines changed: 17 additions & 4 deletions

@@ -8,7 +8,7 @@
 from datetime import datetime
 from shutil import copyfile
 from textwrap import wrap
-from typing import Any, Dict, List, Optional, Sequence, Tuple
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
 from warnings import warn

 import fire
@@ -283,11 +283,12 @@ def _valid_accelerator(folder: str) -> bool:
         return any(ac in meta_accels for ac in device_accels)

     @staticmethod
-    def _parse_requirements(folder: str) -> Tuple[str, str]:
+    def _parse_requirements(folder: str, formatted: bool = True) -> Union[Tuple[str, str], Tuple[list, list]]:
         """Parse standard requirements from meta file.

         Args:
             folder: path to the folder with python script, meta and artefacts
+            formatted: format it into two strings

         """
         meta = AssistantCLI._load_meta(folder)
@@ -298,15 +299,27 @@ def _parse_requirements(folder: str) -> Tuple[str, str]:
             for k, v in meta.items()
             if k.startswith(AssistantCLI._META_PIP_KEY)
         }
-        pip_args = ['--extra-index-url="https://download.pytorch.org/whl/"' + _RUNTIME_VERSIONS.get("DEVICE")]
+        pip_args = [f'--extra-index-url="https://download.pytorch.org/whl/{_RUNTIME_VERSIONS.get("DEVICE")}"']
         for pip_key in meta_pip_args:
             if not isinstance(meta_pip_args[pip_key], (list, tuple, set)):
                 meta_pip_args[pip_key] = [meta_pip_args[pip_key]]
             for arg in meta_pip_args[pip_key]:
                 arg = arg % _RUNTIME_VERSIONS
                 pip_args.append(f"--{pip_key} {arg}")
+        if formatted:
+            return " ".join([f'"{req}"' for req in requires]), " ".join(pip_args)
+        return list(requires), pip_args

-        return " ".join([f'"{req}"' for req in requires]), " ".join(pip_args)
+    @staticmethod
+    def pip_install(folder: str) -> str:
+        """Print all notebook requirements to be pre-installed in format of requirements file.
+
+        Args:
+            folder: path to the folder with python script, meta and artefacts
+
+        """
+        req, args = AssistantCLI._parse_requirements(folder, formatted=False)
+        return os.linesep.join(req) + os.linesep + os.linesep.join(args)

     @staticmethod
     def _bash_download_data(folder: str) -> List[str]:
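For reference, a minimal standalone sketch of what the new pip-install command assembles. This is not the repository's exact code: the requirement strings, the device string "cu121", and the folder value are hypothetical, while in the real helper they come from the notebook folder's meta file and _RUNTIME_VERSIONS. With formatted=False the parser returns the raw requirement list plus extra pip arguments, and pip_install joins them one per line so the output can be redirected into a requirements-style file.

import os
from typing import List, Tuple, Union

def _parse_requirements(folder: str, formatted: bool = True) -> Union[Tuple[str, str], Tuple[List[str], List[str]]]:
    # Hypothetical stand-in: the real AssistantCLI._parse_requirements reads these
    # values from the meta file inside `folder` (unused in this sketch).
    requires = ["torch >=1.8.1, <2.5", "torchmetrics >=1.0, <1.5"]  # hypothetical values
    pip_args = ['--extra-index-url="https://download.pytorch.org/whl/cu121"']  # DEVICE assumed to be cu121
    if formatted:
        return " ".join([f'"{req}"' for req in requires]), " ".join(pip_args)
    return list(requires), pip_args

def pip_install(folder: str) -> str:
    # Mirrors the new AssistantCLI.pip_install: one requirement or pip argument per line.
    req, args = _parse_requirements(folder, formatted=False)
    return os.linesep.join(req) + os.linesep + os.linesep.join(args)

print(pip_install("course_UvA-DL/some-notebook"))  # folder value is illustrative only

Redirecting that output to notebook.txt and running pip install -r notebook.txt, as the updated pipelines below do, installs the pinned requirements and also honours options such as --extra-index-url, which pip accepts inside requirements files.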

.azure/ipynb-publish.yml
Lines changed: 5 additions & 3 deletions

@@ -143,9 +143,11 @@ jobs:
   - bash: |
       set -e
       pip --version
-      # todo: export requirements for notebooks to file and execute
       # todo: adjust torch ecosystem versions
       pip install -r requirements.txt
+      # export requirements for notebooks to file and execute
+      python .actions/assistant.py pip-install --folder=$(notebook) > notebook.txt
+      pip install -r notebook.txt
     displayName: "Install dependencies"
     timeoutInMinutes: "15"

@@ -156,14 +158,14 @@ jobs:
       python -m papermill --version
     displayName: "Sanity check"

-  - bash: python .actions/assistant.py convert-ipynb $(notebook)
+  - bash: python .actions/assistant.py convert-ipynb --folder=$(notebook)
     displayName: "Generate notebook"
     timeoutInMinutes: "5"

   - bash: |
       set -e
       mkdir $(PATH_DATASETS)
-      python .actions/assistant.py bash-render $(notebook)
+      python .actions/assistant.py bash-render --folder=$(notebook)
       cat .actions/_ipynb-render.sh
       bash .actions/_ipynb-render.sh
       git status
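To try the same install sequence outside Azure Pipelines, a rough local reproduction might look like the sketch below. The notebook folder name is a hypothetical choice; in CI the $(notebook) variable supplied by the pipeline matrix takes its place.

import subprocess

folder = "lightning_examples/finetuning-scheduler"  # hypothetical; CI passes $(notebook) here

# Export the notebook's requirements (plus pip arguments) into a requirements-style file...
out = subprocess.run(
    ["python", ".actions/assistant.py", "pip-install", f"--folder={folder}"],
    capture_output=True, text=True, check=True,
).stdout
with open("notebook.txt", "w") as fh:
    fh.write(out)

# ...and install them, mirroring the `pip install -r notebook.txt` step above.
subprocess.run(["pip", "install", "-r", "notebook.txt"], check=True)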

.azure/ipynb-validate.yml
Lines changed: 4 additions & 3 deletions

@@ -85,7 +85,8 @@ jobs:
       set -e
       pip --version
       pip install -r requirements.txt
-      pip list
+      python .actions/assistant.py pip-install --folder=$(notebook) > notebook.txt
+      pip install -r notebook.txt
     displayName: "Install dependencies"

   - bash: |
@@ -94,13 +95,13 @@ jobs:
       python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu > 0, f'GPU: {mgpu}'"
     displayName: "Sanity check"

-  - bash: python .actions/assistant.py convert-ipynb $(notebook)
+  - bash: python .actions/assistant.py convert-ipynb --folder=$(notebook)
     displayName: "Generate notebook"

   - bash: |
       set -e
       mkdir $(PATH_DATASETS)
-      python .actions/assistant.py bash-validate $(notebook)
+      python .actions/assistant.py bash-validate --folder=$(notebook)
       cat .actions/_ipynb-validate.sh
       bash .actions/_ipynb-validate.sh
     env:

_requirements/devel.txt
Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@ virtualenv>=20.10
 ipython[notebook]>=8.0.0, <8.24.0
 urllib3 # for ipython
 jupytext>=1.10, <1.15 # converting
-papermill>=2.3.4, <2.5.0 # rendering
+papermill ==2.6.* # rendering

 matplotlib

_requirements/docs.txt
Lines changed: 2 additions & 2 deletions

@@ -1,7 +1,7 @@
 sphinx >5.0, <7.0
-myst-parser >=0.18.1, <3.0.0
+myst-parser >=1.0.0, <4.0.0
 nbsphinx >=0.8.5, <0.10
-pandoc >=1.0, <=2.3
+pandoc >=1.0, <2.4
 #docutils >=0.16
 sphinx-paramlinks >=0.5.1, <=0.6.0
 ipython[notebook] >=8.0.0, <8.2.0

course_UvA-DL/requirements.txt
Lines changed: 2 additions & 2 deletions

@@ -1,7 +1,7 @@
 numpy <2.0 # needed for older Torch
-torch >=1.8.1,<2.2
+torch >=1.8.1,<2.5
 pytorch-lightning >=2.0,<2.1
-torchmetrics >=1.0,<1.3
+torchmetrics >=1.0,<1.5
 torchvision
 matplotlib
 seaborn

lightning_examples/finetuning-scheduler/finetuning-scheduler.py
Lines changed: 6 additions & 9 deletions

@@ -554,9 +554,7 @@ def train() -> None:
 # the implicit schedule will limit fine-tuning to just the last 4 parameters of the model, which is only a small fraction
 # of the parameters you'd want to tune for maximum performance. Since the implicit schedule is quite computationally
 # intensive and most useful for exploring model behavior, leaving [max_depth](https://finetuning-scheduler.readthedocs.io/en/stable/api/finetuning_scheduler.fts.html?highlight=max_depth#finetuning_scheduler.fts.FinetuningScheduler.params.max_depth) 1 allows us to demo implicit mode
-# behavior while keeping the computational cost and runtime of this notebook reasonable. To review how a full implicit
-# mode run compares to the ``nofts_baseline`` and ``fts_explicit`` scenarios, please see the the following
-# [tensorboard experiment summary](https://tensorboard.dev/experiment/n7U8XhrzRbmvVzC4SQSpWw/).
+# behavior while keeping the computational cost and runtime of this notebook reasonable.


 # %%
@@ -579,16 +577,15 @@ def train() -> None:
 # %% [markdown]
 # ### Reviewing the Training Results
 #
-# See the [tensorboard experiment summaries](https://tensorboard.dev/experiment/n7U8XhrzRbmvVzC4SQSpWw/) to get a sense
-# of the relative computational and performance tradeoffs associated with these [FinetuningScheduler](https://finetuning-scheduler.readthedocs.io/en/stable/api/finetuning_scheduler.fts.html#finetuning_scheduler.fts.FinetuningScheduler) configurations.
-# The summary compares a full ``fts_implicit`` execution to ``fts_explicit`` and ``nofts_baseline`` scenarios using DDP
+# It's worth considering the relative computational and performance tradeoffs associated with different [FinetuningScheduler](https://finetuning-scheduler.readthedocs.io/en/stable/api/finetuning_scheduler.fts.html#finetuning_scheduler.fts.FinetuningScheduler) configurations.
+# The example below compares ``fts_implicit`` execution to ``fts_explicit`` and ``nofts_baseline`` scenarios using DDP
 # training with 2 GPUs. The full logs/schedules for all three scenarios are available
 # [here](https://drive.google.com/file/d/1LrUcisRLHeJgh_BDOOD_GUBPp5iHAkoR/view?usp=sharing) and the checkpoints
 # produced in the scenarios [here](https://drive.google.com/file/d/1t7myBgcqcZ9ax_IT9QVk-vFH_l_o5UXB/view?usp=sharing)
 # (caution, ~3.5GB).
 #
-# [![fts_explicit_accuracy](fts_explicit_accuracy.png){height="315px" width="492px"}](https://tensorboard.dev/experiment/n7U8XhrzRbmvVzC4SQSpWw/#scalars&_smoothingWeight=0&runSelectionState=eyJmdHNfZXhwbGljaXQiOnRydWUsIm5vZnRzX2Jhc2VsaW5lIjpmYWxzZSwiZnRzX2ltcGxpY2l0IjpmYWxzZX0%3D)
-# [![nofts_baseline](nofts_baseline_accuracy.png){height="316px" width="505px"}](https://tensorboard.dev/experiment/n7U8XhrzRbmvVzC4SQSpWw/#scalars&_smoothingWeight=0&runSelectionState=eyJmdHNfZXhwbGljaXQiOmZhbHNlLCJub2Z0c19iYXNlbGluZSI6dHJ1ZSwiZnRzX2ltcGxpY2l0IjpmYWxzZX0%3D)
+# ![fts_explicit_accuracy](fts_explicit_accuracy.png){height="315px" width="492px"}
+# ![nofts_baseline](nofts_baseline_accuracy.png){height="316px" width="505px"}
 #
 # Note that given execution context differences, there could be a modest variation in performance from the tensorboard summaries generated by this notebook.
 #
@@ -597,7 +594,7 @@ def train() -> None:
 # greater fine-tuning flexibility for model exploration in research. For example, glancing at DeBERTa-v3's implicit training
 # run, a critical tuning transition point is immediately apparent:
 #
-# [![implicit_training_transition](implicit_training_transition.png){height="272px" width="494px"}](https://tensorboard.dev/experiment/n7U8XhrzRbmvVzC4SQSpWw/#scalars&_smoothingWeight=0&runSelectionState=eyJmdHNfZXhwbGljaXQiOmZhbHNlLCJub2Z0c19iYXNlbGluZSI6ZmFsc2UsImZ0c19pbXBsaWNpdCI6dHJ1ZX0%3D)
+# ![implicit_training_transition](implicit_training_transition.png){height="272px" width="494px"}
 #
 # Our `val_loss` begins a precipitous decline at step 3119 which corresponds to phase 17 in the schedule. Referring to our
 # schedule, in phase 17 we're beginning tuning the attention parameters of our 10th encoder layer (of 11). Interesting!

lightning_examples/requirements.txt
Lines changed: 2 additions & 2 deletions

@@ -1,5 +1,5 @@
 numpy <2.0 # needed for older Torch
-torch>=1.8.1, <2.2
+torch>=1.8.1, <2.5
 pytorch-lightning>=2.0, <2.2
-torchmetrics>=1.0, <1.3
+torchmetrics>=1.0, <1.5
 matplotlib
