
Commit 3e74bcd

Azure Pipelines committed:
Merge remote-tracking branch 'origin/main' into publication
2 parents cc67b8b + db5d275 · commit 3e74bcd

File tree: 7 files changed (+32, -32 lines)

7 files changed

+32
-32
lines changed

.pre-commit-config.yaml

Lines changed: 3 additions & 3 deletions
@@ -23,7 +23,7 @@ repos:
       - id: detect-private-key
 
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.3.0
+    rev: v2.4.1
     hooks:
       - id: codespell
         additional_dependencies: [tomli]
@@ -45,7 +45,7 @@ repos:
         args: ["--print-width=120"]
 
   - repo: https://github.com/executablebooks/mdformat
-    rev: 0.7.21
+    rev: 0.7.22
     hooks:
       - id: mdformat
         additional_dependencies:
@@ -55,7 +55,7 @@ repos:
         args: ["--number"]
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.8.6
+    rev: v0.11.4
     hooks:
       # try to fix what is possible
       - id: ruff
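
The ruff-pre-commit bump from v0.8.6 to v0.11.4 appears to drive most of the notebook changes further down in this commit: recent ruff formatter releases (0.9 and later) also format the expressions inside f-strings and prefer to parenthesize a long assert message rather than split the condition. A minimal sketch of both effects, as an illustration only (the snippet and the file name are hypothetical, not part of the commit):

# Hypothetical snippet showing the formatting a newer `ruff format` tends to apply.
import os

acc = 0.87

# Before: no spaces around the operator inside the f-string replacement field.
print(f"Accuracy of the model: {100.0*acc:4.2f}%")
# After: the embedded expression is formatted like ordinary code.
print(f"Accuracy of the model: {100.0 * acc:4.2f}%")

# Before, the long assert was wrapped by splitting the condition across lines;
# after, the condition stays on one line and the message is parenthesized.
config_file = __file__  # stand-in path so the assertion holds when run as a script
assert os.path.isfile(config_file), (
    f'Could not find the config file "{config_file}".'
)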

_docs/source/conf.py

Lines changed: 1 addition & 1 deletion
@@ -201,7 +201,7 @@
         project + " Documentation",
         author,
         project,
-        "" "Miscellaneous",  # about.__docs__,
+        "Miscellaneous",  # about.__docs__,
     ),
 ]
 
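
The conf.py change removes a leftover implicit string concatenation: adjacent string literals are joined by the Python parser, so the empty "" prefix contributed nothing and "" "Miscellaneous" already evaluated to "Miscellaneous". A tiny illustration (not from the commit):

# Adjacent string literals are concatenated at parse time,
# so the empty prefix literal is a no-op.
assert "" "Miscellaneous" == "Miscellaneous"
print("" "Miscellaneous")  # prints: Miscellaneous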

course_UvA-DL/01-introduction-to-pytorch/notebook.py

Lines changed: 2 additions & 2 deletions
@@ -883,7 +883,7 @@ def train_model(model, optimizer, data_loader, loss_module, num_epochs=100):
 #
 # $$acc = \frac{\#\text{correct predictions}}{\#\text{all predictions}} = \frac{TP+TN}{TP+TN+FP+FN}$$
 #
-# where TP are the true positives, TN true negatives, FP false positives, and FN the fale negatives.
+# where TP are the true positives, TN true negatives, FP false positives, and FN the false negatives.
 #
 # When evaluating the model, we don't need to keep track of the computation graph as we don't intend to calculate the gradients.
 # This reduces the required memory and speed up the model.
@@ -910,7 +910,7 @@ def eval_model(model, data_loader):
             num_preds += data_labels.shape[0]
 
     acc = true_preds / num_preds
-    print(f"Accuracy of the model: {100.0*acc:4.2f}%")
+    print(f"Accuracy of the model: {100.0 * acc:4.2f}%")
 
 
 # %%
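
The markdown cell in the first hunk above defines accuracy as the number of correct predictions over all predictions, (TP+TN)/(TP+TN+FP+FN), and notes that evaluation does not need the computation graph. A self-contained sketch of that evaluation pattern, assuming a binary classifier as in the tutorial (the names and the dummy data are illustrative, not the tutorial's exact code):

import torch

def accuracy(model, data_loader, device="cpu"):
    """Compute acc = #correct predictions / #all predictions without tracking gradients."""
    model.eval()
    true_preds, num_preds = 0, 0
    with torch.no_grad():  # no computation graph -> less memory, faster evaluation
        for data_inputs, data_labels in data_loader:
            data_inputs, data_labels = data_inputs.to(device), data_labels.to(device)
            preds = model(data_inputs).squeeze(dim=1)
            pred_labels = (torch.sigmoid(preds) >= 0.5).long()  # binary decision threshold
            true_preds += (pred_labels == data_labels).sum().item()
            num_preds += data_labels.shape[0]
    acc = true_preds / num_preds
    print(f"Accuracy of the model: {100.0 * acc:4.2f}%")
    return acc

# Dummy usage with a random linear "model" and random labels.
model = torch.nn.Linear(2, 1)
dataset = torch.utils.data.TensorDataset(torch.randn(32, 2), torch.randint(0, 2, (32,)))
accuracy(model, torch.utils.data.DataLoader(dataset, batch_size=8))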

course_UvA-DL/02-activation-functions/notebook.py

Lines changed: 9 additions & 9 deletions
@@ -343,12 +343,12 @@ def load_model(model_path, model_name, net=None):
 
     """
     config_file, model_file = _get_config_file(model_path, model_name), _get_model_file(model_path, model_name)
-    assert os.path.isfile(
-        config_file
-    ), f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
-    assert os.path.isfile(
-        model_file
-    ), f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
+    assert os.path.isfile(config_file), (
+        f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
+    )
+    assert os.path.isfile(model_file), (
+        f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
+    )
     with open(config_file) as f:
         config_dict = json.load(f)
     if net is None:
@@ -548,7 +548,7 @@ def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, over
         ############
         net.train()
         true_preds, count = 0.0, 0
-        for imgs, labels in tqdm(train_loader_local, desc=f"Epoch {epoch+1}", leave=False):
+        for imgs, labels in tqdm(train_loader_local, desc=f"Epoch {epoch + 1}", leave=False):
             imgs, labels = imgs.to(device), labels.to(device)  # To GPU
             optimizer.zero_grad()  # Zero-grad can be placed anywhere before "loss.backward()"
             preds = net(imgs)
@@ -566,7 +566,7 @@ def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, over
         val_acc = test_model(net, val_loader)
         val_scores.append(val_acc)
         print(
-            f"[Epoch {epoch+1:2i}] Training accuracy: {train_acc*100.0:05.2f}%, Validation accuracy: {val_acc*100.0:05.2f}%"
+            f"[Epoch {epoch + 1:2i}] Training accuracy: {train_acc * 100.0:05.2f}%, Validation accuracy: {val_acc * 100.0:05.2f}%"
         )
 
         if len(val_scores) == 1 or val_acc > val_scores[best_val_epoch]:
@@ -587,7 +587,7 @@ def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, over
 
     load_model(CHECKPOINT_PATH, model_name, net=net)
     test_acc = test_model(net, test_loader)
-    print((f" Test accuracy: {test_acc*100.0:4.2f}% ").center(50, "=") + "\n")
+    print((f" Test accuracy: {test_acc * 100.0:4.2f}% ").center(50, "=") + "\n")
     return test_acc
 
 

course_UvA-DL/03-initialization-and-optimization/notebook.py

Lines changed: 12 additions & 12 deletions
@@ -580,19 +580,19 @@ def _get_result_file(model_path, model_name):
 def load_model(model_path, model_name, net=None):
     config_file = _get_config_file(model_path, model_name)
     model_file = _get_model_file(model_path, model_name)
-    assert os.path.isfile(
-        config_file
-    ), f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
-    assert os.path.isfile(
-        model_file
-    ), f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
+    assert os.path.isfile(config_file), (
+        f'Could not find the config file "{config_file}". Are you sure this is the correct path and you have your model config stored here?'
+    )
+    assert os.path.isfile(model_file), (
+        f'Could not find the model file "{model_file}". Are you sure this is the correct path and you have your model stored here?'
+    )
     with open(config_file) as f:
         config_dict = json.load(f)
     if net is None:
         act_fn_name = config_dict["act_fn"].pop("name").lower()
-        assert (
-            act_fn_name in act_fn_by_name
-        ), f'Unknown activation function "{act_fn_name}". Please add it to the "act_fn_by_name" dict.'
+        assert act_fn_name in act_fn_by_name, (
+            f'Unknown activation function "{act_fn_name}". Please add it to the "act_fn_by_name" dict.'
+        )
         act_fn = act_fn_by_name[act_fn_name]()
         net = BaseNetwork(act_fn=act_fn, **config_dict)
     net.load_state_dict(torch.load(model_file))
@@ -678,7 +678,7 @@ def train_model(net, model_name, optim_func, max_epochs=50, batch_size=256, over
     plt.show()
     plt.close()
 
-    print((f" Test accuracy: {results['test_acc']*100.0:4.2f}% ").center(50, "=") + "\n")
+    print((f" Test accuracy: {results['test_acc'] * 100.0:4.2f}% ").center(50, "=") + "\n")
     return results
 
 
@@ -700,7 +700,7 @@ def epoch_iteration(net, loss_module, optimizer, train_loader_local, val_loader,
         # Record statistics during training
        true_preds += (preds.argmax(dim=-1) == labels).sum().item()
        count += labels.shape[0]
-        t.set_description(f"Epoch {epoch+1}: loss={loss.item():4.2f}")
+        t.set_description(f"Epoch {epoch + 1}: loss={loss.item():4.2f}")
         epoch_losses.append(loss.item())
     train_acc = true_preds / count
 
@@ -709,7 +709,7 @@ def epoch_iteration(net, loss_module, optimizer, train_loader_local, val_loader,
     ##############
     val_acc = test_model(net, val_loader)
     print(
-        f"[Epoch {epoch+1:2i}] Training accuracy: {train_acc*100.0:05.2f}%, Validation accuracy: {val_acc*100.0:05.2f}%"
+        f"[Epoch {epoch + 1:2i}] Training accuracy: {train_acc * 100.0:05.2f}%, Validation accuracy: {val_acc * 100.0:05.2f}%"
     )
     return train_acc, val_acc, epoch_losses
 

course_UvA-DL/04-inception-resnet-densenet/notebook.py

Lines changed: 2 additions & 2 deletions
@@ -1183,8 +1183,8 @@ def forward(self, x):
 table = [
     [
         model_name,
-        f"{100.0*model_results['val']:4.2f}%",
-        f"{100.0*model_results['test']:4.2f}%",
+        f"{100.0 * model_results['val']:4.2f}%",
+        f"{100.0 * model_results['test']:4.2f}%",
         f"{sum(np.prod(p.shape) for p in model.parameters()):,}",
     ]
     for model_name, model_results, model in all_models

course_UvA-DL/13-contrastive-learning/SimCLR.py

Lines changed: 3 additions & 3 deletions
@@ -642,7 +642,7 @@ def get_smaller_dataset(original_dataset, num_imgs_per_label):
 plt.show()
 
 for k, score in zip(dataset_sizes, test_scores):
-    print(f"Test accuracy for {k:3d} images per label: {100*score:4.2f}%")
+    print(f"Test accuracy for {k:3d} images per label: {100 * score:4.2f}%")
 
 # %% [markdown]
 # As one would expect, the classification performance improves the more data we have.
@@ -781,8 +781,8 @@ def train_resnet(batch_size, max_epochs=100, **kwargs):
 
 # %%
 resnet_model, resnet_result = train_resnet(batch_size=64, num_classes=10, lr=1e-3, weight_decay=2e-4, max_epochs=100)
-print(f"Accuracy on training set: {100*resnet_result['train']:4.2f}%")
-print(f"Accuracy on test set: {100*resnet_result['test']:4.2f}%")
+print(f"Accuracy on training set: {100 * resnet_result['train']:4.2f}%")
+print(f"Accuracy on test set: {100 * resnet_result['test']:4.2f}%")
 
 # %% [markdown]
 # The ResNet trained from scratch achieves 73.31% on the test set.
