Update no_grad usage to inference_mode #1164


Merged · 1 commit merged on Jun 4, 2025
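`torch.inference_mode()` is a drop-in replacement for `torch.no_grad()` in pure inference code: it disables gradient tracking in the same way, and additionally skips view and version-counter bookkeeping, so it is typically at least as fast. The trade-off is that tensors created inside it cannot later take part in autograd, which is fine for the evaluation and prediction paths touched here. A minimal sketch of the pattern the updated examples follow (toy model and data, for illustration only):

```python
import torch
import torch.nn as nn

# Toy stand-ins for the real segmentation model and validation batch.
model = nn.Conv2d(3, 1, kernel_size=3, padding=1)
images = torch.rand(4, 3, 64, 64)

model.eval()
with torch.inference_mode():  # previously: with torch.no_grad():
    logits = model(images)

print(logits.requires_grad)  # False -- no autograd graph is recorded
```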
4 changes: 2 additions & 2 deletions examples/binary_segmentation_buildings.py
@@ -269,7 +269,7 @@ def train_and_evaluate_one_epoch(
     # Set the model to evaluation mode
     model.eval()
     val_loss = 0
-    with torch.no_grad():
+    with torch.inference_mode():
         for batch in tqdm(valid_dataloader, desc="Evaluating"):
             images, masks = batch
             images, masks = images.to(device), masks.to(device)
@@ -325,7 +325,7 @@ def test_model(model, output_dir, test_dataloader, loss_fn, device):
     model.eval()
     test_loss = 0
     tp, fp, fn, tn = 0, 0, 0, 0
-    with torch.no_grad():
+    with torch.inference_mode():
         for batch in tqdm(test_dataloader, desc="Evaluating"):
             images, masks = batch
             images, masks = images.to(device), masks.to(device)
2 changes: 1 addition & 1 deletion examples/binary_segmentation_intro.ipynb
@@ -1026,7 +1026,7 @@
 ],
 "source": [
 "batch = next(iter(test_dataloader))\n",
-"with torch.no_grad():\n",
+"with torch.inference_mode():\n",
 " model.eval()\n",
 " logits = model(batch[\"image\"])\n",
 "pr_masks = logits.sigmoid()\n",
2 changes: 1 addition & 1 deletion examples/camvid_segmentation_multiclass.ipynb
@@ -1683,7 +1683,7 @@
 "images, masks = next(iter(test_loader))\n",
 "\n",
 "# Switch the model to evaluation mode\n",
-"with torch.no_grad():\n",
+"with torch.inference_mode():\n",
 " model.eval()\n",
 " logits = model(images) # Get raw logits from the model\n",
 "\n",
2 changes: 1 addition & 1 deletion examples/cars segmentation (camvid).ipynb
@@ -1209,7 +1209,7 @@
 ],
 "source": [
 "images, masks = next(iter(test_loader))\n",
-"with torch.no_grad():\n",
+"with torch.inference_mode():\n",
 " model.eval()\n",
 " logits = model(images)\n",
 "pr_masks = logits.sigmoid()\n",
2 changes: 1 addition & 1 deletion examples/convert_to_onnx.ipynb
@@ -189,7 +189,7 @@
 ],
 "source": [
 "# compute PyTorch output prediction\n",
-"with torch.no_grad():\n",
+"with torch.inference_mode():\n",
 " torch_out = model(sample)\n",
 "\n",
 "# compare ONNX Runtime and PyTorch results\n",
2 changes: 1 addition & 1 deletion examples/dpt_inference_pretrained.ipynb
@@ -70,7 +70,7 @@
 "input_tensor = input_tensor.to(device)\n",
 "\n",
 "# Perform inference\n",
-"with torch.no_grad():\n",
+"with torch.inference_mode():\n",
 " output_mask = model(input_tensor)\n",
 "\n",
 "# Postprocess mask\n",
2 changes: 1 addition & 1 deletion examples/segformer_inference_pretrained.ipynb
@@ -63,7 +63,7 @@
 "input_tensor = input_tensor.to(device)\n",
 "\n",
 "# Perform inference\n",
-"with torch.no_grad():\n",
+"with torch.inference_mode():\n",
 " output_mask = model(input_tensor)\n",
 "\n",
 "# Postprocess mask\n",
2 changes: 1 addition & 1 deletion examples/upernet_inference_pretrained.ipynb
@@ -85,7 +85,7 @@
 "input_tensor = input_tensor.to(device)\n",
 "\n",
 "# Perform inference\n",
-"with torch.no_grad():\n",
+"with torch.inference_mode():\n",
 " output_mask = model(input_tensor)\n",
 "\n",
 "# Postprocess mask\n",
2 changes: 1 addition & 1 deletion misc/generate_test_models.py
@@ -52,7 +52,7 @@ def save_and_push(model, inputs, outputs, model_name, encoder_name):
     torch.manual_seed(423553)
     sample = torch.rand(1, 3, 256, 256)
 
-    with torch.no_grad():
+    with torch.inference_mode():
         output = model(sample)
 
     save_and_push(model, sample, output, model_name, encoder_name)
@@ -107,7 +107,7 @@ def main(args):
     tensor = torch.tensor(normalized_image).permute(2, 0, 1).unsqueeze(0).float()
 
     # Forward pass
-    with torch.no_grad():
+    with torch.inference_mode():
         mask = model(tensor)
 
     # Postprocessing
2 changes: 1 addition & 1 deletion scripts/models-conversions/upernet-hf-to-smp.py
@@ -207,7 +207,7 @@ def convert_model(model_name: str, push_to_hub: bool = False):
     print("Verifying model with test inference...")
     smp_model.eval()
     sample = torch.ones(1, 3, 512, 512)
-    with torch.no_grad():
+    with torch.inference_mode():
         output = smp_model(sample)
     print(f"Test inference successful. Output shape: {output.shape}")
 
4 changes: 2 additions & 2 deletions segmentation_models_pytorch/base/model.py
@@ -74,9 +74,9 @@ def forward(self, x):
 
         return masks
 
-    @torch.no_grad()
+    @torch.inference_mode()
     def predict(self, x):
-        """Inference method. Switch model to `eval` mode, call `.forward(x)` with `torch.no_grad()`
+        """Inference method. Switch model to `eval` mode, call `.forward(x)` with `torch.inference_mode()`
 
         Args:
             x: 4D torch tensor with shape (batch_size, channels, height, width)
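For reference, a short usage sketch of the `predict` helper whose docstring is updated above; the `smp.Unet` constructor arguments are illustrative, and any model built on this base class behaves the same way:

```python
import torch
import segmentation_models_pytorch as smp

model = smp.Unet(encoder_name="resnet34", encoder_weights=None, in_channels=3, classes=1)
x = torch.rand(2, 3, 256, 256)  # (batch_size, channels, height, width)

# predict() switches the model to eval mode and runs forward() under
# torch.inference_mode(), so no autograd state is recorded.
mask = model.predict(x)
print(mask.shape)  # torch.Size([2, 1, 256, 256])
```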
4 changes: 2 additions & 2 deletions segmentation_models_pytorch/metrics/functional.py
@@ -175,7 +175,7 @@ def get_stats(
     return tp, fp, fn, tn
 
 
-@torch.no_grad()
+@torch.inference_mode()
 def _get_stats_multiclass(
     output: torch.LongTensor,
     target: torch.LongTensor,
@@ -221,7 +221,7 @@ def _get_stats_multiclass(
     return tp_count, fp_count, fn_count, tn_count
 
 
-@torch.no_grad()
+@torch.inference_mode()
 def _get_stats_multilabel(
     output: torch.LongTensor, target: torch.LongTensor
 ) -> Tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor, torch.LongTensor]:
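The two decorated helpers above back the public `smp.metrics.get_stats`, so after this change the stat counting itself runs under `torch.inference_mode()`. A hedged usage sketch with random tensors in binary mode:

```python
import torch
import segmentation_models_pytorch as smp

# Random predictions and targets, purely for illustration.
pred = torch.rand(4, 1, 64, 64)                    # probabilities
target = (torch.rand(4, 1, 64, 64) > 0.5).long()   # ground-truth mask

# get_stats dispatches to the decorated helpers, so the counting now runs
# under torch.inference_mode() instead of torch.no_grad().
tp, fp, fn, tn = smp.metrics.get_stats(pred, target, mode="binary", threshold=0.5)
iou = smp.metrics.iou_score(tp, fp, fn, tn, reduction="micro")
print(iou)
```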
2 changes: 1 addition & 1 deletion segmentation_models_pytorch/utils/train.py
@@ -110,7 +110,7 @@ def on_epoch_start(self):
         self.model.eval()
 
     def batch_update(self, x, y):
-        with torch.no_grad():
+        with torch.inference_mode():
             prediction = self.model.forward(x)
             loss = self.loss(prediction, y)
         return loss, prediction