We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 350b35b · commit 9773d24 — Copy full SHA for 9773d24
tests/data/pytorch_neo/code/inference.py
@@ -71,8 +71,8 @@ def model_fn(model_dir):
71
logger.info("model_fn")
72
neopytorch.config(model_dir=model_dir, neo_runtime=True)
73
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
74
- # The compiled model is saved as "compiled.pt"
75
- model = torch.jit.load(os.path.join(model_dir, "compiled.pt"), map_location=device)
+ # The compiled model is saved as "model.pth"
+ model = torch.jit.load(os.path.join(model_dir, "model.pth"), map_location=device)
76
77
# It is recommended to run warm-up inference during model load
78
sample_input_path = os.path.join(model_dir, "sample_input.pkl")
0 commit comments