commit 81089b1 (1 parent: 4224529)
timm/models/efficientformer.py
@@ -67,7 +67,7 @@ def __init__(
         rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
         rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1]
         self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1]))
-        self.register_buffer('attention_bias_idxs', torch.LongTensor(rel_pos))
+        self.register_buffer('attention_bias_idxs', rel_pos)
         self.attention_bias_cache = {}  # per-device attention_biases cache (data-parallel compat)
 
     @torch.no_grad()
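For context, here is a minimal standalone sketch of the relative-position bias indexing this hunk touches. The resolution and num_heads values are illustrative, and the construction of pos (which sits above the visible lines) is an assumption inferred from the code shown, not copied from the commit. Since rel_pos is derived from torch.arange coordinates, it is already an int64 tensor, which appears to be why the redundant torch.LongTensor(...) wrapper is dropped.

    import torch

    # Illustrative values, not taken from the commit.
    resolution = (7, 7)
    num_heads = 8

    # Assumed pos construction: (y, x) coordinates for each of the
    # H*W spatial positions, shape (2, H*W).
    pos = torch.stack(torch.meshgrid(
        torch.arange(resolution[0]),
        torch.arange(resolution[1]),
        indexing='ij')).flatten(1)

    # Pairwise absolute offsets between all positions, shape (2, H*W, H*W).
    rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
    # Collapse (dy, dx) into one flat index per pair, shape (H*W, H*W).
    rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1]

    # torch.arange yields int64, so rel_pos is already a LongTensor here;
    # re-wrapping it as in the removed line would be redundant.
    assert rel_pos.dtype == torch.int64

    # One learnable bias per head per distinct offset ...
    attention_biases = torch.nn.Parameter(
        torch.zeros(num_heads, resolution[0] * resolution[1]))
    # ... gathered through the index buffer into per-pair biases
    # of shape (num_heads, H*W, H*W).
    bias = attention_biases[:, rel_pos]
    print(bias.shape)  # torch.Size([8, 49, 49])

Registering rel_pos directly as a buffer keeps it an ordinary tensor that moves with the module across devices, while the biases themselves stay a trainable Parameter indexed by it.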