@@ -1,7 +1,7 @@
 import theano.tensor as tt

 from ..model import FreeRV
-from ..theanof import gradient
+from ..theanof import gradient, floatX
 from . import distribution
 from ..math import logit, invlogit
 from .distribution import draw_values

@@ -139,7 +139,7 @@ def forward(self, x):
     def forward_val(self, x, point=None):
         a, b = draw_values([self.a, self.b],
                            point=point)
-        return tt.log(x - a) - tt.log(b - x)
+        return floatX(tt.log(x - a) - tt.log(b - x))

     def jacobian_det(self, x):
         s = tt.nnet.softplus(-x)

@@ -168,7 +168,7 @@ def forward(self, x):
     def forward_val(self, x, point=None):
         a = draw_values([self.a],
                         point=point)[0]
-        return tt.log(x - a)
+        return floatX(tt.log(x - a))

     def jacobian_det(self, x):
         return x

@@ -196,7 +196,7 @@ def forward(self, x):
     def forward_val(self, x, point=None):
         b = draw_values([self.b],
                         point=point)[0]
-        return tt.log(b - x)
+        return floatX(tt.log(b - x))

     def jacobian_det(self, x):
         return x
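
Note (context, not part of the commit): each `forward_val` computes on concrete values returned by `draw_values(..., point=point)`, so when `theano.config.floatX` is `'float32'` the intermediate `tt.log(...)` expressions can come back as float64 and cause dtype mismatches downstream; wrapping the return value in `floatX` casts it back to the configured precision. A minimal sketch of what a `floatX` helper along the lines of the one imported from `..theanof` could look like (the actual implementation in `theanof.py` may differ):

```python
import numpy as np
import theano

def floatX(X):
    """Cast X to theano.config.floatX (e.g. 'float32' or 'float64')."""
    try:
        # numpy arrays and theano tensors both expose .astype.
        return X.astype(theano.config.floatX)
    except AttributeError:
        # Python scalars and lists do not; go through numpy instead.
        return np.asarray(X, dtype=theano.config.floatX)
```

For example, with `theano.config.floatX = 'float32'`, `floatX(np.log(2.0))` comes back as float32, whereas the bare `np.log(2.0)` would be float64.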