Commit 1ff4b9d

kc611 authored and brandonwillard committed
Refactor tests.tensor to use NumPy Generator
1 parent 8044a41 commit 1ff4b9d

30 files changed: +1395 -1417 lines
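
The refactor swaps the legacy np.random.RandomState interface for NumPy's Generator API throughout the tests. A minimal sketch of the mapping applied in these diffs (the seed 3423489 is the one used in speed_test_conv.py; the shapes are illustrative):

    import numpy as np

    # Legacy interface (before this commit): dimensions passed positionally to
    # rand(), or a shape passed to random_sample().
    rs = np.random.RandomState(3423489)
    img_old = rs.rand(4, 3, 32, 32)
    w_old = rs.random_sample((5, 3, 2, 2))

    # Generator interface (after this commit): a single random() method that
    # takes the shape as a tuple (or any array-like of ints).
    rng = np.random.default_rng(3423489)
    img_new = rng.random((4, 3, 32, 32))
    w_new = rng.random((5, 3, 2, 2))

Note that the two interfaces use different default bit generators (MT19937 vs. PCG64), so the same seed does not reproduce the same draws across them.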

tests/tensor/nnet/speed_test_conv.py

Lines changed: 5 additions & 5 deletions
@@ -37,7 +37,7 @@ def flip(kern, kshp):
     return flip


-global_rng = np.random.RandomState(3423489)
+global_rng = np.random.default_rng(3423489)


 dmatrix4 = TensorType("float64", (False, False, False, False))

@@ -64,7 +64,7 @@ def exec_multilayer_conv_nnet_old(
     img = dmatrix()

     # build actual input images
-    imgval = global_rng.rand(bsize, imshp[0], imshp[1], imshp[2])
+    imgval = global_rng.random((bsize, imshp[0], imshp[1], imshp[2]))

     a = dmatrix()
     kerns = [a for i in nkerns]

@@ -82,7 +82,7 @@ def exec_multilayer_conv_nnet_old(
         print(conv_mode, ss, n_layer, kshp, nkern)

         # actual values
-        w = global_rng.random_sample(np.r_[nkern, imshp[0], kshp])
+        w = global_rng.random((np.r_[nkern, imshp[0], kshp]))
         w_flip = flip(w, kshp).reshape(w.shape)

         # manual implementation

@@ -193,7 +193,7 @@ def exec_multilayer_conv_nnet(
     img = dmatrix()

     # build actual input images
-    imgval = global_rng.rand(bsize, imshp[0], imshp[1], imshp[2])
+    imgval = global_rng.random((bsize, imshp[0], imshp[1], imshp[2]))

     a = dmatrix()
     kerns = [a for i in nkerns]

@@ -211,7 +211,7 @@ def exec_multilayer_conv_nnet(
         print(conv_mode, ss, n_layer, kshp, nkern)

         # actual values
-        w = global_rng.random_sample(np.r_[nkern, imshp[0], kshp])
+        w = global_rng.random((np.r_[nkern, imshp[0], kshp]))
         w_flip = flip(w, kshp).reshape(w.shape)

         outshp = np.hstack(
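
In the speed test above, the kernel shape is built with np.r_, which concatenates scalars and tuples into a single integer array that Generator.random accepts as a shape. A small illustration with made-up sizes:

    import numpy as np

    rng = np.random.default_rng(3423489)

    nkern, n_in_channels, kshp = 5, 3, (2, 2)
    # np.r_[5, 3, (2, 2)] -> array([5, 3, 2, 2]), usable directly as a shape
    w = rng.random(np.r_[nkern, n_in_channels, kshp])
    print(w.shape)  # (5, 3, 2, 2)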

tests/tensor/nnet/test_abstract_conv.py

Lines changed: 15 additions & 15 deletions
@@ -1903,7 +1903,7 @@ def test_interface(self):
 )
 class TestConv2dGrads:
     def setup_method(self):
-        self.random_stream = np.random.RandomState(utt.fetch_seed())
+        self.random_stream = np.random.default_rng(utt.fetch_seed())

         self.inputs_shapes = [(8, 1, 12, 12), (1, 1, 5, 5), (1, 1, 5, 6), (1, 1, 6, 6)]
         self.filters_shapes = [(5, 1, 2, 2), (1, 1, 3, 3)]

@@ -1928,12 +1928,12 @@ def test_conv2d_grad_wrt_inputs(self):
             for bm in self.border_modes:
                 for ss in self.subsamples:
                     for ff in self.filter_flip:
-                        input_val = self.random_stream.random_sample(in_shape).astype(
+                        input_val = self.random_stream.random(in_shape).astype(
+                            config.floatX
+                        )
+                        filter_val = self.random_stream.random(fltr_shape).astype(
                             config.floatX
                         )
-                        filter_val = self.random_stream.random_sample(
-                            fltr_shape
-                        ).astype(config.floatX)
                         out_grad_shape = (
                             aesara.tensor.nnet.abstract_conv.get_conv_output_shape(
                                 image_shape=in_shape,

@@ -1942,9 +1942,9 @@ def test_conv2d_grad_wrt_inputs(self):
                                 subsample=ss,
                             )
                         )
-                        out_grad_val = self.random_stream.random_sample(
-                            out_grad_shape
-                        ).astype(config.floatX)
+                        out_grad_val = self.random_stream.random(out_grad_shape).astype(
+                            config.floatX
+                        )
                         conv_out = aesara.tensor.nnet.conv2d(
                             self.x,
                             filters=self.w,

@@ -1994,12 +1994,12 @@ def test_conv2d_grad_wrt_weights(self):
             for bm in self.border_modes:
                 for ss in self.subsamples:
                     for ff in self.filter_flip:
-                        input_val = self.random_stream.random_sample(in_shape).astype(
+                        input_val = self.random_stream.random(in_shape).astype(
+                            config.floatX
+                        )
+                        filter_val = self.random_stream.random(fltr_shape).astype(
                             config.floatX
                         )
-                        filter_val = self.random_stream.random_sample(
-                            fltr_shape
-                        ).astype(config.floatX)
                         out_grad_shape = (
                             aesara.tensor.nnet.abstract_conv.get_conv_output_shape(
                                 image_shape=in_shape,

@@ -2008,9 +2008,9 @@ def test_conv2d_grad_wrt_weights(self):
                                 subsample=ss,
                             )
                         )
-                        out_grad_val = self.random_stream.random_sample(
-                            out_grad_shape
-                        ).astype(config.floatX)
+                        out_grad_val = self.random_stream.random(out_grad_shape).astype(
+                            config.floatX
+                        )
                         conv_out = aesara.tensor.nnet.conv2d(
                             self.x,
                             filters=self.w,
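
The setup above seeds one Generator per test via utt.fetch_seed() and draws float arrays cast to config.floatX. A rough standalone sketch of that pattern, with a hard-coded seed and dtype standing in for the aesara helpers, using one of the input shapes from the test:

    import numpy as np

    seed = 42             # stand-in for utt.fetch_seed()
    floatX = "float32"    # stand-in for config.floatX

    rng = np.random.default_rng(seed)
    in_shape = (8, 1, 12, 12)  # first entry of self.inputs_shapes
    input_val = rng.random(in_shape).astype(floatX)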
