
Commit 5611cf7

kc611 authored and brandonwillard committed
Refactor tests.gpuarray to use NumPy Generator
1 parent 39c1147 commit 5611cf7

File tree

10 files changed: +76 additions, -125 deletions

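The refactor swaps the legacy np.random.RandomState API for the newer np.random.Generator API throughout tests.gpuarray: RandomState(seed).rand(*shape) calls become default_rng(seed).random(shape) calls (the Generator method takes the shape as a single tuple), and the utt.seed_rng() calls that seeded NumPy's global legacy RNG are dropped in favor of explicitly seeded Generator instances. A minimal sketch of the before/after pattern, using a hard-coded placeholder seed instead of the suite's utt.fetch_seed() helper:

import numpy as np

# Before (legacy API): a RandomState instance, typically after global seeding
# (utt.seed_rng() presumably seeded the legacy global RNG via np.random.seed).
legacy_rng = np.random.RandomState(seed=1234)   # 1234 is a placeholder seed
legacy_sample = legacy_rng.rand(5, 4)            # shape passed as separate arguments

# After (this commit): an explicitly seeded, local Generator.
rng = np.random.default_rng(seed=1234)
sample = rng.random((5, 4))                      # shape passed as a single tuple
assert legacy_sample.shape == sample.shape == (5, 4)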

tests/gpuarray/check_dnn_conv.py

Lines changed: 0 additions & 1 deletion
@@ -612,7 +612,6 @@ def get_atol_rtol(self, algo, dtype, precision):
         return None, None

     def __init__(self):
-        utt.seed_rng(1234)
         self.dtype_configs = cudnn.get_supported_dtype_configs(
             check_dtype_config_support
         )

tests/gpuarray/test_basic_ops.py

Lines changed: 24 additions & 27 deletions
@@ -35,14 +35,13 @@
     TestJoinAndSplit,
     TestReshape,
 )
-from tests.tensor.utils import rand, safe_make_node
+from tests.tensor.utils import random, safe_make_node


 pygpu = pytest.importorskip("pygpu")
 gpuarray = pygpu.gpuarray

-utt.seed_rng()
-rng = np.random.RandomState(seed=utt.fetch_seed())
+rng = np.random.default_rng(seed=utt.fetch_seed())


 def inplace_func(
@@ -79,7 +78,7 @@ def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):


 def rand_gpuarray(*shape, **kwargs):
-    r = rng.rand(*shape) * 2 - 1
+    r = rng.random(shape) * 2 - 1
     dtype = kwargs.pop("dtype", aesara.config.floatX)
     cls = kwargs.pop("cls", None)
     if len(kwargs) != 0:
@@ -219,7 +218,7 @@ def test_transfer_cpu_gpu():
     a = fmatrix("a")
     g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

-    av = np.asarray(rng.rand(5, 4), dtype="float32")
+    av = np.asarray(rng.random((5, 4)), dtype="float32")
     gv = gpuarray.array(av, context=get_context(test_ctx_name))

     f = aesara.function([a], GpuFromHost(test_ctx_name)(a))
@@ -236,7 +235,7 @@ def test_transfer_gpu_gpu():
         dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
     )()

-    av = np.asarray(rng.rand(5, 4), dtype="float32")
+    av = np.asarray(rng.random((5, 4)), dtype="float32")
     gv = gpuarray.array(av, context=get_context(test_ctx_name))
     mode = mode_with_gpu.excluding(
         "cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
@@ -256,7 +255,7 @@ def test_transfer_strided():
     a = fmatrix("a")
     g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

-    av = np.asarray(rng.rand(5, 8), dtype="float32")
+    av = np.asarray(rng.random((5, 8)), dtype="float32")
     gv = gpuarray.array(av, context=get_context(test_ctx_name))

     av = av[:, ::2]
@@ -283,14 +282,14 @@ def gpu_alloc_expected(x, *shp):
     op=lambda *args: alloc(*args) + 1,
     gpu_op=GpuAlloc(test_ctx_name),
     cases=dict(
-        correct01=(rand(), np.int32(7)),
+        correct01=(random(), np.int32(7)),
         # just gives a DeepCopyOp with possibly wrong results on the CPU
-        # correct01_bcast=(rand(1), np.int32(7)),
-        correct02=(rand(), np.int32(4), np.int32(7)),
-        correct12=(rand(7), np.int32(4), np.int32(7)),
-        correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
-        correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
-        bad_shape12=(rand(7), np.int32(7), np.int32(5)),
+        # correct01_bcast=(random(1), np.int32(7)),
+        correct02=(random(), np.int32(4), np.int32(7)),
+        correct12=(random(7), np.int32(4), np.int32(7)),
+        correct13=(random(7), np.int32(2), np.int32(4), np.int32(7)),
+        correct23=(random(4, 7), np.int32(2), np.int32(4), np.int32(7)),
+        bad_shape12=(random(7), np.int32(7), np.int32(5)),
     ),
 )

@@ -357,7 +356,7 @@ def test_shape():
 def test_gpu_contiguous():
     a = fmatrix("a")
     i = iscalar("i")
-    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
+    a_val = np.asarray(np.random.random((4, 5)), dtype="float32")
     # The reshape is needed otherwise we make the subtensor on the CPU
     # to transfer less data.
     f = aesara.function(
@@ -390,7 +389,6 @@ def setup_method(self):

 class TestGPUComparison(TestComparison):
     def setup_method(self):
-        utt.seed_rng()
         self.mode = mode_with_gpu
         self.shared = gpuarray_shared_constructor
         self.dtypes = ["float64", "float32"]
@@ -415,8 +413,8 @@ def shared(x, **kwargs):
     def test_gpusplit_opt(self):
         # Test that we move the node to the GPU
         # Also test float16 computation at the same time.
-        rng = np.random.RandomState(seed=utt.fetch_seed())
-        m = self.shared(rng.rand(4, 6).astype("float16"))
+        rng = np.random.default_rng(seed=utt.fetch_seed())
+        m = self.shared(rng.random((4, 6)).astype("float16"))
         o = Split(2)(m, 0, [2, 2])
         assert o[0].dtype == "float16"
         f = aesara.function([], o, mode=self.mode)
@@ -433,9 +431,9 @@ def test_gpusplit_opt(self):


 def test_gpujoin_gpualloc():
     a = fmatrix("a")
-    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
+    a_val = np.asarray(np.random.random((4, 5)), dtype="float32")
     b = fmatrix("b")
-    b_val = np.asarray(np.random.rand(3, 5), dtype="float32")
+    b_val = np.asarray(np.random.random((3, 5)), dtype="float32")

     f = aesara.function(
         [a, b],
@@ -514,9 +512,9 @@ def test_hostfromgpu_shape_i():
     )
     a = fmatrix("a")
     ca = aesara.gpuarray.type.GpuArrayType("float32", (False, False))()
-    av = np.asarray(np.random.rand(5, 4), dtype="float32")
+    av = np.asarray(np.random.random((5, 4)), dtype="float32")
     cv = gpuarray.asarray(
-        np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name)
+        np.random.random((5, 4)), dtype="float32", context=get_context(test_ctx_name)
     )

     f = aesara.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
@@ -583,12 +581,11 @@ def check_u(m, k=0):
         assert result.dtype == np.dtype(dtype)
         assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])

-    utt.seed_rng()
-    test_rng = np.random.RandomState(seed=utt.fetch_seed())
+    test_rng = np.random.default_rng(seed=utt.fetch_seed())

     for dtype in ["float64", "float32", "float16"]:
         # try a big one
-        m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)
+        m = np.asarray(test_rng.random((5000, 5000)) * 2 - 1, dtype=dtype)
         check_l(m, 0)
         check_l(m, 1)
         check_l(m, -1)
@@ -597,7 +594,7 @@ def check_u(m, k=0):
         check_u(m, 1)
         check_u(m, -1)

-        m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype)
+        m = np.asarray(test_rng.random((10, 10)) * 2 - 1, dtype=dtype)
         check_l(m, 0)
         check_l(m, 1)
         check_l(m, -1)
@@ -606,7 +603,7 @@ def check_u(m, k=0):
         check_u(m, 1)
         check_u(m, -1)

-        m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype)
+        m = np.asarray(test_rng.random((10, 5)) * 2 - 1, dtype=dtype)
         check_l(m, 0)
         check_l(m, 1)
         check_l(m, -1)
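
One behavioral detail behind the rand_gpuarray change above: RandomState.rand takes the shape as separate positional arguments, while Generator.random takes a single size tuple, so rng.rand(*shape) becomes rng.random(shape) with no unpacking. A simplified sketch of the updated helper (hypothetical name, without the dtype/context handling of the real function):

import numpy as np

rng = np.random.default_rng(1234)  # placeholder seed; the test module uses utt.fetch_seed()

def rand_values(*shape):
    # Generator.random accepts the shape tuple directly; values are rescaled
    # from [0, 1) to [-1, 1), matching the rand_gpuarray helper in the diff.
    return (rng.random(shape) * 2 - 1).astype(np.float32)

vals = rand_values(5, 4)
assert vals.shape == (5, 4) and vals.min() >= -1.0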

tests/gpuarray/test_blocksparse.py

Lines changed: 0 additions & 1 deletion
@@ -17,7 +17,6 @@

 class TestBlockSparseGemvAndOuterGPUarray(TestBlockSparseGemvAndOuter):
     def setup_method(self):
-        utt.seed_rng()
         self.mode = mode_with_gpu.excluding("constant_folding")
         self.gemv_op = gpu_sparse_block_gemv
         self.outer_op = gpu_sparse_block_outer

tests/gpuarray/test_dnn.py

Lines changed: 2 additions & 27 deletions
@@ -170,7 +170,6 @@ def test_dnn_conv_merge():
 def test_dnn_conv_inplace():
     # This test that we have inplace work correctly even when
     # GpuAllocEmpty get merged together.
-    utt.seed_rng()

     img_shp = [2, 5, 6, 8]
     kern_shp = [3, 5, 5, 6]
@@ -312,7 +311,6 @@ def assert_types(conv):


 def test_pooling():
-    utt.seed_rng()

     modes = get_dnn_pool_modes()

@@ -420,7 +418,6 @@ def fn(x):
 # This test will be run with different values of 'mode'
 # (see next test below).
 def run_pooling_with_tensor_vars(mode):
-    utt.seed_rng()

     x = tensor4()
     ws = aesara.shared(np.array([2, 2], dtype="int32"))
@@ -473,7 +470,6 @@ def test_pooling_with_tensor_vars():
 @pytest.mark.skipif(dnn.version(raises=False) < 3000, reason=dnn.dnn_available.msg)
 def test_pooling3d():
     # 3d pooling requires version 3 or newer.
-    utt.seed_rng()

     # We force the FAST_RUN as we don't want the reference to run in DebugMode.
     mode_without_gpu_ref = aesara.compile.mode.get_mode("FAST_RUN").excluding(
@@ -582,7 +578,6 @@ def fn(x):


 def test_pooling_opt():
-    utt.seed_rng()

     # 2D pooling
     x = matrix()
@@ -654,7 +649,6 @@ def test_pooling_opt():
 def test_pooling_opt_arbitrary_dimensions():
     # test if input with an arbitrary number of non-pooling dimensions
     # is correctly reshaped to run on the GPU
-    utt.seed_rng()

     modes = get_dnn_pool_modes()

@@ -1081,7 +1075,6 @@ def test_dnn_conv_border_mode():


 def test_dnn_conv_alpha_output_merge():
-    utt.seed_rng()

     img = tensor4()
     kern = tensor4()
@@ -1151,7 +1144,6 @@ def test_dnn_conv_alpha_output_merge():


 def test_dnn_conv_grad():
-    utt.seed_rng()

     b = 1
     c = 4
@@ -1261,7 +1253,6 @@ def run_conv_small_batched_vs_multicall(inputs_shape, filters_shape, batch_sub):

     batch_size = inputs_shape[0]

-    utt.seed_rng()
     inputs_val = np.random.random(inputs_shape).astype("float32")
     filters_val = np.random.random(filters_shape).astype("float32")
     # Scale down the input values to prevent very large absolute errors
@@ -1311,8 +1302,6 @@ def test_batched_conv3d_small():


 def test_conv3d_fwd():
-    utt.seed_rng()
-
     def run_conv3d_fwd(
         inputs_shape, filters_shape, subsample, dilation, border_mode, conv_mode
     ):
@@ -1378,8 +1367,6 @@ def run_conv3d_fwd(


 def test_conv3d_bwd():
-    utt.seed_rng()
-
     def run_conv3d_bwd(
         inputs_shape, filters_shape, subsample, dilation, border_mode, conv_mode
     ):
@@ -1819,7 +1806,6 @@ def test_dnn_maxandargmax_opt():


 def test_dnn_batchnorm_train():
-    utt.seed_rng()

     for mode in ("per-activation", "spatial"):
         for vartype in (
@@ -2022,7 +2008,6 @@ def test_dnn_batchnorm_train():

 def test_dnn_batchnorm_train_without_running_averages():
     # compile and run batch_normalization_train without running averages
-    utt.seed_rng()

     x, scale, bias, dy = (
         tensor4("x"),
@@ -2096,7 +2081,6 @@ def test_dnn_batchnorm_train_without_running_averages():
 def test_without_dnn_batchnorm_train_without_running_averages():
     # compile and run batch_normalization_train without running averages
     # But disable cudnn and make sure it run on the GPU.
-    utt.seed_rng()

     x, scale, bias, dy = (
         tensor4("x"),
@@ -2163,7 +2147,6 @@ def test_without_dnn_batchnorm_train_without_running_averages():
 @utt.assertFailure_fast
 def test_dnn_batchnorm_train_inplace():
     # test inplace_running_mean and inplace_running_var
-    utt.seed_rng()

     x, scale, bias = tensor4("x"), tensor4("scale"), tensor4("bias")
     data_shape = (5, 10, 30, 25)
@@ -2218,7 +2201,6 @@ def test_dnn_batchnorm_train_inplace():


 def test_batchnorm_inference():
-    utt.seed_rng()

     for mode in ("per-activation", "spatial"):
         for vartype in (
@@ -2344,7 +2326,6 @@ def test_batchnorm_inference():
 @utt.assertFailure_fast
 def test_batchnorm_inference_inplace():
     # test inplace
-    utt.seed_rng()

     x, scale, bias, mean, var = (
         tensor4(n) for n in ("x", "scale", "bias", "mean", "var")
@@ -2460,7 +2441,6 @@ def test_dnn_batchnorm_valid_and_invalid_axes():


 def test_dnn_rnn_gru():
-    utt.seed_rng()

     # test params
     input_dim = 32
@@ -2569,7 +2549,6 @@ def funcs(out, params, hy=None):


 def test_dnn_rnn_gru_bidi():
-    utt.seed_rng()

     # test params
     input_dim = 32
@@ -2630,7 +2609,6 @@ def funcs(out, params, hy=None):


 def test_dnn_rnn_lstm():
-    utt.seed_rng()

     # test params
     input_dim = 32
@@ -2716,7 +2694,6 @@ def funcs(out, params):


 def test_dnn_rnn_lstm_grad_c():
-    utt.seed_rng()

     # test params
     input_dim = 32
@@ -2819,7 +2796,6 @@ def __init__(self, *args, **kwargs):


 def test_dnn_spatialtf():
-    utt.seed_rng()

     """
     Spatial Transformer implementation using Aesara from Lasagne
@@ -3023,7 +2999,6 @@ def try_theta_shp(theta_shp):


 def test_dnn_spatialtf_grad():
-    utt.seed_rng()

     inputs = tensor4("inputs")
     theta = tensor3("theta")
@@ -3097,7 +3072,7 @@ class TestDnnConv2DRuntimeAlgorithms:
     ]

     def __init__(self):
-        utt.seed_rng()
+
         self.runtime_algorithms = (
             "time_once",
             "guess_once",
@@ -3286,7 +3261,7 @@ class TestDnnConv3DRuntimeAlgorithms(TestDnnConv2DRuntimeAlgorithms):
 def test_conv_guess_once_with_dtypes():
     # This test checks that runtime conv algorithm selection does not raise any exception
     # when consecutive functions with different dtypes and precisions are executed.
-    utt.seed_rng()
+
     inputs_shape = (2, 3, 5, 5)
     filters_shape = (2, 3, 40, 4)
     border_mode = "full"