
Commit ef85489

brandonwillard authored and twiecki committed
Rename theano.tensor.nnet.nnet to theano.tensor.nnet.basic
1 parent 7783552 commit ef85489

File tree (10 files changed, +25 −25 lines changed)

  doc/library/tensor/nnet/nnet.txt → doc/library/tensor/nnet/basic.txt
  tests/tensor/nnet/test_nnet.py → tests/tensor/nnet/test_basic.py
  tests/tensor/test_mlp.py
  theano/compile/profiling.py
  theano/configdefaults.py
  theano/gpuarray/dnn_opt.py
  theano/gpuarray/opt.py
  theano/link/jax/jax_dispatch.py
  theano/tensor/nnet/__init__.py
  theano/tensor/nnet/nnet.py → theano/tensor/nnet/basic.py
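For downstream code that imported from the old module path, the rename amounts to an import-path update. A minimal sketch, mirroring the import changes made in this commit (the names shown are ones actually moved here):

    # Before this commit:
    #   from theano.tensor.nnet.nnet import CrossentropySoftmax1HotWithBiasDx, softmax
    # After this commit, the same names live in the renamed submodule:
    from theano.tensor.nnet.basic import CrossentropySoftmax1HotWithBiasDx, softmax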

doc/library/tensor/nnet/nnet.txt renamed to doc/library/tensor/nnet/basic.txt

Lines changed: 3 additions & 3 deletions
@@ -1,10 +1,10 @@
-.. _libdoc_tensor_nnet_nnet:
+.. _libdoc_tensor_nnet_basic:
 
 ======================================================
-:mod:`nnet` -- Ops for neural networks
+:mod:`basic` -- Basic Ops for neural networks
 ======================================================
 
-.. module:: theano.tensor.nnet.nnet
+.. module:: theano.tensor.nnet.basic
    :platform: Unix, Windows
    :synopsis: Ops for neural networks
 .. moduleauthor:: LISA

tests/tensor/nnet/test_nnet.py renamed to tests/tensor/nnet/test_basic.py

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@
 from theano.graph.opt import check_stack_trace
 from theano.tensor.basic import Argmax
 from theano.tensor.elemwise import CAReduce, DimShuffle, Elemwise
-from theano.tensor.nnet.nnet import (
+from theano.tensor.nnet.basic import (
     CrossentropyCategorical1Hot,
     CrossentropyCategorical1HotGrad,
     CrossentropySoftmax1HotWithBiasDx,

tests/tensor/test_mlp.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 
 import theano
 import theano.tensor as tt
-from theano.tensor.nnet.nnet import CrossentropySoftmax1HotWithBiasDx, softmax
+from theano.tensor.nnet.basic import CrossentropySoftmax1HotWithBiasDx, softmax
 from theano.tensor.type import ivector, lscalar, matrix
 
 
theano/compile/profiling.py

Lines changed: 1 addition & 1 deletion
@@ -1696,7 +1696,7 @@ def exp_float32_op(op):
         # tip 7
         import theano.gpuarray
         import theano.tensor.signal.pool as pool
-        from theano.tensor.nnet.nnet import LogSoftmax
+        from theano.tensor.nnet.basic import LogSoftmax
 
         for (fgraph, a) in self.apply_time:
             node = a

theano/configdefaults.py

Lines changed: 1 addition & 1 deletion
@@ -1465,7 +1465,7 @@ def add_deprecated_configvars():
         "warn__argmax_pushdown_bug",
         (
             "Warn if in past version of Theano we generated a bug with the "
-            "theano.tensor.nnet.nnet.local_argmax_pushdown optimization. "
+            "theano.tensor.nnet.basic.local_argmax_pushdown optimization. "
             "Was fixed 27 may 2010"
         ),
         BoolParam(_warn_default("0.3")),

theano/gpuarray/dnn_opt.py

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@
     assert_conv_shape,
     get_conv_output_shape,
 )
-from theano.tensor.nnet.nnet import LogSoftmax, SoftmaxGrad
+from theano.tensor.nnet.basic import LogSoftmax, SoftmaxGrad
 from theano.tensor.signal.pool import AveragePoolGrad, MaxPoolGrad, Pool
 
 
theano/gpuarray/opt.py

Lines changed: 12 additions & 12 deletions
@@ -1480,9 +1480,9 @@ def local_gpua_tri(fgraph, op, context_name, inputs, outputs):
 
 
 @register_opt("fast_compile")
-@op_lifter([theano.tensor.nnet.nnet.CrossentropySoftmaxArgmax1HotWithBias])
+@op_lifter([theano.tensor.nnet.basic.CrossentropySoftmaxArgmax1HotWithBias])
 @register_opt2(
-    [theano.tensor.nnet.nnet.CrossentropySoftmaxArgmax1HotWithBias], "fast_compile"
+    [theano.tensor.nnet.basic.CrossentropySoftmaxArgmax1HotWithBias], "fast_compile"
 )
 def local_gpua_crossentropysoftmaxargmax1hotwithbias(
     fgraph, op, context_name, inputs, outputs
@@ -1491,9 +1491,9 @@ def local_gpua_crossentropysoftmaxargmax1hotwithbias(
 
 
 @register_opt("fast_compile")
-@op_lifter([theano.tensor.nnet.nnet.CrossentropySoftmax1HotWithBiasDx])
+@op_lifter([theano.tensor.nnet.basic.CrossentropySoftmax1HotWithBiasDx])
 @register_opt2(
-    [theano.tensor.nnet.nnet.CrossentropySoftmax1HotWithBiasDx], "fast_compile"
+    [theano.tensor.nnet.basic.CrossentropySoftmax1HotWithBiasDx], "fast_compile"
 )
 def local_gpua_crossentropysoftmax1hotwithbiasdx(
     fgraph, op, context_name, inputs, outputs
@@ -1502,22 +1502,22 @@ def local_gpua_crossentropysoftmax1hotwithbiasdx(
 
 
 @register_opt("fast_compile")
-@op_lifter([theano.tensor.nnet.nnet.Softmax])
-@register_opt2([theano.tensor.nnet.nnet.Softmax], "fast_compile")
+@op_lifter([theano.tensor.nnet.basic.Softmax])
+@register_opt2([theano.tensor.nnet.basic.Softmax], "fast_compile")
 def local_gpua_softmax(fgraph, op, context_name, inputs, outputs):
     return gpu_softmax
 
 
 @register_opt("fast_compile")
-@op_lifter([theano.tensor.nnet.nnet.SoftmaxWithBias])
-@register_opt2([theano.tensor.nnet.nnet.SoftmaxWithBias], "fast_compile")
+@op_lifter([theano.tensor.nnet.basic.SoftmaxWithBias])
+@register_opt2([theano.tensor.nnet.basic.SoftmaxWithBias], "fast_compile")
 def local_gpua_softmaxwithbias(fgraph, op, context_name, inputs, outputs):
     return gpu_softmax_with_bias
 
 
 @register_opt("fast_compile")
-@op_lifter([theano.tensor.nnet.nnet.CrossentropyCategorical1Hot])
-@register_opt2([theano.tensor.nnet.nnet.CrossentropyCategorical1Hot], "fast_compile")
+@op_lifter([theano.tensor.nnet.basic.CrossentropyCategorical1Hot])
+@register_opt2([theano.tensor.nnet.basic.CrossentropyCategorical1Hot], "fast_compile")
 def local_gpu_crossentropycategorical1hot(fgraph, op, context_name, inputs, outputs):
     # There is no corresponding GPU Op, but we can express it as:
     #     coding, one_of_n = inputs
@@ -1528,9 +1528,9 @@ def local_gpu_crossentropycategorical1hot(fgraph, op, context_name, inputs, outp
 
 
 @register_opt("fast_compile")
-@op_lifter([theano.tensor.nnet.nnet.CrossentropyCategorical1HotGrad])
+@op_lifter([theano.tensor.nnet.basic.CrossentropyCategorical1HotGrad])
 @register_opt2(
-    [theano.tensor.nnet.nnet.CrossentropyCategorical1HotGrad], "fast_compile"
+    [theano.tensor.nnet.basic.CrossentropyCategorical1HotGrad], "fast_compile"
 )
 def local_gpu_crossentropycategorical1hotgrad(
     fgraph, op, context_name, inputs, outputs

theano/link/jax/jax_dispatch.py

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@
     QRFull,
     QRIncomplete,
 )
-from theano.tensor.nnet.nnet import Softmax
+from theano.tensor.nnet.basic import Softmax
 from theano.tensor.nnet.sigm import ScalarSoftplus
 from theano.tensor.opt import MakeVector
 from theano.tensor.slinalg import Cholesky, Solve

theano/tensor/nnet/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -7,8 +7,7 @@
     conv3d,
     separable_conv2d,
 )
-from theano.tensor.nnet.bn import batch_normalization
-from theano.tensor.nnet.nnet import (
+from theano.tensor.nnet.basic import (
     binary_crossentropy,
     categorical_crossentropy,
     confusion_matrix,
@@ -41,6 +40,7 @@
     softmax_with_bias,
     softsign,
 )
+from theano.tensor.nnet.bn import batch_normalization
 from theano.tensor.nnet.sigm import (
     hard_sigmoid,
     scalar_sigmoid,
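Because theano/tensor/nnet/__init__.py (above) still re-exports the moved names at the package level, imports that go through the package rather than the renamed submodule should keep working unchanged. A hedged sketch, using names taken from the re-export list in this diff:

    # Package-level imports are unaffected by the nnet -> basic rename,
    # since __init__.py re-exports these names from the new submodule.
    from theano.tensor.nnet import binary_crossentropy, categorical_crossentropy, softmax_with_bias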

theano/tensor/nnet/nnet.py renamed to theano/tensor/nnet/basic.py

Lines changed: 2 additions & 2 deletions
@@ -926,7 +926,7 @@ def local_softmax_with_bias(fgraph, node):
 
         if sm_bias.type == node.outputs[0].type:
             # This condition is not always true. See the test
-            # nnet/tests/test_nnet.py:T_SoftmaxWithBias.test_broadcast
+            # nnet/tests/test_basic.py:T_SoftmaxWithBias.test_broadcast
             return [sm_bias]
 
 
@@ -1672,7 +1672,7 @@ def local_argmax_pushdown(fgraph, node):
         )
     ):
         if config.warn__argmax_pushdown_bug:
-            logging.getLogger("theano.tensor.nnet.nnet").warn(
+            logging.getLogger("theano.tensor.nnet.basic").warn(
                 "There was a bug in Theano fixed on May 27th, 2010 in this case."
                 " I.E. when we take the max of a softplus, softmax, exp, "
                 "log, tanh, sigmoid, softmax_with_bias op, we were doing "
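A side effect of the second hunk above: the argmax-pushdown warning is now emitted on the logger named after the new module path, so logging configuration keyed on the old name no longer catches it. A minimal, illustrative sketch (the level choice here is an assumption, not part of the commit):

    import logging

    # Silence the May-2010 argmax-pushdown warning on the renamed logger;
    # a handler or level set on "theano.tensor.nnet.nnet" would no longer match.
    logging.getLogger("theano.tensor.nnet.basic").setLevel(logging.ERROR)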
