
Commit 320f66a

Remove unused config options
1 parent eaf05be commit 320f66a

3 files changed: +0 -182 lines changed


doc/library/config.rst

-104 lines
@@ -510,116 +510,12 @@ import ``pytensor`` and print the config variable, as in:
 
     Removing these asserts can speed up execution.
 
-.. attribute:: config.dnn__enabled
-
-    String value: ``'auto'``, ``'True'``, ``'False'``, ``'no_check'``
-
-    Default: ``'auto'``
-
-    If ``'auto'``, automatically detect and use
-    `cuDNN <https://developer.nvidia.com/cudnn>`_ when it is available.
-    If cuDNN is unavailable, do not raise an error.
-
-    If ``'True'``, require the use of cuDNN. If cuDNN is unavailable, raise an error.
-
-    If ``'False'``, neither use cuDNN nor check if it is available.
-
-    If ``'no_check'``, assume cuDNN is present and that the versions between the
-    header and library match.
-
-.. attribute:: config.dnn__include_path
-
-    Default: ``include`` sub-folder in CUDA root directory, or headers paths defined for the compiler.
-
-    Location of the cuDNN header.
-
-.. attribute:: config.dnn__library_path
-
-    Default: Library sub-folder (``lib64`` on Linux) in CUDA root directory, or
-    libraries paths defined for the compiler.
-
-    Location of the cuDNN library.
-
 .. attribute:: config.conv__assert_shape
 
     If ``True``, ``AbstractConv*`` :class:`Op`\s will verify that user-provided shapes
     match the run-time shapes. This is a debugging option, and may slow down
     compilation.
 
-.. attribute:: config.dnn.conv.workmem
-
-    Deprecated, use :attr:`config.dnn__conv__algo_fwd`.
-
-
-.. attribute:: config.dnn.conv.workmem_bwd
-
-    Deprecated, use :attr:`config.dnn__conv__algo_bwd_filter` and
-    :attr:`config.dnn__conv__algo_bwd_data` instead.
-
-.. attribute:: config.dnn__conv__algo_fwd
-
-    String value:
-    ``'small'``, ``'none'``, ``'large'``, ``'fft'``, ``'fft_tiling'``,
-    ``'winograd'``, ``'winograd_non_fused'``, ``'guess_once'``, ``'guess_on_shape_change'``,
-    ``'time_once'``, ``'time_on_shape_change'``.
-
-    Default: ``'small'``
-
-    3d convolution only supports ``'none'``, ``'small'``, ``'fft_tiling'``, ``'guess_once'``,
-    ``'guess_on_shape_change'``, ``'time_once'``, ``'time_on_shape_change'``.
-
-.. attribute:: config.dnn.conv.algo_bwd
-
-    Deprecated, use :attr:`config.dnn__conv__algo_bwd_filter` and
-    :attr:`config.dnn__conv__algo_bwd_data` instead.
-
-.. attribute:: config.dnn__conv__algo_bwd_filter
-
-    String value:
-    ``'none'``, ``'deterministic'``, ``'fft'``, ``'small'``, ``'winograd_non_fused'``, ``'fft_tiling'``, ``'guess_once'``,
-    ``'guess_on_shape_change'``, ``'time_once'``, ``'time_on_shape_change'``.
-
-    Default: ``'none'``
-
-    3d convolution only supports ``'none'``, ``'small'``, ``'guess_once'``,
-    ``'guess_on_shape_change'``, ``'time_once'``, ``'time_on_shape_change'``.
-
-.. attribute:: config.dnn__conv__algo_bwd_data
-
-    String value:
-    ``'none'``, ``'deterministic'``, ``'fft'``, ``'fft_tiling'``, ``'winograd'``,
-    ``'winograd_non_fused'``, ``'guess_once'``, ``'guess_on_shape_change'``, ``'time_once'``,
-    ``'time_on_shape_change'``.
-
-    Default: ``'none'``
-
-    3d convolution only supports ``'none'``, ``'deterministic'``, ``'fft_tiling'``,
-    ``'guess_once'``, ``'guess_on_shape_change'``, ``'time_once'``,
-    ``'time_on_shape_change'``.
-
-.. attribute:: config.magma__enabled
-
-    String value: ``'True'``, ``'False'``
-
-    Default: ``'False'``
-
-    If ``'True'``, use `magma <http://icl.cs.utk.edu/magma/>`_ for matrix
-    computations.
-
-    If ``'False'``, disable magma.
-
-.. attribute:: config.magma__include_path
-
-    Default: ``''``
-
-    Location of the magma headers.
-
-.. attribute:: config.magma__library_path
-
-    Default: ``''``
-
-    Location of the magma library.
-
 .. attribute:: config.ctc__root
 
     Default: ``''``
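
The options that survive this file are still read the way the surrounding documentation describes: import ``pytensor`` and print the config variable. A minimal sketch, using only options untouched by this commit (``mode`` and ``device``); the ``PYTENSOR_FLAGS`` override mentioned in the comment is the usual mechanism but is an assumption here, not something shown in the diff:

    import pytensor

    # Options registered at import time are exposed as attributes of
    # `pytensor.config`; printing one shows its effective value.
    print(pytensor.config.mode)
    print(pytensor.config.device)

    # Values can typically be overridden before import, e.g. via the
    # PYTENSOR_FLAGS environment variable: PYTENSOR_FLAGS="mode=FAST_COMPILE"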

pytensor/configdefaults.py

-65 lines
@@ -34,62 +34,6 @@
 _logger = logging.getLogger("pytensor.configdefaults")
 
 
-def get_cuda_root() -> Path | None:
-    # We look for the cuda path since we need headers from there
-    if (v := os.getenv("CUDA_ROOT")) is not None:
-        return Path(v)
-    if (v := os.getenv("CUDA_PATH")) is not None:
-        return Path(v)
-    if (s := os.getenv("PATH")) is None:
-        return Path()
-    for dir in s.split(os.pathsep):
-        if (Path(dir) / "nvcc").exists():
-            return Path(dir).absolute().parent
-    return None
-
-
-def default_cuda_include() -> Path | None:
-    if config.cuda__root:
-        return config.cuda__root / "include"
-    return None
-
-
-def default_dnn_base_path() -> Path | None:
-    # We want to default to the cuda root if cudnn is installed there
-    if config.cuda__root:
-        # The include doesn't change location between OS.
-        if (config.cuda__root / "include/cudnn.h").exists():
-            return config.cuda__root
-    return None
-
-
-def default_dnn_inc_path() -> Path | None:
-    if config.dnn__base_path:
-        return config.dnn__base_path / "include"
-    return None
-
-
-def default_dnn_lib_path() -> Path | None:
-    if config.dnn__base_path:
-        if sys.platform == "win32":
-            path = config.dnn__base_path / "lib/x64"
-        elif sys.platform == "darwin":
-            path = config.dnn__base_path / "lib"
-        else:
-            # This is linux
-            path = config.dnn__base_path / "lib64"
-        return path
-    return None
-
-
-def default_dnn_bin_path() -> Path | None:
-    if config.dnn__base_path:
-        if sys.platform == "win32":
-            return config.dnn__base_path / "bin"
-        return config.dnn__library_path
-    return None
-
-
 def _filter_mode(val):
     # Do not add FAST_RUN_NOGC to this list (nor any other ALL CAPS shortcut).
     # The way to get FAST_RUN_NOGC is with the flag 'linker=c|py_nogc'.
@@ -607,15 +551,6 @@ def add_compile_configvars():
         in_c_key=False,
     )
 
-    config.add(
-        "ctc__root",
-        "Directory which contains the root of Baidu CTC library. It is assumed \
-        that the compiled library is either inside the build, lib or lib64 \
-        subdirectory, and the header inside the include directory.",
-        StrParam("", mutable=False),
-        in_c_key=False,
-    )
-
 
 def _is_valid_cmp_sloppy(v):
     return v in (0, 1, 2)
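
For context, every option deleted in this commit was originally registered through the same ``config.add`` pattern that the removed ``ctc__root`` block shows: a name, a help string, a parameter object with a default, and an ``in_c_key`` flag. A minimal sketch of that pattern with a hypothetical option name (``example__root`` is not a real PyTensor option, and the import paths are assumptions inferred from this diff's usage, not verified):

    from pytensor.configdefaults import config
    from pytensor.configparser import StrParam

    # Hypothetical registration, mirroring the removed ctc__root block.
    # in_c_key=False means the value is not part of the C compilation cache key.
    config.add(
        "example__root",
        "Directory containing a hypothetical external library.",
        StrParam("", mutable=False),
        in_c_key=False,
    )

    print(config.example__root)  # "" until overridden

Removing a ``config.add`` call is what actually retires an option; the deletions in ``configparser.py`` below only keep the static type hints in sync.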

pytensor/configparser.py

-13 lines
@@ -74,12 +74,8 @@ class PyTensorConfigParser:
     warn_float64: str
     pickle_test_value: bool
     cast_policy: str
-    deterministic: str
     device: str
-    force_device: bool
-    conv__assert_shape: bool
     print_global_stats: bool
-    assert_no_cpu_op: str
     unpickle_function: bool
     # add_compile_configvars
     mode: str
@@ -90,17 +86,14 @@ class PyTensorConfigParser:
     optimizer_verbose: bool
     on_opt_error: str
     nocleanup: bool
-    on_unused_import: str
     gcc__cxxflags: str
     cmodule__warn_no_version: bool
     cmodule__remove_gxx_opt: bool
     cmodule__compilation_warning: bool
     cmodule__preload_cache: bool
     cmodule__age_thresh_use: int
     cmodule__debug: bool
-    compile__wait: int
     compile__timeout: int
-    ctc__root: str
     # add_tensor_configvars
     tensor__cmp_sloppy: int
     lib__amblibm: bool
@@ -151,8 +144,6 @@ class PyTensorConfigParser:
     cycle_detection: str
     check_stack_trace: str
     metaopt__verbose: int
-    metaopt__optimizer_excluding: str
-    metaopt__optimizer_including: str
     # add_vm_configvars
     profile: bool
     profile_optimizer: bool
@@ -175,10 +166,6 @@ class PyTensorConfigParser:
     # add_blas_configvars
     blas__ldflags: str
     blas__check_openmp: bool
-    # add CUDA (?)
-    cuda__root: Path | None
-    dnn__base_path: Path | None
-    dnn__library_path: Path | None
 
     def __init__(
         self,
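
The lines removed in this file are class-level type annotations only: they declare names for static type checkers, while the actual attributes are created at runtime by the corresponding ``config.add`` calls (deleted in ``configdefaults.py`` above). A small illustrative sketch of that distinction, using a made-up ``Demo`` class rather than ``PyTensorConfigParser`` itself:

    class Demo:
        # A bare annotation creates no attribute at runtime; it only tells
        # type checkers that the name may exist.
        registered: str

        def add(self, name: str, value: str) -> None:
            # Runtime registration is what actually creates the attribute,
            # loosely mirroring how config.add populates the config object.
            setattr(self, name, value)


    d = Demo()
    d.add("registered", "ok")
    print(d.registered)           # "ok"
    print(hasattr(d, "retired"))  # False: never registered at runtime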
