diff --git a/doc/library/config.rst b/doc/library/config.rst
index 60f0f7e307..80fe090118 100644
--- a/doc/library/config.rst
+++ b/doc/library/config.rst
@@ -103,14 +103,6 @@ import ``pytensor`` and print the config variable, as in:
 
     String value: either ``'cpu'``
 
-.. attribute:: force_device
-
-    Bool value: either ``True`` or ``False``
-
-    Default: ``False``
-
-    This flag's value cannot be modified during the program execution.
-
 .. attribute:: print_active_device
 
     Bool value: either ``True`` or ``False``
@@ -139,16 +131,6 @@ import ``pytensor`` and print the config variable, as in:
     equal to ``float64`` is created. This can be used to help find upcasts
     to ``float64`` in user code.
 
-.. attribute:: deterministic
-
-    String value: either ``'default'``, ``'more'``
-
-    Default: ``'default'``
-
-    If ``more``, sometimes PyTensor will select :class:`Op` implementations that
-    are more "deterministic", but slower. See the ``dnn.conv.algo*``
-    flags for more cases.
-
 .. attribute:: allow_gc
 
     Bool value: either ``True`` or ``False``
@@ -373,7 +355,7 @@ import ``pytensor`` and print the config variable, as in:
     When ``True``, ignore the first call to an PyTensor
     function while profiling.
 
-.. attribute:: config.lib__amblibm
+.. attribute:: config.lib__amdlibm
 
     Bool value: either ``True`` or ``False``
 
@@ -412,16 +394,6 @@ import ``pytensor`` and print the config variable, as in:
     ignore it (i.e. ``'ignore'``). We suggest never using ``'ignore'``
     except during testing.
 
-.. attribute:: assert_no_cpu_op
-
-    String value: ``'ignore'`` or ``'warn'`` or ``'raise'`` or ``'pdb'``
-
-    Default: ``'ignore'``
-
-    If there is a CPU :class:`Op` in the computational graph, depending on its value,
-    this flag can either raise a warning, an exception or drop into the frame
-    with ``pdb``.
-
 .. attribute:: on_shape_error
 
     String value: ``'warn'`` or ``'raise'``
@@ -797,18 +769,3 @@ import ``pytensor`` and print the config variable, as in:
     The verbosity level of the meta-rewriter: ``0`` for silent, ``1`` to
     only warn when PyTensor cannot meta-rewrite an :class:`Op`, ``2`` for
     full output (e.g. timings and the rewrites selected).
-
-
-.. attribute:: config.metaopt__optimizer_excluding
-
-    Default: ``""``
-
-    A list of rewrite tags that we don't want included in the meta-rewriter.
-    Multiple tags are separate by ``':'``.
-
-.. attribute:: config.metaopt__optimizer_including
-
-    Default: ``""``
-
-    A list of rewriter tags to be included during meta-rewriting.
-    Multiple tags are separate by ``':'``.
diff --git a/pytensor/compile/profiling.py b/pytensor/compile/profiling.py
index a361ac5087..9d93431753 100644
--- a/pytensor/compile/profiling.py
+++ b/pytensor/compile/profiling.py
@@ -1566,26 +1566,26 @@ def exp_float32_op(op):
             printed_tip = True
 
         # tip 2
-        if not config.lib__amblibm and any(
+        if not config.lib__amdlibm and any(
             amdlibm_speed_up(a.op) for (fgraph, a) in self.apply_time
         ):
             print(
                 "  - Try installing amdlibm and set the PyTensor flag "
-                "lib__amblibm=True. This speeds up only some Elemwise "
+                "lib__amdlibm=True. This speeds up only some Elemwise "
                 "operation.",
                 file=file,
             )
             printed_tip = True
 
         # tip 3
-        if not config.lib__amblibm and any(
+        if not config.lib__amdlibm and any(
             exp_float32_op(a.op) and a.inputs[0].dtype == "float32"
             for (fgraph, a) in self.apply_time
         ):
             print(
                 "  - With the default gcc libm, exp in float32 is slower "
                 "than in float64! Try PyTensor flag floatX=float64, or "
-                "install amdlibm and set the pytensor flags lib__amblibm=True",
+                "install amdlibm and set the pytensor flags lib__amdlibm=True",
                 file=file,
             )
             printed_tip = True
diff --git a/pytensor/configdefaults.py b/pytensor/configdefaults.py
index f3a8b4a146..0353c58fcd 100644
--- a/pytensor/configdefaults.py
+++ b/pytensor/configdefaults.py
@@ -258,14 +258,6 @@ def add_basic_configvars():
             # was expected, so it is currently not available.
             # numpy,
         ),
-    )
-
-    config.add(
-        "deterministic",
-        "If `more`, sometimes we will select some implementation that "
-        "are more deterministic, but slower. Also see "
-        "the dnn.conv.algo* flags to cover more cases.",
-        EnumStr("default", ["more"]),
         in_c_key=False,
     )
 
@@ -276,13 +268,6 @@ def add_basic_configvars():
         in_c_key=False,
     )
 
-    config.add(
-        "force_device",
-        "Raise an error if we can't use the specified device",
-        BoolParam(False, mutable=False),
-        in_c_key=False,
-    )
-
     config.add(
         "conv__assert_shape",
         "If True, AbstractConv* ops will verify that user-provided"
@@ -299,14 +284,6 @@ def add_basic_configvars():
         in_c_key=False,
     )
 
-    # This flag determines whether or not to raise error/warning message if
-    # there is a CPU Op in the computational graph.
-    config.add(
-        "assert_no_cpu_op",
-        "Raise an error/warning if there is a CPU op in the computational graph.",
-        EnumStr("ignore", ["warn", "raise", "pdb"], mutable=True),
-        in_c_key=False,
-    )
     config.add(
         "unpickle_function",
         (
@@ -394,23 +371,11 @@ def add_compile_configvars():
 
     if rc == 0 and config.cxx != "":
         # Keep the default linker the same as the one for the mode FAST_RUN
-        config.add(
-            "linker",
-            "Default linker used if the pytensor flags mode is Mode",
-            EnumStr(
-                "cvm", ["c|py", "py", "c", "c|py_nogc", "vm", "vm_nogc", "cvm_nogc"]
-            ),
-            in_c_key=False,
-        )
+        linker_options = ["c|py", "py", "c", "c|py_nogc", "vm", "vm_nogc", "cvm_nogc"]
     else:
         # g++ is not present or the user disabled it,
         # linker should default to python only.
-        config.add(
-            "linker",
-            "Default linker used if the pytensor flags mode is Mode",
-            EnumStr("vm", ["py", "vm_nogc"]),
-            in_c_key=False,
-        )
+        linker_options = ["py", "vm_nogc"]
 
     if type(config).cxx.is_default:
         # If the user provided an empty value for cxx, do not warn.
@@ -420,6 +385,13 @@ def add_compile_configvars():
             "To remove this warning, set PyTensor flags cxx to an empty string."
         )
 
+    config.add(
+        "linker",
+        "Default linker used if the pytensor flags mode is Mode",
+        EnumStr("cvm", linker_options),
+        in_c_key=False,
+    )
+
     # Keep the default value the same as the one for the mode FAST_RUN
     config.add(
         "allow_gc",
@@ -570,7 +542,7 @@ def add_tensor_configvars():
 
     # http://developer.amd.com/CPU/LIBRARIES/LIBM/Pages/default.aspx
     config.add(
-        "lib__amblibm",
+        "lib__amdlibm",
         "Use amd's amdlibm numerical library",
         BoolParam(False),
         # Added elsewhere in the c key only when needed.
@@ -609,10 +581,6 @@ def add_traceback_configvars():
     )
 
 
-def add_experimental_configvars():
-    return
-
-
 def add_error_and_warning_configvars():
     ###
     # To disable some warning about old bug that are fixed now.
@@ -1043,20 +1011,6 @@ def add_metaopt_configvars():
         in_c_key=False,
     )
 
-    config.add(
-        "metaopt__optimizer_excluding",
-        ("exclude optimizers with these tags. Separate tags with ':'."),
-        StrParam(""),
-        in_c_key=False,
-    )
-
-    config.add(
-        "metaopt__optimizer_including",
-        ("include optimizers with these tags. Separate tags with ':'."),
-        StrParam(""),
-        in_c_key=False,
-    )
-
 
 def add_vm_configvars():
     config.add(
@@ -1295,55 +1249,6 @@ def add_caching_dir_configvars():
     )
 
 
-# Those are the options provided by PyTensor to choose algorithms at runtime.
-SUPPORTED_DNN_CONV_ALGO_RUNTIME = (
-    "guess_once",
-    "guess_on_shape_change",
-    "time_once",
-    "time_on_shape_change",
-)
-
-# Those are the supported algorithm by PyTensor,
-# The tests will reference those lists.
-SUPPORTED_DNN_CONV_ALGO_FWD = (
-    "small",
-    "none",
-    "large",
-    "fft",
-    "fft_tiling",
-    "winograd",
-    "winograd_non_fused",
-    *SUPPORTED_DNN_CONV_ALGO_RUNTIME,
-)
-
-SUPPORTED_DNN_CONV_ALGO_BWD_DATA = (
-    "none",
-    "deterministic",
-    "fft",
-    "fft_tiling",
-    "winograd",
-    "winograd_non_fused",
-    *SUPPORTED_DNN_CONV_ALGO_RUNTIME,
-)
-
-SUPPORTED_DNN_CONV_ALGO_BWD_FILTER = (
-    "none",
-    "deterministic",
-    "fft",
-    "small",
-    "winograd_non_fused",
-    "fft_tiling",
-    *SUPPORTED_DNN_CONV_ALGO_RUNTIME,
-)
-
-SUPPORTED_DNN_CONV_PRECISION = (
-    "as_input_f32",
-    "as_input",
-    "float16",
-    "float32",
-    "float64",
-)
-
 # Eventually, the instance of `PyTensorConfigParser` should be created right here,
 # where it is also populated with settings.
 config = _create_default_config()
@@ -1353,7 +1258,6 @@ def add_caching_dir_configvars():
 add_compile_configvars()
 add_tensor_configvars()
 add_traceback_configvars()
-add_experimental_configvars()
 add_error_and_warning_configvars()
 add_testvalue_and_checking_configvars()
 add_multiprocessing_configvars()
diff --git a/pytensor/configparser.py b/pytensor/configparser.py
index 1656558668..e587782e40 100644
--- a/pytensor/configparser.py
+++ b/pytensor/configparser.py
@@ -32,11 +32,7 @@ class ConfigAccessViolation(AttributeError):
 
 
 class _ChangeFlagsDecorator:
-    def __init__(self, *args, _root=None, **kwargs):
-        # the old API supported passing a dict as the first argument:
-        if args:
-            assert len(args) == 1 and isinstance(args[0], dict)
-            kwargs = dict(**args[0], **kwargs)
+    def __init__(self, _root=None, **kwargs):
         self.confs = {k: _root._config_var_dict[k] for k in kwargs}
         self.new_vals = kwargs
         self._root = _root
@@ -75,6 +71,7 @@ class PyTensorConfigParser:
     pickle_test_value: bool
     cast_policy: str
     device: str
+    conv__assert_shape: bool
     print_global_stats: bool
     unpickle_function: bool
     # add_compile_configvars
@@ -86,6 +83,7 @@ class PyTensorConfigParser:
     optimizer_verbose: bool
     on_opt_error: str
     nocleanup: bool
+    on_unused_input: str
     gcc__cxxflags: str
     cmodule__warn_no_version: bool
     cmodule__remove_gxx_opt: bool
@@ -93,15 +91,15 @@ class PyTensorConfigParser:
     cmodule__preload_cache: bool
     cmodule__age_thresh_use: int
     cmodule__debug: bool
+    compile__wait: int
     compile__timeout: int
     # add_tensor_configvars
     tensor__cmp_sloppy: int
-    lib__amblibm: bool
+    lib__amdlibm: bool
     tensor__insert_inplace_optimizer_validate_nb: int
     # add_traceback_configvars
     traceback__limit: int
     traceback__compile_limit: int
-    # add_experimental_configvars
     # add_error_and_warning_configvars
     warn__ignore_bug_before: int
     exception_verbosity: str
@@ -143,6 +141,7 @@ class PyTensorConfigParser:
     optdb__max_use_ratio: float
     cycle_detection: str
     check_stack_trace: str
+    # add_metaopt_configvars
     metaopt__verbose: int
     # add_vm_configvars
     profile: bool
@@ -177,7 +176,6 @@ def __init__(
         self._pytensor_cfg = pytensor_cfg
         self._pytensor_raw_cfg = pytensor_raw_cfg
         self._config_var_dict: dict = {}
-        super().__init__()
 
     def __str__(self, print_doc=True):
         sio = StringIO()
@@ -212,9 +210,7 @@ def get_config_hash(self):
             )
         )
 
-    def add(
-        self, name: str, doc: str, configparam: "ConfigParam", in_c_key: bool = True
-    ):
+    def add(self, name: str, doc: str, configparam: "ConfigParam", in_c_key: bool):
         """Add a new variable to PyTensorConfigParser.
 
         This method performs some of the work of initializing `ConfigParam` instances.
@@ -281,7 +277,7 @@ def fetch_val_for_key(self, key, delete_key: bool = False):
 
         The (decreasing) priority order is:
         - PYTENSOR_FLAGS
-        - ~./pytensorrc
+        - ~/.pytensorrc
 
         """
 
@@ -310,14 +306,14 @@ def fetch_val_for_key(self, key, delete_key: bool = False):
         except (NoOptionError, NoSectionError):
             raise KeyError(key)
 
-    def change_flags(self, *args, **kwargs) -> _ChangeFlagsDecorator:
+    def change_flags(self, **kwargs) -> _ChangeFlagsDecorator:
         """
         Use this as a decorator or context manager to change the value of
         PyTensor config variables.
 
         Useful during tests.
         """
-        return _ChangeFlagsDecorator(*args, _root=self, **kwargs)
+        return _ChangeFlagsDecorator(_root=self, **kwargs)
 
     def warn_unused_flags(self):
         for key in self._flags_dict:
@@ -375,7 +371,6 @@ def __init__(
         # more appropriate user-provided default value.
         # Calling `filter` here may actually be harmful if the default value is
        # invalid and causes a crash or has unwanted side effects.
-        super().__init__()
 
     @property
     def default(self):
@@ -543,22 +538,6 @@ def __str__(self):
         return f"{self.name} ({self.default})"
 
 
-class ContextsParam(ConfigParam):
-    def __init__(self):
-        super().__init__("", apply=self._apply, mutable=False)
-
-    def _apply(self, val):
-        if val == "":
-            return val
-        for v in val.split(";"):
-            s = v.split("->")
-            if len(s) != 2:
-                raise ValueError(f"Malformed context map: {v}")
-            if s[0] == "cpu" or s[0].startswith("cuda") or s[0].startswith("opencl"):
-                raise ValueError(f"Cannot use {s[0]} as context name")
-        return val
-
-
 def parse_config_string(
     config_string: str, issue_warnings: bool = True
 ) -> dict[str, str]:
diff --git a/pytensor/scalar/basic.py b/pytensor/scalar/basic.py
index 763323cdb2..d4c41d5cb5 100644
--- a/pytensor/scalar/basic.py
+++ b/pytensor/scalar/basic.py
@@ -356,18 +356,18 @@ def c_headers(self, c_compiler=None, **kwargs):
         # we declare them here and they will be re-used by TensorType
         l.append("<math.h>")
         l.append("<cmath>")
-        if config.lib__amblibm and c_compiler.supports_amdlibm:
+        if config.lib__amdlibm and c_compiler.supports_amdlibm:
             l += ["<amdlibm.h>"]
         return l
 
     def c_libraries(self, c_compiler=None, **kwargs):
         l = []
-        if config.lib__amblibm and c_compiler and c_compiler.supports_amdlibm:
+        if config.lib__amdlibm and c_compiler and c_compiler.supports_amdlibm:
             l += ["amdlibm"]
         return l
 
     def c_compile_args(self, c_compiler=None, **kwargs):
-        if config.lib__amblibm and c_compiler and c_compiler.supports_amdlibm:
+        if config.lib__amdlibm and c_compiler and c_compiler.supports_amdlibm:
             return ["-DREPLACE_WITH_AMDLIBM"]
         else:
             return []
@@ -1245,7 +1245,7 @@ class UnaryScalarOp(ScalarOp):
     def c_code_contiguous(self, node, name, inputs, outputs, sub):
         (x,) = inputs
         (z,) = outputs
-        if not config.lib__amblibm or node.inputs[0].type != node.outputs[0].type:
+        if not config.lib__amdlibm or node.inputs[0].type != node.outputs[0].type:
             raise MethodNotDefined()
 
         dtype = node.inputs[0].type.dtype_specs()[1]
@@ -1260,7 +1260,7 @@ def c_code_contiguous(self, node, name, inputs, outputs, sub):
         """
 
     def c_code_contiguous_raw(self, dtype, n, i, o):
-        if not config.lib__amblibm:
+        if not config.lib__amdlibm:
             raise MethodNotDefined()
         if dtype.startswith("npy_"):
             dtype = dtype[4:]
@@ -2296,7 +2296,7 @@ def L_op(self, inputs, outputs, gout):
     def c_code_contiguous(self, node, name, inputs, outputs, sub):
         (x, y) = inputs
         (z,) = outputs
-        if not config.lib__amblibm:
+        if not config.lib__amdlibm:
             raise MethodNotDefined()
 
         # We compare the dtype AND the broadcast flag
diff --git a/tests/link/c/test_type.py b/tests/link/c/test_type.py
index 84287e1607..0ebd249bf4 100644
--- a/tests/link/c/test_type.py
+++ b/tests/link/c/test_type.py
@@ -287,6 +287,6 @@ def test_op_with_cenumtype(self):
         assert val_billion == val_million * 1000
         assert val_two_billions == val_billion * 2
 
-    @pytensor.config.change_flags(**{"cmodule__debug": True})
+    @pytensor.config.change_flags(cmodule__debug=True)
     def test_op_with_cenumtype_debug(self):
         self.test_op_with_cenumtype()
diff --git a/tests/tensor/test_blas.py b/tests/tensor/test_blas.py
index c2479edba9..34a1d1bcf9 100644
--- a/tests/tensor/test_blas.py
+++ b/tests/tensor/test_blas.py
@@ -514,7 +514,7 @@ def compute_ref(
         C = self.get_value(C, transpose_C, slice_C)
         return alpha * np.dot(A, B) + beta * C
 
-    @config.change_flags({"blas__ldflags": ""})
+    @config.change_flags(blas__ldflags="")
     def run_gemm(
         self,
         dtype,
diff --git a/tests/test_config.py b/tests/test_config.py
index 65705c6988..47a4e24035 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -98,6 +98,7 @@ def test_config_hash():
         "test__config_hash",
         "A config var from a test case.",
         configparser.StrParam("test_default"),
+        in_c_key=True,
     )
 
     h0 = root.get_config_hash()
@@ -160,13 +161,14 @@ def test_config_context():
         "test__config_context",
         "A config var from a test case.",
         configparser.StrParam("test_default"),
+        in_c_key=False,
     )
     assert hasattr(root, "test__config_context")
     assert root.test__config_context == "test_default"
 
     with root.change_flags(test__config_context="new_value"):
         assert root.test__config_context == "new_value"
-        with root.change_flags({"test__config_context": "new_value2"}):
+        with root.change_flags(test__config_context="new_value2"):
             assert root.test__config_context == "new_value2"
         assert root.test__config_context == "new_value"
     assert root.test__config_context == "test_default"
@@ -181,6 +183,7 @@ def test_invalid_configvar_access():
         "test__on_test_instance",
         "This config setting was added to the test instance.",
        configparser.IntParam(5),
+        in_c_key=False,
     )
     assert hasattr(root_test, "test__on_test_instance")
     # While the property _actually_ exists on all instances,
@@ -197,6 +200,7 @@ def test_invalid_configvar_access():
             "test__on_test_instance",
             "This config setting was already added to another instance.",
             configparser.IntParam(5),
+            in_c_key=False,
         )
 
 
@@ -248,6 +252,7 @@ def test_config_pickling():
         "test__lambda_kills_pickling",
         "Lambda functions cause pickling problems.",
         configparser.IntParam(5, lambda i: i > 0),
+        in_c_key=False,
     )
     with pytest.raises(AttributeError, match="Can't pickle local object"):
         pickle.dump(root, io.BytesIO())
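
Migration note for downstream code: after this patch, `change_flags` accepts keyword arguments only (the dict-as-first-positional-argument form is removed) and `PyTensorConfigParser.add` requires `in_c_key` to be passed explicitly. A minimal sketch of the updated call patterns follows; the flag name `example__flag` and its doc string are purely illustrative, not part of the patch.

    import pytensor
    from pytensor.configparser import StrParam

    # change_flags is keyword-only now; it still works both as a
    # context manager and as a decorator.
    with pytensor.config.change_flags(cmodule__debug=True):
        pass  # code that should run with the temporary flag value

    # add() no longer defaults in_c_key to True; callers must state it.
    pytensor.config.add(
        "example__flag",  # hypothetical flag name, for illustration only
        "An example configuration variable.",
        StrParam("default_value"),
        in_c_key=False,
    )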