
Update Ruff to v0.11.13 #1462

Closed · wants to merge 2 commits
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -27,7 +27,7 @@ repos:
- id: sphinx-lint
args: ["."]
- repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: v0.7.3
+  rev: v0.11.13
hooks:
- id: ruff
args: ["--fix", "--output-format=full"]
2 changes: 1 addition & 1 deletion pytensor/bin/pytensor_cache.py
@@ -24,7 +24,7 @@

def print_help(exit_status):
if exit_status:
print(f"command \"{' '.join(sys.argv)}\" not recognized")
print(f'command "{" ".join(sys.argv)}" not recognized')
Member commented:
Don't you still need different quotes inside?

Suggested change
- print(f'command "{" ".join(sys.argv)}" not recognized')
+ print(f'command "{' '.join(sys.argv)}" not recognized')

Contributor (author) replied:
Yeah, these are automatic changes; I need to investigate.
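The disagreement above comes down to Python versions: reusing the same quote character inside an f-string replacement field is only legal on Python 3.12+ (PEP 701), while on earlier versions the inner quotes must differ from the outer ones, which is presumably why Ruff flipped the outer quotes rather than nesting. A minimal sketch (the `argv` list is a stand-in for `sys.argv`):

```python
argv = ["pytensor-cache", "clear"]  # stand-in for sys.argv

# Portable on all Python 3 versions: inner quotes differ from the outer ones.
msg = f'command "{" ".join(argv)}" not recognized'
print(msg)  # -> command "pytensor-cache clear" not recognized

# Python 3.12+ only (PEP 701): the same quote character may be reused
# inside the replacement field, as in the suggested change above:
#     f'command "{' '.join(argv)}" not recognized'
```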

print('Type "pytensor-cache" to print the cache location')
print('Type "pytensor-cache help" to print this help')
print('Type "pytensor-cache clear" to erase the cache')
2 changes: 1 addition & 1 deletion pytensor/compile/function/__init__.py
@@ -12,7 +12,7 @@
from pytensor.graph import Variable


__all__ = ["types", "pfunc"]
__all__ = ["pfunc", "types"]

__docformat__ = "restructuredtext en"
_logger = logging.getLogger("pytensor.compile.function")
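This reordering looks like Ruff's RUF022 rule (unsorted-dunder-all), which keeps `__all__` sorted; the exported API is unchanged. A quick sanity check (the `before`/`after` names are illustrative):

```python
before = ["types", "pfunc"]  # original, unsorted
after = ["pfunc", "types"]   # what the rule rewrites it to

# Only the order changes; the set of exported names is identical.
assert sorted(before) == after
assert set(before) == set(after)
```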
3 changes: 1 addition & 2 deletions pytensor/compile/function/pfunc.py
@@ -328,8 +328,7 @@ def clone_inputs(i):
cloned_outputs = [] # TODO: get Function.__call__ to return None
else:
raise TypeError(
"output must be an PyTensor Variable or Out "
"instance (or list of them)",
"output must be an PyTensor Variable or Out instance (or list of them)",
outputs,
)
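Most of the message rewrites in this PR follow the same pattern: adjacent string literals are concatenated at compile time, so joining them into a single literal (which newer Ruff versions appear to do when the result fits within the line length) is purely cosmetic. For example (the "an PyTensor" typo is preserved from the source):

```python
# Implicit concatenation: two adjacent literals become one string at
# compile time, so both spellings are byte-for-byte identical.
split = (
    "output must be an PyTensor Variable or Out "
    "instance (or list of them)"
)
joined = "output must be an PyTensor Variable or Out instance (or list of them)"
assert split == joined
```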

2 changes: 1 addition & 1 deletion pytensor/compile/function/types.py
@@ -216,7 +216,7 @@ def add_supervisor_to_fgraph(
input
for spec, input in zip(input_specs, fgraph.inputs, strict=True)
if not (
- spec.mutable or has_destroy_handler and fgraph.has_destroyers([input])
+ spec.mutable or (has_destroy_handler and fgraph.has_destroyers([input]))
)
)
)
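The added parentheses appear to be Ruff's RUF021 fix (parenthesize-chained-operators). Since `and` binds more tightly than `or`, the expression already parsed this way; the parentheses only make the grouping explicit. A quick exhaustive check:

```python
from itertools import product

# `a or b and c` parses as `a or (b and c)` -- verified over all inputs.
for a, b, c in product((False, True), repeat=3):
    assert (a or b and c) == (a or (b and c))
```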
3 changes: 1 addition & 2 deletions pytensor/compile/monitormode.py
@@ -45,8 +45,7 @@ def __init__(
optimizer = config.optimizer
if linker is not None and not isinstance(linker.mode, MonitorMode):
raise Exception(
"MonitorMode can only use its own linker! You "
"should not provide one.",
"MonitorMode can only use its own linker! You should not provide one.",
linker,
)

37 changes: 15 additions & 22 deletions pytensor/compile/profiling.py
@@ -935,18 +935,14 @@ def count_running_memory(order, fgraph, nodes_mem, ignore_dmap=False):
if dmap and idx2 in dmap:
vidx = dmap[idx2]
assert len(vidx) == 1, (
"Here we only support the "
"possibility to destroy one "
"input"
"Here we only support the possibility to destroy one input"
)
ins = node.inputs[vidx[0]]
if vmap and idx2 in vmap:
assert ins is None
vidx = vmap[idx2]
assert len(vidx) == 1, (
"Here we only support the "
"possibility to view one "
"input"
"Here we only support the possibility to view one input"
)
ins = node.inputs[vidx[0]]
if ins is not None:
@@ -1093,9 +1089,7 @@ def min_memory_generator(executable_nodes, viewed_by, view_of):
assert ins is None
vidx = vmap[idx]
assert len(vidx) == 1, (
"Here we only support "
"the possibility to "
"view one input"
"Here we only support the possibility to view one input"
)
ins = node.inputs[vidx[0]]
if ins is not None:
@@ -1304,22 +1298,22 @@ def print_stats(stats1, stats2):

print(
(
f" CPU: {int(round(new_max_running_max_memory_size[1] / 1024.0))}KB "
f"({int(round(max_running_max_memory_size[1] / 1024.0))}KB)"
f" CPU: {round(new_max_running_max_memory_size[1] / 1024.0)}KB "
f"({round(max_running_max_memory_size[1] / 1024.0)}KB)"
),
file=file,
)
print(
(
f" GPU: {int(round(new_max_running_max_memory_size[2] / 1024.0))}KB "
f"({int(round(max_running_max_memory_size[2] / 1024.0))}KB)"
f" GPU: {round(new_max_running_max_memory_size[2] / 1024.0)}KB "
f"({round(max_running_max_memory_size[2] / 1024.0)}KB)"
),
file=file,
)
print(
(
f" CPU + GPU: {int(round(new_max_running_max_memory_size[0] / 1024.0))}KB "
f"({int(round(max_running_max_memory_size[0] / 1024.0))}KB)"
f" CPU + GPU: {round(new_max_running_max_memory_size[0] / 1024.0)}KB "
f"({round(max_running_max_memory_size[0] / 1024.0)}KB)"
),
file=file,
)
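Dropping the `int(...)` wrapper throughout this file is safe because in Python 3, `round()` with a single argument already returns an `int`; Ruff flags the double conversion (likely as RUF046, unnecessary-cast-to-int). A small check:

```python
kb = 2560.0 / 1024.0  # 2.5

# Single-argument round() returns an int in Python 3, so int(round(x))
# performs a redundant second conversion.
assert isinstance(round(kb), int)
assert round(kb) == int(round(kb))
```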
@@ -1340,23 +1334,23 @@ def print_stats(stats1, stats2):
file=file,
)
print(
f" CPU: {int(round(new_max_node_memory_size[1] / 1024.0))}KB",
f" CPU: {round(new_max_node_memory_size[1] / 1024.0)}KB",
file=file,
)
print(
f" GPU: {int(round(new_max_node_memory_size[2] / 1024.0))}KB",
f" GPU: {round(new_max_node_memory_size[2] / 1024.0)}KB",
file=file,
)
print(
f" CPU + GPU: {int(round(new_max_node_memory_size[0] / 1024.0))}KB",
f" CPU + GPU: {round(new_max_node_memory_size[0] / 1024.0)}KB",
file=file,
)
print("---", file=file)

if min_max_peak:
print(
" Minimum peak from all valid apply node order is "
f"{int(round(min_max_peak / 1024.0))}KB(took {min_peak_time:3f}s to compute)",
f"{round(min_max_peak / 1024.0)}KB(took {min_peak_time:3f}s to compute)",
file=file,
)

@@ -1405,7 +1399,7 @@ def print_stats(stats1, stats2):
print(
(
f" ... (remaining {max(0, len(node_mem) - N)} Apply account for "
f"{sum_remaining:4d}B/{size_sum_dense :d}B ({p}) of the"
f"{sum_remaining:4d}B/{size_sum_dense:d}B ({p}) of the"
" Apply with dense outputs sizes)"
),
file=file,
@@ -1545,8 +1539,7 @@ def amdlibm_speed_up(op):
return True
elif s_op.__class__ not in scalar_op_amdlibm_no_speed_up:
print(
"We don't know if amdlibm will accelerate "
"this scalar op.",
"We don't know if amdlibm will accelerate this scalar op.",
s_op,
file=file,
)
3 changes: 1 addition & 2 deletions pytensor/configdefaults.py
@@ -67,8 +67,7 @@ def _warn_cxx(val):
"""We only support clang++ as otherwise we hit strange g++/OSX bugs."""
if sys.platform == "darwin" and val and "clang++" not in val:
_logger.warning(
"Only clang++ is supported. With g++,"
" we end up with strange g++/OSX bugs."
"Only clang++ is supported. With g++, we end up with strange g++/OSX bugs."
)
return True

9 changes: 4 additions & 5 deletions pytensor/gradient.py
@@ -624,8 +624,7 @@ def grad(

if cost is not None and isinstance(cost.type, NullType):
raise ValueError(
"Can't differentiate a NaN cost. "
f"Cost is NaN because {cost.type.why_null}"
f"Can't differentiate a NaN cost. Cost is NaN because {cost.type.why_null}"
)

if cost is not None and cost.type.ndim != 0:
@@ -2181,9 +2180,9 @@ def hessian(cost, wrt, consider_constant=None, disconnected_inputs="raise"):
sequences=pytensor.tensor.arange(expr.shape[0]),
non_sequences=[expr, input],
)
- assert (
-     not updates
- ), "Scan has returned a list of updates; this should not happen."
+ assert not updates, (
+     "Scan has returned a list of updates; this should not happen."
+ )
hessians.append(hess)
return as_list_or_tuple(using_list, using_tuple, hessians)
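The `assert` rewrites here (and in `pytensor/link/c/basic.py` and `pytensor/link/vm.py` below) are the formatter's newer preferred layout, introduced around Ruff 0.9 if the changelog is read correctly: parenthesize the long message rather than the condition. This also avoids encouraging the classic footgun where parenthesizing the whole statement creates an always-truthy tuple:

```python
updates: dict = {}

# Old layout: parentheses around the condition, message after the comma.
assert (
    not updates
), "Scan has returned a list of updates; this should not happen."

# New layout: bare condition, parenthesized message.
assert not updates, (
    "Scan has returned a list of updates; this should not happen."
)

# The footgun the new style avoids: this would assert a non-empty tuple,
# which is always truthy and therefore can never fail.
# assert (not updates, "message")
```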

4 changes: 3 additions & 1 deletion pytensor/graph/basic.py
@@ -408,7 +408,9 @@ class Variable(Node, Generic[_TypeType, OptionalApplyType]):
[b], [c]
) # this works because a has a value associated with it already

- assert 4.0 == f(2.5)  # bind 2.5 to an internal copy of b and evaluate an internal c
+ assert 4.0 == f(
+     2.5
+ )  # bind 2.5 to an internal copy of b and evaluate an internal c

pytensor.function(
[a], [c]
3 changes: 1 addition & 2 deletions pytensor/graph/features.py
@@ -390,8 +390,7 @@ def __init__(self):
def on_attach(self, fgraph):
if hasattr(fgraph, "checkpoint") or hasattr(fgraph, "revert"):
raise AlreadyThere(
"History feature is already present or in"
" conflict with another plugin."
"History feature is already present or in conflict with another plugin."
)
self.history[fgraph] = []
# Don't call unpickle here, as ReplaceValidate.on_attach()
5 changes: 1 addition & 4 deletions pytensor/graph/rewriting/basic.py
@@ -3044,10 +3044,7 @@ def check_stack_trace(f_or_fgraph, ops_to_check="last", bug_print="raise"):
raise ValueError("ops_to_check does not have the right type")

if not apply_nodes_to_check:
- msg = (
-     "Provided op instances/classes are not in the graph or the "
-     "graph is empty"
- )
+ msg = "Provided op instances/classes are not in the graph or the graph is empty"
if bug_print == "warn":
warnings.warn(msg)
elif bug_print == "raise":
2 changes: 1 addition & 1 deletion pytensor/graph/rewriting/db.py
@@ -460,7 +460,7 @@ def query(self, *tags, position_cutoff: int | float | None = None, **kwtags):
return ret

def print_summary(self, stream=sys.stdout):
print(f"{self.__class__.__name__ } (id {id(self)})", file=stream)
print(f"{self.__class__.__name__} (id {id(self)})", file=stream)
positions = list(self.__position__.items())

def c(a, b):
11 changes: 4 additions & 7 deletions pytensor/link/c/basic.py
@@ -821,9 +821,9 @@ def code_gen(self):

behavior = op.c_code(node, name, isyms, osyms, sub)

- assert isinstance(
-     behavior, str
- ), f"{node.op} didn't return a string for c_code"
+ assert isinstance(behavior, str), (
+     f"{node.op} didn't return a string for c_code"
+ )
# To help understand what is following. It help read the c code.
# This prevent different op that generate the same c code
# to be merged, I suppose this won't happen...
@@ -1752,10 +1752,7 @@ def __call__(self):
exc_value.__thunk_trace__ = trace
except Exception:
print( # noqa: T201
- (
-     "ERROR retrieving error_storage."
-     "Was the error set in the c code?"
- ),
+ ("ERROR retrieving error_storage.Was the error set in the c code?"),
end=" ",
file=sys.stderr,
)
2 changes: 1 addition & 1 deletion pytensor/link/c/op.py
@@ -588,7 +588,7 @@ def c_code(self, node, name, inp, out, sub):
{define_macros}
{{
if ({self.func_name}({self.format_c_function_args(inp, out)}{params}) != 0) {{
- {sub['fail']}
+ {sub["fail"]}
}}
}}
{undef_macros}
17 changes: 11 additions & 6 deletions pytensor/link/c/params_type.py
@@ -116,7 +116,8 @@ def __init__(value_attr1, value_attr2):
.. code-block:: python

wrapper = ParamsType(
- enum1=EnumList("A", ("B", "beta"), "C"), enum2=EnumList(("D", "delta"), "E", "F")
+ enum1=EnumList("A", ("B", "beta"), "C"),
+ enum2=EnumList(("D", "delta"), "E", "F"),
)
b1 = wrapper.B
b2 = wrapper.get_enum("B")
@@ -540,7 +541,9 @@ def enum_from_alias(self, alias):
wrapper = ParamsType(
scalar=ScalarType("int32"),
letters=EnumType(A=(1, "alpha"), B=(2, "beta"), C=3),
digits=EnumList(("ZERO", "nothing"), ("ONE", "unit"), ("TWO", "couple")),
digits=EnumList(
("ZERO", "nothing"), ("ONE", "unit"), ("TWO", "couple")
),
)
print(wrapper.get_enum("C")) # 3
print(wrapper.get_enum("TWO")) # 2
@@ -593,7 +596,9 @@ def __init__(self):
self.b = numpy.asarray([[1, 2, 3], [4, 5, 6]])


- params_type = ParamsType(a=ScalarType("int32"), b=dmatrix, c=ScalarType("bool"))
+ params_type = ParamsType(
+     a=ScalarType("int32"), b=dmatrix, c=ScalarType("bool")
+ )

o = MyObject()
value_for_c = False
@@ -852,20 +857,20 @@ def c_extract(self, name, sub, check_input=True, **kwargs):
const char* fields[] = {{{fields_list}}};
if (py_{name} == Py_None) {{
PyErr_SetString(PyExc_ValueError, "ParamsType: expected an object, not None.");
- {sub['fail']}
+ {sub["fail"]}
}}
for (int i = 0; i < {self.length}; ++i) {{
PyObject* o = PyDict_GetItemString(py_{name}, fields[i]);
if (o == NULL) {{
PyErr_Format(PyExc_TypeError, "ParamsType: missing expected attribute \\"%s\\" in object.", fields[i]);
- {sub['fail']}
+ {sub["fail"]}
}}
{name}->extract(o, i);
if ({name}->errorOccurred()) {{
/* The extract code from attribute type should have already raised a Python exception,
* so we just print the attribute name in stderr. */
fprintf(stderr, "\\nParamsType: error when extracting value for attribute \\"%s\\".\\n", fields[i]);
- {sub['fail']}
+ {sub["fail"]}
}}
}}
}}
12 changes: 9 additions & 3 deletions pytensor/link/c/type.py
@@ -350,7 +350,11 @@ class EnumType(CType, dict):
.. code-block:: python

enum = EnumType(
- CONSTANT_1=0, CONSTANT_2=1, CONSTANT_3=2, ctype="size_t", cname="MyEnumName"
+ CONSTANT_1=0,
+ CONSTANT_2=1,
+ CONSTANT_3=2,
+ ctype="size_t",
+ cname="MyEnumName",
)

**Example with aliases**
@@ -477,7 +481,7 @@ def get_aliases(self):
return tuple(sorted(self.aliases))

def __repr__(self):
- names_to_aliases = {constant_name: "" for constant_name in self}
+ names_to_aliases = dict.fromkeys(self, "")
for alias in self.aliases:
names_to_aliases[self.aliases[alias]] = f"({alias})"
args = ", ".join(f"{k}{names_to_aliases[k]}:{self[k]}" for k in sorted(self))
@@ -674,7 +678,9 @@ class EnumList(EnumType):

.. code-block:: python

enum = EnumList(("A", "alpha"), ("B", "beta"), "C", "D", "E", "F", ("G", "gamma"))
enum = EnumList(
("A", "alpha"), ("B", "beta"), "C", "D", "E", "F", ("G", "gamma")
)

See test class :class:`tests.graph.test_types.TestOpEnumList` for a working example.

4 changes: 2 additions & 2 deletions pytensor/link/numba/dispatch/scalar.py
@@ -119,12 +119,12 @@ def {scalar_op_fn_name}({input_names}):
)
if not has_pyx_skip_dispatch:
scalar_op_src = f"""
- def {scalar_op_fn_name}({', '.join(input_names)}):
+ def {scalar_op_fn_name}({", ".join(input_names)}):
return direct_cast(scalar_func_numba({converted_call_args}), output_dtype)
"""
else:
scalar_op_src = f"""
- def {scalar_op_fn_name}({', '.join(input_names)}):
+ def {scalar_op_fn_name}({", ".join(input_names)}):
return direct_cast(scalar_func_numba({converted_call_args}, np.intc(1)), output_dtype)
"""

12 changes: 6 additions & 6 deletions pytensor/link/vm.py
@@ -83,16 +83,16 @@ def calculate_reallocate_info(
ins = None
if dmap and idx_o in dmap:
idx_v = dmap[idx_o]
- assert (
-     len(idx_v) == 1
- ), "Here we only support the possibility to destroy one input"
+ assert len(idx_v) == 1, (
+     "Here we only support the possibility to destroy one input"
+ )
ins = node.inputs[idx_v[0]]
if vmap and idx_o in vmap:
assert ins is None
idx_v = vmap[idx_o]
- assert (
-     len(idx_v) == 1
- ), "Here we only support the possibility to view one input"
+ assert len(idx_v) == 1, (
+     "Here we only support the possibility to view one input"
+ )
ins = node.inputs[idx_v[0]]
if ins is not None:
assert isinstance(ins, Variable)