[BE][Easy] apply autofix for ruff rule unnecessary-collection-call (C408): `list()` / `tuple()` / `dict()` (#130199)
This PR replaces empty collection factory calls with the equivalent Python literals:
- `list()` -> `[]`
- `tuple()` -> `()`
- `dict()` -> `{}`
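The changes were produced by ruff's autofix. An invocation along these lines reproduces it (the exact command isn't recorded in the PR, so treat it as illustrative):
```bash
ruff check --select C408 --fix .
```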
The literals are both faster and safer. For example, compare the bytecode for building an empty dictionary:
```bash
$ python3 -m dis - <<EOS
import collections

d1 = {}
d2 = dict()

dict = collections.OrderedDict
d3 = dict()
EOS
```
```text
0 0 RESUME 0
1 2 LOAD_CONST 0 (0)
4 LOAD_CONST 1 (None)
6 IMPORT_NAME 0 (collections)
8 STORE_NAME 0 (collections)
3 10 BUILD_MAP 0
12 STORE_NAME 1 (d1)
4 14 PUSH_NULL
16 LOAD_NAME 2 (dict)
18 CALL 0
26 STORE_NAME 3 (d2)
6 28 LOAD_NAME 0 (collections)
30 LOAD_ATTR 8 (OrderedDict)
50 STORE_NAME 2 (dict)
7 52 PUSH_NULL
54 LOAD_NAME 2 (dict)
56 CALL 0
64 STORE_NAME 5 (d3)
66 RETURN_CONST 1 (None)
```
The dict literal `{}` compiles to a single `BUILD_MAP` instruction, while the factory call `dict()` takes three: `PUSH_NULL` + `LOAD_NAME` + `CALL`. The factory call is also unsafe if the name `dict` has been rebound in the local or global scope, as in the `OrderedDict` example above, where `d3 = dict()` no longer builds a plain dict.
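Both claims are easy to check interactively. A minimal sketch (the timings are machine-dependent and only illustrative; what matters is the relative gap):
```python
import collections
import timeit

# Timing sketch: the literal skips the global/builtin name lookup and the
# call protocol, so it is consistently faster than the factory call.
print("{}     :", timeit.timeit("{}"))      # one million iterations each
print("dict() :", timeit.timeit("dict()"))

# Safety sketch: rebinding the name `dict` (here, only within this module)
# silently changes what the factory call builds; the literal is unaffected.
dict = collections.OrderedDict  # noqa: A001 - deliberate shadowing for the demo
d = dict()
print(type(d))   # <class 'collections.OrderedDict'>
print(type({}))  # <class 'dict'> - the literal still builds a plain dict
```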
Pull Request resolved: https://github.com/pytorch/pytorch/pull/130199
Approved by: https://github.com/malfet
diff --git a/test/distributed/_tensor/test_pointwise_ops.py b/test/distributed/_tensor/test_pointwise_ops.py
index f0103ba..cc60262 100644
--- a/test/distributed/_tensor/test_pointwise_ops.py
+++ b/test/distributed/_tensor/test_pointwise_ops.py
@@ -76,7 +76,7 @@
placements: Sequence[Placement],
op: Callable,
pre_op_fn: Optional[Callable] = None,
- args: Sequence[Any] = tuple(),
+ args: Sequence[Any] = (),
kwargs: Optional[Dict[str, Any]] = None,
):
if pre_op_fn is None:
diff --git a/test/distributed/checkpoint/test_state_dict.py b/test/distributed/checkpoint/test_state_dict.py
index 7736350..92c9c35 100644
--- a/test/distributed/checkpoint/test_state_dict.py
+++ b/test/distributed/checkpoint/test_state_dict.py
@@ -217,7 +217,7 @@
"use_orig_params": [True, False],
"use_composable": [True, False],
"use_dtensor": [True, False],
- "wrapping": [tuple(), (nn.Linear, UnitModule)],
+ "wrapping": [(), (nn.Linear, UnitModule)],
"optimizer_class": [torch.optim.Adam, torch.optim.AdamW],
},
self._test_fsdp,
@@ -231,7 +231,7 @@
"use_orig_params": [True],
"use_composable": [False],
"use_dtensor": [False],
- "wrapping": [tuple()],
+ "wrapping": [()],
"optimizer_class": [torch.optim.Adam, torch.optim.AdamW],
},
self._test_fsdp,
diff --git a/test/dynamo/test_functions.py b/test/dynamo/test_functions.py
index 123ca7c..25eab5d 100644
--- a/test/dynamo/test_functions.py
+++ b/test/dynamo/test_functions.py
@@ -960,11 +960,11 @@
self._test_default_dict_helper(dict)
def test_default_dict_lambda(self):
- self._test_default_dict_helper(lambda: dict())
+ self._test_default_dict_helper(lambda: dict()) # noqa: C408
def test_default_dict_closure(self):
def factory():
- return dict()
+ return dict() # noqa: C408
self._test_default_dict_helper(factory)
@@ -972,7 +972,7 @@
param = torch.nn.Parameter(torch.ones([2, 2]))
def fn(x):
- dd = collections.defaultdict(lambda: dict())
+ dd = collections.defaultdict(lambda: dict()) # noqa: C408
dd["a"] = x + 1
dd[param] = 123
dd["c"] = x * 2
@@ -1011,7 +1011,7 @@
@make_test
def test_call_dict1(x):
- d1 = dict()
+ d1 = dict() # noqa: C408
d1["x"] = x + 1
d2 = collections.OrderedDict()
d2["x"] = x + 2
@@ -1019,7 +1019,7 @@
@make_test
def test_call_dict2(x):
- d1 = dict()
+ d1 = dict() # noqa: C408
d1["x"] = x
d2 = collections.OrderedDict(d1)
if isinstance(d2, collections.OrderedDict):
diff --git a/test/dynamo/test_guard_manager.py b/test/dynamo/test_guard_manager.py
index 4edc65c..caa7217 100644
--- a/test/dynamo/test_guard_manager.py
+++ b/test/dynamo/test_guard_manager.py
@@ -575,7 +575,7 @@
guard_manager = RootGuardManager()
# Check a[3] which is tuple_iterator_getitem(foo, 2)
guard_manager.add_tuple_iterator_length_guard(
- 5, id_type(iter(tuple())), ["len == 5"]
+ 5, id_type(iter(())), ["len == 5"]
)
guard_manager.tuple_iterator_getitem_manager(
2, "", foo, default_mgr_enum
diff --git a/test/dynamo/test_higher_order_ops.py b/test/dynamo/test_higher_order_ops.py
index d809ebca..b8d391d 100644
--- a/test/dynamo/test_higher_order_ops.py
+++ b/test/dynamo/test_higher_order_ops.py
@@ -1319,7 +1319,7 @@
compiled_ret = torch.compile(
control_flow.map, backend=backend, fullgraph=True
)(inner, x)
- eager_sin, eager_transpose, eager_view = map_dense(inner, (x,), tuple())
+ eager_sin, eager_transpose, eager_view = map_dense(inner, (x,), ())
map_node = next(
node
@@ -1519,7 +1519,7 @@
def false_fn():
return torch.cos(x)
- return control_flow.cond(x.sum() > 0, true_fn, false_fn, tuple())
+ return control_flow.cond(x.sum() > 0, true_fn, false_fn, ())
graphs = self._check_cond_graph_and_extract(fn, (torch.randn(4, 5),))
if graphs is not None:
@@ -1562,7 +1562,7 @@
def false_fn():
return torch.ones(3, 4).sin()
- return control_flow.cond(x.sum() > 0, true_fn, false_fn, tuple())
+ return control_flow.cond(x.sum() > 0, true_fn, false_fn, ())
self._check_cond_graph_and_extract(fn, (torch.randn(4, 5),))
graphs = self._check_cond_graph_and_extract(fn, (torch.randn(4, 5),))
@@ -2168,7 +2168,7 @@
model = ToyModel()
forward_handles = {}
- activations = dict()
+ activations = {}
def save_activations(mod, inp, out):
activations[name] = inp
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index d8b3011..a012d9f 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -1407,7 +1407,7 @@
r1 = fn(i, [])
opt_fn = torch._dynamo.optimize("eager")(fn)
r2 = opt_fn(i, [])
- r3 = opt_fn(i, tuple())
+ r3 = opt_fn(i, ())
self.assertTrue(same(r1, r2))
self.assertTrue(same(r1, r3))
@@ -4312,7 +4312,7 @@
subs_of_foo_reg = Foo.__subclasses__()
sub_of_foo_subclass_var_reg = subs_of_foo_reg[0].__subclasses__()
- sub_of_foo_subclass_var_optim = list()
+ sub_of_foo_subclass_var_optim = []
counter = CompileCounter()
@torch._dynamo.optimize_assert(counter)
@@ -7081,7 +7081,7 @@
od = collections.OrderedDict
def fn():
- d1 = dict()
+ d1 = dict() # noqa: C408
d1["a"] = 1
d2 = od(d1)
d2["b"] = 2
diff --git a/test/dynamo/test_modules.py b/test/dynamo/test_modules.py
index de68a17..1d468cd 100644
--- a/test/dynamo/test_modules.py
+++ b/test/dynamo/test_modules.py
@@ -2254,8 +2254,8 @@
def _forward_hook_test_helper(self, model):
forward_handles = {}
- compiled_activations = dict()
- eager_activations = dict()
+ compiled_activations = {}
+ eager_activations = {}
activations = None
def save_activations(name, mod, inp, out):
diff --git a/test/dynamo/test_trace_rules.py b/test/dynamo/test_trace_rules.py
index c82f643..a0585d0 100644
--- a/test/dynamo/test_trace_rules.py
+++ b/test/dynamo/test_trace_rules.py
@@ -113,10 +113,10 @@
"""
warnings.filterwarnings("ignore", category=UserWarning, module="torch.distributed")
- torch_object_ids = dict()
+ torch_object_ids = {}
c_binding_in_graph_functions = set()
non_c_binding_in_graph_functions = set()
- torch_name_rule_map = dict()
+ torch_name_rule_map = {}
# In some platforms, these functions were loaded as classes instead of functions.
# To mitigate these weird cases, we need this special check.
diff --git a/test/export/test_passes.py b/test/export/test_passes.py
index cf066bc..a438a6d 100644
--- a/test/export/test_passes.py
+++ b/test/export/test_passes.py
@@ -482,7 +482,7 @@
shape_env=ShapeEnv(tracked_fakes=[]),
allow_non_fake_inputs=True,
)
- with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
+ with _fakify_script_objects(m, (), {}, fake_mode) as (
patched_mod,
_,
_,
@@ -502,7 +502,7 @@
shape_env=ShapeEnv(tracked_fakes=[]),
allow_non_fake_inputs=True,
)
- with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
+ with _fakify_script_objects(m, (), {}, fake_mode) as (
patched_mod,
_,
_,
diff --git a/test/jit/test_list_dict.py b/test/jit/test_list_dict.py
index 90fa24e..637bd57 100644
--- a/test/jit/test_list_dict.py
+++ b/test/jit/test_list_dict.py
@@ -254,7 +254,7 @@
self.checkScript(foo, ())
def foo2():
- x: List[int] = list()
+ x: List[int] = list() # noqa: C408
x.append(1)
return (x,)
@@ -330,7 +330,7 @@
def test_dict_keyword_is_correctly_typed(self):
def fn():
- x: Dict[str, int] = dict()
+ x: Dict[str, int] = dict() # noqa: C408
x["foo"] = 1
return x
@@ -2025,7 +2025,7 @@
test_func(no_args, ())
def test_dict_constructor():
- a = dict()
+ a = dict() # noqa: C408
a["one"] = torch.tensor(1)
return a, dict([(1, 2), (2, 3), (1, 4)]) # noqa: C406
@@ -2041,7 +2041,7 @@
test_func(test_dict_initializer_list, ())
def test_dict_error():
- a = dict()
+ a = dict() # noqa: C408
a[1] = 2
return a
diff --git a/test/onnx/test_fx_to_onnx_with_onnxruntime.py b/test/onnx/test_fx_to_onnx_with_onnxruntime.py
index 78228d8..50435ab 100644
--- a/test/onnx/test_fx_to_onnx_with_onnxruntime.py
+++ b/test/onnx/test_fx_to_onnx_with_onnxruntime.py
@@ -1229,7 +1229,7 @@
batch, seq = 4, 256
def create_args():
- return tuple()
+ return ()
def create_kwargs():
input_ids = torch.randint(0, config.vocab_size, (batch, seq))
@@ -1336,7 +1336,7 @@
batch, seq = 4, 256
def create_args():
- return tuple()
+ return ()
def create_kwargs():
input_ids = torch.randint(0, config.vocab_size, (batch, seq))
@@ -1369,7 +1369,7 @@
batch, seq = 4, 256
def create_args():
- return tuple()
+ return ()
def create_kwargs():
input_ids = torch.randint(0, config.vocab_size, (batch, seq))
@@ -1406,7 +1406,7 @@
return transformers.GPT2Model(config).eval()
def create_args():
- return tuple()
+ return ()
def create_kwargs():
batch, seq = 4, 256
@@ -1454,7 +1454,7 @@
return transformers.GPTNeoXModel(config).eval()
def create_args():
- return tuple()
+ return ()
def create_kwargs():
input_ids = torch.randint(0, config.vocab_size, (batch, seq))
diff --git a/test/onnx/test_pytorch_onnx_onnxruntime.py b/test/onnx/test_pytorch_onnx_onnxruntime.py
index e49d5d3..e560bd5 100644
--- a/test/onnx/test_pytorch_onnx_onnxruntime.py
+++ b/test/onnx/test_pytorch_onnx_onnxruntime.py
@@ -12598,7 +12598,7 @@
model_export = M()
model_onnx = io.BytesIO()
- test_inputs = tuple()
+ test_inputs = ()
torch.onnx.export(
model_export, test_inputs, model_onnx, opset_version=self.opset_version
)
diff --git a/test/test_fx_experimental.py b/test/test_fx_experimental.py
index d3ee06d..8ccff7c 100644
--- a/test/test_fx_experimental.py
+++ b/test/test_fx_experimental.py
@@ -1712,7 +1712,7 @@
"include_last_offset",
},
)
- self.assertEqual(norm_args_and_kwargs.args, tuple())
+ self.assertEqual(norm_args_and_kwargs.args, ())
def test_normalize_args_op_overload(self):
for target in [torch.ops.aten.resize_as_.default, torch.ops.aten.resize_as_]:
diff --git a/test/test_jit.py b/test/test_jit.py
index 306cfb1..1f775d2 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -10009,7 +10009,7 @@
super().__init__()
x = torch.zeros(1, 3)
mod_fn = lambda : mod(x) # noqa: E731
- self.mod = torch.jit.trace(mod_fn, tuple())
+ self.mod = torch.jit.trace(mod_fn, ())
@torch.jit.script_method
def forward(self):
diff --git a/test/test_mps.py b/test/test_mps.py
index d5918ff..7b2e40e 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -11504,7 +11504,7 @@
f"mismatch in cpu:{cpu_name} vs mps:{mps_name}, layers: {num_layers}")
LSTM_TEST_CASES = [
- dict(), # default
+ {}, # default
dict(batch_first=True),
dict(bias=False),
dict(bidirectional=True),
diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py
index a4f69fb..f054671 100644
--- a/test/test_nestedtensor.py
+++ b/test/test_nestedtensor.py
@@ -4013,7 +4013,7 @@
((2, 3), (3, None), (3, None, 1, 1)),
((0, 1, 3), (3,), (1, 1, 3, 1)),
((0, 1, 2), (4,), (1, 1, 1, 4)),
- ((0, 1, 2, 3), tuple(), (1, 1, 1, 1)),
+ ((0, 1, 2, 3), (), (1, 1, 1, 1)),
)
for rd, ref_shape_no_keepdim, ref_shape_keepdim in reduce_dims:
if (0 in rd) ^ (1 in rd):
diff --git a/test/test_nn.py b/test/test_nn.py
index 7a94514..fed55ce 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -91,7 +91,7 @@
def _forward_criterion(self, criterion, input, target, extra_args=None):
if extra_args is None:
- extra_args = tuple()
+ extra_args = ()
if isinstance(input, tuple):
args = input + (target,) + extra_args
output = criterion(*args)
@@ -101,7 +101,7 @@
def _backward_criterion(self, criterion, input, output, target, gradOutput=None, extra_args=None):
if extra_args is None:
- extra_args = tuple()
+ extra_args = ()
input_tuple = input if isinstance(input, tuple) else (input,)
output_tuple = output if isinstance(output, tuple) else (output,)
for i in input_tuple:
@@ -2293,7 +2293,7 @@
self.assertEqual(state_dict['bias'].data_ptr(), l.bias.data_ptr())
# Reference https://github.com/pytorch/pytorch/pull/75507#issuecomment-1110291545
- self.assertNotWarn(lambda: l.state_dict(destination=dict()), "Should not warn kwarg destination w/o _metadata")
+ self.assertNotWarn(lambda: l.state_dict(destination={}), "Should not warn kwarg destination w/o _metadata")
def test_extra_state(self):
diff --git a/test/test_ops.py b/test/test_ops.py
index cbec881..47c2a6d 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -1408,7 +1408,7 @@
unsupported_dtypes = set()
supported_backward_dtypes = set()
unsupported_backward_dtypes = set()
- dtype_error: Dict[torch.dtype, Exception] = dict()
+ dtype_error: Dict[torch.dtype, Exception] = {}
def unsupported(dtype, e):
dtype_error[dtype] = e
diff --git a/test/test_sparse_csr.py b/test/test_sparse_csr.py
index 86316a7..f61136b 100644
--- a/test/test_sparse_csr.py
+++ b/test/test_sparse_csr.py
@@ -4059,7 +4059,7 @@
dump() # this will update torch/sparse/_triton_ops_meta.py
expected = reference(input, mat1, mat2, beta=beta, alpha=alpha)
- kwargs = dict(bsr_dense_addmm=dict(beta=beta, alpha=alpha), bsr_dense_mm=dict(),
+ kwargs = dict(bsr_dense_addmm=dict(beta=beta, alpha=alpha), bsr_dense_mm={},
bsr_dense_linear=dict(bias=input.transpose(-1, -2)))[op]
args = dict(bsr_dense_addmm=(input, bsr, mat2), bsr_dense_mm=(bsr, mat2),
diff --git a/test/test_testing.py b/test/test_testing.py
index 77b8d4f..8b4a33a 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -1395,7 +1395,7 @@
)
@supported_dtypes
- @parametrize("shape", [tuple(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
+ @parametrize("shape", [(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
@parametrize("splat_shape", [False, True])
def test_smoke(self, dtype, device, shape, splat_shape):
t = torch.testing.make_tensor(*shape if splat_shape else shape, dtype=dtype, device=device)
@@ -1426,7 +1426,7 @@
@supported_dtypes
@parametrize("noncontiguous", [False, True])
- @parametrize("shape", [tuple(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
+ @parametrize("shape", [(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
def test_noncontiguous(self, dtype, device, noncontiguous, shape):
numel = functools.reduce(operator.mul, shape, 1)
diff --git a/test/torch_np/numpy_tests/core/test_scalarmath.py b/test/torch_np/numpy_tests/core/test_scalarmath.py
index d86595d..8427d03 100644
--- a/test/torch_np/numpy_tests/core/test_scalarmath.py
+++ b/test/torch_np/numpy_tests/core/test_scalarmath.py
@@ -439,7 +439,7 @@
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
- data = list()
+ data = []
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by else condition as neither are == 0
diff --git a/test/torch_np/numpy_tests/lib/test_shape_base_.py b/test/torch_np/numpy_tests/lib/test_shape_base_.py
index 4ac179f..bbf384d 100644
--- a/test/torch_np/numpy_tests/lib/test_shape_base_.py
+++ b/test/torch_np/numpy_tests/lib/test_shape_base_.py
@@ -86,9 +86,9 @@
a = rand(3, 4, 5)
funcs = [
- (np.sort, np.argsort, dict()),
- (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
- (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
+ (np.sort, np.argsort, {}),
+ (_add_keepdims(np.min), _add_keepdims(np.argmin), {}),
+ (_add_keepdims(np.max), _add_keepdims(np.argmax), {}),
# FIXME (np.partition, np.argpartition, dict(kth=2)),
]