[BE][Easy] apply autofix for ruff rules unnecessary-collection-call (C408): `list()` / `tuple()` / `dict()` (#130199)
This PR changes empty collection factory calls to Python literals:
- `list()` -> `[]`
- `tuple()` -> `()`
- `dict()` -> `{}`
The Python literals are more performant and safer than the factory calls. For example, compare the bytecode generated for building an empty dictionary:
```bash
$ python3 -m dis - <<EOS
import collections

d1 = {}
d2 = dict()

dict = collections.OrderedDict
d3 = dict()
EOS
```
```text
  0           0 RESUME                   0

  1           2 LOAD_CONST               0 (0)
              4 LOAD_CONST               1 (None)
              6 IMPORT_NAME              0 (collections)
              8 STORE_NAME               0 (collections)

  3          10 BUILD_MAP                0
             12 STORE_NAME               1 (d1)

  4          14 PUSH_NULL
             16 LOAD_NAME                2 (dict)
             18 CALL                     0
             26 STORE_NAME               3 (d2)

  6          28 LOAD_NAME                0 (collections)
             30 LOAD_ATTR                8 (OrderedDict)
             50 STORE_NAME               2 (dict)

  7          52 PUSH_NULL
             54 LOAD_NAME                2 (dict)
             56 CALL                     0
             64 STORE_NAME               5 (d3)
             66 RETURN_CONST             1 (None)
```
The dict literal `{}` compiles to a single bytecode instruction (`BUILD_MAP`), while the factory call `dict()` needs three (`PUSH_NULL + LOAD_NAME + CALL`). The factory call is also not safe when the name `dict` is rebound in the local or global scope (see the `OrderedDict` rebinding in the example above).
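For completeness, here is a small snippet (not part of this PR, just a sketch) that reproduces both points locally: the literal avoids the runtime name lookup and call, and it is unaffected when the name `dict` is rebound. Absolute timings vary by machine and CPython version.
```python
# Sketch only, not part of the PR.
import collections
import timeit

# 1) Speed: the literal compiles to a single BUILD_MAP instruction,
#    the factory call needs a name lookup plus a call.
print("{}     :", timeit.timeit("{}", number=1_000_000))
print("dict() :", timeit.timeit("dict()", number=1_000_000))

# 2) Safety: the factory call resolves the name `dict` at call time,
#    so rebinding the name changes its behavior; the literal does not.
def make_config():
    return dict()  # looked up in locals/globals/builtins when called

dict = collections.OrderedDict  # rebind the name in this module's globals
print(type(make_config()))      # <class 'collections.OrderedDict'>
print(type({}))                 # <class 'dict'> -- the literal is unaffected
del dict                        # restore the builtin
```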
Pull Request resolved: https://github.com/pytorch/pytorch/pull/130199
Approved by: https://github.com/malfet
diff --git a/.github/scripts/tag_docker_images_for_release.py b/.github/scripts/tag_docker_images_for_release.py
index fb3f723..62a4f21 100644
--- a/.github/scripts/tag_docker_images_for_release.py
+++ b/.github/scripts/tag_docker_images_for_release.py
@@ -41,7 +41,7 @@
)
options = parser.parse_args()
- tagged_images: Dict[str, bool] = dict()
+ tagged_images: Dict[str, bool] = {}
platform_images = [
generate_binary_build_matrix.WHEEL_CONTAINER_IMAGES,
generate_binary_build_matrix.LIBTORCH_CONTAINER_IMAGES,
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index 65b503d..ea4fe73 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -1167,7 +1167,7 @@
class AOTInductorModelCache:
- cache = dict()
+ cache = {}
@classmethod
def load(cls, model, example_inputs, device):
diff --git a/benchmarks/dynamo/huggingface.py b/benchmarks/dynamo/huggingface.py
index 0994a46..46492cc 100755
--- a/benchmarks/dynamo/huggingface.py
+++ b/benchmarks/dynamo/huggingface.py
@@ -86,7 +86,7 @@
# combination of models supported by HF Fx parser and some manually supplied
# models. For these models, we already know the largest batch size that can fit
# on A100 GPUs - 40 GB.
-BATCH_SIZE_KNOWN_MODELS = dict()
+BATCH_SIZE_KNOWN_MODELS = {}
# Get the list of models and their batch sizes
@@ -619,7 +619,7 @@
"""
import transformers.utils.fx as hf_fx
- family = dict()
+ family = {}
lm_seen = set()
family_seen = set()
for cls_name in hf_fx._SUPPORTED_MODELS:
diff --git a/benchmarks/dynamo/timm_models.py b/benchmarks/dynamo/timm_models.py
index 1460ad6..650af58 100755
--- a/benchmarks/dynamo/timm_models.py
+++ b/benchmarks/dynamo/timm_models.py
@@ -36,7 +36,7 @@
from timm.data import resolve_data_config
from timm.models import create_model
-TIMM_MODELS = dict()
+TIMM_MODELS = {}
filename = os.path.join(os.path.dirname(__file__), "timm_models_list.txt")
with open(filename) as fh:
@@ -174,7 +174,7 @@
return name.split("_")[0]
def populate_family(models):
- family = dict()
+ family = {}
for model_name in models:
family_name = get_family_name(model_name)
if family_name not in family:
diff --git a/benchmarks/functional_autograd_benchmark/vision_models.py b/benchmarks/functional_autograd_benchmark/vision_models.py
index 4f1050b..fdb06cf 100644
--- a/benchmarks/functional_autograd_benchmark/vision_models.py
+++ b/benchmarks/functional_autograd_benchmark/vision_models.py
@@ -113,7 +113,7 @@
labels = []
for idx in range(N):
targets = {}
- n_targets: int = int(torch.randint(5, 10, size=tuple()).item())
+ n_targets: int = int(torch.randint(5, 10, size=()).item())
label = torch.randint(5, 10, size=(n_targets,), device=device)
targets["labels"] = label
boxes = torch.randint(100, 800, size=(n_targets, 4), device=device)
diff --git a/benchmarks/inference/process_metrics.py b/benchmarks/inference/process_metrics.py
index 860dc65..acdd1df 100644
--- a/benchmarks/inference/process_metrics.py
+++ b/benchmarks/inference/process_metrics.py
@@ -24,8 +24,8 @@
# Calculate mean and standard deviation for a subset of metrics
metrics = ["warmup_latency", "average_latency", "throughput", "gpu_util"]
- means = dict()
- stds = dict()
+ means = {}
+ stds = {}
for metric in metrics:
means[metric] = df[metric].mean()
diff --git a/benchmarks/inference/server.py b/benchmarks/inference/server.py
index 6c33980..40ef2fd 100644
--- a/benchmarks/inference/server.py
+++ b/benchmarks/inference/server.py
@@ -182,7 +182,7 @@
self.h2d_stream = torch.cuda.Stream()
self.d2h_stream = torch.cuda.Stream()
# maps thread_id to the cuda.Stream associated with that worker thread
- self.stream_map = dict()
+ self.stream_map = {}
def _setup(self):
import time
diff --git a/benchmarks/sparse/triton_ops.py b/benchmarks/sparse/triton_ops.py
index 6652269..90e0e0c 100644
--- a/benchmarks/sparse/triton_ops.py
+++ b/benchmarks/sparse/triton_ops.py
@@ -367,7 +367,7 @@
num_stages=num_stages,
num_warps=num_warps,
),
- ).get(op, dict())
+ ).get(op, {})
meta_str = ";".join(
f"{k}={v}" for k, v in meta.items() if v is not None
diff --git a/pyproject.toml b/pyproject.toml
index 7ecda0a..e064f16 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -76,7 +76,7 @@
"SIM102", "SIM103", "SIM112", # flake8-simplify code styles
"SIM113", # please fix
"SIM105", # these ignores are from flake8-simplify. please fix or ignore with commented reason
- "SIM108",
+ "SIM108", # SIM108 ignored because we prefer if-else-block instead of ternary expression
"SIM110",
"SIM114", # Combine `if` branches using logical `or` operator
"SIM115",
diff --git a/scripts/release_notes/common.py b/scripts/release_notes/common.py
index 6b768b4..9143fd6 100644
--- a/scripts/release_notes/common.py
+++ b/scripts/release_notes/common.py
@@ -287,7 +287,7 @@
pr_number = parse_pr_number(body, commit_hash, title)
labels = []
author = ""
- accepters = tuple()
+ accepters = ()
if pr_number is not None:
labels, author, accepters = github_data(pr_number)
result = Features(title, body, pr_number, files_changed, labels, author, accepters)
diff --git a/test/distributed/_tensor/test_pointwise_ops.py b/test/distributed/_tensor/test_pointwise_ops.py
index f0103ba..cc60262 100644
--- a/test/distributed/_tensor/test_pointwise_ops.py
+++ b/test/distributed/_tensor/test_pointwise_ops.py
@@ -76,7 +76,7 @@
placements: Sequence[Placement],
op: Callable,
pre_op_fn: Optional[Callable] = None,
- args: Sequence[Any] = tuple(),
+ args: Sequence[Any] = (),
kwargs: Optional[Dict[str, Any]] = None,
):
if pre_op_fn is None:
diff --git a/test/distributed/checkpoint/test_state_dict.py b/test/distributed/checkpoint/test_state_dict.py
index 7736350..92c9c35 100644
--- a/test/distributed/checkpoint/test_state_dict.py
+++ b/test/distributed/checkpoint/test_state_dict.py
@@ -217,7 +217,7 @@
"use_orig_params": [True, False],
"use_composable": [True, False],
"use_dtensor": [True, False],
- "wrapping": [tuple(), (nn.Linear, UnitModule)],
+ "wrapping": [(), (nn.Linear, UnitModule)],
"optimizer_class": [torch.optim.Adam, torch.optim.AdamW],
},
self._test_fsdp,
@@ -231,7 +231,7 @@
"use_orig_params": [True],
"use_composable": [False],
"use_dtensor": [False],
- "wrapping": [tuple()],
+ "wrapping": [()],
"optimizer_class": [torch.optim.Adam, torch.optim.AdamW],
},
self._test_fsdp,
diff --git a/test/dynamo/test_functions.py b/test/dynamo/test_functions.py
index 123ca7c..25eab5d 100644
--- a/test/dynamo/test_functions.py
+++ b/test/dynamo/test_functions.py
@@ -960,11 +960,11 @@
self._test_default_dict_helper(dict)
def test_default_dict_lambda(self):
- self._test_default_dict_helper(lambda: dict())
+ self._test_default_dict_helper(lambda: dict()) # noqa: C408
def test_default_dict_closure(self):
def factory():
- return dict()
+ return dict() # noqa: C408
self._test_default_dict_helper(factory)
@@ -972,7 +972,7 @@
param = torch.nn.Parameter(torch.ones([2, 2]))
def fn(x):
- dd = collections.defaultdict(lambda: dict())
+ dd = collections.defaultdict(lambda: dict()) # noqa: C408
dd["a"] = x + 1
dd[param] = 123
dd["c"] = x * 2
@@ -1011,7 +1011,7 @@
@make_test
def test_call_dict1(x):
- d1 = dict()
+ d1 = dict() # noqa: C408
d1["x"] = x + 1
d2 = collections.OrderedDict()
d2["x"] = x + 2
@@ -1019,7 +1019,7 @@
@make_test
def test_call_dict2(x):
- d1 = dict()
+ d1 = dict() # noqa: C408
d1["x"] = x
d2 = collections.OrderedDict(d1)
if isinstance(d2, collections.OrderedDict):
diff --git a/test/dynamo/test_guard_manager.py b/test/dynamo/test_guard_manager.py
index 4edc65c..caa7217 100644
--- a/test/dynamo/test_guard_manager.py
+++ b/test/dynamo/test_guard_manager.py
@@ -575,7 +575,7 @@
guard_manager = RootGuardManager()
# Check a[3] which is tuple_iterator_getitem(foo, 2)
guard_manager.add_tuple_iterator_length_guard(
- 5, id_type(iter(tuple())), ["len == 5"]
+ 5, id_type(iter(())), ["len == 5"]
)
guard_manager.tuple_iterator_getitem_manager(
2, "", foo, default_mgr_enum
diff --git a/test/dynamo/test_higher_order_ops.py b/test/dynamo/test_higher_order_ops.py
index d809ebca..b8d391d 100644
--- a/test/dynamo/test_higher_order_ops.py
+++ b/test/dynamo/test_higher_order_ops.py
@@ -1319,7 +1319,7 @@
compiled_ret = torch.compile(
control_flow.map, backend=backend, fullgraph=True
)(inner, x)
- eager_sin, eager_transpose, eager_view = map_dense(inner, (x,), tuple())
+ eager_sin, eager_transpose, eager_view = map_dense(inner, (x,), ())
map_node = next(
node
@@ -1519,7 +1519,7 @@
def false_fn():
return torch.cos(x)
- return control_flow.cond(x.sum() > 0, true_fn, false_fn, tuple())
+ return control_flow.cond(x.sum() > 0, true_fn, false_fn, ())
graphs = self._check_cond_graph_and_extract(fn, (torch.randn(4, 5),))
if graphs is not None:
@@ -1562,7 +1562,7 @@
def false_fn():
return torch.ones(3, 4).sin()
- return control_flow.cond(x.sum() > 0, true_fn, false_fn, tuple())
+ return control_flow.cond(x.sum() > 0, true_fn, false_fn, ())
self._check_cond_graph_and_extract(fn, (torch.randn(4, 5),))
graphs = self._check_cond_graph_and_extract(fn, (torch.randn(4, 5),))
@@ -2168,7 +2168,7 @@
model = ToyModel()
forward_handles = {}
- activations = dict()
+ activations = {}
def save_activations(mod, inp, out):
activations[name] = inp
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index d8b3011..a012d9f 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -1407,7 +1407,7 @@
r1 = fn(i, [])
opt_fn = torch._dynamo.optimize("eager")(fn)
r2 = opt_fn(i, [])
- r3 = opt_fn(i, tuple())
+ r3 = opt_fn(i, ())
self.assertTrue(same(r1, r2))
self.assertTrue(same(r1, r3))
@@ -4312,7 +4312,7 @@
subs_of_foo_reg = Foo.__subclasses__()
sub_of_foo_subclass_var_reg = subs_of_foo_reg[0].__subclasses__()
- sub_of_foo_subclass_var_optim = list()
+ sub_of_foo_subclass_var_optim = []
counter = CompileCounter()
@torch._dynamo.optimize_assert(counter)
@@ -7081,7 +7081,7 @@
od = collections.OrderedDict
def fn():
- d1 = dict()
+ d1 = dict() # noqa: C408
d1["a"] = 1
d2 = od(d1)
d2["b"] = 2
diff --git a/test/dynamo/test_modules.py b/test/dynamo/test_modules.py
index de68a17..1d468cd 100644
--- a/test/dynamo/test_modules.py
+++ b/test/dynamo/test_modules.py
@@ -2254,8 +2254,8 @@
def _forward_hook_test_helper(self, model):
forward_handles = {}
- compiled_activations = dict()
- eager_activations = dict()
+ compiled_activations = {}
+ eager_activations = {}
activations = None
def save_activations(name, mod, inp, out):
diff --git a/test/dynamo/test_trace_rules.py b/test/dynamo/test_trace_rules.py
index c82f643..a0585d0 100644
--- a/test/dynamo/test_trace_rules.py
+++ b/test/dynamo/test_trace_rules.py
@@ -113,10 +113,10 @@
"""
warnings.filterwarnings("ignore", category=UserWarning, module="torch.distributed")
- torch_object_ids = dict()
+ torch_object_ids = {}
c_binding_in_graph_functions = set()
non_c_binding_in_graph_functions = set()
- torch_name_rule_map = dict()
+ torch_name_rule_map = {}
# In some platforms, these functions were loaded as classes instead of functions.
# To mitigate these weired cases, we need this special check.
diff --git a/test/export/test_passes.py b/test/export/test_passes.py
index cf066bc..a438a6d 100644
--- a/test/export/test_passes.py
+++ b/test/export/test_passes.py
@@ -482,7 +482,7 @@
shape_env=ShapeEnv(tracked_fakes=[]),
allow_non_fake_inputs=True,
)
- with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
+ with _fakify_script_objects(m, (), {}, fake_mode) as (
patched_mod,
_,
_,
@@ -502,7 +502,7 @@
shape_env=ShapeEnv(tracked_fakes=[]),
allow_non_fake_inputs=True,
)
- with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
+ with _fakify_script_objects(m, (), {}, fake_mode) as (
patched_mod,
_,
_,
diff --git a/test/jit/test_list_dict.py b/test/jit/test_list_dict.py
index 90fa24e..637bd57 100644
--- a/test/jit/test_list_dict.py
+++ b/test/jit/test_list_dict.py
@@ -254,7 +254,7 @@
self.checkScript(foo, ())
def foo2():
- x: List[int] = list()
+ x: List[int] = list() # noqa: C408
x.append(1)
return (x,)
@@ -330,7 +330,7 @@
def test_dict_keyword_is_correctly_typed(self):
def fn():
- x: Dict[str, int] = dict()
+ x: Dict[str, int] = dict() # noqa: C408
x["foo"] = 1
return x
@@ -2025,7 +2025,7 @@
test_func(no_args, ())
def test_dict_constructor():
- a = dict()
+ a = dict() # noqa: C408
a["one"] = torch.tensor(1)
return a, dict([(1, 2), (2, 3), (1, 4)]) # noqa: C406
@@ -2041,7 +2041,7 @@
test_func(test_dict_initializer_list, ())
def test_dict_error():
- a = dict()
+ a = dict() # noqa: C408
a[1] = 2
return a
diff --git a/test/onnx/test_fx_to_onnx_with_onnxruntime.py b/test/onnx/test_fx_to_onnx_with_onnxruntime.py
index 78228d8..50435ab 100644
--- a/test/onnx/test_fx_to_onnx_with_onnxruntime.py
+++ b/test/onnx/test_fx_to_onnx_with_onnxruntime.py
@@ -1229,7 +1229,7 @@
batch, seq = 4, 256
def create_args():
- return tuple()
+ return ()
def create_kwargs():
input_ids = torch.randint(0, config.vocab_size, (batch, seq))
@@ -1336,7 +1336,7 @@
batch, seq = 4, 256
def create_args():
- return tuple()
+ return ()
def create_kwargs():
input_ids = torch.randint(0, config.vocab_size, (batch, seq))
@@ -1369,7 +1369,7 @@
batch, seq = 4, 256
def create_args():
- return tuple()
+ return ()
def create_kwargs():
input_ids = torch.randint(0, config.vocab_size, (batch, seq))
@@ -1406,7 +1406,7 @@
return transformers.GPT2Model(config).eval()
def create_args():
- return tuple()
+ return ()
def create_kwargs():
batch, seq = 4, 256
@@ -1454,7 +1454,7 @@
return transformers.GPTNeoXModel(config).eval()
def create_args():
- return tuple()
+ return ()
def create_kwargs():
input_ids = torch.randint(0, config.vocab_size, (batch, seq))
diff --git a/test/onnx/test_pytorch_onnx_onnxruntime.py b/test/onnx/test_pytorch_onnx_onnxruntime.py
index e49d5d3..e560bd5 100644
--- a/test/onnx/test_pytorch_onnx_onnxruntime.py
+++ b/test/onnx/test_pytorch_onnx_onnxruntime.py
@@ -12598,7 +12598,7 @@
model_export = M()
model_onnx = io.BytesIO()
- test_inputs = tuple()
+ test_inputs = ()
torch.onnx.export(
model_export, test_inputs, model_onnx, opset_version=self.opset_version
)
diff --git a/test/test_fx_experimental.py b/test/test_fx_experimental.py
index d3ee06d..8ccff7c 100644
--- a/test/test_fx_experimental.py
+++ b/test/test_fx_experimental.py
@@ -1712,7 +1712,7 @@
"include_last_offset",
},
)
- self.assertEqual(norm_args_and_kwargs.args, tuple())
+ self.assertEqual(norm_args_and_kwargs.args, ())
def test_normalize_args_op_overload(self):
for target in [torch.ops.aten.resize_as_.default, torch.ops.aten.resize_as_]:
diff --git a/test/test_jit.py b/test/test_jit.py
index 306cfb1..1f775d2 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -10009,7 +10009,7 @@
super().__init__()
x = torch.zeros(1, 3)
mod_fn = lambda : mod(x) # noqa: E731
- self.mod = torch.jit.trace(mod_fn, tuple())
+ self.mod = torch.jit.trace(mod_fn, ())
@torch.jit.script_method
def forward(self):
diff --git a/test/test_mps.py b/test/test_mps.py
index d5918ff..7b2e40e 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -11504,7 +11504,7 @@
f"mismatch in cpu:{cpu_name} vs mps:{mps_name}, layers: {num_layers}")
LSTM_TEST_CASES = [
- dict(), # default
+ {}, # default
dict(batch_first=True),
dict(bias=False),
dict(bidirectional=True),
diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py
index a4f69fb..f054671 100644
--- a/test/test_nestedtensor.py
+++ b/test/test_nestedtensor.py
@@ -4013,7 +4013,7 @@
((2, 3), (3, None), (3, None, 1, 1)),
((0, 1, 3), (3,), (1, 1, 3, 1)),
((0, 1, 2), (4,), (1, 1, 1, 4)),
- ((0, 1, 2, 3), tuple(), (1, 1, 1, 1)),
+ ((0, 1, 2, 3), (), (1, 1, 1, 1)),
)
for rd, ref_shape_no_keepdim, ref_shape_keepdim in reduce_dims:
if (0 in rd) ^ (1 in rd):
diff --git a/test/test_nn.py b/test/test_nn.py
index 7a94514..fed55ce 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -91,7 +91,7 @@
def _forward_criterion(self, criterion, input, target, extra_args=None):
if extra_args is None:
- extra_args = tuple()
+ extra_args = ()
if isinstance(input, tuple):
args = input + (target,) + extra_args
output = criterion(*args)
@@ -101,7 +101,7 @@
def _backward_criterion(self, criterion, input, output, target, gradOutput=None, extra_args=None):
if extra_args is None:
- extra_args = tuple()
+ extra_args = ()
input_tuple = input if isinstance(input, tuple) else (input,)
output_tuple = output if isinstance(output, tuple) else (output,)
for i in input_tuple:
@@ -2293,7 +2293,7 @@
self.assertEqual(state_dict['bias'].data_ptr(), l.bias.data_ptr())
# Reference https://github.com/pytorch/pytorch/pull/75507#issuecomment-1110291545
- self.assertNotWarn(lambda: l.state_dict(destination=dict()), "Should not warn kwarg destination w/o _metadata")
+ self.assertNotWarn(lambda: l.state_dict(destination={}), "Should not warn kwarg destination w/o _metadata")
def test_extra_state(self):
diff --git a/test/test_ops.py b/test/test_ops.py
index cbec881..47c2a6d 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -1408,7 +1408,7 @@
unsupported_dtypes = set()
supported_backward_dtypes = set()
unsupported_backward_dtypes = set()
- dtype_error: Dict[torch.dtype, Exception] = dict()
+ dtype_error: Dict[torch.dtype, Exception] = {}
def unsupported(dtype, e):
dtype_error[dtype] = e
diff --git a/test/test_sparse_csr.py b/test/test_sparse_csr.py
index 86316a7..f61136b 100644
--- a/test/test_sparse_csr.py
+++ b/test/test_sparse_csr.py
@@ -4059,7 +4059,7 @@
dump() # this will update torch/sparse/_triton_ops_meta.py
expected = reference(input, mat1, mat2, beta=beta, alpha=alpha)
- kwargs = dict(bsr_dense_addmm=dict(beta=beta, alpha=alpha), bsr_dense_mm=dict(),
+ kwargs = dict(bsr_dense_addmm=dict(beta=beta, alpha=alpha), bsr_dense_mm={},
bsr_dense_linear=dict(bias=input.transpose(-1, -2)))[op]
args = dict(bsr_dense_addmm=(input, bsr, mat2), bsr_dense_mm=(bsr, mat2),
diff --git a/test/test_testing.py b/test/test_testing.py
index 77b8d4f..8b4a33a 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -1395,7 +1395,7 @@
)
@supported_dtypes
- @parametrize("shape", [tuple(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
+ @parametrize("shape", [(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
@parametrize("splat_shape", [False, True])
def test_smoke(self, dtype, device, shape, splat_shape):
t = torch.testing.make_tensor(*shape if splat_shape else shape, dtype=dtype, device=device)
@@ -1426,7 +1426,7 @@
@supported_dtypes
@parametrize("noncontiguous", [False, True])
- @parametrize("shape", [tuple(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
+ @parametrize("shape", [(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
def test_noncontiguous(self, dtype, device, noncontiguous, shape):
numel = functools.reduce(operator.mul, shape, 1)
diff --git a/test/torch_np/numpy_tests/core/test_scalarmath.py b/test/torch_np/numpy_tests/core/test_scalarmath.py
index d86595d..8427d03 100644
--- a/test/torch_np/numpy_tests/core/test_scalarmath.py
+++ b/test/torch_np/numpy_tests/core/test_scalarmath.py
@@ -439,7 +439,7 @@
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
- data = list()
+ data = []
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by else condition as neither are == 0
diff --git a/test/torch_np/numpy_tests/lib/test_shape_base_.py b/test/torch_np/numpy_tests/lib/test_shape_base_.py
index 4ac179f..bbf384d 100644
--- a/test/torch_np/numpy_tests/lib/test_shape_base_.py
+++ b/test/torch_np/numpy_tests/lib/test_shape_base_.py
@@ -86,9 +86,9 @@
a = rand(3, 4, 5)
funcs = [
- (np.sort, np.argsort, dict()),
- (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
- (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
+ (np.sort, np.argsort, {}),
+ (_add_keepdims(np.min), _add_keepdims(np.argmin), {}),
+ (_add_keepdims(np.max), _add_keepdims(np.argmax), {}),
# FIXME (np.partition, np.argpartition, dict(kth=2)),
]
diff --git a/tools/autograd/load_derivatives.py b/tools/autograd/load_derivatives.py
index 96a37eb..f691f6b 100644
--- a/tools/autograd/load_derivatives.py
+++ b/tools/autograd/load_derivatives.py
@@ -240,7 +240,7 @@
for r in f.func.returns:
if r.name in var_names:
if var_types is None:
- var_types = tuple()
+ var_types = ()
var_types = var_types + (r.type,)
# Handle default return names
@@ -253,7 +253,7 @@
res = re.findall(r"^result(\d+)$", var_name)
if len(res) == 1:
if var_types is None:
- var_types = tuple()
+ var_types = ()
arg_idx = int(res[0])
var_types = var_types + (f.func.returns[arg_idx].type,)
diff --git a/tools/stats/import_test_stats.py b/tools/stats/import_test_stats.py
index 207db7d..086d08f 100644
--- a/tools/stats/import_test_stats.py
+++ b/tools/stats/import_test_stats.py
@@ -109,7 +109,7 @@
def process_disabled_test(the_response: dict[str, Any]) -> dict[str, Any]:
# remove re-enabled tests and condense even further by getting rid of pr_num
disabled_issues = get_disabled_issues()
- disabled_test_from_issues = dict()
+ disabled_test_from_issues = {}
for test_name, (pr_num, link, platforms) in the_response.items():
if pr_num not in disabled_issues:
disabled_test_from_issues[test_name] = (
diff --git a/tools/test/test_codegen_model.py b/tools/test/test_codegen_model.py
index e4e68f9..58b1aa6 100644
--- a/tools/test/test_codegen_model.py
+++ b/tools/test/test_codegen_model.py
@@ -164,13 +164,13 @@
a = Annotation.parse("a")
self.assertEqual(a.alias_set, tuple("a"))
self.assertFalse(a.is_write)
- self.assertEqual(a.alias_set_after, tuple())
+ self.assertEqual(a.alias_set_after, ())
def test_single_alias_is_write(self) -> None:
a = Annotation.parse("a!")
self.assertEqual(a.alias_set, tuple("a"))
self.assertTrue(a.is_write)
- self.assertEqual(a.alias_set_after, tuple())
+ self.assertEqual(a.alias_set_after, ())
def test_single_alias_is_write_to_wildcard(self) -> None:
a = Annotation.parse("a! -> *")
diff --git a/torch/__init__.py b/torch/__init__.py
index 932eea0..42d5166 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -2123,7 +2123,7 @@
compiler_name = "inductor"
def __init__(self, mode, options, dynamic):
- self.config: _Dict[str, _Any] = dict()
+ self.config: _Dict[str, _Any] = {}
self.dynamic = dynamic
self.apply_mode(mode)
self.apply_options(options)
diff --git a/torch/_dynamo/backends/registry.py b/torch/_dynamo/backends/registry.py
index 13cb47a..e3538a4 100644
--- a/torch/_dynamo/backends/registry.py
+++ b/torch/_dynamo/backends/registry.py
@@ -15,7 +15,7 @@
CompilerFn = Callable[[fx.GraphModule, List[torch.Tensor]], CompiledFn]
-_BACKENDS: Dict[str, CompilerFn] = dict()
+_BACKENDS: Dict[str, CompilerFn] = {}
def register_backend(
@@ -111,5 +111,5 @@
if backend_name in backend_eps.names:
compiler_fn = backend_eps[backend_name].load()
- if compiler_fn is not None and backend_name not in list_backends(tuple()):
+ if compiler_fn is not None and backend_name not in list_backends(()):
register_backend(compiler_fn=compiler_fn, name=backend_name)
diff --git a/torch/_dynamo/bytecode_transformation.py b/torch/_dynamo/bytecode_transformation.py
index 9e5026e..52c218b 100644
--- a/torch/_dynamo/bytecode_transformation.py
+++ b/torch/_dynamo/bytecode_transformation.py
@@ -985,7 +985,7 @@
def explicit_super(code: types.CodeType, instructions: List[Instruction]) -> None:
"""convert super() with no args into explicit arg form"""
- cell_and_free = (code.co_cellvars or tuple()) + (code.co_freevars or tuple())
+ cell_and_free = (code.co_cellvars or ()) + (code.co_freevars or ())
if not len(code.co_varnames):
# A function with no argument cannot contain a valid "super()" call
return
diff --git a/torch/_dynamo/convert_frame.py b/torch/_dynamo/convert_frame.py
index 2af5cb9..9721e4d 100644
--- a/torch/_dynamo/convert_frame.py
+++ b/torch/_dynamo/convert_frame.py
@@ -216,7 +216,7 @@
if np and config.trace_numpy and (obj is np or is_numpy(obj)):
return True
- seen_ids: Dict[int, bool] = dict()
+ seen_ids: Dict[int, bool] = {}
def has_tensor(obj):
"""Recursively check if the obj has a tensor"""
diff --git a/torch/_dynamo/eval_frame.py b/torch/_dynamo/eval_frame.py
index 629f968..2f32cfe 100644
--- a/torch/_dynamo/eval_frame.py
+++ b/torch/_dynamo/eval_frame.py
@@ -1337,7 +1337,7 @@
**named_parameters,
**named_buffers,
}
- fake_params_buffers = dict()
+ fake_params_buffers = {}
for name, value in params_and_buffers.items():
fake_params_buffers[name] = ambient_fake_mode.from_tensor(
diff --git a/torch/_dynamo/guards.py b/torch/_dynamo/guards.py
index 1d5efad..acaf7bb 100644
--- a/torch/_dynamo/guards.py
+++ b/torch/_dynamo/guards.py
@@ -1078,7 +1078,7 @@
# guard.
make_guard_fn_args = ", ".join(closure_vars.keys())
guard_body, pycode = build_guard_function(code_parts, make_guard_fn_args)
- out: Dict[str, Any] = dict()
+ out: Dict[str, Any] = {}
globals_for_guard_fn = {"G": self.scope["G"]}
exec(pycode, globals_for_guard_fn, out)
guard_fn = out["___make_guard_fn"](*closure_vars.values())
@@ -1407,7 +1407,7 @@
# Special case for nan because float("nan") == float("nan") evaluates to False
if istype(val, float) and math.isnan(val):
self.TYPE_MATCH(guard)
- code = list()
+ code = []
code.append(f"__math_isnan({ref})")
self._set_guard_export_info(guard, code)
@@ -1422,7 +1422,7 @@
# Python math library doesn't support complex nan, so we need to use numpy
if istype(val, complex) and np.isnan(val):
self.TYPE_MATCH(guard)
- code = list()
+ code = []
code.append(f"__numpy_isnan({ref})")
self._set_guard_export_info(guard, code)
@@ -1443,7 +1443,7 @@
self._set_guard_export_info(guard, code)
return
- code = list()
+ code = []
# If matching equality against list/tuple, we must also check that
# the internal types match. (TODO: what about nested lists?)
@@ -1520,7 +1520,7 @@
# C++ DICT_LENGTH checks for type
self.TYPE_MATCH(guard)
- code = list()
+ code = []
if len(value) == 0:
code.append(f"not {ref}")
else:
@@ -1548,7 +1548,7 @@
# C++ guard already checks the type
self.TYPE_MATCH(guard)
- code = list()
+ code = []
code.append(f"___tuple_iterator_len({ref}) == {tuple_iterator_len(value)}")
self._set_guard_export_info(guard, code)
@@ -1591,7 +1591,7 @@
t = type(value)
self.TYPE_MATCH(guard)
- code = list()
+ code = []
any_key_is_id = any(key_is_id(k) for k in value.keys())
const_keys_repr = dict_keys_repr(
key_to_id(value),
@@ -1632,7 +1632,7 @@
# DictGuardManager supports TYPE_MATCH internally
self.TYPE_MATCH(guard)
- code = list()
+ code = []
code.append(f"list({ref}.keys()) == {list(value.keys())!r}")
self._set_guard_export_info(guard, code)
@@ -1794,7 +1794,7 @@
#
# The list of tensor fields and calls we care about can be found in `terms` below.
# TODO(voz): We are missing storage offset in all our tensor guards?
- code: List[str] = list()
+ code: List[str] = []
if self.check_fn_manager.output_graph.export:
self.TYPE_MATCH(guard)
terms = [
@@ -2371,7 +2371,7 @@
if os.environ.get("TORCHDYNAMO_PRINT_GUARDS", None) == "1":
print("GUARDS\n", guard_body)
- out: Dict[str, Any] = dict()
+ out: Dict[str, Any] = {}
# We don't put builder.scope as the globals in exec call because
# guard_fn.__globals__ becomes equal to builder.scope. This causes
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index 466d919..67ba36e 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -321,7 +321,7 @@
int, List[Source]
] = collections.defaultdict(list)
# Stores the full fqn of a param or buffer to the relevant source.
- self.param_name_to_source: Optional[Dict[str, Source]] = dict()
+ self.param_name_to_source: Optional[Dict[str, Source]] = {}
self.side_effects = SideEffects()
# Cached variable trackers. This makes symbolic analysis of LOAD_GLOBAL
# and LOAD_ATTR for same python objects free.
@@ -795,7 +795,7 @@
vt = wrap_fx_proxy(
self.root_tx,
- tracer.create_proxy("get_attr", module_key, tuple(), {}),
+ tracer.create_proxy("get_attr", module_key, (), {}),
example_value=target,
**options,
)
@@ -833,7 +833,7 @@
def wrap_name(module_key):
return SymNodeVariable.create(
self,
- self.create_proxy("get_attr", module_key, tuple(), {}),
+ self.create_proxy("get_attr", module_key, (), {}),
sym_num=target,
**options,
)
@@ -1030,7 +1030,7 @@
restore_vars = []
val_to_names: Dict[VariableTracker, List[str]] = {}
if stack_values:
- val_to_names[stack_values[-1]] = list()
+ val_to_names[stack_values[-1]] = []
# NB: Typically (i.e., for graph compile from RETURN_VALUE),
# symbolic_locals will be empty at this point, as prune_dead_locals
# will clear out all of symbolic_locals because RETURN_VALUE is the
@@ -1053,7 +1053,7 @@
# A variable should never be NULL in < 3.12
assert not type.__instancecheck__(NullVariable, v)
if v not in val_to_names:
- val_to_names[v] = list()
+ val_to_names[v] = []
val_to_names[v].append(k)
for v in val_to_names.keys():
restore_vars.extend(val_to_names[v])
diff --git a/torch/_dynamo/resume_execution.py b/torch/_dynamo/resume_execution.py
index 88899d5..96f9bac 100644
--- a/torch/_dynamo/resume_execution.py
+++ b/torch/_dynamo/resume_execution.py
@@ -354,7 +354,7 @@
@classmethod
def lookup(cls, code, lineno, *key):
if code not in cls.cache:
- cls.cache[code] = dict()
+ cls.cache[code] = {}
key = tuple(key)
if key not in cls.cache[code]:
cls.cache[code][key] = cls.generate(code, lineno, *key)
@@ -422,7 +422,7 @@
"co_qualname"
] = f"{module_name}.{TORCH_DYNAMO_RESUME_IN_PREFIX}_{co_name}_at_{lineno}"
code_options["co_firstlineno"] = lineno
- code_options["co_cellvars"] = tuple()
+ code_options["co_cellvars"] = ()
code_options["co_freevars"] = freevars
code_options["co_argcount"] = len(args)
code_options["co_posonlyargcount"] = 0
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index 3ba69b1..ca2cf6d 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -1681,7 +1681,7 @@
def BUILD_LIST_UNPACK(self, inst, cls=ListVariable):
seqs = self.popn(inst.argval)
- items = list()
+ items = []
for seq in seqs:
try:
items.extend(seq.unpack_var_sequence(self))
@@ -1703,7 +1703,7 @@
items = self.popn(inst.argval)
# ensure everything is a dict
items = [BuiltinVariable(dict).call_function(self, [x], {}) for x in items]
- result = dict()
+ result = {}
for x in items:
assert isinstance(x, ConstDictVariable)
result.update(x.items)
@@ -2455,7 +2455,7 @@
self.symbolic_locals
)
- self._freevars_ids = dict()
+ self._freevars_ids = {}
for name in self.code_options["co_freevars"]:
if name in f_locals:
self._freevars_ids[name] = id(f_locals[name])
diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py
index 96191ea..02d86d4 100644
--- a/torch/_dynamo/testing.py
+++ b/torch/_dynamo/testing.py
@@ -57,8 +57,8 @@
# f"High loss value alert - {loss:.2f}. Can result in unstable gradients."
# )
- grads = dict()
- params = dict()
+ grads = {}
+ params = {}
for name, param in model.named_parameters():
if isinstance(model, eval_frame.OptimizedModule):
name = remove_optimized_module_prefix(name)
@@ -71,7 +71,7 @@
params[name] = param_copy
results.append(grads)
results.append(params)
- buffers = dict()
+ buffers = {}
for name, buffer in model.named_buffers():
if isinstance(model, eval_frame.OptimizedModule):
name = remove_optimized_module_prefix(name)
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py
index 1b4f0fc..9342acb 100644
--- a/torch/_dynamo/trace_rules.py
+++ b/torch/_dynamo/trace_rules.py
@@ -2841,7 +2841,7 @@
@functools.lru_cache(None)
def get_torch_obj_rule_map():
- d: Dict[Any, VariableTracker] = dict()
+ d: Dict[Any, VariableTracker] = {}
for m in torch_name_rule_map:
for k, v in m.items(): # type: ignore[attr-defined]
if ".py#" not in k:
@@ -2998,7 +2998,7 @@
@FunctionIdSet
def _numpy_function_ids() -> Dict[int, str]:
- rv = dict()
+ rv = {}
for mod in NP_SUPPORTED_MODULES:
rv.update(
{
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index d9a3e61..7c813f4 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -78,7 +78,7 @@
np.random: tnp.random,
}
else:
- NP_SUPPORTED_MODULES = tuple()
+ NP_SUPPORTED_MODULES = ()
NP_TO_TNP_MODULE = {}
from torch._subclasses.fake_tensor import FakeTensor, is_fake, maybe_get_fake_mode
@@ -463,8 +463,8 @@
"""Similar to weakref.WeakKeyDictionary, but use `is`/`id` rather than `==` to compare equality"""
def __init__(self):
- self.values = dict()
- self.refs = dict()
+ self.values = {}
+ self.refs = {}
def __getitem__(self, key):
return self.values[id(key)]
@@ -1144,10 +1144,10 @@
)
-dict_keys: Type[KeysView[Any]] = type(dict().keys())
-dict_values: Type[ValuesView[Any]] = type(dict().values())
+dict_keys: Type[KeysView[Any]] = type({}.keys())
+dict_values: Type[ValuesView[Any]] = type({}.values())
odict_values: Type[ValuesView[Any]] = type(collections.OrderedDict().values())
-tuple_iterator: Type[Iterator[Any]] = type(iter(tuple()))
+tuple_iterator: Type[Iterator[Any]] = type(iter(()))
tuple_iterator_len = tuple_iterator.__length_hint__ # type: ignore[attr-defined]
object_new = object.__new__
@@ -1610,7 +1610,7 @@
guard_failures: DefaultDict[Any, List[Any]] = collections.defaultdict(list)
# Keep a record of graph break reasons for logging
-graph_break_reasons: List["torch._dynamo.output_graph.GraphCompileReason"] = list()
+graph_break_reasons: List["torch._dynamo.output_graph.GraphCompileReason"] = []
# keep record of compiled code, if we are in "error if recompile"
# to track code that dynamo has compiled previously
diff --git a/torch/_dynamo/variables/base.py b/torch/_dynamo/variables/base.py
index 1b2c056..472b98c 100644
--- a/torch/_dynamo/variables/base.py
+++ b/torch/_dynamo/variables/base.py
@@ -146,7 +146,7 @@
Walk value and call fn on all the VariableTracker instances
"""
if cache is None:
- cache = dict()
+ cache = {}
idx = id(value)
if idx in cache:
diff --git a/torch/_dynamo/variables/functions.py b/torch/_dynamo/variables/functions.py
index 5b38122..b017a4f 100644
--- a/torch/_dynamo/variables/functions.py
+++ b/torch/_dynamo/variables/functions.py
@@ -55,7 +55,7 @@
def init_cellvars(parent, result, code):
- closure_cells = dict()
+ closure_cells = {}
side_effects = parent.output.side_effects
# for name in itertools.chain(code.co_cellvars, code.co_freevars):
diff --git a/torch/_dynamo/variables/lazy.py b/torch/_dynamo/variables/lazy.py
index fb4f5cf..04be291 100644
--- a/torch/_dynamo/variables/lazy.py
+++ b/torch/_dynamo/variables/lazy.py
@@ -95,7 +95,7 @@
Walk an object and realize all LazyVariableTrackers inside it.
"""
if cache is None:
- cache = dict()
+ cache = {}
idx = id(value)
if idx in cache:
diff --git a/torch/_dynamo/variables/misc.py b/torch/_dynamo/variables/misc.py
index 8ad0ccf..2b84c14 100644
--- a/torch/_dynamo/variables/misc.py
+++ b/torch/_dynamo/variables/misc.py
@@ -276,7 +276,7 @@
# a free variable that we actually DO have the runtime
# value for
# tuple(make_cell(ComptimeVar(i)) for i in fn.closure.items)
- tuple(),
+ (),
)
func(ComptimeContext(tx))
else:
@@ -580,7 +580,7 @@
for x in args
)
proxy = tx.output.create_proxy(
- "call_function", torch.autograd.function.FunctionCtx, tuple(), {}
+ "call_function", torch.autograd.function.FunctionCtx, (), {}
)
out = tx.output.side_effects.track_object_new(
None,
diff --git a/torch/_dynamo/variables/nn_module.py b/torch/_dynamo/variables/nn_module.py
index d216e8b..909bde7 100644
--- a/torch/_dynamo/variables/nn_module.py
+++ b/torch/_dynamo/variables/nn_module.py
@@ -456,7 +456,7 @@
mod_proxy = tx.output.create_proxy(
"get_attr",
self.module_key,
- tuple(),
+ (),
{},
)
set_example_value(mod_proxy.node, module)
diff --git a/torch/_dynamo/variables/optimizer.py b/torch/_dynamo/variables/optimizer.py
index 9676475..62f09f9 100644
--- a/torch/_dynamo/variables/optimizer.py
+++ b/torch/_dynamo/variables/optimizer.py
@@ -130,7 +130,7 @@
all_uninitialized = True
all_cuda = True
- for p in group.get("params", list()):
+ for p in group.get("params", []):
all_cuda &= p.is_cuda
all_uninitialized &= p not in self.value.state
diff --git a/torch/_dynamo/variables/user_defined.py b/torch/_dynamo/variables/user_defined.py
index 4bfd0e7..4f125eb 100644
--- a/torch/_dynamo/variables/user_defined.py
+++ b/torch/_dynamo/variables/user_defined.py
@@ -239,7 +239,7 @@
and "__subclasses__" not in self.value.__dict__
):
options = {"mutable_local": MutableLocal()}
- subs_as_vars: List[VariableTracker] = list()
+ subs_as_vars: List[VariableTracker] = []
for sub in self.value.__subclasses__():
source = AttrSource(tx.import_source(sub.__module__), sub.__name__)
subs_as_vars.append(
diff --git a/torch/_export/converter.py b/torch/_export/converter.py
index d762fdb..564bdaa 100644
--- a/torch/_export/converter.py
+++ b/torch/_export/converter.py
@@ -129,19 +129,19 @@
"""
# A map from a block to its expected to be lifted arguments.
- blocks_to_lifted_attrs: Dict[torch._C.Block, Set[str]] = dict()
+ blocks_to_lifted_attrs: Dict[torch._C.Block, Set[str]] = {}
# Reference map stores the input (i.e., src) and output (i.e., dest) IR of a
# GetAttr node. By traversing this reference map, we can figure out the
# full IR aliasing pass and figure out the FQN of an attribute.
# E.g., %2 = GetAttr(linear)[%1] --> node_to_parent_map["%2"] = "%1"
- node_to_parent_map: Dict[str, str] = dict()
+ node_to_parent_map: Dict[str, str] = {}
# Used for reconstructing the FQN of an attribute based on the reference map.
# In nutshell, for each GetAttr call, GetAttr(input IR, attribute name) -> output IR
# This name map stores which attribute name is called for a src IR --> dest IR action.
# E.g., %2 = GetAttr(linear)[%1] --> node_to_attr_name["%2"] = "linear"
- node_to_attr_name: Dict[str, str] = dict()
+ node_to_attr_name: Dict[str, str] = {}
def _dfs_get_attr_dependency(entry):
"""
@@ -674,7 +674,7 @@
subgraph_nodes = []
for block in node.blocks():
subgraph_converter = TS2FXGraphConverter(
- block, dict(), dict(), self.blocks_to_lifted_attrs, dict()
+ block, {}, {}, self.blocks_to_lifted_attrs, {}
)
subgraph_converter.constant_map = self.constant_map
subgraph_converter.name_to_attribute_fqn = self.name_to_attribute_fqn
@@ -915,14 +915,14 @@
self.name_to_param_map: Dict[str, torch.Tensor] = (
dict(ts_model.named_parameters())
if isinstance(ts_model, torch.jit.ScriptModule)
- else dict()
+ else {}
)
self.name_to_buffer_map: Dict[str, torch.Tensor] = (
dict(ts_model.named_buffers())
if isinstance(ts_model, torch.jit.ScriptModule)
- else dict()
+ else {}
)
- self.name_to_non_tensor_attributes: Dict[str, Any] = dict()
+ self.name_to_non_tensor_attributes: Dict[str, Any] = {}
self.lift_tensor_constants_to_buffer()
diff --git a/torch/_functorch/_aot_autograd/traced_function_transforms.py b/torch/_functorch/_aot_autograd/traced_function_transforms.py
index 169e6c3..943094f 100644
--- a/torch/_functorch/_aot_autograd/traced_function_transforms.py
+++ b/torch/_functorch/_aot_autograd/traced_function_transforms.py
@@ -226,7 +226,7 @@
if config.functionalize_rng_ops:
PhiloxStateTracker.mark_beginning_of_backward()
- backward_out: Tuple[Tensor, ...] = tuple()
+ backward_out: Tuple[Tensor, ...] = ()
# Call the backwards pass
if grad_primals:
with fx_traceback.preserve_node_meta():
@@ -765,7 +765,7 @@
if not isinstance(out, (tuple, list)):
raise RuntimeError(
- "Graph output must be a tuple(). This is so that we can avoid "
+ "Graph output must be a (). This is so that we can avoid "
"pytree processing of the outputs. Please change the module to "
"have tuple outputs or use aot_module instead."
)
diff --git a/torch/_functorch/partitioners.py b/torch/_functorch/partitioners.py
index 2aba4e7..e4e427f 100644
--- a/torch/_functorch/partitioners.py
+++ b/torch/_functorch/partitioners.py
@@ -645,7 +645,7 @@
joint_graph_rng_ops = get_rng_ops(joint_module)
fw_graph_rng_ops = get_rng_ops(fw_module)
bw_graph_rng_ops = get_rng_ops(bw_module)
- recomputable_rng_ops_map = dict()
+ recomputable_rng_ops_map = {}
for node in joint_module.graph.nodes:
if (
must_recompute(node)
diff --git a/torch/_guards.py b/torch/_guards.py
index 9dc736d..c063386 100644
--- a/torch/_guards.py
+++ b/torch/_guards.py
@@ -274,7 +274,7 @@
def set_export_info(self, guard_type, guarded_class, code_list, obj_weakref):
if not self.guard_types:
- self.guard_types = list()
+ self.guard_types = []
self.guard_types.append(guard_type)
diff --git a/torch/_higher_order_ops/triton_kernel_wrap.py b/torch/_higher_order_ops/triton_kernel_wrap.py
index 07c6fab..c40b812 100644
--- a/torch/_higher_order_ops/triton_kernel_wrap.py
+++ b/torch/_higher_order_ops/triton_kernel_wrap.py
@@ -36,9 +36,9 @@
# Use a side table.
# We use two dicts so that fetching both the kernel and id are O(1)
class KernelSideTable:
- id_to_kernel: Dict[int, Any] = dict()
- kernel_to_id: Dict[Any, int] = dict()
- constant_args: Dict[int, Any] = dict()
+ id_to_kernel: Dict[int, Any] = {}
+ kernel_to_id: Dict[Any, int] = {}
+ constant_args: Dict[int, Any] = {}
lock = threading.Lock()
# Returns index on the table
@@ -75,9 +75,9 @@
# Resets the table (only meant to be used in unit tests)
# This is only safe assuming single threaded execution
def reset_table(self) -> None:
- self.id_to_kernel = dict()
- self.kernel_to_id = dict()
- self.constant_args = dict()
+ self.id_to_kernel = {}
+ self.kernel_to_id = {}
+ self.constant_args = {}
kernel_side_table = KernelSideTable()
@@ -174,7 +174,7 @@
context = triton._C.libtriton.ir.context()
target = triton.runtime.driver.active.get_current_target()
backend = triton.compiler.compiler.make_backend(target)
- options = backend.parse_options(dict())
+ options = backend.parse_options({})
triton._C.libtriton.ir.load_dialects(context)
backend.load_dialects(context)
diff --git a/torch/_higher_order_ops/while_loop.py b/torch/_higher_order_ops/while_loop.py
index baf5500..4924e1f 100644
--- a/torch/_higher_order_ops/while_loop.py
+++ b/torch/_higher_order_ops/while_loop.py
@@ -118,7 +118,7 @@
# Currently, additional_inputs is not a user-facing input. It will be automatically set in dynamo.
# parameters and buffers accessed in cond_fn or body_fn or tensor closures will become additional_inputs.
- additional_inputs: Tuple = tuple()
+ additional_inputs: Tuple = ()
if torch.compiler.is_dynamo_compiling():
return while_loop_op(cond_fn, body_fn, carried_inputs, additional_inputs)
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index b68c594..63c6956 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -1562,7 +1562,7 @@
@clear_on_fresh_inductor_cache
class CudaKernelParamCache:
- cache: Dict[str, Dict[str, str]] = dict()
+ cache: Dict[str, Dict[str, str]] = {}
cache_clear = staticmethod(cache.clear)
@classmethod
@@ -2909,8 +2909,8 @@
@clear_on_fresh_inductor_cache
class PyCodeCache:
- cache: Dict[str, ModuleType] = dict()
- linemaps: Dict[str, List[Tuple[Any, ...]]] = dict()
+ cache: Dict[str, ModuleType] = {}
+ linemaps: Dict[str, List[Tuple[Any, ...]]] = {}
cache_clear = staticmethod(cache.clear)
@classmethod
@@ -3200,7 +3200,7 @@
input_path: str
output_path: str
- cache: Dict[str, CacheEntry] = dict()
+ cache: Dict[str, CacheEntry] = {}
cache_clear = staticmethod(cache.clear)
_SOURCE_CODE_SUFFIX = "cu"
@@ -3285,7 +3285,7 @@
input_path: str
output_path: str
- cache: Dict[str, CacheEntry] = dict()
+ cache: Dict[str, CacheEntry] = {}
cache_clear = staticmethod(cache.clear)
_SOURCE_CODE_SUFFIX = "cpp"
_logged_compiler_version = False
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index d574f55..6319651 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -1184,10 +1184,10 @@
return odict[name]
def __init__(self, sizevars=None):
- self.input_buffers = dict()
- self.output_buffers = dict()
- self.inplace_buffers = dict()
- self.sizevars = sizevars or dict()
+ self.input_buffers = {}
+ self.output_buffers = {}
+ self.inplace_buffers = {}
+ self.sizevars = sizevars or {}
self.workspace_arg = None
def __repr__(self):
@@ -1615,7 +1615,7 @@
# key: the buffer to write
# value: the buffer to read and whose memory can be reused for
# the buffer specified by key
- self.inplace_update_buffers = dict()
+ self.inplace_update_buffers = {}
# Set minimum number of elements processed per thread.
self.min_elem_per_thread = 1
self.kernel_name = None
diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py
index fb0422b..4dbb97e 100644
--- a/torch/_inductor/codegen/cpp_wrapper_cpu.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py
@@ -239,7 +239,7 @@
# mark output type to unwrap tensor back to python scalar
from ..ir import ShapeAsConstantBuffer
- output_is_tensor = dict()
+ output_is_tensor = {}
for idx, x in enumerate(V.graph.graph_outputs):
if isinstance(x, ShapeAsConstantBuffer):
output_is_tensor[idx] = False
diff --git a/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py b/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py
index 1125838..a41fa62 100644
--- a/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py
+++ b/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py
@@ -61,7 +61,7 @@
self.output = IndentedBuffer(0)
self.var_counter = 0
self.evt_type_name = evt_type_name
- self.aliases = dict()
+ self.aliases = {}
@staticmethod
def ir_to_evt_string(
@@ -243,7 +243,7 @@
self.var_counter: int = (
0 # used to generate variable names, incremented for each new variable
)
- self.aliases: Dict[str, str] = dict() # Aliases for subexpression functors
+ self.aliases: Dict[str, str] = {} # Aliases for subexpression functors
@staticmethod
def ir_to_evt_argument_string(
diff --git a/torch/_inductor/codegen/cuda/cutlass_utils.py b/torch/_inductor/codegen/cuda/cutlass_utils.py
index 04866fe..69aa7f3 100644
--- a/torch/_inductor/codegen/cuda/cutlass_utils.py
+++ b/torch/_inductor/codegen/cuda/cutlass_utils.py
@@ -157,7 +157,7 @@
arch,
version,
)
- return list()
+ return []
arch = _normalize_cuda_arch(arch)
args = CUTLASSArgs(architectures=arch, cuda_version=version)
manifest = cutlass_manifest.Manifest(args)
diff --git a/torch/_inductor/codegen/cuda/gemm_template.py b/torch/_inductor/codegen/cuda/gemm_template.py
index 3a7dccf..764ee77 100644
--- a/torch/_inductor/codegen/cuda/gemm_template.py
+++ b/torch/_inductor/codegen/cuda/gemm_template.py
@@ -744,7 +744,7 @@
import cutlass_library.library as cutlass_lib
ops = cutlass_utils.gen_ops()[cutlass_lib.OperationKind.Gemm]
- res: Dict[str, cutlass_gemm_op.GemmOperation] = dict()
+ res: Dict[str, cutlass_gemm_op.GemmOperation] = {}
for op_dict in ops.values():
for op_list in op_dict.values():
for op in op_list:
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index fea0828..9cb6c26 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -509,7 +509,7 @@
self.freed: Set[BufferName] = set()
# maps from reusing buffer to reused buffer
- self.reuses: Dict[BufferName, BufferName] = dict()
+ self.reuses: Dict[BufferName, BufferName] = {}
self.write_get_raw_stream = functools.lru_cache(None)( # type: ignore[assignment]
self.write_get_raw_stream
@@ -905,7 +905,7 @@
del async_compile
"""
)
- scope = dict() # type: ignore[var-annotated]
+ scope = {} # type: ignore[var-annotated]
tuning_code = (
self.kernel_autotune_defs.getvalue() + self.kernel_autotune_calls.getvalue()
)
diff --git a/torch/_inductor/dependencies.py b/torch/_inductor/dependencies.py
index 6028be9..5d5f54a 100644
--- a/torch/_inductor/dependencies.py
+++ b/torch/_inductor/dependencies.py
@@ -456,7 +456,7 @@
# TODO: check call sites
def var_builder(prefix: str) -> Tuple[VarRanges, Callable[[sympy.Expr], sympy.Symbol]]:
cnt = itertools.count()
- var_ranges: VarRanges = dict()
+ var_ranges: VarRanges = {}
def add_var(length: sympy.Expr) -> sympy.Symbol:
v = sympy_index_symbol(f"{prefix}{next(cnt)}")
diff --git a/torch/_inductor/fx_passes/group_batch_fusion.py b/torch/_inductor/fx_passes/group_batch_fusion.py
index 35f1b52..4f64c7e 100644
--- a/torch/_inductor/fx_passes/group_batch_fusion.py
+++ b/torch/_inductor/fx_passes/group_batch_fusion.py
@@ -112,8 +112,8 @@
raise NotImplementedError("fuse called on base")
-PRE_GRAD_FUSIONS: Dict[str, GroupBatchFusionBase] = dict()
-POST_GRAD_FUSIONS: Dict[str, GroupBatchFusionBase] = dict()
+PRE_GRAD_FUSIONS: Dict[str, GroupBatchFusionBase] = {}
+POST_GRAD_FUSIONS: Dict[str, GroupBatchFusionBase] = {}
def register_fusion(name: str, pre_grad=True):
diff --git a/torch/_inductor/fx_passes/split_cat.py b/torch/_inductor/fx_passes/split_cat.py
index 9c7f94d..c059adb 100644
--- a/torch/_inductor/fx_passes/split_cat.py
+++ b/torch/_inductor/fx_passes/split_cat.py
@@ -40,8 +40,8 @@
_Range: TypeAlias = Tuple[int, int]
-PRE_GRAD_PATTERNS: Dict[str, PatternMatcherPass] = dict()
-POST_GRAD_PATTERNS: Dict[str, PatternMatcherPass] = dict()
+PRE_GRAD_PATTERNS: Dict[str, PatternMatcherPass] = {}
+POST_GRAD_PATTERNS: Dict[str, PatternMatcherPass] = {}
pre_grad_pass_names = [
"normalization_pass",
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index a9ffb5c..8912a5a 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -4860,7 +4860,7 @@
def __init__(self, *, kernel_idx, grid, kernel_args):
inputs = []
- kwargs = dict()
+ kwargs = {}
constant_args = []
for k, v in kernel_args.items():
if isinstance(v, TensorBox):
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index 3e06086..84b0a9b 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -82,7 +82,7 @@
needs_realized_inputs: Set[torch._ops.OpOverload] = set()
foreach_ops: Set[torch._ops.OpOverload] = set()
inplace_foreach_ops: Set[torch._ops.OpOverload] = set()
-inplaceable_foreach_ops: Dict[torch._ops.OpOverload, torch._ops.OpOverload] = dict()
+inplaceable_foreach_ops: Dict[torch._ops.OpOverload, torch._ops.OpOverload] = {}
quantized_decomposed = torch.ops.quantized_decomposed
diff --git a/torch/_inductor/pattern_matcher.py b/torch/_inductor/pattern_matcher.py
index f255ef9..60e023b 100644
--- a/torch/_inductor/pattern_matcher.py
+++ b/torch/_inductor/pattern_matcher.py
@@ -1870,7 +1870,7 @@
def _args(n: torch.fx.Node) -> List[torch.fx.node.Argument]:
- args: List[torch.fx.node.Argument] = list()
+ args: List[torch.fx.node.Argument] = []
torch.fx.map_arg((n.args, n.kwargs), args.append)
return args
diff --git a/torch/_inductor/runtime/hints.py b/torch/_inductor/runtime/hints.py
index a7bd600..90f9e5a 100644
--- a/torch/_inductor/runtime/hints.py
+++ b/torch/_inductor/runtime/hints.py
@@ -68,7 +68,7 @@
instance_descriptor = collections.namedtuple( # type: ignore[no-redef]
"instance_descriptor",
["divisible_by_16", "equal_to_1", "ids_of_folded_args", "divisible_by_8"],
- defaults=[tuple(), tuple(), tuple(), tuple()],
+ defaults=[(), (), (), ()],
)
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index df4ee6d..4529ec3 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -1544,7 +1544,7 @@
items: Optional[List[T]] = None,
membership: Optional[Set[T]] = None,
) -> None:
- self.items = items or list()
+ self.items = items or []
self.membership = membership or set()
def append(self, node_user: T) -> None:
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index d1d199a..3eaefb5 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -61,7 +61,7 @@
log = logging.getLogger(__name__)
# correctness checks struggle with fp16/tf32
-VERIFY: Dict[str, Any] = dict()
+VERIFY: Dict[str, Any] = {}
PRINT_AUTOTUNE = True
DEBUG = False
@@ -154,7 +154,7 @@
self.prefix_args = prefix_args
self.suffix_args = suffix_args
self.epilogue_fn = epilogue_fn
- self.render_hooks = dict() # type: ignore[var-annotated]
+ self.render_hooks = {} # type: ignore[var-annotated]
self.triton_meta: Optional[Dict[str, object]] = None
# For Templated Attention this can be a list of ir.Subgraph
self.subgraphs: Optional[List[ir.ComputedBuffer]] = subgraphs
@@ -575,7 +575,7 @@
class TritonTemplate(KernelTemplate):
index_counter = itertools.count()
- all_templates: Dict[str, "TritonTemplate"] = dict()
+ all_templates: Dict[str, "TritonTemplate"] = {}
def __init__(self, name: str, grid: Any, source: str, debug=False):
super().__init__(name)
diff --git a/torch/_inductor/sizevars.py b/torch/_inductor/sizevars.py
index f152866..5ed1cec 100644
--- a/torch/_inductor/sizevars.py
+++ b/torch/_inductor/sizevars.py
@@ -56,8 +56,8 @@
# which potentially could have already had a precomputed replacement
# on it, we are obligated to invert the precomputed replacements
# (inv_precomputed_replacements).
- self.precomputed_replacements: Dict[Expr, sympy.Symbol] = dict()
- self.inv_precomputed_replacements: Dict[sympy.Symbol, Expr] = dict()
+ self.precomputed_replacements: Dict[Expr, sympy.Symbol] = {}
+ self.inv_precomputed_replacements: Dict[sympy.Symbol, Expr] = {}
self.stride_vars = self.make_stride_vars_cache()
self.simplify_with_ranges = self.make_simplify_with_ranges_cache()
self._simplify_loops = self.make_simplify_loops_cache()
@@ -69,7 +69,7 @@
"""
self._simplify_with_ranges() can be expensive, cache its results
"""
- cache: Dict[Tuple[Any, ...], Expr] = dict()
+ cache: Dict[Tuple[Any, ...], Expr] = {}
replacement_count = len(self.replacements)
def simplify_with_ranges(expr: Expr, var_ranges: VarRanges) -> Expr:
@@ -91,7 +91,7 @@
"""
self._simplify_with_ranges() can be expensive, cache its results
"""
- cache: Dict[Tuple[Any, ...], Any] = dict()
+ cache: Dict[Tuple[Any, ...], Any] = {}
replacement_count = len(self.replacements)
def simplify_loops(index_vars, sizes, index_formulas):
diff --git a/torch/_lazy/device_context.py b/torch/_lazy/device_context.py
index bc47835..e09fdab 100644
--- a/torch/_lazy/device_context.py
+++ b/torch/_lazy/device_context.py
@@ -6,7 +6,7 @@
class DeviceContext:
- _CONTEXTS: Dict[str, Any] = dict()
+ _CONTEXTS: Dict[str, Any] = {}
_CONTEXTS_LOCK = threading.Lock()
def __init__(self, device):
diff --git a/torch/_logging/_internal.py b/torch/_logging/_internal.py
index 74af65b..a10a81c 100644
--- a/torch/_logging/_internal.py
+++ b/torch/_logging/_internal.py
@@ -656,7 +656,7 @@
@functools.lru_cache
def _parse_log_settings(settings):
if settings == "":
- return dict()
+ return {}
if settings == "help":
raise ValueError(help_message(verbose=False))
diff --git a/torch/_numpy/_funcs_impl.py b/torch/_numpy/_funcs_impl.py
index 93f8a8a..0b40f40 100644
--- a/torch/_numpy/_funcs_impl.py
+++ b/torch/_numpy/_funcs_impl.py
@@ -748,7 +748,7 @@
N = len(dimensions)
shape = (1,) * N
if sparse:
- res = tuple()
+ res = ()
else:
res = torch.empty((N,) + dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index 44484f8..2a6438c 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -3785,9 +3785,7 @@
@register_decomposition(aten.roll)
@out_wrapper()
-def roll(
- a: TensorLikeType, shifts: DimsType, dims: DimsType = tuple()
-) -> TensorLikeType:
+def roll(a: TensorLikeType, shifts: DimsType, dims: DimsType = ()) -> TensorLikeType:
"""Reference implementation of :func:`torch.roll`."""
dims = utils.canonicalize_dims(a.ndim, dims)
# ATen specifies int[1] type for shifts and dims which expands integers to tuples of length 1
@@ -3947,7 +3945,7 @@
lambda: "Dimension specified as 0 but tensor has no dimensions",
)
if guard_size_oblivious(t.shape[dim] == 0):
- return tuple()
+ return ()
else:
return tuple(
torch.squeeze(s, dim) for s in torch.tensor_split(t, t.shape[dim], dim)
diff --git a/torch/_refs/nn/functional/__init__.py b/torch/_refs/nn/functional/__init__.py
index 8383d88..a1221ea 100644
--- a/torch/_refs/nn/functional/__init__.py
+++ b/torch/_refs/nn/functional/__init__.py
@@ -1102,7 +1102,7 @@
weight = weight[0] if weight.ndim == 1 else weight
else:
weight = prims.broadcast_in_dim(
- weight, a.shape, tuple() if weight.ndim == 0 else (0 if a.ndim == 1 else 1,)
+ weight, a.shape, () if weight.ndim == 0 else (0 if a.ndim == 1 else 1,)
)
return torch.where(a > 0, a, a * weight)
diff --git a/torch/autograd/__init__.py b/torch/autograd/__init__.py
index d8088dd..5c68b94 100644
--- a/torch/autograd/__init__.py
+++ b/torch/autograd/__init__.py
@@ -274,7 +274,7 @@
if isinstance(inputs, (torch.Tensor, graph.GradientEdge))
else tuple(inputs)
if inputs is not None
- else tuple()
+ else ()
)
grad_tensors_ = _tensor_or_tensors_to_tuple(grad_tensors, len(tensors))
diff --git a/torch/autograd/functional.py b/torch/autograd/functional.py
index 8cf3955..df8b7f8 100644
--- a/torch/autograd/functional.py
+++ b/torch/autograd/functional.py
@@ -179,8 +179,8 @@
assert isinstance(grad_outputs, tuple)
assert len(outputs) == len(grad_outputs)
- new_outputs: Tuple[torch.Tensor, ...] = tuple()
- new_grad_outputs: Tuple[torch.Tensor, ...] = tuple()
+ new_outputs: Tuple[torch.Tensor, ...] = ()
+ new_grad_outputs: Tuple[torch.Tensor, ...] = ()
for out, grad_out in zip(outputs, grad_outputs):
if out is not None and out.requires_grad:
new_outputs += (out,)
@@ -209,7 +209,7 @@
if stage not in ["back", "back_trick", "double_back", "double_back_trick"]:
raise RuntimeError(f"Invalid stage argument '{stage}' to _fill_in_zeros")
- res: Tuple[torch.Tensor, ...] = tuple()
+ res: Tuple[torch.Tensor, ...] = ()
for i, grads_i in enumerate(grads):
if grads_i is None:
if strict:
@@ -778,7 +778,7 @@
jacobian_output_input, (is_outputs_tuple, is_inputs_tuple)
)
- jacobian: Tuple[torch.Tensor, ...] = tuple()
+ jacobian: Tuple[torch.Tensor, ...] = ()
for i, out in enumerate(outputs):
# mypy complains that expression and variable have different types due to the empty list
diff --git a/torch/cuda/_sanitizer.py b/torch/cuda/_sanitizer.py
index bf72f27..f9ce311 100644
--- a/torch/cuda/_sanitizer.py
+++ b/torch/cuda/_sanitizer.py
@@ -481,7 +481,7 @@
def __init__(self):
self.dataptrs_read: Set[DataPtr] = set()
self.dataptrs_written: Set[DataPtr] = set()
- self.tensor_aliases: Dict[DataPtr, List[str]] = dict()
+ self.tensor_aliases: Dict[DataPtr, List[str]] = {}
self.outputs: Set[DataPtr] = set()
def _handle_argument(
diff --git a/torch/distributed/_composable/replicate.py b/torch/distributed/_composable/replicate.py
index 6ba70cf..391022a 100644
--- a/torch/distributed/_composable/replicate.py
+++ b/torch/distributed/_composable/replicate.py
@@ -66,7 +66,7 @@
assert self._init_args is not None
self.init(*self._init_args, **self._init_kwargs)
self.register_comm_hook()
- self._init_args = tuple()
+ self._init_args = ()
self._init_kwargs = {}
_lazy_init()
diff --git a/torch/distributed/_spmd/graph_utils.py b/torch/distributed/_spmd/graph_utils.py
index d48e1c0..dede019 100644
--- a/torch/distributed/_spmd/graph_utils.py
+++ b/torch/distributed/_spmd/graph_utils.py
@@ -87,7 +87,7 @@
This API currently does not support inserting after ``target``.
"""
all_nodes = set(subgraph)
- mapping: Dict[fx.Node, fx.Node] = dict()
+ mapping: Dict[fx.Node, fx.Node] = {}
cloned_subgraph = []
with graph.inserting_before(target):
for node in subgraph:
diff --git a/torch/distributed/_spmd/iter_graph_module.py b/torch/distributed/_spmd/iter_graph_module.py
index cd5f934..ce49c59 100644
--- a/torch/distributed/_spmd/iter_graph_module.py
+++ b/torch/distributed/_spmd/iter_graph_module.py
@@ -668,7 +668,7 @@
self._iter = 0
self._max_iters = max_iters
- self._previous_output: Tuple[Any, ...] = tuple()
+ self._previous_output: Tuple[Any, ...] = ()
self._num_extra_output = 0
self._is_frozen = False
self._enable_inductor = enable_inductor
diff --git a/torch/distributed/_state_dict_utils.py b/torch/distributed/_state_dict_utils.py
index cb9def7..4223e42 100644
--- a/torch/distributed/_state_dict_utils.py
+++ b/torch/distributed/_state_dict_utils.py
@@ -90,7 +90,7 @@
device: Optional[torch.device] = None,
cpu_offload: bool = False,
companion_obj: Any = None,
- ranks_only: Tuple[int, ...] = tuple(),
+ ranks_only: Tuple[int, ...] = (),
type_check: bool = True,
non_blocking: bool = True,
) -> Dict[str, Any]:
@@ -207,7 +207,7 @@
pg: Optional[dist.ProcessGroup] = None,
device: Optional[torch.device] = None,
cpu_offload: bool = False,
- ranks_only: Tuple[int, ...] = tuple(),
+ ranks_only: Tuple[int, ...] = (),
type_check: bool = True,
) -> Dict[str, Any]:
"""
@@ -292,7 +292,7 @@
def _offload_state_dict_to_cpu(
state_dict: Dict[str, Any],
*,
- ranks_only: Tuple[int, ...] = tuple(),
+ ranks_only: Tuple[int, ...] = (),
type_check: bool = True,
) -> Dict[str, Any]:
"""
@@ -371,7 +371,7 @@
pg=None,
device=None,
cpu_offload=False,
- ranks_only=tuple(),
+ ranks_only=(),
companion_obj=copy_state_dict,
type_check=type_check,
non_blocking=non_blocking,
@@ -430,7 +430,7 @@
pg=None,
device=None,
cpu_offload=False,
- ranks_only=tuple(),
+ ranks_only=(),
type_check=False,
)
return ret
@@ -468,7 +468,7 @@
pg=None,
device=None,
cpu_offload=False,
- ranks_only=tuple(),
+ ranks_only=(),
companion_obj=compared_state_dict,
type_check=False,
)
diff --git a/torch/distributed/checkpoint/filesystem.py b/torch/distributed/checkpoint/filesystem.py
index d78829f..78afb33 100644
--- a/torch/distributed/checkpoint/filesystem.py
+++ b/torch/distributed/checkpoint/filesystem.py
@@ -574,7 +574,7 @@
return fut
def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None:
- storage_md = dict()
+ storage_md = {}
for wr_list in results:
storage_md.update({wr.index: wr.storage_data for wr in wr_list})
metadata.storage_data = storage_md
@@ -620,21 +620,21 @@
super().__init__()
self.fs = FileSystem()
self.path = self.fs.init_path(path)
- self.storage_data: Dict[MetadataIndex, _StorageInfo] = dict()
+ self.storage_data: Dict[MetadataIndex, _StorageInfo] = {}
self.load_id = _generate_uuid()
def _slice_file(self, file, sinfo: _StorageInfo) -> io.IOBase:
return _create_file_view(file, sinfo.offset, sinfo.length)
def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None:
- self.storage_data = dict()
+ self.storage_data = {}
if checkpoint_id:
self.path = self.fs.init_path(checkpoint_id)
self.load_id = _generate_uuid()
def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]:
# group requests by file
- per_file: Dict[str, List[ReadItem]] = dict()
+ per_file: Dict[str, List[ReadItem]] = {}
for read_item in plan.items:
item_md = self.storage_data[read_item.storage_index]
path = item_md.relative_path
diff --git a/torch/distributed/checkpoint/state_dict.py b/torch/distributed/checkpoint/state_dict.py
index 6bdeb38..e104bc9 100644
--- a/torch/distributed/checkpoint/state_dict.py
+++ b/torch/distributed/checkpoint/state_dict.py
@@ -445,7 +445,7 @@
) -> Dict[str, Any]:
if info.full_state_dict:
ranks_only = (
- tuple()
+ ()
if (not info.cpu_offload or not torch.distributed.is_initialized())
else (0,)
)
@@ -973,7 +973,7 @@
with _gc_context():
info = _verify_options(
model,
- tuple(),
+ (),
optim_only=False,
submodules=submodules,
options=options,
@@ -1183,7 +1183,7 @@
model, model_state_dict
)
with _gc_context():
- info = _verify_options(model, tuple(), optim_only=False, options=options)
+ info = _verify_options(model, (), optim_only=False, options=options)
_verify_state_dict(model_state_dict, {}, info)
return _load_model_state_dict(model, model_state_dict, info)
diff --git a/torch/distributed/fsdp/_init_utils.py b/torch/distributed/fsdp/_init_utils.py
index aaeedf2..830eeb4 100644
--- a/torch/distributed/fsdp/_init_utils.py
+++ b/torch/distributed/fsdp/_init_utils.py
@@ -476,7 +476,7 @@
state._unshard_event = None
# Mapping from fully sharded module to the handles it is responsible to
# unshard and reshard (see [Note: Fully Sharded Module])
- _fully_sharded_module_to_handle: Dict[nn.Module, FlatParamHandle] = dict()
+ _fully_sharded_module_to_handle: Dict[nn.Module, FlatParamHandle] = {}
state._fully_sharded_module_to_handle = _fully_sharded_module_to_handle
# Invariant: `state.params` contains exactly the `FlatParameter`s of the
# handles in `state._handle`
diff --git a/torch/distributed/pipelining/_IR.py b/torch/distributed/pipelining/_IR.py
index 81ddeb8..53ce989 100644
--- a/torch/distributed/pipelining/_IR.py
+++ b/torch/distributed/pipelining/_IR.py
@@ -771,7 +771,7 @@
# A list of param referrals for deferred deletion.
# To be accumulated in `move_param_to_callee`.
- to_delete = list()
+ to_delete = []
def _recursive_getattr_with_parent(mod, fqn):
# Returns getattr call given a nested FQN, and the last parent
diff --git a/torch/distributed/rpc/backend_registry.py b/torch/distributed/rpc/backend_registry.py
index a06f027..4a34dfa 100644
--- a/torch/distributed/rpc/backend_registry.py
+++ b/torch/distributed/rpc/backend_registry.py
@@ -40,7 +40,7 @@
# Create an enum type, `BackendType`, with empty members.
# Can't handle Function Enum API (mypy bug #9079)
-BackendType = enum.Enum(value="BackendType", names=dict()) # type: ignore[misc]
+BackendType = enum.Enum(value="BackendType", names={}) # type: ignore[misc]
# Unable to assign a function a method (mypy bug #2427)
BackendType.__repr__ = _backend_type_repr # type: ignore[assignment]
diff --git a/torch/fx/experimental/recording.py b/torch/fx/experimental/recording.py
index 1c384d9..ebed7d5 100644
--- a/torch/fx/experimental/recording.py
+++ b/torch/fx/experimental/recording.py
@@ -104,8 +104,8 @@
return ShapeEnv(**self.kwargs)
assert shape_env is not None
- args = list(self.args or list())
- kwargs = dict(self.kwargs or dict())
+ args = list(self.args or [])
+ kwargs = dict(self.kwargs or {})
# Replace any argument of type ShapeEnv by the given one.
args, kwargs = pytree.tree_map_only(
diff --git a/torch/fx/experimental/unification/multipledispatch/utils.py b/torch/fx/experimental/unification/multipledispatch/utils.py
index 0e90241..77702e8 100644
--- a/torch/fx/experimental/unification/multipledispatch/utils.py
+++ b/torch/fx/experimental/unification/multipledispatch/utils.py
@@ -75,7 +75,7 @@
result = OrderedDict() # type: ignore[var-annotated]
for key in d:
for val in d[key]:
- result[val] = result.get(val, tuple()) + (key, )
+ result[val] = result.get(val, ()) + (key,)
return result
@@ -97,7 +97,7 @@
for item in seq:
key = func(item)
if key not in d:
- d[key] = list()
+ d[key] = []
d[key].append(item)
return d
diff --git a/torch/fx/experimental/unification/utils.py b/torch/fx/experimental/unification/utils.py
index 2147d61..609fe59 100644
--- a/torch/fx/experimental/unification/utils.py
+++ b/torch/fx/experimental/unification/utils.py
@@ -76,7 +76,7 @@
result = {} # type: ignore[var-annotated]
for key in d:
for val in d[key]:
- result[val] = result.get(val, tuple()) + (key, )
+ result[val] = result.get(val, ()) + (key,)
return result
diff --git a/torch/fx/passes/infra/partitioner.py b/torch/fx/passes/infra/partitioner.py
index 58e4e9d..e308ab8 100644
--- a/torch/fx/passes/infra/partitioner.py
+++ b/torch/fx/passes/infra/partitioner.py
@@ -18,7 +18,7 @@
class Partition:
def __init__(self, id: Optional[int] = None, nodes: Optional[Iterable[Node]] = None):
self.id = id
- self.nodes = {node: None for node in nodes} if nodes is not None else dict()
+ self.nodes = {node: None for node in nodes} if nodes is not None else {}
def __repr__(self) -> str:
return str(self.nodes)
diff --git a/torch/fx/passes/utils/fuser_utils.py b/torch/fx/passes/utils/fuser_utils.py
index cc26dea..324e8a6 100644
--- a/torch/fx/passes/utils/fuser_utils.py
+++ b/torch/fx/passes/utils/fuser_utils.py
@@ -24,7 +24,7 @@
if indegree_map[node] == 0:
candidates.put(node)
- sorted_nodes: NodeList = list()
+ sorted_nodes: NodeList = []
while not candidates.empty():
node = candidates.get()
sorted_nodes.append(node)
@@ -47,7 +47,7 @@
partition_set = set(partition)
- outputs: NodeList = list()
+ outputs: NodeList = []
for node in partition_set:
for user_node in node.users:
if user_node not in partition_set:
diff --git a/torch/fx/passes/utils/matcher_utils.py b/torch/fx/passes/utils/matcher_utils.py
index a698068..56b9d96 100644
--- a/torch/fx/passes/utils/matcher_utils.py
+++ b/torch/fx/passes/utils/matcher_utils.py
@@ -164,7 +164,7 @@
return True
def _remove_overlapping_matches(self, matches: List[InternalMatch]) -> List[InternalMatch]:
- non_overlapping_matches: List[InternalMatch] = list()
+ non_overlapping_matches: List[InternalMatch] = []
nodes_matched: Set[Node] = set()
for match in matches:
diff --git a/torch/jit/_recursive.py b/torch/jit/_recursive.py
index fc37237..b8dc0ec 100644
--- a/torch/jit/_recursive.py
+++ b/torch/jit/_recursive.py
@@ -103,7 +103,7 @@
def jit_ignored_properties(module):
user_annotated_ignored_attributes = getattr(
- module, "__jit_ignored_attributes__", list()
+ module, "__jit_ignored_attributes__", []
)
def get_properties_names(module):
@@ -206,7 +206,7 @@
# Get user-annotated ignored attributes.
user_annotated_ignored_attributes = getattr(
- nn_module, "__jit_ignored_attributes__", list()
+ nn_module, "__jit_ignored_attributes__", []
)
concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes)
ignored_properties = jit_ignored_properties(nn_module)
@@ -574,7 +574,7 @@
hook_stubs, pre_hook_stubs = get_hook_stubs(nn_module)
user_annotated_ignored_attributes = getattr(
- nn_module, "__jit_ignored_attributes__", list()
+ nn_module, "__jit_ignored_attributes__", []
)
ignored_properties = jit_ignored_properties(nn_module)
@@ -841,7 +841,7 @@
"""
check_module_initialized(nn_module)
user_annotated_ignored_attributes = getattr(
- nn_module, "__jit_ignored_attributes__", list()
+ nn_module, "__jit_ignored_attributes__", []
)
ignored_properties = jit_ignored_properties(nn_module)
diff --git a/torch/nn/modules/module.py b/torch/nn/modules/module.py
index 6531f84..16b562a 100644
--- a/torch/nn/modules/module.py
+++ b/torch/nn/modules/module.py
@@ -493,8 +493,8 @@
super().__setattr__ for all other attributes.
"""
super().__setattr__("training", True)
- super().__setattr__("_parameters", dict())
- super().__setattr__("_buffers", dict())
+ super().__setattr__("_parameters", {})
+ super().__setattr__("_buffers", {})
super().__setattr__("_non_persistent_buffers_set", set())
super().__setattr__("_backward_pre_hooks", OrderedDict())
super().__setattr__("_backward_hooks", OrderedDict())
@@ -508,7 +508,7 @@
super().__setattr__("_state_dict_pre_hooks", OrderedDict())
super().__setattr__("_load_state_dict_pre_hooks", OrderedDict())
super().__setattr__("_load_state_dict_post_hooks", OrderedDict())
- super().__setattr__("_modules", dict())
+ super().__setattr__("_modules", {})
if self.call_super_init:
super().__init__(*args, **kwargs)
@@ -2870,7 +2870,7 @@
# replicas do not have parameters themselves, the replicas reference the original
# module.
- replica._parameters = dict()
+ replica._parameters = {}
replica._buffers = replica._buffers.copy()
replica._modules = replica._modules.copy()
replica._is_replica = True # type: ignore[assignment]
diff --git a/torch/nn/parallel/_functions.py b/torch/nn/parallel/_functions.py
index fa04bbc..cce3f04 100644
--- a/torch/nn/parallel/_functions.py
+++ b/torch/nn/parallel/_functions.py
@@ -17,7 +17,7 @@
target_gpus = [_get_device_index(x, True) for x in target_gpus]
ctx.target_gpus = target_gpus
if len(inputs) == 0:
- return tuple()
+ return ()
ctx.num_inputs = len(inputs)
ctx.input_device = inputs[0].get_device()
outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
diff --git a/torch/nn/utils/prune.py b/torch/nn/utils/prune.py
index b39bb2f..ccf7a2c 100644
--- a/torch/nn/utils/prune.py
+++ b/torch/nn/utils/prune.py
@@ -271,7 +271,7 @@
"""
def __init__(self, *args):
- self._pruning_methods: Tuple[BasePruningMethod, ...] = tuple()
+ self._pruning_methods: Tuple[BasePruningMethod, ...] = ()
if not isinstance(args, Iterable): # only 1 item
self._tensor_name = args._tensor_name
self.add_pruning_method(args)
diff --git a/torch/onnx/_internal/fx/onnxfunction_dispatcher.py b/torch/onnx/_internal/fx/onnxfunction_dispatcher.py
index 3886733..432e13c 100644
--- a/torch/onnx/_internal/fx/onnxfunction_dispatcher.py
+++ b/torch/onnx/_internal/fx/onnxfunction_dispatcher.py
@@ -809,7 +809,7 @@
import onnx
onnx_inputs: List[Any] = []
- onnx_attributes: Dict[str, Any] = dict()
+ onnx_attributes: Dict[str, Any] = {}
# NOTE: We need to copy kwargs because we will mutate it
copy_kwargs = kwargs.copy()
for i, param in enumerate(param_schemas):
diff --git a/torch/onnx/_internal/onnx_proto_utils.py b/torch/onnx/_internal/onnx_proto_utils.py
index 40eb1bd..975abe3 100644
--- a/torch/onnx/_internal/onnx_proto_utils.py
+++ b/torch/onnx/_internal/onnx_proto_utils.py
@@ -230,7 +230,7 @@
# Iterate graph nodes to insert only the included custom
# function_proto into model_proto
- onnx_function_list = list() # type: ignore[var-annotated]
+ onnx_function_list = [] # type: ignore[var-annotated]
included_node_func = set() # type: Set[str]
# onnx_function_list and included_node_func are expanded in-place
_find_onnxscript_op(
diff --git a/torch/onnx/_internal/onnxruntime.py b/torch/onnx/_internal/onnxruntime.py
index d8a7e55..93efaa7 100644
--- a/torch/onnx/_internal/onnxruntime.py
+++ b/torch/onnx/_internal/onnxruntime.py
@@ -804,7 +804,7 @@
def _select_eps(
self, graph_module: torch.fx.GraphModule, *args
) -> Sequence[Tuple[str, Mapping[str, Any]]]:
- inferred_eps: Tuple[str, ...] = tuple()
+ inferred_eps: Tuple[str, ...] = ()
if self._options.infer_execution_providers:
if eps_from_args := _infer_ep_from_device(*args):
# If user feeds CUDA tensor as input argument,
diff --git a/torch/onnx/symbolic_helper.py b/torch/onnx/symbolic_helper.py
index 70a4dd2..6c33073 100644
--- a/torch/onnx/symbolic_helper.py
+++ b/torch/onnx/symbolic_helper.py
@@ -368,7 +368,7 @@
return descriptor and _is_value(arg) and _is_tuple_construct(arg)
# Run regular symbolic function if none of the argument is QTensor.
- is_quantized = list()
+ is_quantized = []
for descriptor, arg in descriptor_args:
# ListConstruct
if _is_packed_list(arg):
@@ -1801,7 +1801,7 @@
@_beartype.beartype
def symbolic(g, self, dim=None, keepdim=None):
self = _maybe_cast_reduce_op_input(g, self)
- if dim is None or dim == tuple():
+ if dim is None or dim == ():
# Dim can be 0, which will cause (not dim) == True. So we don't want to do
# (not dim)
# all-reduce path
diff --git a/torch/onnx/symbolic_opset9.py b/torch/onnx/symbolic_opset9.py
index f43a09a..ffd65f9 100644
--- a/torch/onnx/symbolic_opset9.py
+++ b/torch/onnx/symbolic_opset9.py
@@ -3634,7 +3634,7 @@
dtype = _type_utils.JitScalarType.from_value(
symbolic_helper._unpack_list(data)[0]
)
- input_list = list()
+ input_list = []
for t in symbolic_helper._unpack_list(data):
shape_reference = g.op("Constant", value_t=torch.LongTensor([1]))
t = symbolic_helper._reshape_helper(g, t, shape_reference)
@@ -4300,7 +4300,7 @@
else:
raise errors.SymbolicValueError("repeats must be 0-dim or 1-dim tensor", self)
- final_splits = list()
+ final_splits = []
r_splits = symbolic_helper._repeat_interleave_split_helper(g, repeats, reps, 0)
i_splits = symbolic_helper._repeat_interleave_split_helper(g, self, reps, dim)
input_sizes[dim], input_sizes_temp[dim] = -1, 1
diff --git a/torch/overrides.py b/torch/overrides.py
index b96088b..c10a480 100644
--- a/torch/overrides.py
+++ b/torch/overrides.py
@@ -1041,18 +1041,18 @@
lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1 # noqa: B950
),
torch.quantized_max_pool1d: (
- lambda input, kernel_size, stride=tuple(), padding=(0,), dilation=(
+ lambda input, kernel_size, stride=(), padding=(0,), dilation=(
1,
), ceil_mode=False: -1
),
torch.quantized_max_pool2d: (
- lambda input, kernel_size, stride=tuple(), padding=(0, 0), dilation=(
+ lambda input, kernel_size, stride=(), padding=(0, 0), dilation=(
1,
1,
), ceil_mode=False: -1
),
torch.quantized_max_pool3d: (
- lambda input, kernel_size, stride=tuple(), padding=(0, 0, 0), dilation=(
+ lambda input, kernel_size, stride=(), padding=(0, 0, 0), dilation=(
1,
1,
1,
diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py
index 2fd3ab9..47faac8 100644
--- a/torch/profiler/profiler.py
+++ b/torch/profiler/profiler.py
@@ -138,7 +138,7 @@
self.use_device = _get_privateuse1_backend_name()
# user-defined metadata to be amended to the trace
- self.preset_metadata: Dict[str, str] = dict()
+ self.preset_metadata: Dict[str, str] = {}
def start(self):
self.prepare_trace()
diff --git a/torch/sparse/_triton_ops_meta.py b/torch/sparse/_triton_ops_meta.py
index eedfa03..ef61a1c 100644
--- a/torch/sparse/_triton_ops_meta.py
+++ b/torch/sparse/_triton_ops_meta.py
@@ -282,7 +282,7 @@
return dict(zip(sorted(parameters), key))
if all_values is None:
- all_values = dict()
+ all_values = {}
directions = list(range(-max_step, max_step + 1))
names = sorted(initial_parameters)
diff --git a/torch/testing/_internal/common_device_type.py b/torch/testing/_internal/common_device_type.py
index e93ffc4..43816fe 100644
--- a/torch/testing/_internal/common_device_type.py
+++ b/torch/testing/_internal/common_device_type.py
@@ -705,7 +705,7 @@
def get_device_type_test_bases():
# set type to List[Any] due to mypy list-of-union issue:
# https://github.com/python/mypy/issues/3351
- test_bases: List[Any] = list()
+ test_bases: List[Any] = []
if IS_SANDCASTLE or IS_FBCODE:
if IS_REMOTE_GPU:
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 9e728fc..e854540 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -569,7 +569,7 @@
yield from reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs)
def sample_kwargs_prelu_scalar_weight(device, dtype, input):
- weight = torch.rand(tuple(), device=device, dtype=dtype)
+ weight = torch.rand((), device=device, dtype=dtype)
# NumPy does not support bfloat16, so we default to float32 (only for NumPy) in that case
if dtype == torch.bfloat16:
weight_cpu = weight.to(dtype=torch.float32, device="cpu")
@@ -580,7 +580,7 @@
def error_inputs_prelu(op, device):
# Weight has numel != 1, but self.ndim is zero-dim tensor
- inp = make_tensor(tuple(), device=device, dtype=torch.float32)
+ inp = make_tensor((), device=device, dtype=torch.float32)
weight = make_tensor((2,), device=device, dtype=torch.float32)
yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}),
error_regex="Not allow zero-dim input tensor.")
@@ -4518,7 +4518,7 @@
err_msg1 = "Expected normalized_shape to be at least 1-dimensional"
s1 = SampleInput(
- make_arg(input_shape), args=(tuple(), None, None, 1e-5)
+ make_arg(input_shape), args=((), None, None, 1e-5)
)
yield ErrorInput(s1, error_regex=err_msg1)
@@ -4549,7 +4549,7 @@
err_msg1 = "Expected normalized_shape to be at least 1-dimensional"
s1 = SampleInput(
- make_arg(input_shape), args=(tuple(), None, 1e-5)
+ make_arg(input_shape), args=((), None, 1e-5)
)
yield ErrorInput(s1, error_regex=err_msg1)
@@ -8616,19 +8616,19 @@
# shape mismatch
(make_input(3, 5), (make_input(3, 4), make_input(3, 4)),
- dict(),
+ {},
RuntimeError,
(r'(Attempting to broadcast a dimension of length|'
r"The size of tensor a \(5\) must match the size of tensor b \(4\) "
r"at non-singleton dimension 1)")),
(make_input(3, 4), (make_input(3, 5), make_input(3, 4)),
- dict(),
+ {},
RuntimeError,
(r'(Attempting to broadcast a dimension of length|'
r"The size of tensor a \(4\) must match the size of tensor b \(5\) "
r"at non-singleton dimension 1)")),
(make_input(3, 4), (make_input(3, 4), make_input(3, 5)),
- dict(),
+ {},
RuntimeError,
(r'(Attempting to broadcast a dimension of length|'
r"The size of tensor a \(4\) must match the size of tensor b \(5\) "
@@ -8636,19 +8636,19 @@
# different dimensions
(make_input(3,), (make_input(3, 4), make_input(3, 4)),
- dict(),
+ {},
RuntimeError,
(r"The anchor, positive, and negative tensors are expected to have "
r"the same number of dimensions, but got: anchor 1D, positive 2D, "
r"and negative 2D inputs")),
(make_input(3, 4), (make_input(3,), make_input(3, 4)),
- dict(),
+ {},
RuntimeError,
(r"The anchor, positive, and negative tensors are expected to have "
r"the same number of dimensions, but got: anchor 2D, positive 1D, "
r"and negative 2D inputs")),
(make_input(3, 4), (make_input(3, 4), make_input(3,)),
- dict(),
+ {},
RuntimeError,
(r"The anchor, positive, and negative tensors are expected to have "
r"the same number of dimensions, but got: anchor 2D, positive 2D, "
diff --git a/torch/testing/_internal/common_nn.py b/torch/testing/_internal/common_nn.py
index 0505c74..b785d85 100644
--- a/torch/testing/_internal/common_nn.py
+++ b/torch/testing/_internal/common_nn.py
@@ -3278,7 +3278,7 @@
if jacobian_parameters:
jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0)
- res: Tuple[torch.Tensor, ...] = tuple()
+ res: Tuple[torch.Tensor, ...] = ()
if jacobian_input:
res += jacobian_inp,
if jacobian_parameters:
@@ -3290,7 +3290,7 @@
def fw(*input):
return self._forward(module, input).detach()
- res: Tuple[torch.Tensor, ...] = tuple()
+ res: Tuple[torch.Tensor, ...] = ()
if jacobian_input:
res += _get_numerical_jacobian(fw, input, eps=1e-6),
if jacobian_parameters:
@@ -3331,7 +3331,7 @@
for name in self._required_arg_names:
if name not in kwargs and name + '_fn' not in kwargs and name + '_size' not in kwargs:
if name in {'constructor_args', 'extra_args'}:
- kwargs[name] = tuple()
+ kwargs[name] = ()
else:
raise ValueError(f"{self.get_name()}: Specify {name} by a value, a function to generate it, or it's size!")
self._extra_kwargs = kwargs
diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py
index 05547dd..1663842 100644
--- a/torch/testing/_internal/common_optimizers.py
+++ b/torch/testing/_internal/common_optimizers.py
@@ -1027,7 +1027,7 @@
ErrorOptimizerInput(
OptimizerInput(
params=[torch.rand(2, 3, device=device, dtype=torch.complex64)],
- kwargs=dict(),
+ kwargs={},
desc="complex not supported",
),
error_type=ValueError,
diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py
index 0ec5dd2..508c968 100644
--- a/torch/testing/_internal/distributed/distributed_test.py
+++ b/torch/testing/_internal/distributed/distributed_test.py
@@ -1740,8 +1740,8 @@
rank = dist.get_rank()
send_recv_size = 10
tensor = _build_tensor(send_recv_size, value=rank)
- recv_ranks = list()
- irecv_ranks = list()
+ recv_ranks = []
+ irecv_ranks = []
ctx = profiler_ctx if profiler_ctx is not None else nullcontext()
with ctx as prof:
diff --git a/torch/testing/_internal/opinfo/core.py b/torch/testing/_internal/opinfo/core.py
index efbf3bc..68871c3 100644
--- a/torch/testing/_internal/opinfo/core.py
+++ b/torch/testing/_internal/opinfo/core.py
@@ -680,10 +680,10 @@
# the following metadata are test directives for skipping or modifying tests
# information about which tests to skip
- skips: Tuple = tuple()
+ skips: Tuple = ()
# decorators to apply to generated tests
- decorators: Tuple = tuple()
+ decorators: Tuple = ()
# the following are pointers to functions to generate certain classes of inputs
@@ -1475,7 +1475,7 @@
# TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
# use op_info.generate_args_kwargs directly.
generate_args_kwargs = kwargs.get(
- "generate_args_kwargs", lambda *args, **kwargs: (yield tuple(), {})
+ "generate_args_kwargs", lambda *args, **kwargs: (yield (), {})
)
for t in _generate_reduction_inputs(device, dtype, requires_grad):
@@ -1555,7 +1555,7 @@
# kwargs to use when calling the op. This is required for operators that
# have other required parameters besides the input tensor.
generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (
- yield tuple(),
+ yield (),
{},
),
# Options from the OpInfo base class
@@ -2160,7 +2160,7 @@
"test_numpy_refs",
),
)
- kwargs["skips"] = kwargs.get("skips", tuple()) + common_skips
+ kwargs["skips"] = kwargs.get("skips", ()) + common_skips
super().__init__(
name,
sample_inputs_func=sample_inputs_func,
diff --git a/torch/testing/_internal/opinfo/definitions/linalg.py b/torch/testing/_internal/opinfo/definitions/linalg.py
index 71173ab..7956f5b 100644
--- a/torch/testing/_internal/opinfo/definitions/linalg.py
+++ b/torch/testing/_internal/opinfo/definitions/linalg.py
@@ -807,7 +807,7 @@
# Shapes for 3D Tensors
shapes_3d = ((S, S, S),)
- kwargs_2d = (dict(), dict(offset=2), dict(offset=2), dict(offset=1))
+ kwargs_2d = ({}, dict(offset=2), dict(offset=2), dict(offset=1))
kwargs_3d = (
dict(offset=1, dim1=1, dim2=2),
dict(offset=2, dim1=0, dim2=1),
diff --git a/torch/testing/_internal/opinfo/utils.py b/torch/testing/_internal/opinfo/utils.py
index 34a197e..41973dc 100644
--- a/torch/testing/_internal/opinfo/utils.py
+++ b/torch/testing/_internal/opinfo/utils.py
@@ -103,7 +103,7 @@
# CUDA is not available, dtypes will be empty.
if len(dtypes) == 0:
- return return_type((), str(tuple()))
+ return return_type((), "()")
set_dtypes = set(dtypes)
for dispatch in COMPLETE_DTYPES_DISPATCH:
diff --git a/torch/utils/_config_module.py b/torch/utils/_config_module.py
index 0e548aa..95b93df 100644
--- a/torch/utils/_config_module.py
+++ b/torch/utils/_config_module.py
@@ -52,8 +52,8 @@
else:
raise AssertionError(f"Unhandled config {key}={value} ({type(value)})")
- config: Dict[str, Any] = dict()
- default: Dict[str, Any] = dict()
+ config: Dict[str, Any] = {}
+ default: Dict[str, Any] = {}
compile_ignored_keys = get_assignments_with_compile_ignored_comments(module)
diff --git a/torch/utils/checkpoint.py b/torch/utils/checkpoint.py
index dd91c4f..1c0ff2f 100644
--- a/torch/utils/checkpoint.py
+++ b/torch/utils/checkpoint.py
@@ -769,7 +769,7 @@
class _Holder:
def __init__(self):
- self.handles: Dict[int, Optional[_Handle]] = dict()
+ self.handles: Dict[int, Optional[_Handle]] = {}
class _NoopSaveInputs(torch.autograd.Function):
diff --git a/torchgen/api/python.py b/torchgen/api/python.py
index 1ba93a4..344ed06 100644
--- a/torchgen/api/python.py
+++ b/torchgen/api/python.py
@@ -1230,7 +1230,7 @@
) -> tuple[str, ...]:
cpp_args: Sequence[Binding] = _cpp_signature(f, method=False).arguments()
- exprs: tuple[str, ...] = tuple()
+ exprs: tuple[str, ...] = ()
if not isinstance(python_signature, PythonSignatureDeprecated):
# By default the exprs are consistent with the C++ signature.
exprs = tuple(a.name for a in cpp_args)
diff --git a/torchgen/gen.py b/torchgen/gen.py
index ef4867a..e66107f 100644
--- a/torchgen/gen.py
+++ b/torchgen/gen.py
@@ -2347,7 +2347,7 @@
else:
raise AssertionError(f"unrecognized {dispatch_key} for ufunc")
- structured_func_group_dict = dict()
+ structured_func_group_dict = {}
for func_group in structured_native_functions:
for func in func_group.functions():
if func.structured_delegate is not None:
@@ -2355,7 +2355,7 @@
break
if dispatch_key in (DispatchKey.CPU, DispatchKey.CUDA):
- fallbacks = dict()
+ fallbacks = {}
for func in native_functions:
op_name = get_fallback_op_name(func)
if op_name in inductor_fallback_ops:
diff --git a/torchgen/model.py b/torchgen/model.py
index 30d96c6..05c755b 100644
--- a/torchgen/model.py
+++ b/torchgen/model.py
@@ -1774,7 +1774,7 @@
assert not (
is_write and len(alias_set) > 1
), f"alias set larger than 1 is not mutable, got {ann} instead."
- after_set = tuple(m.group(5).split("|")) if m.group(5) else tuple()
+ after_set = tuple(m.group(5).split("|")) if m.group(5) else ()
assert not (
len(before_alias) > 1 and len(after_set) > 1
), f"before alias set and after alias set cannot be larger than 1 at the same time, got {ann} instead."
@@ -2283,7 +2283,7 @@
# TensorOptions are dropped in signature,
# so we can pair factory functions with their out= variants.
tensor_options=None,
- post_tensor_options_kwarg_only=tuple(),
+ post_tensor_options_kwarg_only=(),
# out arguments are dropped in signature
out=(),
)
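
For reference, a change of this shape is normally produced by ruff's own autofixer rather than by hand. The following is a minimal sketch, not the exact command used for this PR; flag names are real ruff CLI options, but whether `--unsafe-fixes` is needed depends on the ruff version and the repository's lint configuration:
```bash
# Sketch: select only C408 (unnecessary-collection-call) and apply its autofix.
ruff check --select C408 --fix .
# On some ruff versions/configurations the C408 fix is categorized as unsafe
# and additionally requires: --unsafe-fixes
```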