Run Black on all of tools/

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

Pull Request resolved: https://github.com/pytorch/pytorch/pull/76089

Approved by: https://github.com/albanD
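
For reference, a minimal sketch of how this reformatting can be reproduced or verified locally (assuming Black is installed in the environment; the exact version pinned by PyTorch's CI may differ):

    pip install black
    black tools/                  # rewrite files in place
    black --check --diff tools/   # verify only; exits nonzero if reformatting is needed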
diff --git a/tools/test/test_cmake.py b/tools/test/test_cmake.py
index ecbce07..2c4bead 100644
--- a/tools/test/test_cmake.py
+++ b/tools/test/test_cmake.py
@@ -9,49 +9,60 @@
 import tools.setup_helpers.cmake
 
 
-T = typing.TypeVar('T')
+T = typing.TypeVar("T")
 
 
 class TestCMake(unittest.TestCase):
-
-    @unittest.mock.patch('multiprocessing.cpu_count')
+    @unittest.mock.patch("multiprocessing.cpu_count")
     def test_build_jobs(self, mock_cpu_count: unittest.mock.MagicMock) -> None:
         """Tests that the number of build jobs comes out correctly."""
         mock_cpu_count.return_value = 13
         cases = [
             # MAX_JOBS, USE_NINJA, IS_WINDOWS,         want
-            ((     '8',      True,     False),          ['-j', '8']),  # noqa: E201,E241
-            ((    None,      True,     False),                 None),  # noqa: E201,E241
-            ((     '7',     False,     False),          ['-j', '7']),  # noqa: E201,E241
-            ((    None,     False,     False),         ['-j', '13']),  # noqa: E201,E241
-            ((     '6',      True,      True),          ['-j', '6']),  # noqa: E201,E241
-            ((    None,      True,      True),                 None),  # noqa: E201,E241
-            ((    '11',     False,      True), ['/p:CL_MPCount=11']),  # noqa: E201,E241
-            ((    None,     False,      True), ['/p:CL_MPCount=13']),  # noqa: E201,E241
+            (("8", True, False), ["-j", "8"]),  # noqa: E201,E241
+            ((None, True, False), None),  # noqa: E201,E241
+            (("7", False, False), ["-j", "7"]),  # noqa: E201,E241
+            ((None, False, False), ["-j", "13"]),  # noqa: E201,E241
+            (("6", True, True), ["-j", "6"]),  # noqa: E201,E241
+            ((None, True, True), None),  # noqa: E201,E241
+            (("11", False, True), ["/p:CL_MPCount=11"]),  # noqa: E201,E241
+            ((None, False, True), ["/p:CL_MPCount=13"]),  # noqa: E201,E241
         ]
         for (max_jobs, use_ninja, is_windows), want in cases:
-            with self.subTest(MAX_JOBS=max_jobs, USE_NINJA=use_ninja, IS_WINDOWS=is_windows):
+            with self.subTest(
+                MAX_JOBS=max_jobs, USE_NINJA=use_ninja, IS_WINDOWS=is_windows
+            ):
                 with contextlib.ExitStack() as stack:
-                    stack.enter_context(env_var('MAX_JOBS', max_jobs))
-                    stack.enter_context(unittest.mock.patch.object(tools.setup_helpers.cmake, 'USE_NINJA', use_ninja))
-                    stack.enter_context(unittest.mock.patch.object(tools.setup_helpers.cmake, 'IS_WINDOWS', is_windows))
+                    stack.enter_context(env_var("MAX_JOBS", max_jobs))
+                    stack.enter_context(
+                        unittest.mock.patch.object(
+                            tools.setup_helpers.cmake, "USE_NINJA", use_ninja
+                        )
+                    )
+                    stack.enter_context(
+                        unittest.mock.patch.object(
+                            tools.setup_helpers.cmake, "IS_WINDOWS", is_windows
+                        )
+                    )
 
                     cmake = tools.setup_helpers.cmake.CMake()
 
-                    with unittest.mock.patch.object(cmake, 'run') as cmake_run:
+                    with unittest.mock.patch.object(cmake, "run") as cmake_run:
                         cmake.build({})
 
                     cmake_run.assert_called_once()
-                    call, = cmake_run.mock_calls
+                    (call,) = cmake_run.mock_calls
                     build_args, _ = call.args
 
                 if want is None:
-                    self.assertNotIn('-j', build_args)
+                    self.assertNotIn("-j", build_args)
                 else:
                     self.assert_contains_sequence(build_args, want)
 
     @staticmethod
-    def assert_contains_sequence(sequence: Sequence[T], subsequence: Sequence[T]) -> None:
+    def assert_contains_sequence(
+        sequence: Sequence[T], subsequence: Sequence[T]
+    ) -> None:
         """Raises an assertion if the subsequence is not contained in the sequence."""
         if len(subsequence) == 0:
             return  # all sequences contain the empty subsequence
@@ -63,7 +74,7 @@
             assert len(candidate) == len(subsequence)  # sanity check
             if candidate == subsequence:
                 return  # found it
-        raise AssertionError(f'{subsequence} not found in {sequence}')
+        raise AssertionError(f"{subsequence} not found in {sequence}")
 
 
 @contextlib.contextmanager
diff --git a/tools/test/test_codegen.py b/tools/test/test_codegen.py
index 0dded01..4e48424 100644
--- a/tools/test/test_codegen.py
+++ b/tools/test/test_codegen.py
@@ -6,70 +6,75 @@
 from tools.autograd import load_derivatives
 import tools.codegen.model
 
-class TestCreateDerivative(unittest.TestCase):
 
+class TestCreateDerivative(unittest.TestCase):
     def test_named_grads(self) -> None:
         schema = tools.codegen.model.FunctionSchema.parse(
-            'func(Tensor a, Tensor b) -> (Tensor x, Tensor y)')
-        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
-                                              func=schema)
+            "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
+        )
+        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
 
         derivative = load_derivatives.create_derivative(
             native_function,
-            formula='func_backward(grad_x, grad_y)',
+            formula="func_backward(grad_x, grad_y)",
             var_names=(),
-            available_named_gradients=['grad_x', 'grad_y'])
-        self.assertSetEqual(derivative.named_gradients, {'grad_x', 'grad_y'})
+            available_named_gradients=["grad_x", "grad_y"],
+        )
+        self.assertSetEqual(derivative.named_gradients, {"grad_x", "grad_y"})
 
     def test_non_differentiable_output(self) -> None:
-        specification = 'func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)'
+        specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)"
         schema = tools.codegen.model.FunctionSchema.parse(specification)
-        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
-                                              func=schema)
+        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
 
         differentiability_info = load_derivatives.create_differentiability_info(
-            defn={'name': specification,
-                  'a': 'grads[0]',
-                  'b': 'grads[2]',
-                  },
+            defn={
+                "name": specification,
+                "a": "grads[0]",
+                "b": "grads[2]",
+            },
             functions_by_signature={schema.signature(): [native_function]},
             functions_by_schema={specification: native_function},
             op_counter=typing.Counter[str](),
         )
 
-        self.assertSequenceEqual(differentiability_info.available_named_gradients,
-                                 # grad_y is not present because y is a
-                                 # bool and thus not differentiable.
-                                 ['grad_x', 'grad_z'])
+        self.assertSequenceEqual(
+            differentiability_info.available_named_gradients,
+            # grad_y is not present because y is a
+            # bool and thus not differentiable.
+            ["grad_x", "grad_z"],
+        )
 
     def test_indexed_grads(self) -> None:
         schema = tools.codegen.model.FunctionSchema.parse(
-            'func(Tensor a, Tensor b) -> (Tensor x, Tensor y)')
-        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
-                                              func=schema)
+            "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
+        )
+        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
 
         derivative = load_derivatives.create_derivative(
             native_function,
-            formula='func_backward(grads[0], grads[1])',
+            formula="func_backward(grads[0], grads[1])",
             var_names=(),
-            available_named_gradients=['grad_x', 'grad_y'])
+            available_named_gradients=["grad_x", "grad_y"],
+        )
         self.assertSetEqual(derivative.named_gradients, set())
 
     def test_named_grads_and_indexed_grads(self) -> None:
-        specification = 'func(Tensor a, Tensor b) -> (Tensor x, Tensor y)'
+        specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
         schema = tools.codegen.model.FunctionSchema.parse(specification)
-        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
-                                              func=schema)
+        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
 
-        with self.assertRaisesRegex(RuntimeError,
-                                    'illegally mixes use of "grad_RETURN_NAME"'):
+        with self.assertRaisesRegex(
+            RuntimeError, 'illegally mixes use of "grad_RETURN_NAME"'
+        ):
             load_derivatives.create_differentiability_info(
-                defn={'name': specification,
-                      # Uh-oh, the derivatives reference gradients by
-                      # name and by index.
-                      'a': 'grad_x',
-                      'b': 'grads[1]',
-                      },
+                defn={
+                    "name": specification,
+                    # Uh-oh, the derivatives reference gradients by
+                    # name and by index.
+                    "a": "grad_x",
+                    "b": "grads[1]",
+                },
                 functions_by_signature={schema.signature(): [native_function]},
                 functions_by_schema={specification: native_function},
                 op_counter=typing.Counter[str](),
@@ -78,60 +83,59 @@
 
 class TestGenAutogradFunctions(unittest.TestCase):
     def test_non_differentiable_output_invalid_type(self) -> None:
-        specification = 'func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)'
+        specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)"
         schema = tools.codegen.model.FunctionSchema.parse(specification)
-        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
-                                              func=schema)
+        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
 
         differentiability_info = load_derivatives.create_differentiability_info(
-            defn={'name': specification,
-                  'a': 'grad_x',
-                  'b': 'grad_z',
-                  },
+            defn={
+                "name": specification,
+                "a": "grad_x",
+                "b": "grad_z",
+            },
             functions_by_signature={schema.signature(): [native_function]},
             functions_by_schema={specification: native_function},
             op_counter=typing.Counter[str](),
         )
         definition = gen_autograd_functions.process_function(
-            differentiability_info,
-            gen_autograd_functions.FUNCTION_DEFINITION)
+            differentiability_info, gen_autograd_functions.FUNCTION_DEFINITION
+        )
         # grad_z should map to grads[1], not grads[2] because output 1
         # (y) is not differentiable.
-        assert 'grad_z = grads[2]' not in definition
-        assert 'grad_z = grads[1]' in definition
-
+        assert "grad_z = grads[2]" not in definition
+        assert "grad_z = grads[1]" in definition
 
     def test_non_differentiable_output_output_differentiability(self) -> None:
-        specification = 'func(Tensor a, Tensor b) -> (Tensor x, Tensor y, Tensor z)'
+        specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y, Tensor z)"
         schema = tools.codegen.model.FunctionSchema.parse(specification)
-        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION,
-                                              func=schema)
+        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
 
         differentiability_info = load_derivatives.create_differentiability_info(
-            defn={'name': specification,
-                  'a': 'grad_x',
-                  'b': 'grad_z',
-                  'output_differentiability': [True, False, True],
-                  },
+            defn={
+                "name": specification,
+                "a": "grad_x",
+                "b": "grad_z",
+                "output_differentiability": [True, False, True],
+            },
             functions_by_signature={schema.signature(): [native_function]},
             functions_by_schema={specification: native_function},
             op_counter=typing.Counter[str](),
         )
         definition = gen_autograd_functions.process_function(
-            differentiability_info,
-            gen_autograd_functions.FUNCTION_DEFINITION)
+            differentiability_info, gen_autograd_functions.FUNCTION_DEFINITION
+        )
         # grad_z should map to grads[1], not grads[2] because output 1
         # (y) is not differentiable.
-        assert 'grad_z = grads[2]' not in definition
-        assert 'grad_z = grads[1]' in definition
+        assert "grad_z = grads[2]" not in definition
+        assert "grad_z = grads[1]" in definition
 
 
 # Represents the most basic NativeFunction. Use dataclasses.replace()
 # to edit for use.
 DEFAULT_NATIVE_FUNCTION, _ = tools.codegen.model.NativeFunction.from_yaml(
-    {'func': 'func() -> bool'},
-    loc=tools.codegen.model.Location(__file__, 1))
+    {"func": "func() -> bool"}, loc=tools.codegen.model.Location(__file__, 1)
+)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tools/test/test_codegen_model.py b/tools/test/test_codegen_model.py
index 50ea595..59f9563 100644
--- a/tools/test/test_codegen_model.py
+++ b/tools/test/test_codegen_model.py
@@ -10,6 +10,7 @@
 import tools.codegen.gen as gen
 from tools.codegen.gen import LineLoader, parse_native_yaml_struct
 
+
 class TestCodegenModel(expecttest.TestCase):
     def assertParseErrorInline(self, yaml_str: str, expect: str) -> None:
         es = yaml.load(yaml_str, Loader=LineLoader)
@@ -17,8 +18,8 @@
             parse_native_yaml_struct(es)
         except AssertionError as e:
             # hack to strip out the context
-            msg, _ = str(e).split('  in ', 2)
-            self.assertExpectedInline('\n'.join(textwrap.wrap(msg)), expect, skip=1)
+            msg, _ = str(e).split("  in ", 2)
+            self.assertExpectedInline("\n".join(textwrap.wrap(msg)), expect, skip=1)
             return
         self.fail(msg="Did not raise when expected to")
 
@@ -26,7 +27,10 @@
         # parse a single structured group out of the yaml to g
         es = yaml.load(yaml_str, Loader=LineLoader)
         parsed_yaml = parse_native_yaml_struct(es)
-        native_functions, backend_indices = parsed_yaml.native_functions, parsed_yaml.backend_indices
+        native_functions, backend_indices = (
+            parsed_yaml.native_functions,
+            parsed_yaml.backend_indices,
+        )
         grouped_native_functions = gen.get_grouped_native_functions(native_functions)
         assert len(grouped_native_functions) == 1
         g = grouped_native_functions[0]
@@ -44,81 +48,98 @@
             dest.compute_ufunc_cuda(g)
         except AssertionError as e:
             # hack to strip out the context
-            msg, _ = str(e).split('  in ', 2)
-            self.assertExpectedInline('\n'.join(textwrap.wrap(msg)), expect, skip=1)
+            msg, _ = str(e).split("  in ", 2)
+            self.assertExpectedInline("\n".join(textwrap.wrap(msg)), expect, skip=1)
             return
         self.fail(msg="Did not raise when expected to")
 
     # NB: indent is hardcoded to be two here, so format your yaml accordingly
-    binop_out = 'func: binop.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)'
-    ti_binop_out = f'''{binop_out}
+    binop_out = (
+        "func: binop.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
+    )
+    ti_binop_out = f"""{binop_out}
   structured: True
-  structured_inherits: TensorIteratorBase'''
-    ti_binop = '''func: binop(Tensor self, Tensor other) -> Tensor
+  structured_inherits: TensorIteratorBase"""
+    ti_binop = """func: binop(Tensor self, Tensor other) -> Tensor
   structured_delegate: binop.out
-'''
+"""
 
-    ti_unop_out = '''func: unop.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    ti_unop_out = """func: unop.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
   structured: True
-  structured_inherits: TensorIteratorBase'''
-    ti_unop = '''func: unop(Tensor self) -> Tensor
+  structured_inherits: TensorIteratorBase"""
+    ti_unop = """func: unop(Tensor self) -> Tensor
   structured_delegate: unop.out
-'''
+"""
 
     def test_nonstructured_ufunc(self) -> None:
-        yaml_str = f'''\
+        yaml_str = f"""\
 - {self.binop_out}
   ufunc_inner_loop:
     Generic: binop (Bool)
-'''
-        self.assertParseErrorInline(yaml_str, '''\
-ufunc must be structured''')
+"""
+        self.assertParseErrorInline(
+            yaml_str,
+            """\
+ufunc must be structured""",
+        )
 
     def test_overlapping_ufunc_and_dispatch(self) -> None:
-        yaml_str = f'''\
+        yaml_str = f"""\
 - {self.ti_binop_out}
   ufunc_inner_loop:
     Generic: binop (Bool)
   dispatch:
     CPU: binop_cpu
-'''
-        self.assertParseErrorInline(yaml_str, '''\
-ufunc should not have explicit dispatch entry for CPU''')
+"""
+        self.assertParseErrorInline(
+            yaml_str,
+            """\
+ufunc should not have explicit dispatch entry for CPU""",
+        )
 
     # See https://github.com/pytorch/pytorch/pull/65851#discussion_r810238456
     @unittest.expectedFailure
     def test_scalaronly_shadowed(self) -> None:
-        yaml_str = f'''\
+        yaml_str = f"""\
 - {self.ti_binop_out}
   ufunc_inner_loop:
     Generic: binop (Bool)
     ScalarOnly: binop (Bool)
-'''
-        self.assertParseErrorInline(yaml_str, '''\
-''')
+"""
+        self.assertParseErrorInline(
+            yaml_str,
+            """\
+""",
+        )
 
     def test_conflicting_ufunc(self) -> None:
-        yaml_str = f'''\
+        yaml_str = f"""\
 - {self.ti_binop_out}
   ufunc_inner_loop:
     Generic: binop (Bool)
     ScalarOnly: binop_scalar (Bool)
 - {self.ti_binop}
-'''
-        self.assertUfuncErrorInline(yaml_str, '''\
-ScalarOnly and Generic must have same ufunc name''')
+"""
+        self.assertUfuncErrorInline(
+            yaml_str,
+            """\
+ScalarOnly and Generic must have same ufunc name""",
+        )
 
     def test_invalid_cudafunctoronself_for_binary_op(self) -> None:
-        yaml_str = f'''\
+        yaml_str = f"""\
 - {self.ti_unop_out}
   ufunc_inner_loop:
     Generic: unop (All)
     CUDAFunctorOnSelf: unop_self_cuda (All)
 - {self.ti_unop}
-'''
-        self.assertUfuncErrorInline(yaml_str, '''\
-cannot use CUDAFunctorOnSelf on non-binary function''')
+"""
+        self.assertUfuncErrorInline(
+            yaml_str,
+            """\
+cannot use CUDAFunctorOnSelf on non-binary function""",
+        )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tools/test/test_extract_scripts.py b/tools/test/test_extract_scripts.py
index 3126893..9914032 100644
--- a/tools/test/test_extract_scripts.py
+++ b/tools/test/test_extract_scripts.py
@@ -2,84 +2,94 @@
 
 from tools import extract_scripts
 
-requirements_sh = '''
+requirements_sh = """
 #!/usr/bin/env bash
 set -eo pipefail
 pip install -r requirements.txt
-'''.strip()
+""".strip()
 
-hello_sh = '''
+hello_sh = """
 #!/usr/bin/env sh
 set -e
 echo hello world
-'''.strip()
+""".strip()
 
 
 class TestExtractScripts(unittest.TestCase):
     def test_extract_none(self) -> None:
         self.assertEqual(
-            extract_scripts.extract({
-                'name': 'Checkout PyTorch',
-                'uses': 'zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9',
-            }),
+            extract_scripts.extract(
+                {
+                    "name": "Checkout PyTorch",
+                    "uses": "zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9",
+                }
+            ),
             None,
         )
 
     def test_extract_run_default_bash(self) -> None:
         self.assertEqual(
-            extract_scripts.extract({
-                'name': 'Install requirements',
-                'run': 'pip install -r requirements.txt',
-            }),
+            extract_scripts.extract(
+                {
+                    "name": "Install requirements",
+                    "run": "pip install -r requirements.txt",
+                }
+            ),
             {
-                'extension': '.sh',
-                'script': requirements_sh,
+                "extension": ".sh",
+                "script": requirements_sh,
             },
         )
 
     def test_extract_run_sh(self) -> None:
         self.assertEqual(
-            extract_scripts.extract({
-                'name': 'Hello world',
-                'run': 'echo hello world',
-                'shell': 'sh',
-            }),
+            extract_scripts.extract(
+                {
+                    "name": "Hello world",
+                    "run": "echo hello world",
+                    "shell": "sh",
+                }
+            ),
             {
-                'extension': '.sh',
-                'script': hello_sh,
+                "extension": ".sh",
+                "script": hello_sh,
             },
         )
 
     def test_extract_run_py(self) -> None:
         self.assertEqual(
-            extract_scripts.extract({
-                'name': 'Hello world',
-                'run': 'print("Hello!")',
-                'shell': 'python',
-            }),
+            extract_scripts.extract(
+                {
+                    "name": "Hello world",
+                    "run": 'print("Hello!")',
+                    "shell": "python",
+                }
+            ),
             {
-                'extension': '.py',
-                'script': 'print("Hello!")',
+                "extension": ".py",
+                "script": 'print("Hello!")',
             },
         )
 
     def test_extract_github_script(self) -> None:
         self.assertEqual(
             # https://github.com/actions/github-script/tree/v3.1.1#reading-step-results
-            extract_scripts.extract({
-                'uses': 'actions/github-script@v3',
-                'id': 'set-result',
-                'with': {
-                    'script': 'return "Hello!"',
-                    'result-encoding': 'string',
-                },
-            }),
+            extract_scripts.extract(
+                {
+                    "uses": "actions/github-script@v3",
+                    "id": "set-result",
+                    "with": {
+                        "script": 'return "Hello!"',
+                        "result-encoding": "string",
+                    },
+                }
+            ),
             {
-                'extension': '.js',
-                'script': 'return "Hello!"',
+                "extension": ".js",
+                "script": 'return "Hello!"',
             },
         )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tools/test/test_gen_backend_stubs.py b/tools/test/test_gen_backend_stubs.py
index 9dae08c36..0024737 100644
--- a/tools/test/test_gen_backend_stubs.py
+++ b/tools/test/test_gen_backend_stubs.py
@@ -9,229 +9,265 @@
 from tools.codegen.gen import _GLOBAL_PARSE_NATIVE_YAML_CACHE  # noqa: F401
 
 path = os.path.dirname(os.path.realpath(__file__))
-gen_backend_stubs_path = os.path.join(path, '../tools/codegen/gen_backend_stubs.py')
+gen_backend_stubs_path = os.path.join(path, "../tools/codegen/gen_backend_stubs.py")
 
 # gen_backend_stubs.py is an integration point that is called directly by external backends.
 # The tests here are to confirm that badly formed inputs result in reasonable error messages.
 class TestGenBackendStubs(expecttest.TestCase):
-
     def setUp(self) -> None:
         global _GLOBAL_PARSE_NATIVE_YAML_CACHE
         _GLOBAL_PARSE_NATIVE_YAML_CACHE.clear()
 
-
     def assert_success_from_gen_backend_stubs(self, yaml_str: str) -> None:
-        with tempfile.NamedTemporaryFile(mode='w') as fp:
+        with tempfile.NamedTemporaryFile(mode="w") as fp:
             fp.write(yaml_str)
             fp.flush()
-            run(fp.name, '', True)
+            run(fp.name, "", True)
 
     def get_errors_from_gen_backend_stubs(self, yaml_str: str) -> str:
-        with tempfile.NamedTemporaryFile(mode='w') as fp:
+        with tempfile.NamedTemporaryFile(mode="w") as fp:
             fp.write(yaml_str)
             fp.flush()
             try:
-                run(fp.name, '', True)
+                run(fp.name, "", True)
             except AssertionError as e:
                 # Scrub out the temp file name from any error messages to simplify assertions.
-                return str(e).replace(fp.name, '')
-            self.fail('Expected gen_backend_stubs to raise an AssertionError, but it did not.')
+                return str(e).replace(fp.name, "")
+            self.fail(
+                "Expected gen_backend_stubs to raise an AssertionError, but it did not."
+            )
 
     def test_valid_single_op(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 supported:
-- abs'''
+- abs"""
         self.assert_success_from_gen_backend_stubs(yaml_str)
 
     def test_valid_multiple_ops(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 supported:
 - add.Tensor
-- abs'''
+- abs"""
         self.assert_success_from_gen_backend_stubs(yaml_str)
 
     def test_valid_zero_ops(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
-supported:'''
+supported:"""
         self.assert_success_from_gen_backend_stubs(yaml_str)
 
     def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: BAD_XLA
 cpp_namespace: torch_xla
-supported:'''
+supported:"""
         # External codegen on a yaml file with no operators is effectively a no-op,
         # so there's no reason to parse the backend
         self.assert_success_from_gen_backend_stubs(yaml_str)
 
     def test_valid_with_autograd_ops(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 supported:
 - abs
 autograd:
-- add.Tensor'''
+- add.Tensor"""
         # External codegen on a yaml file with no operators is effectively a no-op,
         # so there's no reason to parse the backend
         self.assert_success_from_gen_backend_stubs(yaml_str)
 
     def test_missing_backend(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 cpp_namespace: torch_xla
 supported:
-- abs'''
+- abs"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''You must provide a value for "backend"''')
+        self.assertExpectedInline(
+            output_error, '''You must provide a value for "backend"'''
+        )
 
     def test_empty_backend(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend:
 cpp_namespace: torch_xla
 supported:
-- abs'''
+- abs"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''You must provide a value for "backend"''')
+        self.assertExpectedInline(
+            output_error, '''You must provide a value for "backend"'''
+        )
 
     def test_backend_invalid_dispatch_key(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: NOT_XLA
 cpp_namespace: torch_xla
 supported:
-- abs'''
+- abs"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''\
+        self.assertExpectedInline(
+            output_error,
+            """\
 unknown dispatch key NOT_XLA
-  The provided value for "backend" must be a valid DispatchKey, but got NOT_XLA.''')  # noqa: B950
+  The provided value for "backend" must be a valid DispatchKey, but got NOT_XLA.""",
+        )  # noqa: B950
 
     def test_missing_cpp_namespace(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 supported:
-- abs'''
+- abs"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''You must provide a value for "cpp_namespace"''')
+        self.assertExpectedInline(
+            output_error, '''You must provide a value for "cpp_namespace"'''
+        )
 
     def test_whitespace_cpp_namespace(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace:\t
 supported:
-- abs'''
+- abs"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''You must provide a value for "cpp_namespace"''')
+        self.assertExpectedInline(
+            output_error, '''You must provide a value for "cpp_namespace"'''
+        )
 
     # supported is a single item (it should be a list)
     def test_nonlist_supported(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
-supported: abs'''
+supported: abs"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''expected "supported" to be a list, but got: abs (of type <class 'str'>)''')
+        self.assertExpectedInline(
+            output_error,
+            """expected "supported" to be a list, but got: abs (of type <class 'str'>)""",
+        )
 
     # supported contains an op that isn't in native_functions.yaml
     def test_supported_invalid_op(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 supported:
-- abs_BAD'''
+- abs_BAD"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''Found an invalid operator name: abs_BAD''')
+        self.assertExpectedInline(
+            output_error, """Found an invalid operator name: abs_BAD"""
+        )
 
     # The backend is valid, but doesn't have a valid autograd key. They can't override autograd kernels in that case.
     # Only using Vulkan here because it has a valid backend key but not an autograd key- if this changes we can update the test.
     def test_backend_has_no_autograd_key_but_provides_entries(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: Vulkan
 cpp_namespace: torch_vulkan
 supported:
 - add
 autograd:
-- sub'''
+- sub"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''Found an invalid operator name: add''')  # noqa: B950
+        self.assertExpectedInline(
+            output_error, """Found an invalid operator name: add"""
+        )  # noqa: B950
 
     # in an operator group, currently all operators must either be registered to the backend or autograd kernel.
     # Here, functional and out mismatch
     def test_backend_autograd_kernel_mismatch_out_functional(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 supported:
 - add.Tensor
 autograd:
-- add.out'''
+- add.out"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under "supported", but add_out is listed under "autograd".''')  # noqa: B950
+        self.assertExpectedInline(
+            output_error,
+            """Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under "supported", but add_out is listed under "autograd".""",  # noqa: B950
+        )
 
     # in an operator group, currently all operators must either be registered to the backend or autograd kernel.
     # Here, functional and inplace mismatch
     def test_backend_autograd_kernel_mismatch_functional_inplace(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 supported:
 - add.Tensor
 autograd:
-- add_.Tensor'''
+- add_.Tensor"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under "supported", but add_ is listed under "autograd".''')  # noqa: B950
+        self.assertExpectedInline(
+            output_error,
+            """Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under "supported", but add_ is listed under "autograd".""",  # noqa: B950
+        )
 
     # Currently, the same operator can't be listed under both 'supported' and 'autograd', which would
     # involve registering the same kernel to both the XLA and AutogradXLA keys.
     # If we need that functionality in the future, we'll need to augment the codegen.
     def test_op_appears_in_supported_and_autograd_lists(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 supported:
 - add.Tensor
 autograd:
-- add.Tensor'''
+- add.Tensor"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under "supported", but add is listed under "autograd".''')  # noqa: B950
+        self.assertExpectedInline(
+            output_error,
+            """Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under "supported", but add is listed under "autograd".""",  # noqa: B950
+        )
 
     # unrecognized extra yaml key
     def test_unrecognized_key(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 supported:
 - abs
-invalid_key: invalid_val'''
+invalid_key: invalid_val"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, ''' contains unexpected keys: invalid_key. Only the following keys are supported: backend, class_name, cpp_namespace, extra_headers, supported, autograd, full_codegen''')  # noqa: B950
+        self.assertExpectedInline(
+            output_error,
+            """ contains unexpected keys: invalid_key. Only the following keys are supported: backend, class_name, cpp_namespace, extra_headers, supported, autograd, full_codegen""",  # noqa: B950
+        )
 
     # if use_out_as_primary is provided, it must be a bool
     def test_use_out_as_primary_non_bool(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 use_out_as_primary: frue
 supported:
-- abs'''
+- abs"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''You must provide either True or False for use_out_as_primary. Provided: frue''')  # noqa: B950
+        self.assertExpectedInline(
+            output_error,
+            """You must provide either True or False for use_out_as_primary. Provided: frue""",
+        )  # noqa: B950
 
     # if device_guard is provided, it must be a bool
     def test_device_guard_non_bool(self) -> None:
-        yaml_str = '''\
+        yaml_str = """\
 backend: XLA
 cpp_namespace: torch_xla
 device_guard: frue
 supported:
-- abs'''
+- abs"""
         output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
-        self.assertExpectedInline(output_error, '''You must provide either True or False for device_guard. Provided: frue''')  # noqa: B950
+        self.assertExpectedInline(
+            output_error,
+            """You must provide either True or False for device_guard. Provided: frue""",
+        )  # noqa: B950
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tools/test/test_import_test_stats.py b/tools/test/test_import_test_stats.py
index 5a43a7d..ea9aad8 100644
--- a/tools/test/test_import_test_stats.py
+++ b/tools/test/test_import_test_stats.py
@@ -4,48 +4,64 @@
 from typing import List
 from unittest.mock import patch
 
-class TestGetDisabledIssues(unittest.TestCase):
 
-    def run_assert_disabled_issues(self, pr_body: str, commit_messages: str, expected: List[str]) -> None:
-        with patch.dict(os.environ, {"PR_BODY": pr_body, "COMMIT_MESSAGES": commit_messages}):
+class TestGetDisabledIssues(unittest.TestCase):
+    def run_assert_disabled_issues(
+        self, pr_body: str, commit_messages: str, expected: List[str]
+    ) -> None:
+        with patch.dict(
+            os.environ, {"PR_BODY": pr_body, "COMMIT_MESSAGES": commit_messages}
+        ):
             disabled_issues = get_disabled_issues()
         self.assertEqual(disabled_issues, expected)
 
     # test variations of close in PR_BODY
     def test_closes_pr_body(self) -> None:
-        pr_body = 'closes #123 Close #143 ClOsE #345 closed #10283'
-        self.run_assert_disabled_issues(pr_body, '', ['123', '143', '345', '10283'])
+        pr_body = "closes #123 Close #143 ClOsE #345 closed #10283"
+        self.run_assert_disabled_issues(pr_body, "", ["123", "143", "345", "10283"])
 
     # test variations of fix in COMMIT_MESSAGES
     def test_fixes_commit_messages(self) -> None:
-        commit_messages = 'fix #123 FixEd #143 fixes #345 FiXeD #10283'
-        self.run_assert_disabled_issues('', commit_messages, ['123', '143', '345', '10283'])
+        commit_messages = "fix #123 FixEd #143 fixes #345 FiXeD #10283"
+        self.run_assert_disabled_issues(
+            "", commit_messages, ["123", "143", "345", "10283"]
+        )
 
     # test variations of resolve in PR_BODY and COMMIT_MESSAGES
     def test_resolves_pr_commits(self) -> None:
-        pr_body = 'resolve #123 resolveS #143'
-        commit_messages = 'REsolved #345 RESOLVES #10283'
-        self.run_assert_disabled_issues(pr_body, commit_messages, ['123', '143', '345', '10283'])
+        pr_body = "resolve #123 resolveS #143"
+        commit_messages = "REsolved #345 RESOLVES #10283"
+        self.run_assert_disabled_issues(
+            pr_body, commit_messages, ["123", "143", "345", "10283"]
+        )
 
     # test links
     def test_issue_links(self) -> None:
-        pr_body = 'closes https://github.com/pytorch/pytorch/issues/75198 fixes https://github.com/pytorch/pytorch/issues/75123'
-        self.run_assert_disabled_issues(pr_body, '', ['75198', '75123'])
+        pr_body = "closes https://github.com/pytorch/pytorch/issues/75198 fixes https://github.com/pytorch/pytorch/issues/75123"
+        self.run_assert_disabled_issues(pr_body, "", ["75198", "75123"])
 
     # test strange spacing
     def test_spacing(self) -> None:
-        pr_body = 'resolve #123,resolveS #143Resolved #345\nRESOLVES #10283'
-        commit_messages = 'Fixed #2348fixes https://github.com/pytorch/pytorch/issues/75123resolveS #2134'
-        self.run_assert_disabled_issues(pr_body, commit_messages, ['123', '143', '345', '10283', '2348', '75123', '2134'])
+        pr_body = "resolve #123,resolveS #143Resolved #345\nRESOLVES #10283"
+        commit_messages = "Fixed #2348fixes https://github.com/pytorch/pytorch/issues/75123resolveS #2134"
+        self.run_assert_disabled_issues(
+            pr_body,
+            commit_messages,
+            ["123", "143", "345", "10283", "2348", "75123", "2134"],
+        )
 
     # test bad things
     def test_not_accepted(self) -> None:
-        pr_body = 'fixes189 fixeshttps://github.com/pytorch/pytorch/issues/75123 ' \
-            'closedhttps://githubcom/pytorch/pytorch/issues/75123'
-        commit_messages = 'fix 234, fixes # 45, fixing #123, close 234, closes#45, closing #123 resolve 234, ' \
-            'resolves  #45, resolving #123'
+        pr_body = (
+            "fixes189 fixeshttps://github.com/pytorch/pytorch/issues/75123 "
+            "closedhttps://githubcom/pytorch/pytorch/issues/75123"
+        )
+        commit_messages = (
+            "fix 234, fixes # 45, fixing #123, close 234, closes#45, closing #123 resolve 234, "
+            "resolves  #45, resolving #123"
+        )
         self.run_assert_disabled_issues(pr_body, commit_messages, [])
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tools/test/test_mypy_wrapper.py b/tools/test/test_mypy_wrapper.py
index df7b0ab..460e4dd 100644
--- a/tools/test/test_mypy_wrapper.py
+++ b/tools/test/test_mypy_wrapper.py
@@ -5,45 +5,48 @@
 
 class TestMypyWrapper(unittest.TestCase):
     configs = {
-        'foo.ini': {
-            'file1.abc',
-            'dir2',
-            'dir3/file4.xyz',
+        "foo.ini": {
+            "file1.abc",
+            "dir2",
+            "dir3/file4.xyz",
         },
-        'bar/baz.ini': {
-            'file1.abc',
-            'dir2/dir5/file6.def',
-            'dir3/file7.abc',
+        "bar/baz.ini": {
+            "file1.abc",
+            "dir2/dir5/file6.def",
+            "dir3/file7.abc",
         },
     }
 
     trie: mypy_wrapper.Trie = {
-        'file1.abc': {None: {'foo.ini', 'bar/baz.ini'}},
-        'dir2': {
-            None: {'foo.ini'},
-            'dir5': {'file6.def': {None: {'bar/baz.ini'}}},
+        "file1.abc": {None: {"foo.ini", "bar/baz.ini"}},
+        "dir2": {
+            None: {"foo.ini"},
+            "dir5": {"file6.def": {None: {"bar/baz.ini"}}},
         },
-        'dir3': {
-            'file4.xyz': {None: {'foo.ini'}},
-            'file7.abc': {None: {'bar/baz.ini'}},
+        "dir3": {
+            "file4.xyz": {None: {"foo.ini"}},
+            "file7.abc": {None: {"bar/baz.ini"}},
         },
     }
 
     def test_config_files(self) -> None:
-        self.assertEqual(mypy_wrapper.config_files().keys(), {
-            'mypy.ini',
-            'mypy-strict.ini',
-        })
+        self.assertEqual(
+            mypy_wrapper.config_files().keys(),
+            {
+                "mypy.ini",
+                "mypy-strict.ini",
+            },
+        )
 
     def test_split_path(self) -> None:
-        self.assertEqual(mypy_wrapper.split_path('file1.abc'), ['file1.abc'])
+        self.assertEqual(mypy_wrapper.split_path("file1.abc"), ["file1.abc"])
         self.assertEqual(
-            mypy_wrapper.split_path('dir3/file4.xyz'),
-            ['dir3', 'file4.xyz'],
+            mypy_wrapper.split_path("dir3/file4.xyz"),
+            ["dir3", "file4.xyz"],
         )
         self.assertEqual(
-            mypy_wrapper.split_path('dir2/dir5/file6.def'),
-            ['dir2', 'dir5', 'file6.def'],
+            mypy_wrapper.split_path("dir2/dir5/file6.def"),
+            ["dir2", "dir5", "file6.def"],
         )
 
     def test_make_trie(self) -> None:
@@ -51,108 +54,120 @@
 
     def test_lookup(self) -> None:
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'file1.abc'),
-            {'foo.ini', 'bar/baz.ini'},
+            mypy_wrapper.lookup(self.trie, "file1.abc"),
+            {"foo.ini", "bar/baz.ini"},
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir2/dir5/file6.def'),
-            {'foo.ini', 'bar/baz.ini'},
+            mypy_wrapper.lookup(self.trie, "dir2/dir5/file6.def"),
+            {"foo.ini", "bar/baz.ini"},
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir3/file4.xyz'),
-            {'foo.ini'},
+            mypy_wrapper.lookup(self.trie, "dir3/file4.xyz"),
+            {"foo.ini"},
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir3/file7.abc'),
-            {'bar/baz.ini'},
+            mypy_wrapper.lookup(self.trie, "dir3/file7.abc"),
+            {"bar/baz.ini"},
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'file8.xyz'),
+            mypy_wrapper.lookup(self.trie, "file8.xyz"),
             set(),
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir2/dir9/file10.abc'),
-            {'foo.ini'},
+            mypy_wrapper.lookup(self.trie, "dir2/dir9/file10.abc"),
+            {"foo.ini"},
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir3/file11.abc'),
+            mypy_wrapper.lookup(self.trie, "dir3/file11.abc"),
             set(),
         )
 
         # non-leaves shouldn't ever be passed to lookup in practice, but
         # still, good to consider/test these cases
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir2'),
-            {'foo.ini'},
+            mypy_wrapper.lookup(self.trie, "dir2"),
+            {"foo.ini"},
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir2/dir5'),
-            {'foo.ini'},
+            mypy_wrapper.lookup(self.trie, "dir2/dir5"),
+            {"foo.ini"},
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir3'),
+            mypy_wrapper.lookup(self.trie, "dir3"),
             set(),
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir2/dir9'),
-            {'foo.ini'},
+            mypy_wrapper.lookup(self.trie, "dir2/dir9"),
+            {"foo.ini"},
         )
         self.assertEqual(
-            mypy_wrapper.lookup(self.trie, 'dir4'),
+            mypy_wrapper.lookup(self.trie, "dir4"),
             set(),
         )
 
     def test_make_plan(self) -> None:
         self.assertEqual(
-            mypy_wrapper.make_plan(configs=self.configs, files=[
-                'file8.xyz',
-                'dir3/file11.abc',
-            ]),
-            {}
-        )
-        self.assertEqual(
-            mypy_wrapper.make_plan(configs=self.configs, files=[
-                'file8.xyz',
-                'dir2/dir9/file10.abc',
-                'dir3/file4.xyz',
-                'dir3/file11.abc',
-            ]),
-            {
-                'foo.ini': ['dir2/dir9/file10.abc', 'dir3/file4.xyz'],
-            }
-        )
-        self.assertEqual(
-            mypy_wrapper.make_plan(configs=self.configs, files=[
-                'file8.xyz',
-                'dir3/file11.abc',
-                'dir3/file7.abc',
-            ]),
-            {
-                'bar/baz.ini': ['dir3/file7.abc'],
-            }
-        )
-        self.assertEqual(
-            mypy_wrapper.make_plan(configs=self.configs, files=[
-                'dir2/dir9/file10.abc',
-                'dir2/dir5/file6.def',
-                'dir3/file7.abc',
-                'file1.abc',
-                'dir3/file11.abc',
-            ]),
-            {
-                'foo.ini': [
-                    'dir2/dir9/file10.abc',
-                    'dir2/dir5/file6.def',
-                    'file1.abc',
+            mypy_wrapper.make_plan(
+                configs=self.configs,
+                files=[
+                    "file8.xyz",
+                    "dir3/file11.abc",
                 ],
-                'bar/baz.ini': [
-                    'dir2/dir5/file6.def',
-                    'dir3/file7.abc',
-                    'file1.abc',
+            ),
+            {},
+        )
+        self.assertEqual(
+            mypy_wrapper.make_plan(
+                configs=self.configs,
+                files=[
+                    "file8.xyz",
+                    "dir2/dir9/file10.abc",
+                    "dir3/file4.xyz",
+                    "dir3/file11.abc",
                 ],
-            }
+            ),
+            {
+                "foo.ini": ["dir2/dir9/file10.abc", "dir3/file4.xyz"],
+            },
+        )
+        self.assertEqual(
+            mypy_wrapper.make_plan(
+                configs=self.configs,
+                files=[
+                    "file8.xyz",
+                    "dir3/file11.abc",
+                    "dir3/file7.abc",
+                ],
+            ),
+            {
+                "bar/baz.ini": ["dir3/file7.abc"],
+            },
+        )
+        self.assertEqual(
+            mypy_wrapper.make_plan(
+                configs=self.configs,
+                files=[
+                    "dir2/dir9/file10.abc",
+                    "dir2/dir5/file6.def",
+                    "dir3/file7.abc",
+                    "file1.abc",
+                    "dir3/file11.abc",
+                ],
+            ),
+            {
+                "foo.ini": [
+                    "dir2/dir9/file10.abc",
+                    "dir2/dir5/file6.def",
+                    "file1.abc",
+                ],
+                "bar/baz.ini": [
+                    "dir2/dir5/file6.def",
+                    "dir3/file7.abc",
+                    "file1.abc",
+                ],
+            },
         )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tools/test/test_stats.py b/tools/test/test_stats.py
index 46ad287..d352084 100644
--- a/tools/test/test_stats.py
+++ b/tools/test/test_stats.py
@@ -3,10 +3,16 @@
 from typing import Dict, List
 
 from tools.stats import print_test_stats
-from tools.stats.s3_stat_parser import (Commit, Report, ReportMetaMeta,
-                                        Status, Version1Case,
-                                        Version1Report, Version2Case,
-                                        Version2Report)
+from tools.stats.s3_stat_parser import (
+    Commit,
+    Report,
+    ReportMetaMeta,
+    Status,
+    Version1Case,
+    Version1Report,
+    Version2Case,
+    Version2Report,
+)
 
 
 def fakehash(char: str) -> str:
@@ -15,14 +21,14 @@
 
 def dummy_meta_meta() -> ReportMetaMeta:
     return {
-        'build_pr': '',
-        'build_tag': '',
-        'build_sha1': '',
-        'build_base_commit': '',
-        'build_branch': '',
-        'build_job': '',
-        'build_workflow_id': '',
-        'build_start_time_epoch': '',
+        "build_pr": "",
+        "build_tag": "",
+        "build_sha1": "",
+        "build_base_commit": "",
+        "build_branch": "",
+        "build_job": "",
+        "build_workflow_id": "",
+        "build_start_time_epoch": "",
     }
 
 
@@ -35,202 +41,210 @@
     skipped: bool = False,
 ) -> Version1Case:
     return {
-        'name': name,
-        'seconds': seconds,
-        'errored': errored,
-        'failed': failed,
-        'skipped': skipped,
+        "name": name,
+        "seconds": seconds,
+        "errored": errored,
+        "failed": failed,
+        "skipped": skipped,
     }
 
 
 def make_report_v1(tests: Dict[str, List[Version1Case]]) -> Version1Report:
     suites = {
         suite_name: {
-            'total_seconds': sum(case['seconds'] for case in cases),
-            'cases': cases,
+            "total_seconds": sum(case["seconds"] for case in cases),
+            "cases": cases,
         }
         for suite_name, cases in tests.items()
     }
     return {
         **dummy_meta_meta(),  # type: ignore[misc]
-        'total_seconds': sum(s['total_seconds'] for s in suites.values()),
-        'suites': suites,
+        "total_seconds": sum(s["total_seconds"] for s in suites.values()),
+        "suites": suites,
     }
 
 
 def make_case_v2(seconds: float, status: Status = None) -> Version2Case:
     return {
-        'seconds': seconds,
-        'status': status,
+        "seconds": seconds,
+        "status": status,
     }
 
 
-def make_report_v2(tests: Dict[str, Dict[str, Dict[str, Version2Case]]]) -> Version2Report:
+def make_report_v2(
+    tests: Dict[str, Dict[str, Dict[str, Version2Case]]]
+) -> Version2Report:
     files = {}
     for file_name, file_suites in tests.items():
         suites = {
             suite_name: {
-                'total_seconds': sum(case['seconds'] for case in cases.values()),
-                'cases': cases,
+                "total_seconds": sum(case["seconds"] for case in cases.values()),
+                "cases": cases,
             }
             for suite_name, cases in file_suites.items()
         }
         files[file_name] = {
-            'suites': suites,
-            'total_seconds': sum(suite['total_seconds'] for suite in suites.values()),
+            "suites": suites,
+            "total_seconds": sum(suite["total_seconds"] for suite in suites.values()),
         }
     return {
         **dummy_meta_meta(),  # type: ignore[misc]
-        'format_version': 2,
-        'total_seconds': sum(s['total_seconds'] for s in files.values()),
-        'files': files,
+        "format_version": 2,
+        "total_seconds": sum(s["total_seconds"] for s in files.values()),
+        "files": files,
     }
+
+
 maxDiff = None
 
+
 class TestPrintTestStats(unittest.TestCase):
-    version1_report: Version1Report = make_report_v1({
-        # input ordering of the suites is ignored
-        'Grault': [
-            # not printed: status same and time similar
-            makecase('test_grault0', 4.78, failed=True),
-            # status same, but time increased a lot
-            makecase('test_grault2', 1.473, errored=True),
-        ],
-        # individual tests times changed, not overall suite
-        'Qux': [
-            # input ordering of the test cases is ignored
-            makecase('test_qux1', 0.001, skipped=True),
-            makecase('test_qux6', 0.002, skipped=True),
-            # time in bounds, but status changed
-            makecase('test_qux4', 7.158, failed=True),
-            # not printed because it's the same as before
-            makecase('test_qux7', 0.003, skipped=True),
-            makecase('test_qux5', 11.968),
-            makecase('test_qux3', 23.496),
-        ],
-        # new test suite
-        'Bar': [
-            makecase('test_bar2', 3.742, failed=True),
-            makecase('test_bar1', 50.447),
-        ],
-        # overall suite time changed but no individual tests
-        'Norf': [
-            makecase('test_norf1', 3),
-            makecase('test_norf2', 3),
-            makecase('test_norf3', 3),
-            makecase('test_norf4', 3),
-        ],
-        # suite doesn't show up if it doesn't change enough
-        'Foo': [
-            makecase('test_foo1', 42),
-            makecase('test_foo2', 56),
-        ],
-    })
+    version1_report: Version1Report = make_report_v1(
+        {
+            # input ordering of the suites is ignored
+            "Grault": [
+                # not printed: status same and time similar
+                makecase("test_grault0", 4.78, failed=True),
+                # status same, but time increased a lot
+                makecase("test_grault2", 1.473, errored=True),
+            ],
+            # individual tests times changed, not overall suite
+            "Qux": [
+                # input ordering of the test cases is ignored
+                makecase("test_qux1", 0.001, skipped=True),
+                makecase("test_qux6", 0.002, skipped=True),
+                # time in bounds, but status changed
+                makecase("test_qux4", 7.158, failed=True),
+                # not printed because it's the same as before
+                makecase("test_qux7", 0.003, skipped=True),
+                makecase("test_qux5", 11.968),
+                makecase("test_qux3", 23.496),
+            ],
+            # new test suite
+            "Bar": [
+                makecase("test_bar2", 3.742, failed=True),
+                makecase("test_bar1", 50.447),
+            ],
+            # overall suite time changed but no individual tests
+            "Norf": [
+                makecase("test_norf1", 3),
+                makecase("test_norf2", 3),
+                makecase("test_norf3", 3),
+                makecase("test_norf4", 3),
+            ],
+            # suite doesn't show up if it doesn't change enough
+            "Foo": [
+                makecase("test_foo1", 42),
+                makecase("test_foo2", 56),
+            ],
+        }
+    )
 
     version2_report: Version2Report = make_report_v2(
         {
-            'test_a': {
-                'Grault': {
-                    'test_grault0': make_case_v2(4.78, 'failed'),
-                    'test_grault2': make_case_v2(1.473, 'errored'),
+            "test_a": {
+                "Grault": {
+                    "test_grault0": make_case_v2(4.78, "failed"),
+                    "test_grault2": make_case_v2(1.473, "errored"),
                 },
-                'Qux': {
-                    'test_qux1': make_case_v2(0.001, 'skipped'),
-                    'test_qux6': make_case_v2(0.002, 'skipped'),
-                    'test_qux4': make_case_v2(7.158, 'failed'),
-                    'test_qux7': make_case_v2(0.003, 'skipped'),
-                    'test_qux8': make_case_v2(11.968),
-                    'test_qux3': make_case_v2(23.496),
-                }
+                "Qux": {
+                    "test_qux1": make_case_v2(0.001, "skipped"),
+                    "test_qux6": make_case_v2(0.002, "skipped"),
+                    "test_qux4": make_case_v2(7.158, "failed"),
+                    "test_qux7": make_case_v2(0.003, "skipped"),
+                    "test_qux8": make_case_v2(11.968),
+                    "test_qux3": make_case_v2(23.496),
+                },
             },
-            'test_b': {
-                'Bar': {
-                    'test_bar2': make_case_v2(3.742, 'failed'),
-                    'test_bar1': make_case_v2(50.447),
+            "test_b": {
+                "Bar": {
+                    "test_bar2": make_case_v2(3.742, "failed"),
+                    "test_bar1": make_case_v2(50.447),
                 },
                 # overall suite time changed but no individual tests
-                'Norf': {
-                    'test_norf1': make_case_v2(3),
-                    'test_norf2': make_case_v2(3),
-                    'test_norf3': make_case_v2(3),
-                    'test_norf4': make_case_v2(3),
+                "Norf": {
+                    "test_norf1": make_case_v2(3),
+                    "test_norf2": make_case_v2(3),
+                    "test_norf3": make_case_v2(3),
+                    "test_norf4": make_case_v2(3),
                 },
             },
-            'test_c': {
-                'Foo': {
-                    'test_foo1': make_case_v2(42),
-                    'test_foo2': make_case_v2(56),
+            "test_c": {
+                "Foo": {
+                    "test_foo1": make_case_v2(42),
+                    "test_foo2": make_case_v2(56),
                 },
-            }
-        })
+            },
+        }
+    )
 
     def test_simplify(self) -> None:
         self.assertEqual(
             {
-                '': {
-                    'Bar': {
-                        'test_bar1': {'seconds': 50.447, 'status': None},
-                        'test_bar2': {'seconds': 3.742, 'status': 'failed'},
+                "": {
+                    "Bar": {
+                        "test_bar1": {"seconds": 50.447, "status": None},
+                        "test_bar2": {"seconds": 3.742, "status": "failed"},
                     },
-                    'Foo': {
-                        'test_foo1': {'seconds': 42, 'status': None},
-                        'test_foo2': {'seconds': 56, 'status': None},
+                    "Foo": {
+                        "test_foo1": {"seconds": 42, "status": None},
+                        "test_foo2": {"seconds": 56, "status": None},
                     },
-                    'Grault': {
-                        'test_grault0': {'seconds': 4.78, 'status': 'failed'},
-                        'test_grault2': {'seconds': 1.473, 'status': 'errored'},
+                    "Grault": {
+                        "test_grault0": {"seconds": 4.78, "status": "failed"},
+                        "test_grault2": {"seconds": 1.473, "status": "errored"},
                     },
-                    'Norf': {
-                        'test_norf1': {'seconds': 3, 'status': None},
-                        'test_norf3': {'seconds': 3, 'status': None},
-                        'test_norf2': {'seconds': 3, 'status': None},
-                        'test_norf4': {'seconds': 3, 'status': None},
+                    "Norf": {
+                        "test_norf1": {"seconds": 3, "status": None},
+                        "test_norf3": {"seconds": 3, "status": None},
+                        "test_norf2": {"seconds": 3, "status": None},
+                        "test_norf4": {"seconds": 3, "status": None},
                     },
-                    'Qux': {
-                        'test_qux1': {'seconds': 0.001, 'status': 'skipped'},
-                        'test_qux3': {'seconds': 23.496, 'status': None},
-                        'test_qux4': {'seconds': 7.158, 'status': 'failed'},
-                        'test_qux5': {'seconds': 11.968, 'status': None},
-                        'test_qux6': {'seconds': 0.002, 'status': 'skipped'},
-                        'test_qux7': {'seconds': 0.003, 'status': 'skipped'},
+                    "Qux": {
+                        "test_qux1": {"seconds": 0.001, "status": "skipped"},
+                        "test_qux3": {"seconds": 23.496, "status": None},
+                        "test_qux4": {"seconds": 7.158, "status": "failed"},
+                        "test_qux5": {"seconds": 11.968, "status": None},
+                        "test_qux6": {"seconds": 0.002, "status": "skipped"},
+                        "test_qux7": {"seconds": 0.003, "status": "skipped"},
                     },
                 },
             },
-            print_test_stats.simplify(self.version1_report)
+            print_test_stats.simplify(self.version1_report),
         )
 
         self.assertEqual(
             {
-                'test_a': {
-                    'Grault': {
-                        'test_grault0': {'seconds': 4.78, 'status': 'failed'},
-                        'test_grault2': {'seconds': 1.473, 'status': 'errored'},
+                "test_a": {
+                    "Grault": {
+                        "test_grault0": {"seconds": 4.78, "status": "failed"},
+                        "test_grault2": {"seconds": 1.473, "status": "errored"},
                     },
-                    'Qux': {
-                        'test_qux1': {'seconds': 0.001, 'status': 'skipped'},
-                        'test_qux3': {'seconds': 23.496, 'status': None},
-                        'test_qux4': {'seconds': 7.158, 'status': 'failed'},
-                        'test_qux6': {'seconds': 0.002, 'status': 'skipped'},
-                        'test_qux7': {'seconds': 0.003, 'status': 'skipped'},
-                        'test_qux8': {'seconds': 11.968, 'status': None},
+                    "Qux": {
+                        "test_qux1": {"seconds": 0.001, "status": "skipped"},
+                        "test_qux3": {"seconds": 23.496, "status": None},
+                        "test_qux4": {"seconds": 7.158, "status": "failed"},
+                        "test_qux6": {"seconds": 0.002, "status": "skipped"},
+                        "test_qux7": {"seconds": 0.003, "status": "skipped"},
+                        "test_qux8": {"seconds": 11.968, "status": None},
                     },
                 },
-                'test_b': {
-                    'Bar': {
-                        'test_bar1': {'seconds': 50.447, 'status': None},
-                        'test_bar2': {'seconds': 3.742, 'status': 'failed'},
+                "test_b": {
+                    "Bar": {
+                        "test_bar1": {"seconds": 50.447, "status": None},
+                        "test_bar2": {"seconds": 3.742, "status": "failed"},
                     },
-                    'Norf': {
-                        'test_norf1': {'seconds': 3, 'status': None},
-                        'test_norf2': {'seconds': 3, 'status': None},
-                        'test_norf3': {'seconds': 3, 'status': None},
-                        'test_norf4': {'seconds': 3, 'status': None},
+                    "Norf": {
+                        "test_norf1": {"seconds": 3, "status": None},
+                        "test_norf2": {"seconds": 3, "status": None},
+                        "test_norf3": {"seconds": 3, "status": None},
+                        "test_norf4": {"seconds": 3, "status": None},
                     },
                 },
-                'test_c': {
-                    'Foo': {
-                        'test_foo1': {'seconds': 42, 'status': None},
-                        'test_foo2': {'seconds': 56, 'status': None},
+                "test_c": {
+                    "Foo": {
+                        "test_foo1": {"seconds": 42, "status": None},
+                        "test_foo2": {"seconds": 56, "status": None},
                     },
                 },
             },
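
make_report_v2, reformatted at the top of this hunk, rolls per-case seconds up
into per-suite and per-file totals. A minimal sketch of the shape it returns,
assuming make_case_v2(t) yields {"seconds": t, "status": None}, as the simplify
expectations above suggest:

    report = make_report_v2({"test_a": {"Foo": {"test_foo1": make_case_v2(1.5)}}})
    # report == {
    #     ...meta fields from dummy_meta_meta()...
    #     "format_version": 2,
    #     "total_seconds": 1.5,
    #     "files": {
    #         "test_a": {
    #             "total_seconds": 1.5,
    #             "suites": {
    #                 "Foo": {
    #                     "total_seconds": 1.5,
    #                     "cases": {"test_foo1": {"seconds": 1.5, "status": None}},
    #                 }
    #             },
    #         }
    #     },
    # }
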
@@ -242,95 +256,101 @@
 
         base_reports: Dict[Commit, List[Report]] = {
             # bbbb has no reports, so base is cccc instead
-            fakehash('b'): [],
-            fakehash('c'): [
-                make_report_v1({
-                    'Baz': [
-                        makecase('test_baz2', 13.605),
-                        # no recent suites have & skip this test
-                        makecase('test_baz1', 0.004, skipped=True),
-                    ],
-                    'Foo': [
-                        makecase('test_foo1', 43),
-                        # test added since dddd
-                        makecase('test_foo2', 57),
-                    ],
-                    'Grault': [
-                        makecase('test_grault0', 4.88, failed=True),
-                        makecase('test_grault1', 11.967, failed=True),
-                        makecase('test_grault2', 0.395, errored=True),
-                        makecase('test_grault3', 30.460),
-                    ],
-                    'Norf': [
-                        makecase('test_norf1', 2),
-                        makecase('test_norf2', 2),
-                        makecase('test_norf3', 2),
-                        makecase('test_norf4', 2),
-                    ],
-                    'Qux': [
-                        makecase('test_qux3', 4.978, errored=True),
-                        makecase('test_qux7', 0.002, skipped=True),
-                        makecase('test_qux2', 5.618),
-                        makecase('test_qux4', 7.766, errored=True),
-                        makecase('test_qux6', 23.589, failed=True),
-                    ],
-                }),
+            fakehash("b"): [],
+            fakehash("c"): [
+                make_report_v1(
+                    {
+                        "Baz": [
+                            makecase("test_baz2", 13.605),
+                            # no recent suites have & skip this test
+                            makecase("test_baz1", 0.004, skipped=True),
+                        ],
+                        "Foo": [
+                            makecase("test_foo1", 43),
+                            # test added since dddd
+                            makecase("test_foo2", 57),
+                        ],
+                        "Grault": [
+                            makecase("test_grault0", 4.88, failed=True),
+                            makecase("test_grault1", 11.967, failed=True),
+                            makecase("test_grault2", 0.395, errored=True),
+                            makecase("test_grault3", 30.460),
+                        ],
+                        "Norf": [
+                            makecase("test_norf1", 2),
+                            makecase("test_norf2", 2),
+                            makecase("test_norf3", 2),
+                            makecase("test_norf4", 2),
+                        ],
+                        "Qux": [
+                            makecase("test_qux3", 4.978, errored=True),
+                            makecase("test_qux7", 0.002, skipped=True),
+                            makecase("test_qux2", 5.618),
+                            makecase("test_qux4", 7.766, errored=True),
+                            makecase("test_qux6", 23.589, failed=True),
+                        ],
+                    }
+                ),
             ],
-            fakehash('d'): [
-                make_report_v1({
-                    'Foo': [
-                        makecase('test_foo1', 40),
-                        # removed in cccc
-                        makecase('test_foo3', 17),
-                    ],
-                    'Baz': [
-                        # not skipped, so not included in stdev
-                        makecase('test_baz1', 3.14),
-                    ],
-                    'Qux': [
-                        makecase('test_qux7', 0.004, skipped=True),
-                        makecase('test_qux2', 6.02),
-                        makecase('test_qux4', 20.932),
-                    ],
-                    'Norf': [
-                        makecase('test_norf1', 3),
-                        makecase('test_norf2', 3),
-                        makecase('test_norf3', 3),
-                        makecase('test_norf4', 3),
-                    ],
-                    'Grault': [
-                        makecase('test_grault0', 5, failed=True),
-                        makecase('test_grault1', 14.325, failed=True),
-                        makecase('test_grault2', 0.31, errored=True),
-                    ],
-                }),
+            fakehash("d"): [
+                make_report_v1(
+                    {
+                        "Foo": [
+                            makecase("test_foo1", 40),
+                            # removed in cccc
+                            makecase("test_foo3", 17),
+                        ],
+                        "Baz": [
+                            # not skipped, so not included in stdev
+                            makecase("test_baz1", 3.14),
+                        ],
+                        "Qux": [
+                            makecase("test_qux7", 0.004, skipped=True),
+                            makecase("test_qux2", 6.02),
+                            makecase("test_qux4", 20.932),
+                        ],
+                        "Norf": [
+                            makecase("test_norf1", 3),
+                            makecase("test_norf2", 3),
+                            makecase("test_norf3", 3),
+                            makecase("test_norf4", 3),
+                        ],
+                        "Grault": [
+                            makecase("test_grault0", 5, failed=True),
+                            makecase("test_grault1", 14.325, failed=True),
+                            makecase("test_grault2", 0.31, errored=True),
+                        ],
+                    }
+                ),
             ],
-            fakehash('e'): [],
-            fakehash('f'): [
-                make_report_v1({
-                    'Foo': [
-                        makecase('test_foo3', 24),
-                        makecase('test_foo1', 43),
-                    ],
-                    'Baz': [
-                        makecase('test_baz2', 16.857),
-                    ],
-                    'Qux': [
-                        makecase('test_qux2', 6.422),
-                        makecase('test_qux4', 6.382, errored=True),
-                    ],
-                    'Norf': [
-                        makecase('test_norf1', 0.9),
-                        makecase('test_norf3', 0.9),
-                        makecase('test_norf2', 0.9),
-                        makecase('test_norf4', 0.9),
-                    ],
-                    'Grault': [
-                        makecase('test_grault0', 4.7, failed=True),
-                        makecase('test_grault1', 13.146, failed=True),
-                        makecase('test_grault2', 0.48, errored=True),
-                    ],
-                }),
+            fakehash("e"): [],
+            fakehash("f"): [
+                make_report_v1(
+                    {
+                        "Foo": [
+                            makecase("test_foo3", 24),
+                            makecase("test_foo1", 43),
+                        ],
+                        "Baz": [
+                            makecase("test_baz2", 16.857),
+                        ],
+                        "Qux": [
+                            makecase("test_qux2", 6.422),
+                            makecase("test_qux4", 6.382, errored=True),
+                        ],
+                        "Norf": [
+                            makecase("test_norf1", 0.9),
+                            makecase("test_norf3", 0.9),
+                            makecase("test_norf2", 0.9),
+                            makecase("test_norf4", 0.9),
+                        ],
+                        "Grault": [
+                            makecase("test_grault0", 4.7, failed=True),
+                            makecase("test_grault1", 13.146, failed=True),
+                            makecase("test_grault2", 0.48, errored=True),
+                        ],
+                    }
+                ),
             ],
         }
 
@@ -344,7 +364,7 @@
         )
 
         self.assertEqual(
-            '''\
+            """\
 
 - class Baz:
 -     # was   15.23s ±   2.30s
@@ -402,14 +422,14 @@
 +     def test_bar2: ...
 +         # now   3.742s           (failed)
 
-''',
+""",
             print_test_stats.anomalies(analysis),
         )
 
     def test_graph(self) -> None:
         # HEAD is on master
         self.assertEqual(
-            '''\
+            """\
 Commit graph (base is most recent master ancestor with at least one S3 report):
 
     : (master)
@@ -420,21 +440,21 @@
     * dddddddddd          0 reports
     |
     :
-''',
+""",
             print_test_stats.graph(
-                head_sha=fakehash('a'),
+                head_sha=fakehash("a"),
                 head_seconds=502.99,
                 base_seconds={
-                    fakehash('b'): [47.84],
-                    fakehash('c'): [332.50],
-                    fakehash('d'): [],
+                    fakehash("b"): [47.84],
+                    fakehash("c"): [332.50],
+                    fakehash("d"): [],
                 },
                 on_master=True,
-            )
+            ),
         )
 
         self.assertEqual(
-            '''\
+            """\
 Commit graph (base is most recent master ancestor with at least one S3 report):
 
     : (master)
@@ -446,21 +466,21 @@
     * dddddddddd          1 report,  total time  1234.56s
     |
     :
-''',
+""",
             print_test_stats.graph(
-                head_sha=fakehash('a'),
+                head_sha=fakehash("a"),
                 head_seconds=9988.77,
                 base_seconds={
-                    fakehash('b'): [7598.77] * 60 + [7654.32] + [7709.87] * 60,
-                    fakehash('c'): [5308.77] * 10 + [5802.33] * 10,
-                    fakehash('d'): [1234.56],
+                    fakehash("b"): [7598.77] * 60 + [7654.32] + [7709.87] * 60,
+                    fakehash("c"): [5308.77] * 10 + [5802.33] * 10,
+                    fakehash("d"): [1234.56],
                 },
                 on_master=False,
-            )
+            ),
         )
 
         self.assertEqual(
-            '''\
+            """\
 Commit graph (base is most recent master ancestor with at least one S3 report):
 
     : (master)
@@ -474,22 +494,22 @@
     * dddddddddd (base)  15 reports, total time    58.92s ±   25.82s
     |
     :
-''',
+""",
             print_test_stats.graph(
-                head_sha=fakehash('a'),
+                head_sha=fakehash("a"),
                 head_seconds=25.52,
                 base_seconds={
-                    fakehash('b'): [],
-                    fakehash('c'): [],
-                    fakehash('d'): [52.25] * 14 + [152.26],
+                    fakehash("b"): [],
+                    fakehash("c"): [],
+                    fakehash("d"): [52.25] * 14 + [152.26],
                 },
                 on_master=False,
                 ancestry_path=5,
-            )
+            ),
         )
 
         self.assertEqual(
-            '''\
+            """\
 Commit graph (base is most recent master ancestor with at least one S3 report):
 
     : (master)
@@ -503,22 +523,22 @@
     * dddddddddd          3 reports, total time     0.10s ±    0.05s
     |
     :
-''',
+""",
             print_test_stats.graph(
-                head_sha=fakehash('a'),
+                head_sha=fakehash("a"),
                 head_seconds=0.08,
                 base_seconds={
-                    fakehash('b'): [],
-                    fakehash('c'): [0.09],
-                    fakehash('d'): [0.05, 0.10, 0.15],
+                    fakehash("b"): [],
+                    fakehash("c"): [0.09],
+                    fakehash("d"): [0.05, 0.10, 0.15],
                 },
                 on_master=False,
                 other_ancestors=1,
-            )
+            ),
         )
 
         self.assertEqual(
-            '''\
+            """\
 Commit graph (base is most recent master ancestor with at least one S3 report):
 
     : (master)
@@ -534,24 +554,24 @@
     * dddddddddd         10 reports, total time     5.84s ±    0.92s
     |
     :
-''',
+""",
             print_test_stats.graph(
-                head_sha=fakehash('a'),
+                head_sha=fakehash("a"),
                 head_seconds=5.98,
                 base_seconds={
-                    fakehash('b'): [4.81, 7.23],
-                    fakehash('c'): [],
-                    fakehash('d'): [4.97] * 5 + [6.71] * 5,
+                    fakehash("b"): [4.81, 7.23],
+                    fakehash("c"): [],
+                    fakehash("d"): [4.97] * 5 + [6.71] * 5,
                 },
                 on_master=False,
                 ancestry_path=1,
                 other_ancestors=7,
-            )
+            ),
         )
 
     def test_regression_info(self) -> None:
         self.assertEqual(
-            '''\
+            """\
 ----- Historic stats comparison result ------
 
     job: foo_job
@@ -571,41 +591,48 @@
 Removed  (across    1 suite)      1 test,  totaling -   1.00s
 Modified (across    1 suite)      1 test,  totaling -  41.48s ±   2.12s
 Added    (across    1 suite)      1 test,  totaling +   3.00s
-''',
+""",
             print_test_stats.regression_info(
-                head_sha=fakehash('a'),
-                head_report=make_report_v1({
-                    'Foo': [
-                        makecase('test_foo', 0.02, skipped=True),
-                        makecase('test_baz', 3),
-                    ]}),
+                head_sha=fakehash("a"),
+                head_report=make_report_v1(
+                    {
+                        "Foo": [
+                            makecase("test_foo", 0.02, skipped=True),
+                            makecase("test_baz", 3),
+                        ]
+                    }
+                ),
                 base_reports={
-                    fakehash('b'): [
-                        make_report_v1({
-                            'Foo': [
-                                makecase('test_foo', 40),
-                                makecase('test_bar', 1),
-                            ],
-                        }),
+                    fakehash("b"): [
+                        make_report_v1(
+                            {
+                                "Foo": [
+                                    makecase("test_foo", 40),
+                                    makecase("test_bar", 1),
+                                ],
+                            }
+                        ),
                     ],
-                    fakehash('c'): [
-                        make_report_v1({
-                            'Foo': [
-                                makecase('test_foo', 43),
-                            ],
-                        }),
+                    fakehash("c"): [
+                        make_report_v1(
+                            {
+                                "Foo": [
+                                    makecase("test_foo", 43),
+                                ],
+                            }
+                        ),
                     ],
                 },
-                job_name='foo_job',
+                job_name="foo_job",
                 on_master=False,
                 ancestry_path=0,
                 other_ancestors=0,
-            )
+            ),
         )
 
     def test_regression_info_new_job(self) -> None:
         self.assertEqual(
-            '''\
+            """\
 ----- Historic stats comparison result ------
 
     job: foo_job
@@ -629,25 +656,28 @@
 Removed  (across    0 suites)     0 tests, totaling     0.00s
 Modified (across    0 suites)     0 tests, totaling     0.00s
 Added    (across    1 suite)      2 tests, totaling +   3.02s
-''',
+""",
             print_test_stats.regression_info(
-                head_sha=fakehash('a'),
-                head_report=make_report_v1({
-                    'Foo': [
-                        makecase('test_foo', 0.02, skipped=True),
-                        makecase('test_baz', 3),
-                    ]}),
+                head_sha=fakehash("a"),
+                head_report=make_report_v1(
+                    {
+                        "Foo": [
+                            makecase("test_foo", 0.02, skipped=True),
+                            makecase("test_baz", 3),
+                        ]
+                    }
+                ),
                 base_reports={
-                    fakehash('b'): [],
-                    fakehash('c'): [],
+                    fakehash("b"): [],
+                    fakehash("c"): [],
                 },
-                job_name='foo_job',
+                job_name="foo_job",
                 on_master=False,
                 ancestry_path=3,
                 other_ancestors=2,
-            )
+            ),
         )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
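
Everything in the hunks above is mechanical Black output: single quotes become
double quotes, any call or literal that would overflow the 88-column limit gets
wrapped, and a collection written with a trailing comma (the "magic trailing
comma") is exploded one element per line, which is why the make_report_v1({...})
calls gained an indentation level. To preview what Black will do to a snippet,
format_str is its programmatic entry point (a sketch assuming the black package
is importable):

    import black

    # Quote normalization: 'a' -> "a".
    print(black.format_str("x = {'a': 1}\n", mode=black.Mode()), end="")

    # Magic trailing comma: the comma after the dict forces it onto its own lines.
    src = "report = make_report_v1({'Foo': [1, 2],})\n"
    print(black.format_str(src, mode=black.Mode()), end="")
    # report = make_report_v1(
    #     {
    #         "Foo": [1, 2],
    #     }
    # )
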
diff --git a/tools/test/test_test_history.py b/tools/test/test_test_history.py
index 1b8b5c9..7851ca3 100644
--- a/tools/test/test_test_history.py
+++ b/tools/test/test_test_history.py
@@ -16,36 +16,33 @@
 
 def parse_block(block: List[str]) -> Optional[Example]:
     if block:
-        match = re.match(r'^\$ ([^ ]+) (.*)$', block[0])
+        match = re.match(r"^\$ ([^ ]+) (.*)$", block[0])
         if match:
             cmd, first = match.groups()
             args = []
             for i, line in enumerate([first] + block[1:]):
-                if line.endswith('\\'):
+                if line.endswith("\\"):
                     args.append(line[:-1])
                 else:
                     args.append(line)
                     break
             return {
-                'cmd': cmd,
-                'args': shlex.split(''.join(args)),
-                'lines': block[i + 1:]
+                "cmd": cmd,
+                "args": shlex.split("".join(args)),
+                "lines": block[i + 1 :],
             }
     return None
 
 
 def parse_description(description: str) -> List[Example]:
     examples: List[Example] = []
-    for block in description.split('\n\n'):
-        matches = [
-            re.match(r'^    (.*)$', line)
-            for line in block.splitlines()
-        ]
+    for block in description.split("\n\n"):
+        matches = [re.match(r"^    (.*)$", line) for line in block.splitlines()]
         if all(matches):
             lines = []
             for match in matches:
                 assert match
-                line, = match.groups()
+                (line,) = match.groups()
                 lines.append(line)
             example = parse_block(lines)
             if example:
@@ -62,14 +59,16 @@
         self.assertEqual(len(examples), 3)
         for i, example in enumerate(examples):
             with self.subTest(i=i):
-                self.assertTrue(test_history.__file__.endswith(example['cmd']))
-                expected = example['lines']
-                actual = list(itertools.islice(
-                    test_history.run(example['args']),
-                    len(expected),
-                ))
+                self.assertTrue(test_history.__file__.endswith(example["cmd"]))
+                expected = example["lines"]
+                actual = list(
+                    itertools.islice(
+                        test_history.run(example["args"]),
+                        len(expected),
+                    )
+                )
                 self.assertEqual(actual, expected)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
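
parse_block above turns one indented example from the CLI description into a
structured Example: the first line must look like a shell invocation, a
trailing backslash continues the argument list onto the next line, and every
remaining line is expected output. A sketch of the resulting shape (the
argument values here are invented, not test_history.py's real flags):

    block = [
        "$ test_history.py --example-flag master \\",
        "    test_foo",
        "2021-01-01Z aaaaaaaaaa errored",
    ]
    assert parse_block(block) == {
        "cmd": "test_history.py",
        "args": ["--example-flag", "master", "test_foo"],
        "lines": ["2021-01-01Z aaaaaaaaaa errored"],
    }
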
diff --git a/tools/test/test_test_selections.py b/tools/test/test_test_selections.py
index 5ea6fa8..10bf3a2 100644
--- a/tools/test/test_test_selections.py
+++ b/tools/test/test_test_selections.py
@@ -7,37 +7,37 @@
 
 class TestCalculateShards(unittest.TestCase):
     tests: List[str] = [
-        'super_long_test',
-        'long_test1',
-        'long_test2',
-        'normal_test1',
-        'normal_test2',
-        'normal_test3',
-        'short_test1',
-        'short_test2',
-        'short_test3',
-        'short_test4',
-        'short_test5',
+        "super_long_test",
+        "long_test1",
+        "long_test2",
+        "normal_test1",
+        "normal_test2",
+        "normal_test3",
+        "short_test1",
+        "short_test2",
+        "short_test3",
+        "short_test4",
+        "short_test5",
     ]
 
     test_times: Dict[str, float] = {
-        'super_long_test': 55,
-        'long_test1': 22,
-        'long_test2': 18,
-        'normal_test1': 9,
-        'normal_test2': 7,
-        'normal_test3': 5,
-        'short_test1': 1,
-        'short_test2': 0.6,
-        'short_test3': 0.4,
-        'short_test4': 0.3,
-        'short_test5': 0.01,
+        "super_long_test": 55,
+        "long_test1": 22,
+        "long_test2": 18,
+        "normal_test1": 9,
+        "normal_test2": 7,
+        "normal_test3": 5,
+        "short_test1": 1,
+        "short_test2": 0.6,
+        "short_test3": 0.4,
+        "short_test4": 0.3,
+        "short_test5": 0.01,
     }
 
     def assert_shards_equal(
         self,
         expected_shards: List[Tuple[float, List[str]]],
-        actual_shards: List[Tuple[float, List[str]]]
+        actual_shards: List[Tuple[float, List[str]]],
     ) -> None:
         for expected, actual in zip(expected_shards, actual_shards):
             self.assertAlmostEqual(expected[0], actual[0])
@@ -45,53 +45,117 @@
 
     def test_calculate_2_shards_with_complete_test_times(self) -> None:
         expected_shards = [
-            (60, ['super_long_test', 'normal_test3']),
-            (58.31, ['long_test1', 'long_test2', 'normal_test1', 'normal_test2', 'short_test1', 'short_test2',
-                     'short_test3', 'short_test4', 'short_test5'])
+            (60, ["super_long_test", "normal_test3"]),
+            (
+                58.31,
+                [
+                    "long_test1",
+                    "long_test2",
+                    "normal_test1",
+                    "normal_test2",
+                    "short_test1",
+                    "short_test2",
+                    "short_test3",
+                    "short_test4",
+                    "short_test5",
+                ],
+            ),
         ]
-        self.assert_shards_equal(expected_shards, calculate_shards(2, self.tests, self.test_times))
-
+        self.assert_shards_equal(
+            expected_shards, calculate_shards(2, self.tests, self.test_times)
+        )
 
     def test_calculate_5_shards_with_complete_test_times(self) -> None:
         expected_shards = [
-            (55.0, ['super_long_test']),
-            (22.0, ['long_test1', ]),
-            (18.0, ['long_test2', ]),
-            (11.31, ['normal_test1', 'short_test1', 'short_test2', 'short_test3', 'short_test4', 'short_test5']),
-            (12.0, ['normal_test2', 'normal_test3']),
+            (55.0, ["super_long_test"]),
+            (
+                22.0,
+                [
+                    "long_test1",
+                ],
+            ),
+            (
+                18.0,
+                [
+                    "long_test2",
+                ],
+            ),
+            (
+                11.31,
+                [
+                    "normal_test1",
+                    "short_test1",
+                    "short_test2",
+                    "short_test3",
+                    "short_test4",
+                    "short_test5",
+                ],
+            ),
+            (12.0, ["normal_test2", "normal_test3"]),
         ]
-        self.assert_shards_equal(expected_shards, calculate_shards(5, self.tests, self.test_times))
-
+        self.assert_shards_equal(
+            expected_shards, calculate_shards(5, self.tests, self.test_times)
+        )
 
     def test_calculate_2_shards_with_incomplete_test_times(self) -> None:
-        incomplete_test_times = {k: v for k, v in self.test_times.items() if 'test1' in k}
+        incomplete_test_times = {
+            k: v for k, v in self.test_times.items() if "test1" in k
+        }
         expected_shards = [
-            (22.0, ['long_test1', 'long_test2', 'normal_test3', 'short_test3', 'short_test5']),
-            (10.0, ['normal_test1', 'short_test1', 'super_long_test', 'normal_test2', 'short_test2', 'short_test4']),
+            (
+                22.0,
+                [
+                    "long_test1",
+                    "long_test2",
+                    "normal_test3",
+                    "short_test3",
+                    "short_test5",
+                ],
+            ),
+            (
+                10.0,
+                [
+                    "normal_test1",
+                    "short_test1",
+                    "super_long_test",
+                    "normal_test2",
+                    "short_test2",
+                    "short_test4",
+                ],
+            ),
         ]
-        self.assert_shards_equal(expected_shards, calculate_shards(2, self.tests, incomplete_test_times))
-
+        self.assert_shards_equal(
+            expected_shards, calculate_shards(2, self.tests, incomplete_test_times)
+        )
 
     def test_calculate_5_shards_with_incomplete_test_times(self) -> None:
-        incomplete_test_times = {k: v for k, v in self.test_times.items() if 'test1' in k}
+        incomplete_test_times = {
+            k: v for k, v in self.test_times.items() if "test1" in k
+        }
         expected_shards = [
-            (22.0, ['long_test1', 'normal_test2', 'short_test5']),
-            (9.0, ['normal_test1', 'normal_test3']),
-            (1.0, ['short_test1', 'short_test2']),
-            (0.0, ['super_long_test', 'short_test3']),
-            (0.0, ['long_test2', 'short_test4']),
+            (22.0, ["long_test1", "normal_test2", "short_test5"]),
+            (9.0, ["normal_test1", "normal_test3"]),
+            (1.0, ["short_test1", "short_test2"]),
+            (0.0, ["super_long_test", "short_test3"]),
+            (0.0, ["long_test2", "short_test4"]),
         ]
-        self.assert_shards_equal(expected_shards, calculate_shards(5, self.tests, incomplete_test_times))
+        self.assert_shards_equal(
+            expected_shards, calculate_shards(5, self.tests, incomplete_test_times)
+        )
 
     def test_calculate_2_shards_against_optimal_shards(self) -> None:
         for _ in range(100):
             random.seed(120)
             random_times = {k: random.random() * 10 for k in self.tests}
             # all test times except first two
-            rest_of_tests = [i for k, i in random_times.items() if k != 'super_long_test' and k != 'long_test1']
+            rest_of_tests = [
+                i
+                for k, i in random_times.items()
+                if k != "super_long_test" and k != "long_test1"
+            ]
             sum_of_rest = sum(rest_of_tests)
-            random_times['super_long_test'] = max(sum_of_rest / 2, max(rest_of_tests))
-            random_times['long_test1'] = sum_of_rest - random_times['super_long_test']
+            random_times["super_long_test"] = max(sum_of_rest / 2, max(rest_of_tests))
+            random_times["long_test1"] = sum_of_rest - random_times["super_long_test"]
             # An optimal sharding would look like the below, but we don't need to compute this for the test:
             # optimal_shards = [
             #     (sum_of_rest, ['super_long_test', 'long_test1']),
@@ -103,10 +167,12 @@
                 # The calculated shard should not have a ratio worse than 7/6 for num_shards = 2
                 self.assertGreaterEqual(7.0 / 6.0, max_shard_time / sum_of_rest)
                 sorted_tests = sorted(self.tests)
-                sorted_shard_tests = sorted(calculated_shards[0][1] + calculated_shards[1][1])
+                sorted_shard_tests = sorted(
+                    calculated_shards[0][1] + calculated_shards[1][1]
+                )
                 # All the tests should be represented by some shard
                 self.assertEqual(sorted_tests, sorted_shard_tests)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
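
The expected shards in these tests are consistent with a greedy
longest-processing-time heuristic: sort tests by descending time and always
append to the currently lightest shard. calculate_shards itself is not part of
this diff, so the following is only a sketch of that heuristic; it reproduces
both complete-test-times expectations above (handling of tests missing from
test_times, which the incomplete cases exercise, is left out):

    import heapq
    from typing import Dict, List, Tuple

    def greedy_shards(
        num_shards: int, tests: List[str], times: Dict[str, float]
    ) -> List[Tuple[float, List[str]]]:
        # Min-heap of (total_seconds, shard_index); ties broken by index.
        heap = [(0.0, i) for i in range(num_shards)]
        shards: List[Tuple[float, List[str]]] = [(0.0, []) for _ in range(num_shards)]
        for test in sorted(tests, key=lambda t: times[t], reverse=True):
            seconds, idx = heapq.heappop(heap)
            shards[idx] = (seconds + times[test], shards[idx][1] + [test])
            heapq.heappush(heap, (seconds + times[test], idx))
        return shards

    # greedy_shards(2, tests, test_times) yields (60.0, ["super_long_test",
    # "normal_test3"]) and roughly (58.31, [...]), matching the first case above.
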
diff --git a/tools/test/test_trailing_newlines.py b/tools/test/test_trailing_newlines.py
index 4f4b662..2631c30 100644
--- a/tools/test/test_trailing_newlines.py
+++ b/tools/test/test_trailing_newlines.py
@@ -4,7 +4,7 @@
 
 
 def correct_trailing_newlines(file_contents: str) -> bool:
-    with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp:
+    with tempfile.NamedTemporaryFile(mode="w", delete=False) as tmp:
         filename = tmp.name
         tmp.write(file_contents)
     return trailing_newlines.correct_trailing_newlines(filename)
@@ -12,38 +12,38 @@
 
 class TestTrailingNewlines(unittest.TestCase):
     def test_empty(self) -> None:
-        self.assertTrue(correct_trailing_newlines(''))
+        self.assertTrue(correct_trailing_newlines(""))
 
     def test_single_byte(self) -> None:
-        self.assertFalse(correct_trailing_newlines('a'))
+        self.assertFalse(correct_trailing_newlines("a"))
 
     def test_single_newline(self) -> None:
-        self.assertFalse(correct_trailing_newlines('\n'))
+        self.assertFalse(correct_trailing_newlines("\n"))
 
     def test_two_newlines(self) -> None:
-        self.assertFalse(correct_trailing_newlines('\n\n'))
+        self.assertFalse(correct_trailing_newlines("\n\n"))
 
     def test_three_newlines(self) -> None:
-        self.assertFalse(correct_trailing_newlines('\n\n\n'))
+        self.assertFalse(correct_trailing_newlines("\n\n\n"))
 
     def test_hello_world(self) -> None:
-        self.assertFalse(correct_trailing_newlines('hello world'))
+        self.assertFalse(correct_trailing_newlines("hello world"))
 
     def test_hello_world_newline(self) -> None:
-        self.assertTrue(correct_trailing_newlines('hello world\n'))
+        self.assertTrue(correct_trailing_newlines("hello world\n"))
 
     def test_hello_world_two_newlines(self) -> None:
-        self.assertFalse(correct_trailing_newlines('hello world\n\n'))
+        self.assertFalse(correct_trailing_newlines("hello world\n\n"))
 
     def test_hello_world_three_newlines(self) -> None:
-        self.assertFalse(correct_trailing_newlines('hello world\n\n\n'))
+        self.assertFalse(correct_trailing_newlines("hello world\n\n\n"))
 
     def test_hello_world_multiline(self) -> None:
-        self.assertFalse(correct_trailing_newlines('hello\nworld'))
+        self.assertFalse(correct_trailing_newlines("hello\nworld"))
 
     def test_hello_world_multiline_gap(self) -> None:
-        self.assertTrue(correct_trailing_newlines('hello\n\nworld\n'))
+        self.assertTrue(correct_trailing_newlines("hello\n\nworld\n"))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
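
Taken together, these cases pin down the rule: an empty file passes, and any
other file must end with exactly one newline and contain more than just that
newline. A string-level sketch of the predicate, inferred from the truth table
above rather than taken from the real trailing_newlines module:

    def has_correct_trailing_newline(contents: str) -> bool:
        # Empty files are allowed as-is.
        if contents == "":
            return True
        # Otherwise: more than a lone "\n", ending in exactly one "\n".
        return (
            len(contents) > 1
            and contents.endswith("\n")
            and not contents.endswith("\n\n")
        )
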
diff --git a/tools/test/test_translate_annotations.py b/tools/test/test_translate_annotations.py
index 867decc..92f0c78 100644
--- a/tools/test/test_translate_annotations.py
+++ b/tools/test/test_translate_annotations.py
@@ -3,10 +3,8 @@
 
 from tools.linter.translate_annotations import parse_annotation, parse_diff, translate
 
-flake8_regex \
-    = r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)'
-clang_tidy_regex \
-    = r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]'
+flake8_regex = r"^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)"
+clang_tidy_regex = r"^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]"
 
 # in the below example patch, note that the filenames differ, so the
 # translation should reflect that as well as the line numbers
@@ -14,7 +12,7 @@
 # $ git clone -b 1.0.2 https://github.com/cscorley/whatthepatch.git
 # $ cd whatthepatch/tests/casefiles
 # $ git diff --no-index --unified=0 lao tzu
-lao_tzu_diff = '''
+lao_tzu_diff = """
 diff --git a/lao b/tzu
 index 635ef2c..5af88a8 100644
 --- a/lao
@@ -30,9 +28,9 @@
 +They both may be called deep and profound.
 +Deeper and more profound,
 +The door of all subtleties!
-'''.lstrip()
+""".lstrip()
 
-sparser_diff = '''
+sparser_diff = """
 diff --git a/foo.txt b/bar.txt
 index 27a6dad..6fae323 100644
 --- a/foo.txt
@@ -46,9 +44,9 @@
 @@ -10,2 +8,0 @@ more lines
 -even more
 -even more
-'''.lstrip()
+""".lstrip()
 
-new_file_diff = '''
+new_file_diff = """
 diff --git a/torch/csrc/jit/tensorexpr/operators/conv2d.h b/torch/csrc/jit/tensorexpr/operators/conv2d.h
 new file mode 100644
 index 0000000000..a81eeae346
@@ -74,10 +72,10 @@
 +} // namespace tensorexpr
 +} // namespace jit
 +} // namespace torch
-'''.lstrip()
+""".lstrip()
 
 # fun fact, this example fools VS Code's diff syntax highlighter
-haskell_diff = '''
+haskell_diff = """
 diff --git a/hello.hs b/hello.hs
 index ffb8d4ad14..0872ac9db6 100644
 --- a/hello.hs
@@ -85,7 +83,7 @@
 @@ -1 +1 @@
 --- a/hello/world/example
 +main = putStrLn "Hello, world!"
-'''.lstrip()
+""".lstrip()
 
 
 class TestTranslateAnnotations(unittest.TestCase):
@@ -95,25 +93,25 @@
         self.assertEqual(
             parse_diff(lao_tzu_diff),
             {
-                'old_filename': 'lao',
-                'hunks': [
+                "old_filename": "lao",
+                "hunks": [
                     {
-                        'old_start': 1,
-                        'old_count': 2,
-                        'new_start': 0,
-                        'new_count': 0,
+                        "old_start": 1,
+                        "old_count": 2,
+                        "new_start": 0,
+                        "new_count": 0,
                     },
                     {
-                        'old_start': 4,
-                        'old_count': 1,
-                        'new_start': 2,
-                        'new_count': 2,
+                        "old_start": 4,
+                        "old_count": 1,
+                        "new_start": 2,
+                        "new_count": 2,
                     },
                     {
-                        'old_start': 11,
-                        'old_count': 0,
-                        'new_start': 11,
-                        'new_count': 3,
+                        "old_start": 11,
+                        "old_count": 0,
+                        "new_start": 11,
+                        "new_count": 3,
                     },
                 ],
             },
@@ -123,13 +121,13 @@
         self.assertEqual(
             parse_diff(new_file_diff),
             {
-                'old_filename': None,
-                'hunks': [
+                "old_filename": None,
+                "hunks": [
                     {
-                        'old_start': 0,
-                        'old_count': 0,
-                        'new_start': 1,
-                        'new_count': 19,
+                        "old_start": 0,
+                        "old_count": 0,
+                        "new_start": 1,
+                        "new_count": 19,
                     },
                 ],
             },
@@ -139,13 +137,13 @@
         self.assertEqual(
             parse_diff(haskell_diff),
             {
-                'old_filename': 'hello.hs',
-                'hunks': [
+                "old_filename": "hello.hs",
+                "hunks": [
                     {
-                        'old_start': 1,
-                        'old_count': 1,
-                        'new_start': 1,
-                        'new_count': 1,
+                        "old_start": 1,
+                        "old_count": 1,
+                        "new_start": 1,
+                        "new_count": 1,
                     },
                 ],
             },
@@ -197,7 +195,7 @@
         self.assertEqual(translate(diff, 15), 13)
 
     def test_translate_empty(self) -> None:
-        diff = parse_diff('--- a/foo')
+        diff = parse_diff("--- a/foo")
 
         # again, we start numbering at 1
         self.assertEqual(translate(diff, -1), None)
@@ -252,29 +250,29 @@
     def test_parse_annotation_flake8(self) -> None:
         regex = re.compile(flake8_regex)
         self.assertEqual(
-            parse_annotation(regex, 'README.md:1:3: R100 make a better title'),
+            parse_annotation(regex, "README.md:1:3: R100 make a better title"),
             {
-                'filename': 'README.md',
-                'lineNumber': 1,
-                'columnNumber': 3,
-                'errorCode': 'R100',
-                'errorDesc': 'make a better title',
+                "filename": "README.md",
+                "lineNumber": 1,
+                "columnNumber": 3,
+                "errorCode": "R100",
+                "errorDesc": "make a better title",
             },
         )
 
     def test_parse_annotation_clang_tidy(self) -> None:
         regex = re.compile(clang_tidy_regex)
         self.assertEqual(
-            parse_annotation(regex, 'README.md:2:1: improve description [R200]'),
+            parse_annotation(regex, "README.md:2:1: improve description [R200]"),
             {
-                'filename': 'README.md',
-                'lineNumber': 2,
-                'columnNumber': 1,
-                'errorCode': 'R200',
-                'errorDesc': 'improve description',
+                "filename": "README.md",
+                "lineNumber": 2,
+                "columnNumber": 1,
+                "errorCode": "R200",
+                "errorDesc": "improve description",
             },
         )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
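
The hunk dicts that parse_diff produces are what translate walks to carry a
line number across a patch. The function below is only a sketch of that walk
under assumed conventions (a line inside a hunk's replaced range has no
counterpart and maps to None, and anything before line 1 is rejected, matching
the translate(diff, -1) assertion above); the real translate may differ in
direction and edge cases:

    from typing import Dict, List, Optional

    def translate_line(hunks: List[Dict[str, int]], line: int) -> Optional[int]:
        """Map a line in the post-patch file back to the pre-patch file."""
        if line < 1:
            return None  # line numbering starts at 1
        offset = 0  # net lines removed minus added by earlier hunks
        for hunk in hunks:
            if line < hunk["new_start"]:
                break  # earlier hunks already account for this line
            if line < hunk["new_start"] + hunk["new_count"]:
                return None  # this hunk introduced or rewrote the line
            offset += hunk["old_count"] - hunk["new_count"]
        return line + offset

    # With the lao/tzu hunks above: the two deleted opening lines shift
    # everything down, so translate_line(hunks, 1) == 3.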