Commit 119b66b

ezyang authored and pytorchmergebot committed
Use strict to toggle strict options in MYPYSTRICT (pytorch#118479)
As we force a specific version of mypy, it's OK to use the agglomerated flag.

Signed-off-by: Edward Z. Yang <[email protected]>

Pull Request resolved: pytorch#118479
Approved by: https://github.com/Skylion007, https://github.com/albanD
ghstack dependencies: pytorch#118414, pytorch#118418, pytorch#118432, pytorch#118467, pytorch#118468, pytorch#118469, pytorch#118475
1 parent ecca533 commit 119b66b

8 files changed: 11 additions, 24 deletions


benchmarks/instruction_counts/core/expand.py

Lines changed: 1 addition, 1 deletion

@@ -79,7 +79,7 @@ def _generate_torchscript_file(model_src: str, name: str) -> Optional[str]:
     assert isinstance(
         jit_model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)
     ), f"Expected ScriptFunction or ScriptModule, got: {type(jit_model)}"
-    jit_model.save(artifact_path)
+    jit_model.save(artifact_path)  # type: ignore[call-arg]

     # Cleanup now that we have the actual serialized model.
     os.remove(module_path)

mypy-strict.ini

Lines changed: 1 addition, 14 deletions

@@ -17,21 +17,8 @@ show_column_numbers = True
 warn_no_return = True
 disallow_any_unimported = True

-# Across versions of mypy, the flags toggled by --strict vary. To ensure
-# we have reproducible type check, we instead manually specify the flags
-warn_unused_configs = True
-disallow_any_generics = True
-disallow_subclassing_any = True
-disallow_untyped_calls = True
-disallow_untyped_defs = True
-disallow_incomplete_defs = True
-check_untyped_defs = True
-disallow_untyped_decorators = True
-no_implicit_optional = True
-warn_redundant_casts = True
-warn_return_any = True
+strict = True
 implicit_reexport = False
-strict_equality = True

 # do not reenable this:
 # https://github.com/pytorch/pytorch/pull/60006#issuecomment-866130657
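Since the set of flags implied by --strict can drift between mypy releases, the pinned mypy version is what keeps `strict = True` reproducible. A minimal sketch (assuming the pinned mypy is installed locally; the script name is hypothetical, not part of the repo) for listing what the installed mypy's --strict covers, to compare against the flags that were previously spelled out:

    # check_strict_flags.py -- hypothetical helper, not part of the repo.
    # Prints the lines of `mypy --help` that mention "strict", as a rough way
    # to see which options the installed mypy associates with --strict.
    import subprocess

    help_text = subprocess.run(
        ["mypy", "--help"], capture_output=True, text=True, check=True
    ).stdout

    # The exact help layout varies across mypy versions, which is precisely
    # why the repo pins the version it type-checks with.
    for line in help_text.splitlines():
        if "strict" in line.lower():
            print(line.strip())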

tools/update_masked_docs.py

Lines changed: 1 addition, 1 deletion

@@ -41,7 +41,7 @@ def main() -> None:

     for func_name in sorted(torch.masked._ops.__all__):
         func = getattr(torch.masked._ops, func_name)
-        func_doc = torch.masked._generate_docstring(func)
+        func_doc = torch.masked._generate_docstring(func)  # type: ignore[no-untyped-call, attr-defined]
         _new_content.append(f'{func_name}_docstring = """{func_doc}"""\n')

     new_content = "\n".join(_new_content)

torch/utils/_cxx_pytree.py

Lines changed: 1 addition, 1 deletion

@@ -29,7 +29,7 @@

 import torch

-if torch._running_with_deploy():
+if torch._running_with_deploy():  # type: ignore[no-untyped-call]
     raise ImportError("C++ pytree utilities do not work with torch::deploy.")

 import optree

torch/utils/benchmark/utils/timer.py

Lines changed: 1 addition, 1 deletion

@@ -13,7 +13,7 @@
 __all__ = ["Timer", "timer", "Language"]


-if torch.backends.cuda.is_built() and torch.cuda.is_available():
+if torch.backends.cuda.is_built() and torch.cuda.is_available():  # type: ignore[no-untyped-call]
     def timer() -> float:
         torch.cuda.synchronize()
         return timeit.default_timer()

torch/utils/benchmark/utils/valgrind_wrapper/timer_interface.py

Lines changed: 2 additions, 2 deletions

@@ -465,7 +465,7 @@ def construct(self) -> str:
                 path = os.path.join(self._data_dir, f"{name}.pt")
                 load_lines.append(f"{name} = torch.jit.load({repr(path)})")
                 with open(path, "wb") as f:
-                    torch.jit.save(wrapped_value.value, f)
+                    torch.jit.save(wrapped_value.value, f)  # type: ignore[no-untyped-call]

             else:
                 raise NotImplementedError(

@@ -502,7 +502,7 @@ def __init__(self) -> None:
        ).returncode

        self._build_type: Optional[str] = None
-       build_search = re.search("BUILD_TYPE=(.+),", torch.__config__.show())
+       build_search = re.search("BUILD_TYPE=(.+),", torch.__config__.show())  # type: ignore[no-untyped-call]
        if build_search is not None:
            self._build_type = build_search.groups()[0].split(",")[0]

torchgen/decompositions/gen_jit_decompositions.py

Lines changed: 2 additions, 2 deletions

@@ -55,15 +55,15 @@

 def gen_serialized_decompisitions() -> str:
     return "\n".join(
-        [scripted_func.code for scripted_func in decomposition_table.values()]
+        [scripted_func.code for scripted_func in decomposition_table.values()]  # type: ignore[misc]
     )


 def gen_decomposition_mappings() -> str:
     decomposition_mappings = []
     for schema, scripted_func in decomposition_table.items():
         decomposition_mappings.append(
-            ' {"' + schema + '", "' + scripted_func.name + '"},'
+            ' {"' + schema + '", "' + scripted_func.name + '"},'  # type: ignore[operator]
         )
     return "\n".join(decomposition_mappings)

torchgen/fuse_attention_patterns/gen_attention_patterns.py

Lines changed: 2 additions, 2 deletions

@@ -89,7 +89,7 @@ def serialize_functions() -> None:
     for (
         key,
         kwargs,
-    ) in _get_sfdp_patterns():
+    ) in _get_sfdp_patterns():  # type: ignore[no-untyped-call]
         pattern_name = kwargs["search_fn"].__name__
         gen_kwargs = {
             key: kwargs[key]

@@ -134,5 +134,5 @@ def serialize_functions() -> None:


 if __name__ == "__main__":
-    with torch._subclasses.FakeTensorMode():
+    with torch._subclasses.FakeTensorMode():  # type: ignore[no-untyped-call]
         serialize_functions()
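Most of the hunks above add per-line suppressions rather than new annotations. A standalone sketch of the error class involved (the function names here are illustrative, not from PyTorch): with `strict = True`, `disallow_untyped_calls` is in effect, so calling an unannotated function from annotated code is flagged as `no-untyped-call` unless suppressed.

    # untyped_helper / typed_caller are illustrative names, not PyTorch code.

    def untyped_helper(x):  # no annotations, so mypy treats calls to it as untyped
        return x * 2


    def typed_caller(x: int) -> int:
        # Under strict mode mypy reports a [no-untyped-call] error on the next
        # line; the ignore comment suppresses only that error code, mirroring
        # the per-line suppressions added in this commit.
        return untyped_helper(x)  # type: ignore[no-untyped-call]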
