Commit 44826cd

Use ruff to format and lint Python code (#8684)
1 parent 990eb5f commit 44826cd

Some content is hidden: large commits have some content hidden by default, so only a subset of the 56 changed files is shown below.

56 files changed: +1071 −844 lines changed

.github/workflows/presubmit.yml

Lines changed: 8 additions & 4 deletions
@@ -17,21 +17,25 @@ permissions:
 
 jobs:
   check_clang_format:
-    name: Check clang-format
+    name: Check clang-format and ruff
     runs-on: macos-14
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+
       - name: Install clang-format
         run: brew install llvm@19
+
       - name: Check clang-format
         run: ./run-clang-format.sh -c
         env:
           CLANG_FORMAT_LLVM_INSTALL_DIR: /opt/homebrew/opt/llvm@19
+
+      - uses: astral-sh/ruff-action@v3
   check_clang_tidy:
     name: Check clang-tidy
     runs-on: macos-14
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install clang-tidy
         run: brew install llvm@19 ninja lld@19
       - name: Run clang-tidy
@@ -42,7 +46,7 @@ jobs:
     name: Check CMake file lists
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Run test sources check
         run: |
           shopt -s nullglob
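
The presubmit job now checks Python code with ruff (via astral-sh/ruff-action@v3) alongside clang-format. A rough local equivalent of that step, assuming ruff is installed (for example with pip install ruff), is sketched below; the exact arguments the action uses are not shown in this commit.

# Rough local equivalent of the new ruff CI step; assumes `ruff` is on PATH.
# Not part of this commit -- the action's exact invocation may differ.
import subprocess

subprocess.run(["ruff", "check", "."], check=True)               # lint, as the action runs
subprocess.run(["ruff", "format", "--check", "."], check=True)   # verify formatting without rewriting files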

apps/HelloPyTorch/modules.py

Lines changed: 34 additions & 25 deletions
@@ -7,6 +7,7 @@
 # TODO(mgharbi): maybe find a way to wrap function and module directly in C++
 # instead of generating the C++ wrapper on the fly?
 
+
 def _dispatch(opname, optype=th.float32, cuda=False):
     """
     Helper function that matches an opname and type to the Halide backend.
@@ -23,7 +24,7 @@ def _dispatch(opname, optype=th.float32, cuda=False):
         op: a python function wrapping the requested Halide operator.
     """
 
-    assert type(opname) == str, "opname should be a string"
+    assert type(opname) is str, "opname should be a string"
     assert type(optype) == th.dtype, "optype should be a tensor datatype (torch.dtype)"
 
     if cuda:
@@ -34,12 +35,13 @@ def _dispatch(opname, optype=th.float32, cuda=False):
     elif optype == th.float64:
         opname += "_float64"
     else:
-        raise ValueError("Optype %s not recognized %s" % optype)
+        raise ValueError("Optype {} not recognized {}".format(*optype))
     op = getattr(ops, opname)
     if not hasattr(ops, opname):
-        raise ValueError("Module has no operator %s" % opname)
+        raise ValueError(f"Module has no operator {opname}")
     return op
 
+
 def _forward_common(ctx, input_a, input_b):
     tp = input_a.dtype
     cuda = input_a.is_cuda
@@ -57,6 +59,7 @@ def _forward_common(ctx, input_a, input_b):
     fn_(input_a, input_b, out)
     return out
 
+
 def _backward_common(ctx, d_out, backward_op):
     tp = d_out.dtype
     cuda = d_out.is_cuda
@@ -75,48 +78,54 @@ def _backward_common(ctx, d_out, backward_op):
     fn_(input_a, input_b, d_out.contiguous(), d_input_a, d_input_b)
     return d_input_a, d_input_b
 
+
 # TODO(srj): surely there's a better way to do this,
 # but PyTorch seems to make it tricky to pass in
 # extra info to the backward() method.
 class AddFunction_Grad(th.autograd.Function):
-  """Version using the manually-written backprop"""
-  def __init__(self):
-    super(AddFunction_Grad, self).__init__()
+    """Version using the manually-written backprop"""
+
+    def __init__(self):
+        super().__init__()
 
-  @staticmethod
-  def forward(ctx, input_a, input_b):
-    return _forward_common(ctx, input_a, input_b)
+    @staticmethod
+    def forward(ctx, input_a, input_b):
+        return _forward_common(ctx, input_a, input_b)
+
+    @staticmethod
+    def backward(ctx, d_out):
+        return _backward_common(ctx, d_out, "add_grad")
 
-  @staticmethod
-  def backward(ctx, d_out):
-    return _backward_common(ctx, d_out, "add_grad")
 
 class AddFunction_HalideGrad(th.autograd.Function):
-  """Version using the Halide-generated backprop"""
-  def __init__(self):
-    super(AddFunction_HalideGrad, self).__init__()
+    """Version using the Halide-generated backprop"""
+
+    def __init__(self):
+        super().__init__()
 
-  @staticmethod
-  def forward(ctx, input_a, input_b):
-    return _forward_common(ctx, input_a, input_b)
+    @staticmethod
+    def forward(ctx, input_a, input_b):
+        return _forward_common(ctx, input_a, input_b)
+
+    @staticmethod
+    def backward(ctx, d_out):
+        return _backward_common(ctx, d_out, "add_halidegrad")
 
-  @staticmethod
-  def backward(ctx, d_out):
-    return _backward_common(ctx, d_out, "add_halidegrad")
 
 class Add(th.nn.Module):
     """Defines a module that uses our autograd function.
 
     This is so we can use it as an operator.
     """
+
     def __init__(self, backward_op):
-        super(Add, self).__init__()
+        super().__init__()
         if backward_op == "add_grad":
-          self._adder = AddFunction_Grad
+            self._adder = AddFunction_Grad
         elif backward_op == "add_halidegrad":
-          self._adder = AddFunction_HalideGrad
+            self._adder = AddFunction_HalideGrad
         else:
-          assert False
+            assert False
 
     def forward(self, a, b):
         return self._adder.apply(a, b)
apps/HelloPyTorch/setup.py

Lines changed: 40 additions & 29 deletions
@@ -1,41 +1,41 @@
 """Synthesizes the cpp wrapper code and builds dynamic Python extension."""
+
 import os
 import platform
 import re
-from setuptools import setup, find_packages
+from setuptools import setup
 
 from torch.utils.cpp_extension import BuildExtension
-import torch as th
 
 
 def generate_pybind_wrapper(path, headers, has_cuda):
-    s = "#include \"torch/extension.h\"\n\n"
+    s = '#include "torch/extension.h"\n\n'
     if has_cuda:
-        s += "#include \"HalidePyTorchCudaHelpers.h\"\n"
-        s += "#include \"HalidePyTorchHelpers.h\"\n"
+        s += '#include "HalidePyTorchCudaHelpers.h"\n'
+        s += '#include "HalidePyTorchHelpers.h"\n'
     for h in headers:
-        s += "#include \"{}\"\n".format(os.path.splitext(h)[0]+".pytorch.h")
+        s += '#include "{}"\n'.format(os.path.splitext(h)[0] + ".pytorch.h")
 
     s += "\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n"
     for h in headers:
         name = os.path.splitext(h)[0]
-        s += " m.def(\"{}\", &{}_th_, \"PyTorch wrapper of the Halide pipeline {}\");\n".format(
-            name, name, name)
+        s += f' m.def("{name}", &{name}_th_, "PyTorch wrapper of the Halide pipeline {name}");\n'
     s += "}\n"
-    with open(path, 'w') as fid:
+    with open(path, "w") as fid:
         fid.write(s)
 
+
 if __name__ == "__main__":
     # This is where the generate Halide ops headers live. We also generate the .cpp
     # wrapper in this directory
     build_dir = os.getenv("BIN")
     if build_dir is None or not os.path.exists(build_dir):
-        raise ValueError("Bin directory {} is invalid".format(build_dir))
+        raise ValueError(f"Bin directory {build_dir} is invalid")
 
     # Path to a distribution of Halide
     halide_dir = os.getenv("HALIDE_DISTRIB_PATH")
     if halide_dir is None or not os.path.exists(halide_dir):
-        raise ValueError("Halide directory {} is invalid".format(halide_dir))
+        raise ValueError(f"Halide directory {halide_dir} is invalid")
 
     has_cuda = os.getenv("HAS_CUDA")
     if has_cuda is None or has_cuda == "0":
@@ -76,27 +76,38 @@ def generate_pybind_wrapper(path, headers, has_cuda):
         print("Generating CUDA wrapper")
         generate_pybind_wrapper(wrapper_path, hl_headers, True)
         from torch.utils.cpp_extension import CUDAExtension
-        extension = CUDAExtension(ext_name, sources,
-                                  include_dirs=include_dirs,
-                                  extra_objects=hl_libs,
-                                  libraries=["cuda"],  # Halide ops need the full cuda lib, not just the RT library
-                                  extra_compile_args=compile_args)
+
+        extension = CUDAExtension(
+            ext_name,
+            sources,
+            include_dirs=include_dirs,
+            extra_objects=hl_libs,
+            libraries=[
+                "cuda"
+            ],  # Halide ops need the full cuda lib, not just the RT library
+            extra_compile_args=compile_args,
+        )
     else:
         print("Generating CPU wrapper")
         generate_pybind_wrapper(wrapper_path, hl_headers, False)
         from torch.utils.cpp_extension import CppExtension
-        extension = CppExtension(ext_name, sources,
-                                 include_dirs=include_dirs,
-                                 extra_objects=hl_libs,
-                                 extra_compile_args=compile_args)
+
+        extension = CppExtension(
+            ext_name,
+            sources,
+            include_dirs=include_dirs,
+            extra_objects=hl_libs,
+            extra_compile_args=compile_args,
+        )
 
     # Build the Python extension module
-    setup(name=ext_name,
-          verbose=True,
-          url="",
-          author_email="[email protected]",
-          author="Some Author",
-          version="0.0.0",
-          ext_modules=[extension],
-          cmdclass={"build_ext": BuildExtension}
-          )
+    setup(
+        name=ext_name,
+        verbose=True,
+        url="",
+        author_email="[email protected]",
+        author="Some Author",
+        version="0.0.0",
+        ext_modules=[extension],
+        cmdclass={"build_ext": BuildExtension},
+    )
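
To make the wrapper generation concrete, here is a sketch of what generate_pybind_wrapper emits for a single Halide-generated header; the header name is invented for the example.

# Hypothetical example (header name made up); follows the string-building code above.
generate_pybind_wrapper("wrapper.cpp", ["add_float32.h"], has_cuda=False)
# wrapper.cpp then contains roughly:
#   #include "torch/extension.h"
#
#   #include "add_float32.pytorch.h"
#
#   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
#    m.def("add_float32", &add_float32_th_, "PyTorch wrapper of the Halide pipeline add_float32");
#   }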

apps/HelloPyTorch/test.py

Lines changed: 7 additions & 10 deletions
@@ -1,7 +1,5 @@
 """Verifies the Halide operator functions properly."""
 
-
-import os
 import unittest
 import warnings
 
@@ -12,8 +10,8 @@
 class TestAdd(unittest.TestCase):
     def setUp(self):
         self.a = th.ones(1, 2, 8, 8)
-        self.b = th.ones(1, 2, 8, 8)*3
-        self.gt = th.ones(1, 2, 8, 8)*4
+        self.b = th.ones(1, 2, 8, 8) * 3
+        self.gt = th.ones(1, 2, 8, 8) * 4
 
     def test_cpu_single(self):
         self._test_add(is_double=False)
@@ -53,23 +51,22 @@ def _test_add(self, is_cuda=False, is_double=False):
         else:
             print(" .Single-precision mode, backward_op:", backward_op)
 
-        diff = (output-self.gt).sum().item()
-        assert diff == 0.0, "Test failed: sum should be 4, got %f" % diff
+        diff = (output - self.gt).sum().item()
+        assert diff == 0.0, f"Test failed: sum should be 4, got {diff:f}"
 
         self.a.requires_grad = True
         self.b.requires_grad = True
 
         for i in range(100):
             output = add(self.a, self.b).sum()
             output.backward()
-
+
         # Inputs are float, the gradient checker wants double inputs and
         # will issue a warning.
-        warnings.filterwarnings(
-            "ignore", module=r".*gradcheck*")
+        warnings.filterwarnings("ignore", module=r".*gradcheck*")
 
         # Test the gradient is correct
-        res = th.autograd.gradcheck(add, [self.a, self.b], eps=1e-2)
+        th.autograd.gradcheck(add, [self.a, self.b], eps=1e-2)
 
         print(" Test ran successfully: difference is", diff)
 
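A small aside on the gradcheck call above: the warning being filtered comes from passing single-precision inputs. A self-contained sketch of the same check with double-precision tensors (using torch's built-in add rather than the Halide op, so it runs anywhere) would be:

# Standalone sketch, not part of this commit: gradcheck with float64 inputs,
# which avoids the warning the test suppresses for float32.
import torch as th

a = th.ones(1, 2, 8, 8, dtype=th.float64, requires_grad=True)
b = th.full((1, 2, 8, 8), 3.0, dtype=th.float64, requires_grad=True)
assert th.autograd.gradcheck(th.add, (a, b), eps=1e-2)
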
Lines changed (file path hidden for this large commit): 11 additions & 7 deletions
@@ -1,22 +1,26 @@
 import tensorflow as tf
 import time
 
-with tf.device('/GPU:0'):
-
+with tf.device("/GPU:0"):
     img = tf.random.uniform([4, 112, 112, 32])
     depthwise_filter = tf.random.uniform([3, 3, 32, 1])
     pointwise_filter = tf.random.uniform([1, 1, 32 * 1, 16])
-
+
     best = None
     num_trials = 10
     num_iter = 10
     for j in range(num_trials):
         start = time.time()
         for i in range(num_iter):
             out = tf.nn.separable_conv2d(
-                img, depthwise_filter, pointwise_filter,
-                strides = (1, 1, 1, 1), padding = 'VALID')
+                img,
+                depthwise_filter,
+                pointwise_filter,
+                strides=(1, 1, 1, 1),
+                padding="VALID",
+            )
         end = time.time()
         t = (end - start) / num_iter
-        if not best or t < best: best = t
-        print('time: {} ms'.format(1000 * best))
+        if not best or t < best:
+            best = t
+        print(f"time: {1000 * best} ms")
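
For clarity on what the reformatted loop reports: each trial times num_iter calls to tf.nn.separable_conv2d, t is the average seconds per call for that trial, and best tracks the fastest trial so far, so the printed value is the best average call time in milliseconds. A tiny worked example with made-up numbers:

# Made-up numbers, only to illustrate the reported figure.
trial_averages = [0.0051, 0.0047, 0.0042]  # average seconds per call, one entry per trial
best = min(trial_averages)
print(f"time: {1000 * best} ms")  # prints something like "time: 4.2 ms"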
