Skip to content

Commit 117ec9d

Browse files
committed
fix code-quality (lint/format) failures by wrapping long lines and normalizing spacing
Signed-off-by: Dan Huang <[email protected]>
1 parent 318bd3d commit 117ec9d

File tree

1 file changed

+49
-18
lines changed

1 file changed

+49
-18
lines changed

tests/e2e/vLLM/test_vllm.py

Lines changed: 49 additions and 18 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,6 @@
1515
from tests.test_timer.timer_utils import get_singleton_manager, log_time
1616
from tests.testing_utils import requires_gpu
1717

18-
1918
HF_MODEL_HUB_NAME = "nm-testing"
2019

2120
TEST_DATA_FILE = os.environ.get(
@@ -25,7 +24,7 @@
2524
# vllm environment: same (default), the path of vllm virtualenv, deployed runner name
2625
VLLM_PYTHON_ENV = os.environ.get("VLLM_PYTHON_ENV", "same")
2726
IS_VLLM_IMAGE = False
28-
RUN_SAVE_DIR=os.environ.get("RUN_SAVE_DIR", "none")
27+
RUN_SAVE_DIR = os.environ.get("RUN_SAVE_DIR", "none")
2928
# when using vllm image, needs to save the generated model
3029
if VLLM_PYTHON_ENV.lower() != "same" and (not Path(VLLM_PYTHON_ENV).exists()):
3130
IS_VLLM_IMAGE = True
@@ -40,6 +39,7 @@
4039
"tokenizer.json",
4140
]
4241

42+
4343
# Will run each test case in its own process through run_tests.sh
4444
# emulating vLLM CI testing
4545
@requires_gpu(1)
@@ -63,7 +63,6 @@ class TestvLLM:
6363
be used for quantization. Otherwise, the recipe will always be used if given.
6464
""" # noqa: E501
6565

66-
6766
def set_up(self, test_data_file: str):
6867
eval_config = yaml.safe_load(Path(test_data_file).read_text(encoding="utf-8"))
6968

@@ -90,10 +89,14 @@ def set_up(self, test_data_file: str):
9089
self.vllm_env = VLLM_PYTHON_ENV
9190

9291
if RUN_SAVE_DIR != "none":
93-
assert Path(RUN_SAVE_DIR).exists(), f"RUN_SAVE_DIR path doesn't exist: {RUN_SAVE_DIR}"
92+
assert Path(
93+
RUN_SAVE_DIR
94+
).exists(), f"RUN_SAVE_DIR path doesn't exist: {RUN_SAVE_DIR}"
9495
self.run_save_dir = RUN_SAVE_DIR
9596
# RUN_SAVE_DIR overwrites config save_dir if specified
96-
self.save_dir = os.path.join(RUN_SAVE_DIR, self.model.split("/")[1] + f"-{self.scheme}")
97+
self.save_dir = os.path.join(
98+
RUN_SAVE_DIR, self.model.split("/")[1] + f"-{self.scheme}"
99+
)
97100

98101
if not self.save_dir:
99102
self.save_dir = self.model.split("/")[1] + f"-{self.scheme}"
@@ -174,15 +177,21 @@ def test_vllm(self, test_data_file: str):
174177
if VLLM_PYTHON_ENV.lower() == "same":
175178
logger.info("========== RUNNING vLLM in the same python env ==========")
176179
else:
177-
logger.info("========== RUNNING vLLM in a separate python env ==========")
180+
logger.info(
181+
"========== RUNNING vLLM in a separate python env =========="
182+
)
178183

179184
self._run_vllm(logger)
180185

181186
self.tear_down()
182187

183188
def tear_down(self):
184189
# model save_dir is needed for vllm image testing
185-
if not IS_VLLM_IMAGE and self.save_dir is not None and os.path.isdir(self.save_dir):
190+
if (
191+
not IS_VLLM_IMAGE
192+
and self.save_dir is not None
193+
and os.path.isdir(self.save_dir)
194+
):
186195
shutil.rmtree(self.save_dir)
187196

188197
timer = get_singleton_manager()
@@ -226,35 +235,57 @@ def _run_vllm(self, logger):
226235
if IS_VLLM_IMAGE:
227236
# generate python command to run in the vllm image
228237
run_file_path = os.path.join(RUN_SAVE_DIR, "run_vllm.py")
229-
shutil.copy(os.path.join(test_file_dir, "run_vllm.py"),
230-
os.path.join(RUN_SAVE_DIR, "run_vllm.py"))
231-
cmds = ["python", run_file_path, f"'{json_scheme}'",
232-
f"'{json_llm_kwargs}'", f"'{json_prompts}'"]
238+
shutil.copy(
239+
os.path.join(test_file_dir, "run_vllm.py"),
240+
os.path.join(RUN_SAVE_DIR, "run_vllm.py"),
241+
)
242+
cmds = [
243+
"python",
244+
run_file_path,
245+
f"'{json_scheme}'",
246+
f"'{json_llm_kwargs}'",
247+
f"'{json_prompts}'",
248+
]
233249
vllm_cmd = " ".join(cmds)
234250
with open(self.vllm_bash, "w") as cf:
235-
cf.write(f"""#!/bin/bash
251+
cf.write(
252+
f"""#!/bin/bash
236253
export HF_HUB_OFFLINE=0
237254
export VLLM_NO_USAGE_STATS=1
238255
{vllm_cmd}
239-
""")
256+
"""
257+
)
240258
os.chmod(self.vllm_bash, 0o755)
241259
logger.info(f"Wrote vllm cmd into {self.vllm_bash}:")
242260
logger.info("vllm image. Run vllm cmd with kubectl.")
243261
result = subprocess.Popen(
244262
[
245-
"kubectl", "exec", "-it",
246-
VLLM_PYTHON_ENV, "-n", "arc-runners",
247-
"--", "/bin/bash", self.vllm_bash,
263+
"kubectl",
264+
"exec",
265+
"-it",
266+
VLLM_PYTHON_ENV,
267+
"-n",
268+
"arc-runners",
269+
"--",
270+
"/bin/bash",
271+
self.vllm_bash,
248272
],
249273
stdout=subprocess.PIPE,
250274
stderr=subprocess.PIPE,
251-
text=True)
275+
text=True,
276+
)
252277
else:
253278
run_file_path = os.path.join(test_file_dir, "run_vllm.py")
254279
logger.info("Run vllm in subprocess.Popen using python env:")
255280
logger.info(self.vllm_env)
256281
result = subprocess.Popen(
257-
[self.vllm_env, run_file_path, json_scheme, json_llm_kwargs, json_prompts],
282+
[
283+
self.vllm_env,
284+
run_file_path,
285+
json_scheme,
286+
json_llm_kwargs,
287+
json_prompts,
288+
],
258289
stdout=subprocess.PIPE,
259290
stderr=subprocess.PIPE,
260291
text=True,

0 commit comments

Comments
 (0)