
Commit b17d3c2

docs: fix typos (#972)
Found via `codespell -S docs,*.ipynb -L thre,te,erro` and `typos --hidden --format brief`
1 parent 9bfbc9a commit b17d3c2

6 files changed: +13 −13 lines


ChatTTS/model/velocity/block_manager.py

Lines changed: 1 addition & 1 deletion
@@ -156,7 +156,7 @@ def append_slot(self, seq: Sequence) -> Optional[Tuple[int, int]]:
             self.block_sliding_window
             and len(block_table) >= self.block_sliding_window
         ):
-            # re-use a block
+            # reuse a block
             block_table.append(
                 block_table[len(block_table) % self.block_sliding_window]
             )
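
The corrected comment guards the sliding-window path of append_slot. As a standalone sketch (a hypothetical helper, not the repository's block manager), the modulo index is what makes the table cycle over a bounded set of physical blocks once it reaches the window size:

```python
def append_slot_sketch(block_table: list, window: int, fresh_block: int) -> None:
    if len(block_table) >= window:
        # Wrap around: logical position N maps back onto block N % window,
        # so at most `window` distinct physical blocks are ever held.
        block_table.append(block_table[len(block_table) % window])
    else:
        block_table.append(fresh_block)

table = []
for blk in range(6):  # pretend blocks 0..5 are freshly allocated
    append_slot_sketch(table, window=4, fresh_block=blk)
print(table)  # [0, 1, 2, 3, 0, 1] — positions 4 and 5 reuse blocks 0 and 1
```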

ChatTTS/model/velocity/model_runner.py

Lines changed: 6 additions & 6 deletions
@@ -401,9 +401,9 @@ def get_size_or_none(x: Optional[torch.Tensor]):
             broadcast(input_metadata.block_tables, src=0)
             broadcast(sampling_metadata.selected_token_indices, src=0)
         else:
-            receving_list = [None]
-            broadcast_object_list(receving_list, src=0)
-            py_data = receving_list[0]
+            receiving_list = [None]
+            broadcast_object_list(receiving_list, src=0)
+            py_data = receiving_list[0]
             input_tokens = torch.empty(
                 *py_data["input_tokens_size"], dtype=torch.long, device="cuda"
             )
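
The renamed receiving_list follows the usual torch.distributed idiom: non-source ranks pass a placeholder list that broadcast_object_list fills in place. A standalone sketch of that idiom (the helper name is ours, not the model runner's):

```python
import torch.distributed as dist

def broadcast_py_data(py_data=None, src=0):
    """Rank `src` sends any picklable object; every other rank receives it."""
    if dist.get_rank() == src:
        dist.broadcast_object_list([py_data], src=src)
        return py_data
    receiving_list = [None]  # placeholder, filled in place by the broadcast
    dist.broadcast_object_list(receiving_list, src=src)
    return receiving_list[0]
```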
@@ -505,9 +505,9 @@ def execute_model(
         model_executable = self.model

         infer_text = sampling_metadata.seq_groups[0][1].infer_text
-        temperture = sampling_metadata.seq_groups[0][1].temperature
+        temperature = sampling_metadata.seq_groups[0][1].temperature
         if not infer_text:
-            temperture = torch.tensor(temperture).to(input_tokens.device)
+            temperature = torch.tensor(temperature).to(input_tokens.device)
         logits_processors, logits_warpers = sampling_metadata.seq_groups[0][
             1
         ].logits_processors
@@ -553,7 +553,7 @@ def execute_model(
             ),
             hidden_states=hidden_states,
             infer_text=infer_text,
-            temperature=temperture,
+            temperature=temperature,
             logits_processors=logits_processors,
             logits_warpers=logits_warpers,
             min_new_token=min_new_token,
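
Beyond the spelling fix, the renamed line shows the scalar-to-tensor move execute_model performs for the audio path. A hedged, CPU-runnable sketch (the values are made up for illustration):

```python
import torch

input_tokens = torch.zeros(4, dtype=torch.long)  # stand-in for real inputs
temperature = 0.7
# Wrap the Python float so it lives on the same device as the inputs and
# broadcasts against the logits tensor.
temperature = torch.tensor(temperature).to(input_tokens.device)
logits = torch.randn(4, 32)
scaled = logits / temperature  # the 0-dim tensor broadcasts over all logits
```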

ChatTTS/model/velocity/output.py

Lines changed: 2 additions & 2 deletions
@@ -107,14 +107,14 @@ def from_seq_group(cls, seq_group: SequenceGroup) -> "RequestOutput":
         # always has the logprobs of the sampled tokens even if the
         # logprobs are not requested.
         logprobs = None
-        finshed_reason = SequenceStatus.get_finished_reason(seq.status)
+        finished_reason = SequenceStatus.get_finished_reason(seq.status)
         output = CompletionOutput(
             seqs.index(seq),
             seq.output_text,
             seq.get_output_token_ids(),
             seq.get_cumulative_logprob(),
             logprobs,
-            finshed_reason,
+            finished_reason,
             seq.data.hidden_states,
         )
         outputs.append(output)
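
The velocity module is derived from vLLM, where get_finished_reason maps terminal sequence statuses to short strings such as "stop" and "length". A hedged sketch of that mapping (enum members assumed from upstream vLLM, not verified against this fork):

```python
from enum import Enum, auto
from typing import Optional

class SequenceStatus(Enum):
    RUNNING = auto()
    FINISHED_STOPPED = auto()
    FINISHED_LENGTH_CAPPED = auto()
    FINISHED_ABORTED = auto()

    @staticmethod
    def get_finished_reason(status: "SequenceStatus") -> Optional[str]:
        # None while the sequence is still running, per the upstream contract.
        return {
            SequenceStatus.FINISHED_STOPPED: "stop",
            SequenceStatus.FINISHED_LENGTH_CAPPED: "length",
            SequenceStatus.FINISHED_ABORTED: "abort",
        }.get(status)
```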

ChatTTS/model/velocity/scheduler.py

Lines changed: 1 addition & 1 deletion
@@ -128,7 +128,7 @@ def get_num_unfinished_seq_groups(self) -> int:
         return len(self.waiting) + len(self.running) + len(self.swapped)

     def _schedule(self) -> SchedulerOutputs:
-        # Blocks that need to be swaped or copied before model execution.
+        # Blocks that need to be swapped or copied before model execution.
         blocks_to_swap_in: Dict[int, int] = {}
         blocks_to_swap_out: Dict[int, int] = {}
         blocks_to_copy: Dict[int, List[int]] = {}
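
For context on the corrected comment, a hedged reading of the three maps _schedule builds (semantics as in upstream vLLM, assumed to carry over to this fork):

```python
from typing import Dict, List

blocks_to_swap_in: Dict[int, int] = {3: 17}    # CPU block 3 -> GPU block 17
blocks_to_swap_out: Dict[int, int] = {17: 3}   # GPU block 17 -> CPU block 3
blocks_to_copy: Dict[int, List[int]] = {5: [8, 9]}  # copy-on-write fan-out
```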

ChatTTS/utils/gpu.py

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ def select_device(min_memory=2047, experimental=False):
     """
     if experimental:
         # For Apple M1/M2 chips with Metal Performance Shaders
-        logger.get_logger().warning("experimantal: found apple GPU, using MPS.")
+        logger.get_logger().warning("experimental: found apple GPU, using MPS.")
         device = torch.device("mps")
     else:
         logger.get_logger().info("found Apple GPU, but use CPU.")
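
select_device only reaches this branch when experimental is set. A standalone sketch of guarded MPS selection using PyTorch's public availability probe (the fallback policy here is illustrative, not select_device's):

```python
import torch

def pick_apple_device(experimental: bool = False) -> torch.device:
    # torch.backends.mps.is_available() is the supported check for the
    # Metal Performance Shaders backend on Apple silicon.
    if experimental and torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")  # conservative default
```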

examples/web/funcs.py

Lines changed: 2 additions & 2 deletions
@@ -102,7 +102,7 @@ def reload_chat(coef: Optional[str]) -> str:
         chat.unload()
         gr.Info("Model unloaded.")
     if len(coef) != 230:
-        gr.Warning("Ingore invalid DVAE coefficient.")
+        gr.Warning("Ignore invalid DVAE coefficient.")
         coef = None
     try:
         global custom_path
@@ -111,7 +111,7 @@ def reload_chat(coef: Optional[str]) -> str:
         raise gr.Error(str(e))
     if not ret:
         raise gr.Error("Unable to load model.")
-    gr.Info("Reload succeess.")
+    gr.Info("Reload success.")
     return chat.coef
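
The two corrected strings are Gradio in-UI messages. A minimal sketch of the three message levels reload_chat relies on (the handler and its wiring are hypothetical):

```python
import gradio as gr

def reload(ok: bool) -> str:
    gr.Warning("Ignore invalid DVAE coefficient.")  # non-fatal toast
    if not ok:
        raise gr.Error("Unable to load model.")     # aborts with an error toast
    gr.Info("Reload success.")                      # informational toast
    return "done"

demo = gr.Interface(fn=reload, inputs=gr.Checkbox(label="ok"), outputs="text")
```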
