
Commit 6f699c1

Author: Lincoln Stein (committed)
buggy with multiple debug statements
1 parent e26360f commit 6f699c1

File tree: 5 files changed, +14 −6 lines changed


invokeai/app/invocations/compel.py

Lines changed: 7 additions & 1 deletion
@@ -1,5 +1,5 @@
 from typing import Iterator, List, Optional, Tuple, Union, cast
-
+import threading # for debugging only
 import torch
 from compel import Compel, ReturnedEmbeddingsType
 from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
@@ -91,6 +91,9 @@ def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
                 ti_manager,
             ),
         ):
+
+            print(f'DEBUG: compel: tid={threading.current_thread().ident}, gpu={TorchDevice.choose_torch_device()}, text_encoder={text_encoder.device}')
+
             assert isinstance(text_encoder, CLIPTextModel)
             assert isinstance(tokenizer, CLIPTokenizer)
             compel = Compel(
@@ -99,6 +102,7 @@ def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
                 textual_inversion_manager=ti_manager,
                 dtype_for_device_getter=TorchDevice.choose_torch_dtype,
                 truncate_long_prompts=False,
+                device=TorchDevice.choose_torch_device(),
             )
 
             conjunction = Compel.parse_prompt_string(self.prompt)
@@ -113,6 +117,8 @@ def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
             conditioning_data = ConditioningFieldData(conditionings=[BasicConditioningInfo(embeds=c)])
 
             conditioning_name = context.conditioning.save(conditioning_data)
+            print(f'DEBUG: conditioning_name={conditioning_name}')
+
             return ConditioningOutput(
                 conditioning=ConditioningField(
                     conditioning_name=conditioning_name,
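Both prints in this file are temporary instrumentation: they record which thread executed the node, which device TorchDevice selected for it, and where the text encoder's weights actually live — the kind of trace that exposes cross-device mixups in a multi-GPU setup. A reusable version of the same idea might look like the sketch below; the debug_device helper is hypothetical, not part of this commit:

import threading
from typing import Union

import torch


def debug_device(tag: str, obj: Union[torch.Tensor, torch.nn.Module]) -> None:
    # Hypothetical helper: report the current thread id and the device a
    # tensor (or a module's first parameter) actually resides on.
    device = next(obj.parameters()).device if isinstance(obj, torch.nn.Module) else obj.device
    print(f'DEBUG: {tag}: tid={threading.current_thread().ident}, device={device}')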

invokeai/app/invocations/latent.py

Lines changed: 3 additions & 0 deletions
@@ -1,6 +1,8 @@
 # Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
 import inspect
 import math
+import threading # for debugging
+
 from contextlib import ExitStack
 from functools import singledispatchmethod
 from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union
@@ -1078,6 +1080,7 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
         with torch.inference_mode():
             # copied from diffusers pipeline
             latents = latents / vae.config.scaling_factor
+            print(f'DEBUG: tid={threading.current_thread().ident}, gpu={TorchDevice.choose_torch_device()}, latent_device={latents.device}')
             image = vae.decode(latents, return_dict=False)[0]
             image = (image / 2 + 0.5).clamp(0, 1) # denormalize
             # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
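This print checks that the latents about to be decoded sit on the same device the worker thread chose. If a mismatch did show up, one defensive fix (not part of this commit, and only a sketch) would be to move the latents onto the VAE's own device and dtype before decoding:

# Sketch only: align latents with the VAE's weights before decoding.
vae_param = next(vae.parameters())
if latents.device != vae_param.device or latents.dtype != vae_param.dtype:
    latents = latents.to(device=vae_param.device, dtype=vae_param.dtype)
image = vae.decode(latents, return_dict=False)[0]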

invokeai/app/services/invocation_stats/invocation_stats_default.py

Lines changed: 3 additions & 3 deletions
@@ -74,9 +74,9 @@ def collect_stats(self, invocation: BaseInvocation, graph_execution_state_id: str
         )
         self._stats[graph_execution_state_id].add_node_execution_stats(node_stats)
 
-    def reset_stats(self):
-        self._stats = {}
-        self._cache_stats = {}
+    def reset_stats(self, graph_execution_state_id: str):
+        self._stats.pop(graph_execution_state_id)
+        self._cache_stats.pop(graph_execution_state_id)
 
     def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
         graph_stats_summary = self._get_graph_summary(graph_execution_state_id)
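Note that dict.pop with a single argument raises KeyError when the graph id is absent, while the caller in session_processor_default.py (below) suppresses GESStatsNotFoundError, not KeyError. A variant consistent with that caller would translate the miss into the service's own error type — a sketch, not the commit's code:

    def reset_stats(self, graph_execution_state_id: str) -> None:
        # Sketch: drop one graph's stats, reporting a missing id with the
        # error type the session processor already suppresses.
        try:
            self._stats.pop(graph_execution_state_id)
            self._cache_stats.pop(graph_execution_state_id)
        except KeyError as e:
            raise GESStatsNotFoundError(
                f'No statistics found for graph {graph_execution_state_id}'
            ) from e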

invokeai/app/services/session_processor/session_processor_default.py

Lines changed: 1 addition & 1 deletion
@@ -214,7 +214,7 @@ def _on_after_run_session(self, queue_item: SessionQueueItem) -> None:
             # we don't care about that - suppress the error.
             with suppress(GESStatsNotFoundError):
                 self._services.performance_statistics.log_stats(queue_item.session.id)
-                self._services.performance_statistics.reset_stats()
+                self._services.performance_statistics.reset_stats(queue_item.session.id)
 
         for callback in self._on_after_run_session_callbacks:
             callback(queue_item=queue_item)
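For reference, contextlib.suppress swallows only the named exception type, and the block exits at the first suppressed raise: if log_stats raises GESStatsNotFoundError, the following reset_stats call never runs. A minimal, self-contained illustration:

from contextlib import suppress


class GESStatsNotFoundError(Exception):  # stand-in for the real error type
    pass


def log_stats(session_id: str) -> None:
    raise GESStatsNotFoundError(session_id)


with suppress(GESStatsNotFoundError):
    log_stats('abc')
    print('never reached')  # skipped: the block exits at the suppressed raise
print('execution continues here')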

invokeai/app/services/shared/invocation_context.py

Lines changed: 0 additions & 1 deletion
@@ -325,7 +325,6 @@ def load(self, name: str) -> ConditioningFieldData:
         Returns:
             The loaded conditioning data.
         """
-
         return self._services.conditioning.load(name)