When I apply flash-diffusion to an SD3 model that has been fine-tuned with DreamBooth LoRAs, I get a lot of errors. I am currently trying to combine Flash-Diffusion with a DreamBooth LoRA checkpoint (pytorch_lora_weights.safetensors) like this:
import torch
from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlashFlowMatchEulerDiscreteScheduler
from peft import PeftModel, LoraConfig, get_peft_model
transformer = SD3Transformer2DModel.from_pretrained(
    "./diffusers/stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="transformer",
    torch_dtype=torch.float16,
)
# the first LoRA (flash-sd3)
flash_lora = PeftModel.from_pretrained(
    transformer,
    "./jasperai/flash-sd3",
    adapter_name="flash"
)
# Create Stable Diffusion 3 Pipeline
pipe = StableDiffusion3Pipeline.from_pretrained(
    "./diffusers/stabilityai/stable-diffusion-3-medium-diffusers",
    transformer=flash_lora,
    torch_dtype=torch.float16,
    text_encoder_3=None,
    tokenizer_3=None
)
# # First attempt: load the second LoRA (test_lora) via PEFT as well
# test_lora_path = "./diffusers/trained-sd3-lora"
# test_lora = PeftModel.from_pretrained(
#     transformer,
#     test_lora_path,
#     adapter_name="test_lora"
# )
# # This raises:
# # ValueError: Can't find 'adapter_config.json' at './diffusers/trained-sd3-lora'
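# Since the DreamBooth training script saves a diffusers-format checkpoint
# (pytorch_lora_weights.safetensors) with no adapter_config.json, I suspect
# it has to go through the pipeline-level loader instead of PEFT, e.g.
# (untested):
# pipe.load_lora_weights(test_lora_path, adapter_name="test_lora")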
# Now trying:
test_lora_path = "./diffusers/trained-sd3-lora"
peft_config = LoraConfig(
    r=8,  # LoRA rank
    lora_alpha=32,  # scaling factor
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
)
transformer = get_peft_model(transformer, peft_config)
transformer.load_adapter(test_lora_path, adapter_name="test_lora")
# An error is reported:
# ValueError Traceback (most recent call last)
# Cell In[7], line 56
# 46 peft_config = LoraConfig(
# 47 r=8, # LoRA rank
# 48 lora_alpha=32, # Scaling factor
# (...)
# 52 task_type="CAUSAL_LM",
# 53 )
# 55
# ---> 56 transformer = get_peft_model(transformer, peft_config)
# 58
# 59 transformer.load_adapter(test_lora_path, adapter_name="test_lora")
# ValueError: Target modules {'v_proj', 'q_proj'} not found in the base model. Please check the target modules and try again.
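# To debug the target_modules error, I listed the module names that actually
# exist in the SD3 transformer; my understanding is that diffusers attention
# projections are named to_q/to_k/to_v rather than the q_proj/v_proj names
# used by language models:
for name, _ in transformer.named_modules():
    if name.endswith(("to_q", "to_k", "to_v")):
        print(name)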
pipe.scheduler = FlashFlowMatchEulerDiscreteScheduler.from_pretrained(
    "/root/autodl-tmp/diffusers/stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="scheduler",
)
pipe.to("cuda", torch.float16)
# Naive merge: add both LoRAs' parameters element-wise into the base weights
def apply_lora_weights(model, lora1, lora2, weight1=0.5, weight2=0.5):
    for param, lora1_param, lora2_param in zip(model.parameters(), lora1.parameters(), lora2.parameters()):
        param.data = (
            param.data
            + weight1 * lora1_param.data
            + weight2 * lora2_param.data
        )
# Combining
apply_lora_weights(pipe.unet, flash_lora, test_lora, weight1=1.0, weight2=1.0)
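# Alternative to raw parameter addition that I considered (untested): PEFT
# can combine adapters registered on the same model, something like
# flash_lora.add_weighted_adapter(
#     adapters=["flash", "test_lora"],
#     weights=[1.0, 1.0],
#     adapter_name="combined",
#     combination_type="linear",
# )
# flash_lora.set_adapter("combined")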
prompt = "... my prompt"
image = pipe(prompt, num_inference_steps=10, guidance_scale=7.5).images[0]
output_path = "./flash_output.png"
image.save(output_path)
print(f"Image saved to {output_path}")
What am I doing wrong, and what is the correct way to combine the Flash-SD3 LoRA with a DreamBooth-trained LoRA on SD3?