Chapter 1: Model Loading and Basic Generation
1.1 Basic Model Loading
from diffusers import StableDiffusionPipeline
import torch

# Load the SD 1.5 base model (FP32 precision)
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32
).to("cuda")

# Generate a first image
image = pipe("a cat wearing sunglasses").images[0]
image.save("basic_cat.png")
1.2 Half-Precision Optimized Loading
from diffusers import StableDiffusionXLPipeline

# Load the SDXL model (FP16 precision + xformers acceleration)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.enable_xformers_memory_efficient_attention()  # requires the xformers package

# Generate a high-resolution image
image = pipe("cyberpunk city at night, 8k").images[0]
image.save("cyber_city.png")
1.3 Adapting to Low-VRAM Devices
# CPU offload mode (suitable for GPUs with less than 6 GB of VRAM)
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # keeps each submodule on the GPU only while it is running

image = pipe("watercolor landscape").images[0]
image.save("low_vram_image.png")
Chapter 2: Prompt Engineering
2.1 Structured Prompt Templates
# Structured prompt: subject / style / environment / quality
template = (
    "(cute corgi:1.3) wearing {accessory}, "   # subject
    "Studio Ghibli style, soft shading, "      # style
    "in a flower field at sunset, "            # environment
    "4k resolution, bokeh effect"              # quality
)
negative_prompt = "blurry, low quality"        # negative prompt (a separate argument in diffusers)

image = pipe(template.format(accessory="sunglasses"), negative_prompt=negative_prompt).images[0]
image.save("styled_corgi.png")
2.2 Dynamic Weight Control
# Use numeric weights to adjust the importance of each element
prompt = """
(a beautiful castle:1.5) on a cliff,
(medieval style:0.8) with (futuristic elements:0.6),
intricate details, 8k cinematic lighting
"""

image = pipe(prompt, guidance_scale=7).images[0]
image.save("hybrid_castle.png")
2.3 Mixed-Language Prompts
# Mixed Chinese/English prompt (needs multilingual text-encoder support; the stock SD 1.5
# CLIP encoder is trained mostly on English, so non-English tokens may be only weakly understood)
prompt = """
A girl in 漢服 (hanfu:1.2) standing by 西湖 (West Lake),
水墨畫風格 (ink wash painting style), ultra detailed
"""

image = pipe(prompt).images[0]
image.save("hanfu_girl.png")
Chapter 3: Advanced Control Techniques
3.1 Pose Control with ControlNet
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from PIL import Image
import torch

# Load the OpenPose ControlNet
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# Pose reference image (an OpenPose skeleton rendering)
pose_image = Image.open("pose_ref.png")
image = pipe("dancing woman", image=pose_image).images[0]
image.save("controlled_dance.png")
3.2 LoRA Style Stacking
from diffusers import StableDiffusionXLPipeline
import torch

# Load the base model
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Load an anime-style LoRA
pipe.load_lora_weights("lora/anime_style_xl.safetensors")

image = pipe("a warrior in armor").images[0]
image.save("anime_warrior.png")
3.3 Combining Multiple ControlNets
# Use Canny edge and depth-map control at the same time
controlnets = [
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16),
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16),
]
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnets, torch_dtype=torch.float16
).to("cuda")

# Provide one control image per ControlNet
canny_img = Image.open("edge.png")
depth_img = Image.open("depth.png")
image = pipe("futuristic car", image=[canny_img, depth_img]).images[0]
image.save("multi_control_car.png")
Chapter 4: Performance Optimization
4.1 TensorRT Acceleration
from diffusers import DiffusionPipeline
import torch
import torch_tensorrt

# Compile the UNet with Torch-TensorRT (sketch only: a complete export must describe
# all UNet inputs - latents, timestep, and text embeddings - not just the latent tensor)
pipe = DiffusionPipeline.from_pretrained(...)
trt_unet = torch_tensorrt.compile(
    pipe.unet,
    inputs=[torch.randn(1, 4, 64, 64).to("cuda")],
    enabled_precisions={torch.float16},
)
pipe.unet = trt_unet

# Accelerated generation
image = pipe("speed test image").images[0]
4.2 Batched Generation
# Generate 4 images from different prompts in a single batch
prompts = ["a red rose", "a blue rose", "a golden rose", "a black rose"]

images = pipe(prompt=prompts, num_images_per_prompt=1).images
for idx, img in enumerate(images):
    img.save(f"rose_{idx}.png")
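For reproducible batches, one torch.Generator per prompt can be supplied; a minimal sketch (seed values are arbitrary):

# One fixed seed per prompt so any single rose can be regenerated exactly
generators = [torch.Generator(device="cuda").manual_seed(1000 + i) for i in range(len(prompts))]
images = pipe(prompt=prompts, generator=generators).images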
4.3 Cache Optimization
from diffusers import StableDiffusionPipeline
import torch

# Note: diffusers has no `enable_kv_caching` flag; the closest documented equivalent of a
# "slow first run, fast later runs" cache is compiling the UNet once with torch.compile.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

image = pipe("warmup image").images[0]     # first generation is slower: the graph is compiled and cached
image = pipe("optimized image").images[0]  # later generations reuse the cached graph and run noticeably faster
Chapter 5: Commercial Production
5.1 Batch Product-Image Generation
product_data = [
    {"name": "sneakers", "color": "neon green"},
    {"name": "backpack", "color": "matte black"},
    {"name": "watch", "color": "rose gold"},
]

for product in product_data:
    prompt = (
        f"Professional product photo of {product['color']} {product['name']}, "
        "studio lighting, 8k resolution, product design award winner"
    )
    image = pipe(prompt).images[0]
    image.save(f"{product['name']}_{product['color']}.png")
5.2 Automatic Quality Checks
from transformers import CLIPModel, CLIPProcessor

# Load CLIP to score how well each image matches its prompt
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def quality_check(image, target_prompt):
    inputs = clip_processor(text=target_prompt, images=image, return_tensors="pt", padding=True, truncation=True)
    outputs = clip_model(**inputs)
    similarity = outputs.logits_per_image.item()
    return similarity > 25  # tune the threshold for your own content

if quality_check(image, prompt):
    image.save("approved.png")
else:
    print("Quality check failed!")
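In a batch pipeline the check can drive automatic regeneration; a minimal sketch that retries a few times before giving up (max_retries is an arbitrary choice):

# Regenerate until the CLIP score passes or the retry budget is exhausted
max_retries = 3
for attempt in range(max_retries):
    image = pipe(prompt).images[0]
    if quality_check(image, prompt):
        image.save("approved.png")
        break
else:
    print("Quality check failed after all retries!")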
5.3 Multi-Resolution Generation
# Load the pipeline once and reuse it for every output size
pipe = StableDiffusionPipeline.from_pretrained(...)

resolutions = [(512, 512), (768, 768), (1024, 1024)]
for w, h in resolutions:
    image = pipe(prompt, width=w, height=h).images[0]
    image.save(f"output_{w}x{h}.png")
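The target_size argument in the original snippet is SDXL micro-conditioning and is not accepted by the SD 1.5 pipeline; with StableDiffusionXLPipeline it can be combined with original_size, as in this minimal sketch (sdxl_pipe stands for the SDXL pipeline loaded in section 1.2):

# SDXL micro-conditioning: tell the model which canvas the image is nominally rendered for
image = sdxl_pipe(
    prompt,
    width=1024,
    height=1024,
    original_size=(1024, 1024),
    target_size=(1024, 1024),
).images[0]
image.save("output_sdxl_1024.png")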
Chapter 6: Troubleshooting
6.1 VRAM Monitoring
import nvidia_smi  # NVML binding, e.g. from the nvidia-ml-py3 package

nvidia_smi.nvmlInit()
handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)

def print_mem_usage():
    info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)
    print(f"Used VRAM: {info.used // 1024**2} MB")

print_mem_usage()  # call before and after generation to spot leaks
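The same numbers are available from PyTorch itself, without an extra dependency; a minimal sketch:

import torch

# Memory held by live tensors vs. memory reserved by the CUDA caching allocator
print(f"allocated: {torch.cuda.memory_allocated() // 1024**2} MB")
print(f"reserved:  {torch.cuda.memory_reserved() // 1024**2} MB")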
6.2 Exception Handling
try:
    image = pipe("problematic prompt").images[0]
except torch.cuda.OutOfMemoryError:
    print("Out of VRAM! Falling back to CPU offload (the diffusers counterpart of --medvram)")
    torch.cuda.empty_cache()         # release cached allocations left over from the failed run
    pipe.enable_model_cpu_offload()
    image = pipe("problematic prompt").images[0]
6.3 Sampling Debugging
# Log the sampling process
pipe = StableDiffusionPipeline.from_pretrained(...)
pipe.set_progress_bar_config(leave=True)  # keep the per-step progress bar visible

# Save the intermediate latents at every denoising step via the step-end callback
def save_latents(pipeline, step, timestep, callback_kwargs):
    torch.save(callback_kwargs["latents"], f"step_{step}.pt")
    return callback_kwargs

image = pipe(prompt, callback_on_step_end=save_latents).images[0]