prompt = "photorealistic new zealand hills"
image = pipe(prompt, image=input_image, strength=0.75).images[0]
image.save('tensorrt_img2img_new_zealand_hills.png')
```

### Stable Diffusion Reference

This pipeline uses Reference-only Control. Refer to the [sd-webui-controlnet discussion](https://github.com/Mikubill/sd-webui-controlnet/discussions/1236) for details.

```py
import torch
from diffusers import UniPCMultistepScheduler
from diffusers.utils import load_image

input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")

# StableDiffusionReferencePipeline is the community pipeline defined in
# examples/community/stable_diffusion_reference.py; it can also be loaded via
# DiffusionPipeline.from_pretrained(..., custom_pipeline="stable_diffusion_reference").
pipe = StableDiffusionReferencePipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    safety_checker=None,
    torch_dtype=torch.float16
).to('cuda:0')

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

result_img = pipe(ref_image=input_image,
                  prompt="1girl",
                  num_inference_steps=20,
                  reference_attn=True,
                  reference_adain=True).images[0]
```
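
The `reference_attn` and `reference_adain` arguments can be toggled independently. Below is a minimal sketch (reusing `pipe` and `input_image` from above; the output filenames are illustrative, not part of the pipeline) for rendering the three combinations compared in the images that follow:

```py
# Render each attention/AdaIN combination shown in the comparison images below.
# Assumes `pipe` and `input_image` are already set up as in the example above.
for attn, adain in [(True, False), (False, True), (True, True)]:
    img = pipe(ref_image=input_image,
               prompt="1girl",
               num_inference_steps=20,
               reference_attn=attn,
               reference_adain=adain).images[0]
    img.save(f"reference_attn_{attn}_adain_{adain}.png")  # illustrative filename
```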

Reference Image

![reference_image](https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png)

Output Image of `reference_attn=True` and `reference_adain=False`

![output_image](https://github.com/huggingface/diffusers/assets/24734142/813b5c6a-6d89-46ba-b7a4-2624e240eea5)

Output Image of `reference_attn=False` and `reference_adain=True`

![output_image](https://github.com/huggingface/diffusers/assets/24734142/ffc90339-9ef0-4c4d-a544-135c3e5644da)

Output Image of `reference_attn=True` and `reference_adain=True`

![output_image](https://github.com/huggingface/diffusers/assets/24734142/3c5255d6-867d-4d35-b202-8dfd30cc6827)