@@ -76,15 +76,13 @@ You need to accept the model license before downloading or using the Stable Diff

 ```python
 # make sure you're logged in with `huggingface-cli login`
-from torch import autocast
 from diffusers import StableDiffusionPipeline

 pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
 pipe = pipe.to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"
-with autocast("cuda"):
-    image = pipe(prompt).images[0]
+image = pipe(prompt).images[0]
 ```

 **Note**: If you don't want to use the token, you can also simply download the model weights
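(Editorial note, not part of this commit.) The note above is cut off by the hunk boundary; the next hunk shows the pipeline being loaded from a local folder (`./stable-diffusion-v1-4`). A minimal sketch of the download step, assuming `huggingface_hub` is installed and the model license has already been accepted; `snapshot_download` is one option here, a `git lfs` clone of the model repo is another:

```python
# hedged sketch, not from the diff: fetch the weights once, then load the
# pipeline from the local folder without passing use_auth_token in code.
from huggingface_hub import snapshot_download
from diffusers import StableDiffusionPipeline

local_dir = snapshot_download("CompVis/stable-diffusion-v1-4")  # needs prior `huggingface-cli login`
pipe = StableDiffusionPipeline.from_pretrained(local_dir).to("cuda")
```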
@@ -104,8 +102,7 @@ pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-4")
 pipe = pipe.to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"
-with autocast("cuda"):
-    image = pipe(prompt).images[0]
+image = pipe(prompt).images[0]
 ```

 If you are limited by GPU memory, you might want to consider using the model in `fp16` as
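The sentence above is truncated by the hunk boundary, and the fp16 loading call it leads into is not shown in this diff. A sketch of what that setup presumably looked like at the time; the `revision="fp16"` branch name and the `torch_dtype` argument are assumptions based on how this checkpoint was published then:

```python
# hedged sketch of the elided fp16 loading step (not shown in this diff)
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="fp16",            # half-precision weights branch
    torch_dtype=torch.float16,  # keep the model in fp16 on the GPU
    use_auth_token=True,
)
pipe = pipe.to("cuda")
```

With the weights already loaded in fp16, wrapping inference in `autocast` is redundant, which is consistent with this commit removing it.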
@@ -123,8 +120,7 @@ pipe = pipe.to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"
 pipe.enable_attention_slicing()
-with autocast("cuda"):
-    image = pipe(prompt).images[0]
+image = pipe(prompt).images[0]
 ```

 Finally, if you wish to use a different scheduler, you can simply instantiate
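Again the hunk ends mid-sentence; presumably it continues "instantiate it before the pipeline and pass it to `from_pretrained`", which matches the `StableDiffusionPipeline.from_pretrained(` context in the next hunk header. A sketch using the K-LMS scheduler, with constructor arguments assumed from the Stable Diffusion examples of that era:

```python
# hedged sketch: swap the default scheduler for K-LMS (assumed parameters)
from diffusers import LMSDiscreteScheduler, StableDiffusionPipeline

lms = LMSDiscreteScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
)
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", scheduler=lms, use_auth_token=True
)
```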
@@ -149,8 +145,7 @@ pipe = StableDiffusionPipeline.from_pretrained(
 pipe = pipe.to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"
-with autocast("cuda"):
-    image = pipe(prompt).images[0]
+image = pipe(prompt).images[0]

 image.save("astronaut_rides_horse.png")
 ```
@@ -160,7 +155,6 @@ image.save("astronaut_rides_horse.png")
 The `StableDiffusionImg2ImgPipeline` lets you pass a text prompt and an initial image to condition the generation of new images.

 ```python
-from torch import autocast
 import requests
 import torch
 from PIL import Image
@@ -190,8 +184,7 @@ init_image = init_image.resize((768, 512))

 prompt = "A fantasy landscape, trending on artstation"

-with autocast("cuda"):
-    images = pipe(prompt=prompt, init_image=init_image, strength=0.75, guidance_scale=7.5).images
+images = pipe(prompt=prompt, init_image=init_image, strength=0.75, guidance_scale=7.5).images

 images[0].save("fantasy_landscape.png")
 ```
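The diff skips the lines that prepare `init_image`; judging from the surviving `init_image = init_image.resize((768, 512))` context in the hunk header, they download a starting image and resize it. A self-contained sketch with a hypothetical URL:

```python
# hedged sketch of the elided init_image preparation (hypothetical URL)
from io import BytesIO
import requests
from PIL import Image

url = "https://example.com/sketch-mountains-input.jpg"  # any RGB image works
init_image = Image.open(BytesIO(requests.get(url).content)).convert("RGB")
init_image = init_image.resize((768, 512))
```

In the call above, `strength=0.75` controls how much noise is added to the initial image before denoising: lower values stay closer to the input image.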
@@ -204,7 +197,6 @@ The `StableDiffusionInpaintPipeline` lets you edit specific parts of an image by
 ```python
 from io import BytesIO

-from torch import autocast
 import torch
 import requests
 import PIL
@@ -234,8 +226,7 @@ pipe = StableDiffusionInpaintPipeline.from_pretrained(
 pipe = pipe.to(device)

 prompt = "a cat sitting on a bench"
-with autocast("cuda"):
-    images = pipe(prompt=prompt, init_image=init_image, mask_image=mask_image, strength=0.75).images
+images = pipe(prompt=prompt, init_image=init_image, mask_image=mask_image, strength=0.75).images

 images[0].save("cat_on_bench.png")
 ```
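Here too the diff elides how `init_image` and `mask_image` are built. Both are PIL images of the same size, with the mask's white region marking what gets repainted. A sketch with hypothetical URLs:

```python
# hedged sketch of the elided image/mask preparation (hypothetical URLs)
from io import BytesIO
import requests
import PIL.Image

def download_image(url):
    response = requests.get(url)
    return PIL.Image.open(BytesIO(response.content)).convert("RGB")

init_image = download_image("https://example.com/bench.png").resize((512, 512))
mask_image = download_image("https://example.com/bench_mask.png").resize((512, 512))
```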
@@ -258,7 +249,6 @@ If you want to run the code yourself 💻, you can try out:
 - [Text-to-Image Latent Diffusion](https://huggingface.co/CompVis/ldm-text2im-large-256)
 ```python
 # !pip install diffusers transformers
-from torch import autocast
 from diffusers import DiffusionPipeline

 device = "cuda"
@@ -270,16 +260,14 @@ ldm = ldm.to(device)

 # run pipeline in inference (sample random noise and denoise)
 prompt = "A painting of a squirrel eating a burger"
-with autocast(device):
-    image = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6).images[0]
+image = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6).images[0]

 # save image
 image.save("squirrel.png")
 ```
 - [Unconditional Diffusion with discrete scheduler](https://huggingface.co/google/ddpm-celebahq-256)
 ```python
 # !pip install diffusers
-from torch import autocast
 from diffusers import DDPMPipeline, DDIMPipeline, PNDMPipeline

 model_id = "google/ddpm-celebahq-256"
@@ -290,8 +278,7 @@ ddpm = DDPMPipeline.from_pretrained(model_id) # you can replace DDPMPipeline wi
 ddpm.to(device)

 # run pipeline in inference (sample random noise and denoise)
-with autocast("cuda"):
-    image = ddpm().images[0]
+image = ddpm().images[0]

 # save image
 image.save("ddpm_generated_image.png")
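The truncated comment in the hunk header above ("you can replace DDPMPipeline wi…") points at the other sampler classes imported alongside it. A sketch of the DDIM variant, assuming the same checkpoint loads through `DDIMPipeline.from_pretrained`:

```python
# hedged sketch: same weights, faster DDIM sampling instead of full DDPM
ddim = DDIMPipeline.from_pretrained(model_id)
ddim.to(device)
image = ddim(num_inference_steps=50).images[0]
image.save("ddim_generated_image.png")
```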