Skip to content

Commit 61719bf

Browse files
authored
Fix gpu_id (#1326)
1 parent b3911f8 commit 61719bf

File tree

5 files changed

+6
-6
lines changed

5 files changed

+6
-6
lines changed

src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ def disable_attention_slicing(self):
178178
self.enable_attention_slicing(None)
179179

180180
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.AltDiffusionPipeline.enable_sequential_cpu_offload
181-
def enable_sequential_cpu_offload(self):
181+
def enable_sequential_cpu_offload(self, gpu_id=0):
182182
r"""
183183
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
184184
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
@@ -189,7 +189,7 @@ def enable_sequential_cpu_offload(self):
189189
else:
190190
raise ImportError("Please install accelerate via `pip install accelerate`")
191191

192-
device = torch.device("cuda")
192+
device = torch.device(f"cuda:{gpu_id}")
193193

194194
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
195195
if cpu_offloaded_model is not None:

src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -209,7 +209,7 @@ def disable_attention_slicing(self):
209209
self.enable_attention_slicing(None)
210210

211211
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
212-
def enable_sequential_cpu_offload(self):
212+
def enable_sequential_cpu_offload(self, gpu_id=0):
213213
r"""
214214
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
215215
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ def disable_attention_slicing(self):
176176
self.enable_attention_slicing(None)
177177

178178
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
179-
def enable_sequential_cpu_offload(self):
179+
def enable_sequential_cpu_offload(self, gpu_id=0):
180180
r"""
181181
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
182182
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ def disable_attention_slicing(self):
169169
self.enable_attention_slicing(None)
170170

171171
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
172-
def enable_sequential_cpu_offload(self):
172+
def enable_sequential_cpu_offload(self, gpu_id=0):
173173
r"""
174174
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
175175
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ def disable_attention_slicing(self):
189189
self.enable_attention_slicing(None)
190190

191191
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
192-
def enable_sequential_cpu_offload(self):
192+
def enable_sequential_cpu_offload(self, gpu_id=0):
193193
r"""
194194
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
195195
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a

0 commit comments

Comments (0)