Skip to content

Commit b3911f8

Browse files
make fix copies
1 parent 245e9cc commit b3911f8

File tree

5 files changed

+6
-6
lines changed

5 files changed

+6
-6
lines changed

src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -179,7 +179,7 @@ def disable_attention_slicing(self):
179179
# set slice_size = `None` to disable `attention slicing`
180180
self.enable_attention_slicing(None)
181181

182-
def enable_sequential_cpu_offload(self):
182+
def enable_sequential_cpu_offload(self, gpu_id=0):
183183
r"""
184184
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
185185
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
@@ -190,7 +190,7 @@ def enable_sequential_cpu_offload(self):
190190
else:
191191
raise ImportError("Please install accelerate via `pip install accelerate`")
192192

193-
device = torch.device("cuda")
193+
device = torch.device(f"cuda:{gpu_id}")
194194

195195
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
196196
if cpu_offloaded_model is not None:

src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -220,7 +220,7 @@ def enable_sequential_cpu_offload(self):
220220
else:
221221
raise ImportError("Please install accelerate via `pip install accelerate`")
222222

223-
device = torch.device("cuda")
223+
device = torch.device(f"cuda:{gpu_id}")
224224

225225
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
226226
if cpu_offloaded_model is not None:

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -187,7 +187,7 @@ def enable_sequential_cpu_offload(self):
187187
else:
188188
raise ImportError("Please install accelerate via `pip install accelerate`")
189189

190-
device = torch.device("cuda")
190+
device = torch.device(f"cuda:{gpu_id}")
191191

192192
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
193193
if cpu_offloaded_model is not None:

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -180,7 +180,7 @@ def enable_sequential_cpu_offload(self):
180180
else:
181181
raise ImportError("Please install accelerate via `pip install accelerate`")
182182

183-
device = torch.device("cuda")
183+
device = torch.device(f"cuda:{gpu_id}")
184184

185185
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
186186
if cpu_offloaded_model is not None:

src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -200,7 +200,7 @@ def enable_sequential_cpu_offload(self):
200200
else:
201201
raise ImportError("Please install accelerate via `pip install accelerate`")
202202

203-
device = torch.device("cuda")
203+
device = torch.device(f"cuda:{gpu_id}")
204204

205205
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
206206
if cpu_offloaded_model is not None:

0 commit comments

Comments (0)