Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
bbfa8b4
new file: euler ancestral scheduler
AbdullahAlfaraj Sep 24, 2022
edff6bf
Add helper functions for euler_a scheduler
AbdullahAlfaraj Sep 24, 2022
5514fbd
New class: EulerAScheduler
AbdullahAlfaraj Sep 24, 2022
b7138e7
Import EulerAScheduler
AbdullahAlfaraj Sep 24, 2022
b8703aa
Add more helper functions
AbdullahAlfaraj Sep 24, 2022
6973cb6
Use EulerAScheduler in StableDiffusionPipeline
AbdullahAlfaraj Sep 24, 2022
f8479e3
Merge branch 'huggingface:main' into main
AbdullahAlfaraj Sep 24, 2022
6429085
Fix Euler Ancestral deterministic output
AbdullahAlfaraj Sep 25, 2022
c76ab9b
Fix: forgot to initialize generator to None
AbdullahAlfaraj Sep 25, 2022
8364353
move helper functionality to EulerAScheduler class
AbdullahAlfaraj Sep 27, 2022
d0de909
temp fix: pass device to EulerAScheduler
AbdullahAlfaraj Oct 10, 2022
f382013
convert numpy arrays to torch tensors
AbdullahAlfaraj Oct 10, 2022
b12659f
assign betas tensor to device
AbdullahAlfaraj Oct 10, 2022
1310d03
fix typo
AbdullahAlfaraj Oct 10, 2022
700c85d
Merge pull request #1 from AbdullahAlfaraj/clean_euler_a
AbdullahAlfaraj Oct 11, 2022
e8b7396
Merge remote-tracking branch 'upstream/main' into euler_a_redesign_merge
AbdullahAlfaraj Oct 12, 2022
558ced5
remove batch_size
AbdullahAlfaraj Oct 12, 2022
3e95a3f
Merge remote-tracking branch 'upstream/main' into euler_a_redesign_merge
AbdullahAlfaraj Oct 12, 2022
8e59861
EulerAScheduler works again but breaks the redesign
AbdullahAlfaraj Oct 13, 2022
a659e02
pass index to step() to access t and prev_t
AbdullahAlfaraj Oct 15, 2022
4df7a22
Merge pull request #2 from AbdullahAlfaraj/euler_a_redesign_merge
AbdullahAlfaraj Oct 15, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/diffusers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
PNDMScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
EulerAScheduler,
)
from .training_utils import EMAModel
else:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from ...configuration_utils import FrozenDict
from ...models import AutoencoderKL, UNet2DConditionModel
from ...pipeline_utils import DiffusionPipeline
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, EulerAScheduler
from ...utils import deprecate, logging
from . import StableDiffusionPipelineOutput
from .safety_checker import StableDiffusionSafetyChecker
Expand Down Expand Up @@ -306,22 +306,43 @@ def __call__(
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
if generator is not None:
extra_step_kwargs["generator"] = generator

for i, t in enumerate(self.progress_bar(timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

#TODO merge conform the EulerAScheduler interface to the standardized scheduler interface
if isinstance(self.scheduler, EulerAScheduler):
latent_unscaled = latent_model_input # store the unscaled latent
c_out, c_in, sigma_in = self.scheduler.prepare_input(latent_model_input, t)
# latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
latent_model_input = latent_unscaled * c_in
# sigma_in = self.scheduler.get_sigma_in(latent_model_input,t)
eps = self.unet(latent_model_input, sigma_in , encoder_hidden_states=text_embeddings).sample
noise_pred = latent_unscaled + eps * c_out

# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
else:
# predict the noise residual
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

if isinstance(self.scheduler, EulerAScheduler):
# change from self.scheduler.timesteps.shape[0] - 1 to num_inference_steps
if i < self.scheduler.num_inference_steps: #avoid out of bound error
# t_prev = self.scheduler.timesteps[i+1]
latents = self.scheduler.step(noise_pred, i, latents, **extra_step_kwargs).prev_sample
else:
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

# call the callback, if provided
if callback is not None and i % callback_steps == 0:
Expand Down
1 change: 1 addition & 0 deletions src/diffusers/schedulers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_utils import SchedulerMixin
from .scheduling_euler_a import EulerAScheduler
else:
from ..utils.dummy_pt_objects import * # noqa F403

Expand Down
Loading