Commit cfdea72

anton-l and Prathik Rao authored and committed
Bump the version to 0.7.0.dev0 (huggingface#912)
* Bump the version to 0.7.0.dev0
* deprecate offsets
* deprecate LMS timesteps
* LMS 0.7.0 -> 0.8.0
1 parent 30a933e commit cfdea72

File tree

5 files changed (+9 / -19 lines)

* setup.py
* src/diffusers/__init__.py
* src/diffusers/schedulers/scheduling_ddim.py
* src/diffusers/schedulers/scheduling_lms_discrete.py
* src/diffusers/schedulers/scheduling_pndm.py

setup.py

Lines changed: 1 addition & 1 deletion
@@ -211,7 +211,7 @@ def run(self):
 
 setup(
     name="diffusers",
-    version="0.6.0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
+    version="0.7.0.dev0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
     description="Diffusers",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",

src/diffusers/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 )
 
 
-__version__ = "0.6.0"
+__version__ = "0.7.0.dev0"
 
 from .configuration_utils import ConfigMixin
 from .onnx_utils import OnnxRuntimeModel
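If you want to confirm which version your environment picked up after this bump, a minimal check (the printed value is only what a source install of this revision would report):

```python
import diffusers

# A source install of this revision reports the new development version.
print(diffusers.__version__)  # expected: "0.7.0.dev0"
```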

src/diffusers/schedulers/scheduling_ddim.py

Lines changed: 3 additions & 8 deletions
@@ -23,7 +23,7 @@
 import torch
 
 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import BaseOutput, deprecate
+from ..utils import BaseOutput
 from .scheduling_utils import SchedulerMixin
 
 
@@ -175,26 +175,21 @@ def _get_variance(self, timestep, prev_timestep):
 
         return variance
 
-    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, **kwargs):
+    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
         """
         Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
 
         Args:
             num_inference_steps (`int`):
                 the number of diffusion steps used when generating samples with a pre-trained model.
         """
-        deprecated_offset = deprecate(
-            "offset", "0.7.0", "Please pass `steps_offset` to `__init__` instead.", take_from=kwargs
-        )
-        offset = deprecated_offset or self.config.steps_offset
-
         self.num_inference_steps = num_inference_steps
         step_ratio = self.config.num_train_timesteps // self.num_inference_steps
         # creates integer timesteps by multiplying by ratio
         # casting to int to avoid issues when num_inference_step is power of 3
         timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
         self.timesteps = torch.from_numpy(timesteps).to(device)
-        self.timesteps += offset
+        self.timesteps += self.config.steps_offset
 
     def step(
         self,
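
With the `deprecate("offset", ...)` path removed, callers can no longer pass `offset` to `set_timesteps()`; the offset now comes exclusively from the scheduler config, as the old deprecation message already advised. A minimal migration sketch (the concrete argument values below are illustrative, not taken from this commit):

```python
from diffusers import DDIMScheduler

# Old pattern, removed by this change:
#     scheduler.set_timesteps(50, offset=1)
# New pattern: set `steps_offset` once when constructing the scheduler.
scheduler = DDIMScheduler(num_train_timesteps=1000, steps_offset=1)  # illustrative values
scheduler.set_timesteps(num_inference_steps=50)

# set_timesteps() now adds config.steps_offset to every timestep internally.
print(scheduler.timesteps[:5])
```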

src/diffusers/schedulers/scheduling_lms_discrete.py

Lines changed: 2 additions & 2 deletions
@@ -209,7 +209,7 @@ def step(
         ):
             deprecate(
                 "timestep as an index",
-                "0.7.0",
+                "0.8.0",
                 "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                 " `LMSDiscreteScheduler.step()` will not be supported in future versions. Make sure to pass"
                 " one of the `scheduler.timesteps` as a timestep.",
@@ -259,7 +259,7 @@ def add_noise(
         if isinstance(timesteps, torch.IntTensor) or isinstance(timesteps, torch.LongTensor):
             deprecate(
                 "timesteps as indices",
-                "0.7.0",
+                "0.8.0",
                 "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                 " `LMSDiscreteScheduler.add_noise()` will not be supported in future versions. Make sure to"
                 " pass values from `scheduler.timesteps` as timesteps.",

src/diffusers/schedulers/scheduling_pndm.py

Lines changed: 2 additions & 7 deletions
@@ -21,7 +21,6 @@
 import torch
 
 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import deprecate
 from .scheduling_utils import SchedulerMixin, SchedulerOutput
 
 
@@ -142,25 +141,21 @@ def __init__(
         self.plms_timesteps = None
         self.timesteps = None
 
-    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, **kwargs):
+    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
         """
         Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
 
         Args:
             num_inference_steps (`int`):
                 the number of diffusion steps used when generating samples with a pre-trained model.
         """
-        deprecated_offset = deprecate(
-            "offset", "0.7.0", "Please pass `steps_offset` to `__init__` instead.", take_from=kwargs
-        )
-        offset = deprecated_offset or self.config.steps_offset
 
         self.num_inference_steps = num_inference_steps
         step_ratio = self.config.num_train_timesteps // self.num_inference_steps
         # creates integer timesteps by multiplying by ratio
         # casting to int to avoid issues when num_inference_step is power of 3
         self._timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()
-        self._timesteps += offset
+        self._timesteps += self.config.steps_offset
 
         if self.config.skip_prk_steps:
             # for some models like stable diffusion the prk steps can/should be skipped to
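
The PNDM change mirrors the DDIM one above: the `offset` kwarg is gone from `set_timesteps()` and only `config.steps_offset` is honoured. A minimal sketch (argument values are illustrative):

```python
from diffusers import PNDMScheduler

# `steps_offset` (like other behaviour such as `skip_prk_steps`) is configured on
# the scheduler itself rather than passed to set_timesteps().
scheduler = PNDMScheduler(num_train_timesteps=1000, steps_offset=1, skip_prk_steps=True)
scheduler.set_timesteps(num_inference_steps=50)
print(scheduler.timesteps[:5])
```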
