From 8d1a17caa5cef5d2fcf6a7c129d4c85335386a40 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 19 Jul 2022 15:43:38 -0700 Subject: [PATCH 001/133] re-add RL model code --- src/diffusers/models/__init__.py | 1 + src/diffusers/models/resnet.py | 134 +++++ src/diffusers/models/unet_rl.py | 228 ++++++++ tests/test_modeling_utils.py | 915 +++++++++++++++++++++++++++++++ 4 files changed, 1278 insertions(+) create mode 100644 src/diffusers/models/unet_rl.py create mode 100755 tests/test_modeling_utils.py diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index 1242ad6fca7f..47f7fa71682b 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -18,6 +18,7 @@ if is_torch_available(): from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel + from .unet_rl import TemporalUNet from .vae import AutoencoderKL, VQModel if is_flax_available(): diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 49ff7d6bfa45..9b52681c3b99 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -5,6 +5,70 @@ import torch.nn.functional as F +class Upsample1D(nn.Module): + """ + An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param + use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. + If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_conv_transpose = use_conv_transpose + self.name = name + + # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed + self.conv = None + if use_conv_transpose: + self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) + elif use_conv: + self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.use_conv_transpose: + return self.conv(x) + + x = F.interpolate(x, scale_factor=2.0, mode="nearest") + + if self.use_conv: + x = self.conv(x) + + return x + + +class Downsample1D(nn.Module): + """ + A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param + use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. + If 3D, then + downsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.padding = padding + stride = 2 + self.name = name + + if use_conv: + self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=stride, padding=padding) + else: + assert self.channels == self.out_channels + self.conv = nn.AvgPool1d(kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.conv(x) + + class Upsample2D(nn.Module): """ An upsampling layer with an optional convolution. 
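
A quick shape check on the two 1D resampling layers added above, as a minimal sketch (assuming the `diffusers.models.resnet` import path this patch creates): the stride-2 convolution halves the horizon axis, and the transposed convolution doubles it back.

import torch
from diffusers.models.resnet import Downsample1D, Upsample1D

x = torch.randn(4, 64, 16)                    # [batch, channels, horizon]
down = Downsample1D(64, use_conv=True)        # Conv1d, kernel 3, stride 2
up = Upsample1D(64, use_conv_transpose=True)  # ConvTranspose1d(4, stride=2, padding=1)

assert down(x).shape == (4, 64, 8)            # horizon halved
assert up(down(x)).shape == (4, 64, 16)       # horizon restored
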
@@ -374,6 +438,76 @@ def forward(self, x): return x * torch.tanh(torch.nn.functional.softplus(x)) +class Conv1dBlock(nn.Module): + """ + Conv1d --> GroupNorm --> Mish + """ + + def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): + super().__init__() + + self.block = nn.Sequential( + nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), + RearrangeDim(), + # Rearrange("batch channels horizon -> batch channels 1 horizon"), + nn.GroupNorm(n_groups, out_channels), + RearrangeDim(), + # Rearrange("batch channels 1 horizon -> batch channels horizon"), + nn.Mish(), + ) + + def forward(self, x): + return self.block(x) + + +class RearrangeDim(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, tensor): + if len(tensor.shape) == 2: + return tensor[:, :, None] + if len(tensor.shape) == 3: + return tensor[:, :, None, :] + elif len(tensor.shape) == 4: + return tensor[:, :, 0, :] + else: + raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") + + +# unet_rl.py +class ResidualTemporalBlock(nn.Module): + def __init__(self, inp_channels, out_channels, embed_dim, horizon, kernel_size=5): + super().__init__() + + self.blocks = nn.ModuleList( + [ + Conv1dBlock(inp_channels, out_channels, kernel_size), + Conv1dBlock(out_channels, out_channels, kernel_size), + ] + ) + + self.time_mlp = nn.Sequential( + nn.Mish(), + nn.Linear(embed_dim, out_channels), + RearrangeDim(), + # Rearrange("batch t -> batch t 1"), + ) + + self.residual_conv = ( + nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() + ) + + def forward(self, x, t): + """ + x : [ batch_size x inp_channels x horizon ] t : [ batch_size x embed_dim ] returns: out : [ batch_size x + out_channels x horizon ] + """ + out = self.blocks[0](x) + self.time_mlp(t) + out = self.blocks[1](out) + return out + self.residual_conv(x) + + def upsample_2d(x, kernel=None, factor=2, gain=1): r"""Upsample2D a batch of 2D images with the given filter. 
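
The `ResidualTemporalBlock` above is the workhorse of the temporal UNet introduced in the next file: two Conv1d -> GroupNorm -> Mish stages, with the time embedding projected to `out_channels` and added as a per-channel bias between them, and a 1x1 residual convolution when channel counts differ. A minimal usage sketch (illustrative values, not from the patch; note that `horizon` is accepted but unused by the block itself):

import torch
from diffusers.models.resnet import ResidualTemporalBlock

x = torch.randn(4, 32, 16)  # [batch, inp_channels, horizon]
t = torch.randn(4, 8)       # [batch, embed_dim] time embedding
block = ResidualTemporalBlock(inp_channels=32, out_channels=64, embed_dim=8, horizon=16)

assert block(x, t).shape == (4, 64, 16)  # channels change, horizon preserved
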
diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py new file mode 100644 index 000000000000..786a80a38a6e --- /dev/null +++ b/src/diffusers/models/unet_rl.py @@ -0,0 +1,228 @@ +# model adapted from diffuser https://github.com/jannerm/diffuser/blob/main/diffuser/models/temporal.py + +import torch +import torch.nn as nn + +from diffusers.models.resnet import Downsample1D, ResidualTemporalBlock, Upsample1D + +from ..configuration_utils import ConfigMixin +from ..modeling_utils import ModelMixin +from .embeddings import get_timestep_embedding + + +class SinusoidalPosEmb(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + return get_timestep_embedding(x, self.dim) + + +class RearrangeDim(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, tensor): + if len(tensor.shape) == 2: + return tensor[:, :, None] + if len(tensor.shape) == 3: + return tensor[:, :, None, :] + elif len(tensor.shape) == 4: + return tensor[:, :, 0, :] + else: + raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") + + +class Conv1dBlock(nn.Module): + """ + Conv1d --> GroupNorm --> Mish + """ + + def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): + super().__init__() + + self.block = nn.Sequential( + nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), + RearrangeDim(), + # Rearrange("batch channels horizon -> batch channels 1 horizon"), + nn.GroupNorm(n_groups, out_channels), + RearrangeDim(), + # Rearrange("batch channels 1 horizon -> batch channels horizon"), + nn.Mish(), + ) + + def forward(self, x): + return self.block(x) + + +class TemporalUNet(ModelMixin, ConfigMixin): # (nn.Module): + def __init__( + self, + training_horizon=128, + transition_dim=14, + cond_dim=3, + predict_epsilon=False, + clip_denoised=True, + dim=32, + dim_mults=(1, 4, 8), + ): + super().__init__() + + self.transition_dim = transition_dim + self.cond_dim = cond_dim + self.predict_epsilon = predict_epsilon + self.clip_denoised = clip_denoised + + dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] + in_out = list(zip(dims[:-1], dims[1:])) + + time_dim = dim + self.time_mlp = nn.Sequential( + SinusoidalPosEmb(dim), + nn.Linear(dim, dim * 4), + nn.Mish(), + nn.Linear(dim * 4, dim), + ) + + self.downs = nn.ModuleList([]) + self.ups = nn.ModuleList([]) + num_resolutions = len(in_out) + + for ind, (dim_in, dim_out) in enumerate(in_out): + is_last = ind >= (num_resolutions - 1) + + self.downs.append( + nn.ModuleList( + [ + ResidualTemporalBlock(dim_in, dim_out, embed_dim=time_dim, horizon=training_horizon), + ResidualTemporalBlock(dim_out, dim_out, embed_dim=time_dim, horizon=training_horizon), + Downsample1D(dim_out, use_conv=True) if not is_last else nn.Identity(), + ] + ) + ) + + if not is_last: + training_horizon = training_horizon // 2 + + mid_dim = dims[-1] + self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=time_dim, horizon=training_horizon) + self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=time_dim, horizon=training_horizon) + + for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): + is_last = ind >= (num_resolutions - 1) + + self.ups.append( + nn.ModuleList( + [ + ResidualTemporalBlock(dim_out * 2, dim_in, embed_dim=time_dim, horizon=training_horizon), + ResidualTemporalBlock(dim_in, dim_in, embed_dim=time_dim, horizon=training_horizon), + Upsample1D(dim_in, use_conv_transpose=True) if not is_last else nn.Identity(), + ] + ) + 
) + + if not is_last: + training_horizon = training_horizon * 2 + + self.final_conv = nn.Sequential( + Conv1dBlock(dim, dim, kernel_size=5), + nn.Conv1d(dim, transition_dim, 1), + ) + + def forward(self, sample, timesteps): + """ + x : [ batch x horizon x transition ] + """ + x = sample + + x = x.permute(0, 2, 1) + + t = self.time_mlp(timesteps) + h = [] + + for resnet, resnet2, downsample in self.downs: + x = resnet(x, t) + x = resnet2(x, t) + h.append(x) + x = downsample(x) + + x = self.mid_block1(x, t) + x = self.mid_block2(x, t) + + for resnet, resnet2, upsample in self.ups: + x = torch.cat((x, h.pop()), dim=1) + x = resnet(x, t) + x = resnet2(x, t) + x = upsample(x) + + x = self.final_conv(x) + + x = x.permute(0, 2, 1) + return x + + +class TemporalValue(nn.Module): + def __init__( + self, + horizon, + transition_dim, + cond_dim, + dim=32, + time_dim=None, + out_dim=1, + dim_mults=(1, 2, 4, 8), + ): + super().__init__() + + dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] + in_out = list(zip(dims[:-1], dims[1:])) + + time_dim = time_dim or dim + self.time_mlp = nn.Sequential( + SinusoidalPosEmb(dim), + nn.Linear(dim, dim * 4), + nn.Mish(), + nn.Linear(dim * 4, dim), + ) + + self.blocks = nn.ModuleList([]) + + print(in_out) + for dim_in, dim_out in in_out: + self.blocks.append( + nn.ModuleList( + [ + ResidualTemporalBlock(dim_in, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon), + ResidualTemporalBlock(dim_out, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon), + Downsample1d(dim_out), + ] + ) + ) + + horizon = horizon // 2 + + fc_dim = dims[-1] * max(horizon, 1) + + self.final_block = nn.Sequential( + nn.Linear(fc_dim + time_dim, fc_dim // 2), + nn.Mish(), + nn.Linear(fc_dim // 2, out_dim), + ) + + def forward(self, x, cond, time, *args): + """ + x : [ batch x horizon x transition ] + """ + x = x.permute(0, 2, 1) + + t = self.time_mlp(time) + + for resnet, resnet2, downsample in self.blocks: + x = resnet(x, t) + x = resnet2(x, t) + x = downsample(x) + + x = x.view(len(x), -1) + out = self.final_block(torch.cat([x, t], dim=-1)) + return out diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py new file mode 100755 index 000000000000..bbeb76e44503 --- /dev/null +++ b/tests/test_modeling_utils.py @@ -0,0 +1,915 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
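
Before the tests themselves, a hedged end-to-end sketch of the new `TemporalUNet` (mirroring the `TemporalUNetModelTests` below; assumes the `diffusers.models` export this patch adds). Planning trajectories are laid out as `[batch, horizon, transition_dim]`, and at this commit `forward` returns a plain tensor of the same shape:

import torch
from diffusers.models import TemporalUNet

model = TemporalUNet(
    training_horizon=128, dim=32, dim_mults=(1, 4, 8), transition_dim=14, cond_dim=3
)
sample = torch.randn(4, 16, 14)     # [batch, horizon, transition_dim]
timesteps = torch.tensor([10] * 4)  # one diffusion step per batch element

out = model(sample, timesteps)      # a plain tensor at this commit
assert out.shape == sample.shape    # horizon is halved and restored through the U-Net
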
+ + +import inspect +import math +import tempfile +import unittest + +import numpy as np +import torch + +from diffusers import UNetConditionalModel # noqa: F401 TODO(Patrick) - need to write tests with it +from diffusers import ( + AutoencoderKL, + DDIMPipeline, + DDIMScheduler, + DDPMPipeline, + DDPMScheduler, + LatentDiffusionPipeline, + LatentDiffusionUncondPipeline, + PNDMPipeline, + PNDMScheduler, + ScoreSdeVePipeline, + ScoreSdeVeScheduler, + TemporalUNet, + UNetUnconditionalModel, + VQModel, +) +from diffusers.configuration_utils import ConfigMixin +from diffusers.pipeline_utils import DiffusionPipeline +from diffusers.testing_utils import floats_tensor, slow, torch_device +from diffusers.training_utils import EMAModel + + +torch.backends.cuda.matmul.allow_tf32 = False + + +class ConfigTester(unittest.TestCase): + def test_load_not_from_mixin(self): + with self.assertRaises(ValueError): + ConfigMixin.from_config("dummy_path") + + def test_save_load(self): + class SampleObject(ConfigMixin): + config_name = "config.json" + + def __init__( + self, + a=2, + b=5, + c=(2, 5), + d="for diffusion", + e=[1, 3], + ): + self.register_to_config(a=a, b=b, c=c, d=d, e=e) + + obj = SampleObject() + config = obj.config + + assert config["a"] == 2 + assert config["b"] == 5 + assert config["c"] == (2, 5) + assert config["d"] == "for diffusion" + assert config["e"] == [1, 3] + + with tempfile.TemporaryDirectory() as tmpdirname: + obj.save_config(tmpdirname) + new_obj = SampleObject.from_config(tmpdirname) + new_config = new_obj.config + + # unfreeze configs + config = dict(config) + new_config = dict(new_config) + + assert config.pop("c") == (2, 5) # instantiated as tuple + assert new_config.pop("c") == [2, 5] # saved & loaded as list because of json + assert config == new_config + + +class ModelTesterMixin: + def test_from_pretrained_save_pretrained(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + new_model = self.model_class.from_pretrained(tmpdirname) + new_model.to(torch_device) + + with torch.no_grad(): + image = model(**inputs_dict) + if isinstance(image, dict): + image = image["sample"] + + new_image = new_model(**inputs_dict) + + if isinstance(new_image, dict): + new_image = new_image["sample"] + + max_diff = (image - new_image).abs().sum().item() + self.assertLessEqual(max_diff, 5e-5, "Models give different forward passes") + + def test_determinism(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + with torch.no_grad(): + first = model(**inputs_dict) + if isinstance(first, dict): + first = first["sample"] + + second = model(**inputs_dict) + if isinstance(second, dict): + second = second["sample"] + + out_1 = first.cpu().numpy() + out_2 = second.cpu().numpy() + out_1 = out_1[~np.isnan(out_1)] + out_2 = out_2[~np.isnan(out_2)] + max_diff = np.amax(np.abs(out_1 - out_2)) + self.assertLessEqual(max_diff, 1e-5) + + def test_output(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output["sample"] + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape 
+ self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_forward_signature(self): + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["sample", "timestep"] + self.assertListEqual(arg_names[:2], expected_arg_names) + + def test_model_from_config(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + # test if the model can be loaded from the config + # and has all the expected shape + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_config(tmpdirname) + new_model = self.model_class.from_config(tmpdirname) + new_model.to(torch_device) + new_model.eval() + + # check if all paramters shape are the same + for param_name in model.state_dict().keys(): + param_1 = model.state_dict()[param_name] + param_2 = new_model.state_dict()[param_name] + self.assertEqual(param_1.shape, param_2.shape) + + with torch.no_grad(): + output_1 = model(**inputs_dict) + + if isinstance(output_1, dict): + output_1 = output_1["sample"] + + output_2 = new_model(**inputs_dict) + + if isinstance(output_2, dict): + output_2 = output_2["sample"] + + self.assertEqual(output_1.shape, output_2.shape) + + def test_training(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model.to(torch_device) + model.train() + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output["sample"] + + noise = torch.randn((inputs_dict["sample"].shape[0],) + self.output_shape).to(torch_device) + loss = torch.nn.functional.mse_loss(output, noise) + loss.backward() + + def test_ema_training(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model.to(torch_device) + model.train() + ema_model = EMAModel(model, device=torch_device) + + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output["sample"] + + noise = torch.randn((inputs_dict["sample"].shape[0],) + self.output_shape).to(torch_device) + loss = torch.nn.functional.mse_loss(output, noise) + loss.backward() + ema_model.step(model) + + +class UnetModelTests(ModelTesterMixin, unittest.TestCase): + model_class = UNetUnconditionalModel + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_channels": (32, 64), + "down_blocks": ("UNetResDownBlock2D", "UNetResAttnDownBlock2D"), + "up_blocks": ("UNetResAttnUpBlock2D", "UNetResUpBlock2D"), + "num_head_channels": None, + "out_channels": 3, + "in_channels": 3, + "num_res_blocks": 2, + "image_size": 32, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + +# TODO(Patrick) - Re-add this test after having correctly added the final VE checkpoints +# def test_output_pretrained(self): +# model = 
UNetUnconditionalModel.from_pretrained("fusing/ddpm_dummy_update", subfolder="unet") +# model.eval() +# +# torch.manual_seed(0) +# if torch.cuda.is_available(): +# torch.cuda.manual_seed_all(0) +# +# noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) +# time_step = torch.tensor([10]) +# +# with torch.no_grad(): +# output = model(noise, time_step)["sample"] +# +# output_slice = output[0, -1, -3:, -3:].flatten() +# fmt: off +# expected_output_slice = torch.tensor([0.2891, -0.1899, 0.2595, -0.6214, 0.0968, -0.2622, 0.4688, 0.1311, 0.0053]) +# fmt: on +# self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + +class UNetLDMModelTests(ModelTesterMixin, unittest.TestCase): + model_class = UNetUnconditionalModel + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 4 + sizes = (32, 32) + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "image_size": 32, + "in_channels": 4, + "out_channels": 4, + "num_res_blocks": 2, + "block_channels": (32, 64), + "num_head_channels": 32, + "conv_resample": True, + "down_blocks": ("UNetResDownBlock2D", "UNetResDownBlock2D"), + "up_blocks": ("UNetResUpBlock2D", "UNetResUpBlock2D"), + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = UNetUnconditionalModel.from_pretrained( + "fusing/unet-ldm-dummy-update", output_loading_info=True + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input)["sample"] + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = UNetUnconditionalModel.from_pretrained("fusing/unet-ldm-dummy-update") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) + time_step = torch.tensor([10] * noise.shape[0]) + + with torch.no_grad(): + output = model(noise, time_step)["sample"] + + output_slice = output[0, -1, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + + +# TODO(Patrick) - Re-add this test after having cleaned up LDM +# def test_output_pretrained_spatial_transformer(self): +# model = UNetLDMModel.from_pretrained("fusing/unet-ldm-dummy-spatial") +# model.eval() +# +# torch.manual_seed(0) +# if torch.cuda.is_available(): +# torch.cuda.manual_seed_all(0) +# +# noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) +# context = torch.ones((1, 16, 64), dtype=torch.float32) +# time_step = torch.tensor([10] * noise.shape[0]) +# +# with torch.no_grad(): +# output = model(noise, time_step, context=context) +# +# output_slice = output[0, -1, -3:, -3:].flatten() +# fmt: off +# expected_output_slice = torch.tensor([61.3445, 56.9005, 29.4339, 59.5497, 60.7375, 34.1719, 48.1951, 42.6569, 25.0890]) +# fmt: on +# +# 
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) +# + + +class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): + model_class = UNetUnconditionalModel + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [10]).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_channels": [32, 64, 64, 64], + "in_channels": 3, + "num_res_blocks": 1, + "out_channels": 3, + "time_embedding_type": "fourier", + "resnet_eps": 1e-6, + "mid_block_scale_factor": math.sqrt(2.0), + "resnet_num_groups": None, + "down_blocks": [ + "UNetResSkipDownBlock2D", + "UNetResAttnSkipDownBlock2D", + "UNetResSkipDownBlock2D", + "UNetResSkipDownBlock2D", + ], + "up_blocks": [ + "UNetResSkipUpBlock2D", + "UNetResSkipUpBlock2D", + "UNetResAttnSkipUpBlock2D", + "UNetResSkipUpBlock2D", + ], + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = UNetUnconditionalModel.from_pretrained( + "fusing/ncsnpp-ffhq-ve-dummy-update", output_loading_info=True + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained_ve_mid(self): + model = UNetUnconditionalModel.from_pretrained("google/ncsnpp-celebahq-256") + model.to(torch_device) + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + batch_size = 4 + num_channels = 3 + sizes = (256, 256) + + noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) + + with torch.no_grad(): + output = model(noise, time_step)["sample"] + + output_slice = output[0, -3:, -3:, -1].flatten().cpu() + # fmt: off + expected_output_slice = torch.tensor([-4836.2231, -6487.1387, -3816.7969, -7964.9253, -10966.2842, -20043.6016, 8137.0571, 2340.3499, 544.6114]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + def test_output_pretrained_ve_large(self): + model = UNetUnconditionalModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update") + model.to(torch_device) + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) + + with torch.no_grad(): + output = model(noise, time_step)["sample"] + + output_slice = output[0, -3:, -3:, -1].flatten().cpu() + # fmt: off + expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + +class VQModelTests(ModelTesterMixin, unittest.TestCase): + model_class = VQModel + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + + return {"sample": image} + + @property 
+ def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "ch": 64, + "out_ch": 3, + "num_res_blocks": 1, + "in_channels": 3, + "attn_resolutions": [], + "resolution": 32, + "z_channels": 3, + "n_embed": 256, + "embed_dim": 3, + "sane_index_shape": False, + "ch_mult": (1,), + "dropout": 0.0, + "double_z": False, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_forward_signature(self): + pass + + def test_training(self): + pass + + def test_from_pretrained_hub(self): + model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = VQModel.from_pretrained("fusing/vqgan-dummy") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + image = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) + with torch.no_grad(): + output = model(image) + + output_slice = output[0, -1, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-1.1321, 0.1056, 0.3505, -0.6461, -0.2014, 0.0419, -0.5763, -0.8462, -0.4218]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + +class TemporalUNetModelTests(ModelTesterMixin, unittest.TestCase): + model_class = TemporalUNet + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = floats_tensor((batch_size, seq_len, num_features)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"sample": noise, "timesteps": time_step} + + @property + def input_shape(self): + return (4, 16, 14) + + @property + def output_shape(self): + return (4, 16, 14) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "training_horizon": 128, + "dim": 32, + "dim_mults": [1, 4, 8], + "predict_epsilon": False, + "clip_denoised": True, + "transition_dim": 14, + "cond_dim": 3, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = TemporalUNet.from_pretrained( + "fusing/ddpm-unet-rl-hopper-hor128", output_loading_info=True + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + num_features = model.transition_dim + seq_len = 16 + noise = torch.randn((1, seq_len, num_features)) + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = model(noise, time_step) + + output_slice = output[0, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-0.2714, 0.1042, -0.0794, -0.2820, 0.0803, -0.0811, -0.2345, 0.0580, -0.0584]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + + +class AutoencoderKLTests(ModelTesterMixin, unittest.TestCase): + model_class = AutoencoderKL + + 
@property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "ch": 64, + "ch_mult": (1,), + "embed_dim": 4, + "in_channels": 3, + "attn_resolutions": [], + "num_res_blocks": 1, + "out_ch": 3, + "resolution": 32, + "z_channels": 4, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_forward_signature(self): + pass + + def test_training(self): + pass + + def test_from_pretrained_hub(self): + model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + image = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) + with torch.no_grad(): + output = model(image, sample_posterior=True) + + output_slice = output[0, -1, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-0.0814, -0.0229, -0.1320, -0.4123, -0.0366, -0.3473, 0.0438, -0.1662, 0.1750]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) + + +class PipelineTesterMixin(unittest.TestCase): + def test_from_pretrained_save_pretrained(self): + # 1. 
Load models + model = UNetUnconditionalModel( + block_channels=(32, 64), + num_res_blocks=2, + image_size=32, + in_channels=3, + out_channels=3, + down_blocks=("UNetResDownBlock2D", "UNetResAttnDownBlock2D"), + up_blocks=("UNetResAttnUpBlock2D", "UNetResUpBlock2D"), + ) + schedular = DDPMScheduler(num_train_timesteps=10) + + ddpm = DDPMPipeline(model, schedular) + + with tempfile.TemporaryDirectory() as tmpdirname: + ddpm.save_pretrained(tmpdirname) + new_ddpm = DDPMPipeline.from_pretrained(tmpdirname) + + generator = torch.manual_seed(0) + + image = ddpm(generator=generator)["sample"] + generator = generator.manual_seed(0) + new_image = new_ddpm(generator=generator)["sample"] + + assert (image - new_image).abs().sum() < 1e-5, "Models don't give the same forward pass" + + @slow + def test_from_pretrained_hub(self): + model_path = "google/ddpm-cifar10-32" + + ddpm = DDPMPipeline.from_pretrained(model_path) + ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path) + + ddpm.scheduler.num_timesteps = 10 + ddpm_from_hub.scheduler.num_timesteps = 10 + + generator = torch.manual_seed(0) + + image = ddpm(generator=generator)["sample"] + generator = generator.manual_seed(0) + new_image = ddpm_from_hub(generator=generator)["sample"] + + assert (image - new_image).abs().sum() < 1e-5, "Models don't give the same forward pass" + + @slow + def test_ddpm_cifar10(self): + model_id = "google/ddpm-cifar10-32" + + unet = UNetUnconditionalModel.from_pretrained(model_id) + scheduler = DDPMScheduler.from_config(model_id) + scheduler = scheduler.set_format("pt") + + ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) + + generator = torch.manual_seed(0) + image = ddpm(generator=generator)["sample"] + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 32, 32) + expected_slice = torch.tensor( + [-0.1601, -0.2823, -0.6123, -0.2305, -0.3236, -0.4706, -0.1691, -0.2836, -0.3231] + ) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_ddim_lsun(self): + model_id = "google/ddpm-ema-bedroom-256" + + unet = UNetUnconditionalModel.from_pretrained(model_id) + scheduler = DDIMScheduler.from_config(model_id) + + ddpm = DDIMPipeline(unet=unet, scheduler=scheduler) + + generator = torch.manual_seed(0) + image = ddpm(generator=generator)["sample"] + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 256, 256) + expected_slice = torch.tensor( + [-0.9879, -0.9598, -0.9312, -0.9953, -0.9963, -0.9995, -0.9957, -1.0000, -0.9863] + ) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_ddim_cifar10(self): + model_id = "google/ddpm-cifar10-32" + + unet = UNetUnconditionalModel.from_pretrained(model_id) + scheduler = DDIMScheduler(tensor_format="pt") + + ddim = DDIMPipeline(unet=unet, scheduler=scheduler) + + generator = torch.manual_seed(0) + image = ddim(generator=generator, eta=0.0)["sample"] + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 32, 32) + expected_slice = torch.tensor( + [-0.6553, -0.6765, -0.6799, -0.6749, -0.7006, -0.6974, -0.6991, -0.7116, -0.7094] + ) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_pndm_cifar10(self): + model_id = "google/ddpm-cifar10-32" + + unet = UNetUnconditionalModel.from_pretrained(model_id) + scheduler = PNDMScheduler(tensor_format="pt") + + pndm = PNDMPipeline(unet=unet, scheduler=scheduler) + generator = torch.manual_seed(0) + image = pndm(generator=generator)["sample"] + + image_slice = 
image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 32, 32) + expected_slice = torch.tensor( + [-0.6872, -0.7071, -0.7188, -0.7057, -0.7515, -0.7191, -0.7377, -0.7565, -0.7500] + ) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_ldm_text2img(self): + ldm = LatentDiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") + + prompt = "A painting of a squirrel eating a burger" + generator = torch.manual_seed(0) + image = ldm([prompt], generator=generator, num_inference_steps=20) + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 256, 256) + expected_slice = torch.tensor([0.7295, 0.7358, 0.7256, 0.7435, 0.7095, 0.6884, 0.7325, 0.6921, 0.6458]) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_ldm_text2img_fast(self): + ldm = LatentDiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") + + prompt = "A painting of a squirrel eating a burger" + generator = torch.manual_seed(0) + image = ldm([prompt], generator=generator, num_inference_steps=1) + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 256, 256) + expected_slice = torch.tensor([0.3163, 0.8670, 0.6465, 0.1865, 0.6291, 0.5139, 0.2824, 0.3723, 0.4344]) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 + + @slow + def test_score_sde_ve_pipeline(self): + model = UNetUnconditionalModel.from_pretrained("google/ncsnpp-ffhq-1024") + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + scheduler = ScoreSdeVeScheduler.from_config("google/ncsnpp-ffhq-1024") + + sde_ve = ScoreSdeVePipeline(model=model, scheduler=scheduler) + + torch.manual_seed(0) + image = sde_ve(num_inference_steps=2) + + if model.device.type == "cpu": + # patrick's cpu + expected_image_sum = 3384805888.0 + expected_image_mean = 1076.00085 + + # m1 mbp + # expected_image_sum = 3384805376.0 + # expected_image_mean = 1076.000610351562 + else: + expected_image_sum = 3382849024.0 + expected_image_mean = 1075.3788 + + assert (image.abs().sum() - expected_image_sum).abs().cpu().item() < 1e-2 + assert (image.abs().mean() - expected_image_mean).abs().cpu().item() < 1e-4 + + @slow + def test_ldm_uncond(self): + ldm = LatentDiffusionUncondPipeline.from_pretrained("CompVis/ldm-celebahq-256") + + generator = torch.manual_seed(0) + image = ldm(generator=generator, num_inference_steps=5)["sample"] + + image_slice = image[0, -1, -3:, -3:].cpu() + + assert image.shape == (1, 3, 256, 256) + expected_slice = torch.tensor( + [-0.1202, -0.1005, -0.0635, -0.0520, -0.1282, -0.0838, -0.0981, -0.1318, -0.1106] + ) + assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 From 84e94d7229ffefadef24219201752632a5e8d2bf Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 19 Jul 2022 15:57:11 -0700 Subject: [PATCH 002/133] match model forward api --- src/diffusers/models/unet_rl.py | 4 ++-- tests/test_modeling_utils.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 786a80a38a6e..ebf6209e9382 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -130,7 +130,7 @@ def __init__( nn.Conv1d(dim, transition_dim, 1), ) - def forward(self, sample, timesteps): + def forward(self, sample, timestep): """ x : [ batch x horizon x transition ] """ @@ -138,7 +138,7 @@ def forward(self, sample, timesteps): x = x.permute(0, 2, 1) - t = self.time_mlp(timesteps) + t = 
self.time_mlp(timestep) h = [] for resnet, resnet2, downsample in self.downs: diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index bbeb76e44503..06dd43c97aae 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -585,7 +585,7 @@ def dummy_input(self): noise = floats_tensor((batch_size, seq_len, num_features)).to(torch_device) time_step = torch.tensor([10] * batch_size).to(torch_device) - return {"sample": noise, "timesteps": time_step} + return {"sample": noise, "timestep": time_step} @property def input_shape(self): From f67b036e862b34741282af0d9477c04326ea9cfb Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 25 Jul 2022 18:33:17 -0700 Subject: [PATCH 003/133] add register_to_config, pass training tests --- src/diffusers/models/unet_rl.py | 3 ++- tests/test_modeling_utils.py | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index ebf6209e9382..ea47cc58934e 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -5,7 +5,7 @@ from diffusers.models.resnet import Downsample1D, ResidualTemporalBlock, Upsample1D -from ..configuration_utils import ConfigMixin +from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from .embeddings import get_timestep_embedding @@ -57,6 +57,7 @@ def forward(self, x): class TemporalUNet(ModelMixin, ConfigMixin): # (nn.Module): + @register_to_config def __init__( self, training_horizon=128, diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py index 06dd43c97aae..f137d40fc38a 100755 --- a/tests/test_modeling_utils.py +++ b/tests/test_modeling_utils.py @@ -595,6 +595,12 @@ def input_shape(self): def output_shape(self): return (4, 16, 14) + def test_ema_training(self): + pass + + def test_training(self): + pass + def prepare_init_args_and_inputs_for_common(self): init_dict = { "training_horizon": 128, From e42d1c05afc41a8938cf60796b16dab53c81aea7 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 3 Oct 2022 13:49:10 -0400 Subject: [PATCH 004/133] fix tests, update forward outputs --- src/diffusers/__init__.py | 2 +- src/diffusers/models/unet_rl.py | 73 ++- tests/test_modeling_utils.py | 921 -------------------------------- tests/test_models_unet.py | 83 ++- 4 files changed, 136 insertions(+), 943 deletions(-) delete mode 100755 tests/test_modeling_utils.py diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 1cf64a4a2ebf..e6b920a31b4c 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .modeling_utils import ModelMixin - from .models import AutoencoderKL, UNet2DConditionModel, UNet2DModel, VQModel + from .models import AutoencoderKL, TemporalUNet, UNet2DConditionModel, UNet2DModel, VQModel from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index ea47cc58934e..be668c9c02a3 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -1,4 +1,6 @@ # model adapted from diffuser https://github.com/jannerm/diffuser/blob/main/diffuser/models/temporal.py +from dataclasses import dataclass +from typing import Tuple, Union import torch import torch.nn as nn @@ -7,9 +9,21 @@ from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin +from ..utils import 
BaseOutput from .embeddings import get_timestep_embedding +@dataclass +class TemporalUNetOutput(BaseOutput): + """ + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Hidden states output. Output of last layer of model. + """ + + sample: torch.FloatTensor + + class SinusoidalPosEmb(nn.Module): def __init__(self, dim): super().__init__() @@ -131,36 +145,55 @@ def __init__( nn.Conv1d(dim, transition_dim, 1), ) - def forward(self, sample, timestep): - """ - x : [ batch x horizon x transition ] + # def forward(self, sample, timestep): + # """ + # x : [ batch x horizon x transition ] #""" + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + return_dict: bool = True, + ) -> Union[TemporalUNetOutput, Tuple]: + """r + Args: + sample (`torch.FloatTensor`): TODO verify shape (batch, channel, height, width) noisy inputs tensor + timestep (`torch.FloatTensor` or `float` or `int): TODO verify batch (batch) timesteps + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple. + + Returns: + [`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ - x = sample - - x = x.permute(0, 2, 1) + # x = sample + sample = sample.permute(0, 2, 1) t = self.time_mlp(timestep) h = [] for resnet, resnet2, downsample in self.downs: - x = resnet(x, t) - x = resnet2(x, t) - h.append(x) - x = downsample(x) + sample = resnet(sample, t) + sample = resnet2(sample, t) + h.append(sample) + sample = downsample(sample) - x = self.mid_block1(x, t) - x = self.mid_block2(x, t) + sample = self.mid_block1(sample, t) + sample = self.mid_block2(sample, t) for resnet, resnet2, upsample in self.ups: - x = torch.cat((x, h.pop()), dim=1) - x = resnet(x, t) - x = resnet2(x, t) - x = upsample(x) + sample = torch.cat((sample, h.pop()), dim=1) + sample = resnet(sample, t) + sample = resnet2(sample, t) + sample = upsample(sample) - x = self.final_conv(x) + sample = self.final_conv(sample) - x = x.permute(0, 2, 1) - return x + sample = sample.permute(0, 2, 1) + + if not return_dict: + return (sample,) + + return TemporalUNetOutput(sample=sample) class TemporalValue(nn.Module): @@ -196,7 +229,7 @@ def __init__( [ ResidualTemporalBlock(dim_in, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon), ResidualTemporalBlock(dim_out, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon), - Downsample1d(dim_out), + Downsample1D(dim_out), ] ) ) diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py deleted file mode 100755 index f137d40fc38a..000000000000 --- a/tests/test_modeling_utils.py +++ /dev/null @@ -1,921 +0,0 @@ -# coding=utf-8 -# Copyright 2022 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import inspect -import math -import tempfile -import unittest - -import numpy as np -import torch - -from diffusers import UNetConditionalModel # noqa: F401 TODO(Patrick) - need to write tests with it -from diffusers import ( - AutoencoderKL, - DDIMPipeline, - DDIMScheduler, - DDPMPipeline, - DDPMScheduler, - LatentDiffusionPipeline, - LatentDiffusionUncondPipeline, - PNDMPipeline, - PNDMScheduler, - ScoreSdeVePipeline, - ScoreSdeVeScheduler, - TemporalUNet, - UNetUnconditionalModel, - VQModel, -) -from diffusers.configuration_utils import ConfigMixin -from diffusers.pipeline_utils import DiffusionPipeline -from diffusers.testing_utils import floats_tensor, slow, torch_device -from diffusers.training_utils import EMAModel - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class ConfigTester(unittest.TestCase): - def test_load_not_from_mixin(self): - with self.assertRaises(ValueError): - ConfigMixin.from_config("dummy_path") - - def test_save_load(self): - class SampleObject(ConfigMixin): - config_name = "config.json" - - def __init__( - self, - a=2, - b=5, - c=(2, 5), - d="for diffusion", - e=[1, 3], - ): - self.register_to_config(a=a, b=b, c=c, d=d, e=e) - - obj = SampleObject() - config = obj.config - - assert config["a"] == 2 - assert config["b"] == 5 - assert config["c"] == (2, 5) - assert config["d"] == "for diffusion" - assert config["e"] == [1, 3] - - with tempfile.TemporaryDirectory() as tmpdirname: - obj.save_config(tmpdirname) - new_obj = SampleObject.from_config(tmpdirname) - new_config = new_obj.config - - # unfreeze configs - config = dict(config) - new_config = dict(new_config) - - assert config.pop("c") == (2, 5) # instantiated as tuple - assert new_config.pop("c") == [2, 5] # saved & loaded as list because of json - assert config == new_config - - -class ModelTesterMixin: - def test_from_pretrained_save_pretrained(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with tempfile.TemporaryDirectory() as tmpdirname: - model.save_pretrained(tmpdirname) - new_model = self.model_class.from_pretrained(tmpdirname) - new_model.to(torch_device) - - with torch.no_grad(): - image = model(**inputs_dict) - if isinstance(image, dict): - image = image["sample"] - - new_image = new_model(**inputs_dict) - - if isinstance(new_image, dict): - new_image = new_image["sample"] - - max_diff = (image - new_image).abs().sum().item() - self.assertLessEqual(max_diff, 5e-5, "Models give different forward passes") - - def test_determinism(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - with torch.no_grad(): - first = model(**inputs_dict) - if isinstance(first, dict): - first = first["sample"] - - second = model(**inputs_dict) - if isinstance(second, dict): - second = second["sample"] - - out_1 = first.cpu().numpy() - out_2 = second.cpu().numpy() - out_1 = out_1[~np.isnan(out_1)] - out_2 = out_2[~np.isnan(out_2)] - max_diff = np.amax(np.abs(out_1 - out_2)) - self.assertLessEqual(max_diff, 1e-5) - - def test_output(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - output = model(**inputs_dict) - - if isinstance(output, dict): - output = output["sample"] - - self.assertIsNotNone(output) - expected_shape = inputs_dict["sample"].shape 
- self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def test_forward_signature(self): - init_dict, _ = self.prepare_init_args_and_inputs_for_common() - - model = self.model_class(**init_dict) - signature = inspect.signature(model.forward) - # signature.parameters is an OrderedDict => so arg_names order is deterministic - arg_names = [*signature.parameters.keys()] - - expected_arg_names = ["sample", "timestep"] - self.assertListEqual(arg_names[:2], expected_arg_names) - - def test_model_from_config(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - # test if the model can be loaded from the config - # and has all the expected shape - with tempfile.TemporaryDirectory() as tmpdirname: - model.save_config(tmpdirname) - new_model = self.model_class.from_config(tmpdirname) - new_model.to(torch_device) - new_model.eval() - - # check if all paramters shape are the same - for param_name in model.state_dict().keys(): - param_1 = model.state_dict()[param_name] - param_2 = new_model.state_dict()[param_name] - self.assertEqual(param_1.shape, param_2.shape) - - with torch.no_grad(): - output_1 = model(**inputs_dict) - - if isinstance(output_1, dict): - output_1 = output_1["sample"] - - output_2 = new_model(**inputs_dict) - - if isinstance(output_2, dict): - output_2 = output_2["sample"] - - self.assertEqual(output_1.shape, output_2.shape) - - def test_training(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - model = self.model_class(**init_dict) - model.to(torch_device) - model.train() - output = model(**inputs_dict) - - if isinstance(output, dict): - output = output["sample"] - - noise = torch.randn((inputs_dict["sample"].shape[0],) + self.output_shape).to(torch_device) - loss = torch.nn.functional.mse_loss(output, noise) - loss.backward() - - def test_ema_training(self): - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - - model = self.model_class(**init_dict) - model.to(torch_device) - model.train() - ema_model = EMAModel(model, device=torch_device) - - output = model(**inputs_dict) - - if isinstance(output, dict): - output = output["sample"] - - noise = torch.randn((inputs_dict["sample"].shape[0],) + self.output_shape).to(torch_device) - loss = torch.nn.functional.mse_loss(output, noise) - loss.backward() - ema_model.step(model) - - -class UnetModelTests(ModelTesterMixin, unittest.TestCase): - model_class = UNetUnconditionalModel - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor([10]).to(torch_device) - - return {"sample": noise, "timestep": time_step} - - @property - def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (3, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "block_channels": (32, 64), - "down_blocks": ("UNetResDownBlock2D", "UNetResAttnDownBlock2D"), - "up_blocks": ("UNetResAttnUpBlock2D", "UNetResUpBlock2D"), - "num_head_channels": None, - "out_channels": 3, - "in_channels": 3, - "num_res_blocks": 2, - "image_size": 32, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - -# TODO(Patrick) - Re-add this test after having correctly added the final VE checkpoints -# def test_output_pretrained(self): -# model = 
UNetUnconditionalModel.from_pretrained("fusing/ddpm_dummy_update", subfolder="unet") -# model.eval() -# -# torch.manual_seed(0) -# if torch.cuda.is_available(): -# torch.cuda.manual_seed_all(0) -# -# noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) -# time_step = torch.tensor([10]) -# -# with torch.no_grad(): -# output = model(noise, time_step)["sample"] -# -# output_slice = output[0, -1, -3:, -3:].flatten() -# fmt: off -# expected_output_slice = torch.tensor([0.2891, -0.1899, 0.2595, -0.6214, 0.0968, -0.2622, 0.4688, 0.1311, 0.0053]) -# fmt: on -# self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - -class UNetLDMModelTests(ModelTesterMixin, unittest.TestCase): - model_class = UNetUnconditionalModel - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 4 - sizes = (32, 32) - - noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor([10]).to(torch_device) - - return {"sample": noise, "timestep": time_step} - - @property - def input_shape(self): - return (4, 32, 32) - - @property - def output_shape(self): - return (4, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "image_size": 32, - "in_channels": 4, - "out_channels": 4, - "num_res_blocks": 2, - "block_channels": (32, 64), - "num_head_channels": 32, - "conv_resample": True, - "down_blocks": ("UNetResDownBlock2D", "UNetResDownBlock2D"), - "up_blocks": ("UNetResUpBlock2D", "UNetResUpBlock2D"), - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = UNetUnconditionalModel.from_pretrained( - "fusing/unet-ldm-dummy-update", output_loading_info=True - ) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input)["sample"] - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = UNetUnconditionalModel.from_pretrained("fusing/unet-ldm-dummy-update") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) - time_step = torch.tensor([10] * noise.shape[0]) - - with torch.no_grad(): - output = model(noise, time_step)["sample"] - - output_slice = output[0, -1, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) - - -# TODO(Patrick) - Re-add this test after having cleaned up LDM -# def test_output_pretrained_spatial_transformer(self): -# model = UNetLDMModel.from_pretrained("fusing/unet-ldm-dummy-spatial") -# model.eval() -# -# torch.manual_seed(0) -# if torch.cuda.is_available(): -# torch.cuda.manual_seed_all(0) -# -# noise = torch.randn(1, model.config.in_channels, model.config.image_size, model.config.image_size) -# context = torch.ones((1, 16, 64), dtype=torch.float32) -# time_step = torch.tensor([10] * noise.shape[0]) -# -# with torch.no_grad(): -# output = model(noise, time_step, context=context) -# -# output_slice = output[0, -1, -3:, -3:].flatten() -# fmt: off -# expected_output_slice = torch.tensor([61.3445, 56.9005, 29.4339, 59.5497, 60.7375, 34.1719, 48.1951, 42.6569, 25.0890]) -# fmt: on -# -# 
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) -# - - -class NCSNppModelTests(ModelTesterMixin, unittest.TestCase): - model_class = UNetUnconditionalModel - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [10]).to(torch_device) - - return {"sample": noise, "timestep": time_step} - - @property - def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (3, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "block_channels": [32, 64, 64, 64], - "in_channels": 3, - "num_res_blocks": 1, - "out_channels": 3, - "time_embedding_type": "fourier", - "resnet_eps": 1e-6, - "mid_block_scale_factor": math.sqrt(2.0), - "resnet_num_groups": None, - "down_blocks": [ - "UNetResSkipDownBlock2D", - "UNetResAttnSkipDownBlock2D", - "UNetResSkipDownBlock2D", - "UNetResSkipDownBlock2D", - ], - "up_blocks": [ - "UNetResSkipUpBlock2D", - "UNetResSkipUpBlock2D", - "UNetResAttnSkipUpBlock2D", - "UNetResSkipUpBlock2D", - ], - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = UNetUnconditionalModel.from_pretrained( - "fusing/ncsnpp-ffhq-ve-dummy-update", output_loading_info=True - ) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained_ve_mid(self): - model = UNetUnconditionalModel.from_pretrained("google/ncsnpp-celebahq-256") - model.to(torch_device) - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - batch_size = 4 - num_channels = 3 - sizes = (256, 256) - - noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) - - with torch.no_grad(): - output = model(noise, time_step)["sample"] - - output_slice = output[0, -3:, -3:, -1].flatten().cpu() - # fmt: off - expected_output_slice = torch.tensor([-4836.2231, -6487.1387, -3816.7969, -7964.9253, -10966.2842, -20043.6016, 8137.0571, 2340.3499, 544.6114]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - def test_output_pretrained_ve_large(self): - model = UNetUnconditionalModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update") - model.to(torch_device) - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) - time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) - - with torch.no_grad(): - output = model(noise, time_step)["sample"] - - output_slice = output[0, -3:, -3:, -1].flatten().cpu() - # fmt: off - expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - -class VQModelTests(ModelTesterMixin, unittest.TestCase): - model_class = VQModel - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - - return {"sample": image} - - @property 
- def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (3, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "ch": 64, - "out_ch": 3, - "num_res_blocks": 1, - "in_channels": 3, - "attn_resolutions": [], - "resolution": 32, - "z_channels": 3, - "n_embed": 256, - "embed_dim": 3, - "sane_index_shape": False, - "ch_mult": (1,), - "dropout": 0.0, - "double_z": False, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_forward_signature(self): - pass - - def test_training(self): - pass - - def test_from_pretrained_hub(self): - model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = VQModel.from_pretrained("fusing/vqgan-dummy") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - image = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) - with torch.no_grad(): - output = model(image) - - output_slice = output[0, -1, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-1.1321, 0.1056, 0.3505, -0.6461, -0.2014, 0.0419, -0.5763, -0.8462, -0.4218]) - # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - -class TemporalUNetModelTests(ModelTesterMixin, unittest.TestCase): - model_class = TemporalUNet - - @property - def dummy_input(self): - batch_size = 4 - num_features = 14 - seq_len = 16 - - noise = floats_tensor((batch_size, seq_len, num_features)).to(torch_device) - time_step = torch.tensor([10] * batch_size).to(torch_device) - - return {"sample": noise, "timestep": time_step} - - @property - def input_shape(self): - return (4, 16, 14) - - @property - def output_shape(self): - return (4, 16, 14) - - def test_ema_training(self): - pass - - def test_training(self): - pass - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "training_horizon": 128, - "dim": 32, - "dim_mults": [1, 4, 8], - "predict_epsilon": False, - "clip_denoised": True, - "transition_dim": 14, - "cond_dim": 3, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = TemporalUNet.from_pretrained( - "fusing/ddpm-unet-rl-hopper-hor128", output_loading_info=True - ) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - num_features = model.transition_dim - seq_len = 16 - noise = torch.randn((1, seq_len, num_features)) - time_step = torch.full((num_features,), 0) - - with torch.no_grad(): - output = model(noise, time_step) - - output_slice = output[0, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-0.2714, 0.1042, -0.0794, -0.2820, 0.0803, -0.0811, -0.2345, 0.0580, -0.0584]) - # fmt: on - - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) - - -class 
AutoencoderKLTests(ModelTesterMixin, unittest.TestCase): - model_class = AutoencoderKL - - @property - def dummy_input(self): - batch_size = 4 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) - - return {"sample": image} - - @property - def input_shape(self): - return (3, 32, 32) - - @property - def output_shape(self): - return (3, 32, 32) - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "ch": 64, - "ch_mult": (1,), - "embed_dim": 4, - "in_channels": 3, - "attn_resolutions": [], - "num_res_blocks": 1, - "out_ch": 3, - "resolution": 32, - "z_channels": 4, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_forward_signature(self): - pass - - def test_training(self): - pass - - def test_from_pretrained_hub(self): - model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy") - model.eval() - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - image = torch.randn(1, model.config.in_channels, model.config.resolution, model.config.resolution) - with torch.no_grad(): - output = model(image, sample_posterior=True) - - output_slice = output[0, -1, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-0.0814, -0.0229, -0.1320, -0.4123, -0.0366, -0.3473, 0.0438, -0.1662, 0.1750]) - # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2)) - - -class PipelineTesterMixin(unittest.TestCase): - def test_from_pretrained_save_pretrained(self): - # 1. 
Load models - model = UNetUnconditionalModel( - block_channels=(32, 64), - num_res_blocks=2, - image_size=32, - in_channels=3, - out_channels=3, - down_blocks=("UNetResDownBlock2D", "UNetResAttnDownBlock2D"), - up_blocks=("UNetResAttnUpBlock2D", "UNetResUpBlock2D"), - ) - schedular = DDPMScheduler(num_train_timesteps=10) - - ddpm = DDPMPipeline(model, schedular) - - with tempfile.TemporaryDirectory() as tmpdirname: - ddpm.save_pretrained(tmpdirname) - new_ddpm = DDPMPipeline.from_pretrained(tmpdirname) - - generator = torch.manual_seed(0) - - image = ddpm(generator=generator)["sample"] - generator = generator.manual_seed(0) - new_image = new_ddpm(generator=generator)["sample"] - - assert (image - new_image).abs().sum() < 1e-5, "Models don't give the same forward pass" - - @slow - def test_from_pretrained_hub(self): - model_path = "google/ddpm-cifar10-32" - - ddpm = DDPMPipeline.from_pretrained(model_path) - ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path) - - ddpm.scheduler.num_timesteps = 10 - ddpm_from_hub.scheduler.num_timesteps = 10 - - generator = torch.manual_seed(0) - - image = ddpm(generator=generator)["sample"] - generator = generator.manual_seed(0) - new_image = ddpm_from_hub(generator=generator)["sample"] - - assert (image - new_image).abs().sum() < 1e-5, "Models don't give the same forward pass" - - @slow - def test_ddpm_cifar10(self): - model_id = "google/ddpm-cifar10-32" - - unet = UNetUnconditionalModel.from_pretrained(model_id) - scheduler = DDPMScheduler.from_config(model_id) - scheduler = scheduler.set_format("pt") - - ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) - - generator = torch.manual_seed(0) - image = ddpm(generator=generator)["sample"] - - image_slice = image[0, -1, -3:, -3:].cpu() - - assert image.shape == (1, 3, 32, 32) - expected_slice = torch.tensor( - [-0.1601, -0.2823, -0.6123, -0.2305, -0.3236, -0.4706, -0.1691, -0.2836, -0.3231] - ) - assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 - - @slow - def test_ddim_lsun(self): - model_id = "google/ddpm-ema-bedroom-256" - - unet = UNetUnconditionalModel.from_pretrained(model_id) - scheduler = DDIMScheduler.from_config(model_id) - - ddpm = DDIMPipeline(unet=unet, scheduler=scheduler) - - generator = torch.manual_seed(0) - image = ddpm(generator=generator)["sample"] - - image_slice = image[0, -1, -3:, -3:].cpu() - - assert image.shape == (1, 3, 256, 256) - expected_slice = torch.tensor( - [-0.9879, -0.9598, -0.9312, -0.9953, -0.9963, -0.9995, -0.9957, -1.0000, -0.9863] - ) - assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 - - @slow - def test_ddim_cifar10(self): - model_id = "google/ddpm-cifar10-32" - - unet = UNetUnconditionalModel.from_pretrained(model_id) - scheduler = DDIMScheduler(tensor_format="pt") - - ddim = DDIMPipeline(unet=unet, scheduler=scheduler) - - generator = torch.manual_seed(0) - image = ddim(generator=generator, eta=0.0)["sample"] - - image_slice = image[0, -1, -3:, -3:].cpu() - - assert image.shape == (1, 3, 32, 32) - expected_slice = torch.tensor( - [-0.6553, -0.6765, -0.6799, -0.6749, -0.7006, -0.6974, -0.6991, -0.7116, -0.7094] - ) - assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 - - @slow - def test_pndm_cifar10(self): - model_id = "google/ddpm-cifar10-32" - - unet = UNetUnconditionalModel.from_pretrained(model_id) - scheduler = PNDMScheduler(tensor_format="pt") - - pndm = PNDMPipeline(unet=unet, scheduler=scheduler) - generator = torch.manual_seed(0) - image = pndm(generator=generator)["sample"] - - image_slice = 
image[0, -1, -3:, -3:].cpu() - - assert image.shape == (1, 3, 32, 32) - expected_slice = torch.tensor( - [-0.6872, -0.7071, -0.7188, -0.7057, -0.7515, -0.7191, -0.7377, -0.7565, -0.7500] - ) - assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 - - @slow - def test_ldm_text2img(self): - ldm = LatentDiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") - - prompt = "A painting of a squirrel eating a burger" - generator = torch.manual_seed(0) - image = ldm([prompt], generator=generator, num_inference_steps=20) - - image_slice = image[0, -1, -3:, -3:].cpu() - - assert image.shape == (1, 3, 256, 256) - expected_slice = torch.tensor([0.7295, 0.7358, 0.7256, 0.7435, 0.7095, 0.6884, 0.7325, 0.6921, 0.6458]) - assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 - - @slow - def test_ldm_text2img_fast(self): - ldm = LatentDiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") - - prompt = "A painting of a squirrel eating a burger" - generator = torch.manual_seed(0) - image = ldm([prompt], generator=generator, num_inference_steps=1) - - image_slice = image[0, -1, -3:, -3:].cpu() - - assert image.shape == (1, 3, 256, 256) - expected_slice = torch.tensor([0.3163, 0.8670, 0.6465, 0.1865, 0.6291, 0.5139, 0.2824, 0.3723, 0.4344]) - assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 - - @slow - def test_score_sde_ve_pipeline(self): - model = UNetUnconditionalModel.from_pretrained("google/ncsnpp-ffhq-1024") - - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - scheduler = ScoreSdeVeScheduler.from_config("google/ncsnpp-ffhq-1024") - - sde_ve = ScoreSdeVePipeline(model=model, scheduler=scheduler) - - torch.manual_seed(0) - image = sde_ve(num_inference_steps=2) - - if model.device.type == "cpu": - # patrick's cpu - expected_image_sum = 3384805888.0 - expected_image_mean = 1076.00085 - - # m1 mbp - # expected_image_sum = 3384805376.0 - # expected_image_mean = 1076.000610351562 - else: - expected_image_sum = 3382849024.0 - expected_image_mean = 1075.3788 - - assert (image.abs().sum() - expected_image_sum).abs().cpu().item() < 1e-2 - assert (image.abs().mean() - expected_image_mean).abs().cpu().item() < 1e-4 - - @slow - def test_ldm_uncond(self): - ldm = LatentDiffusionUncondPipeline.from_pretrained("CompVis/ldm-celebahq-256") - - generator = torch.manual_seed(0) - image = ldm(generator=generator, num_inference_steps=5)["sample"] - - image_slice = image[0, -1, -3:, -3:].cpu() - - assert image.shape == (1, 3, 256, 256) - expected_slice = torch.tensor( - [-0.1202, -0.1005, -0.0635, -0.0520, -0.1282, -0.0838, -0.0981, -0.1318, -0.1106] - ) - assert (image_slice.flatten() - expected_slice).abs().max() < 1e-2 diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 94a186d1c06a..12f38ab4e557 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -18,7 +18,7 @@ import torch -from diffusers import UNet2DConditionModel, UNet2DModel +from diffusers import TemporalUNet, UNet2DConditionModel, UNet2DModel from diffusers.testing_utils import floats_tensor, slow, torch_device from .test_modeling_common import ModelTesterMixin @@ -375,3 +375,84 @@ def test_output_pretrained_ve_large(self): def test_forward_with_norm_groups(self): # not required for this model pass + + +class TemporalUNetModelTests(ModelTesterMixin, unittest.TestCase): + model_class = TemporalUNet + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = 
floats_tensor((batch_size, seq_len, num_features)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 16, 14) + + @property + def output_shape(self): + return (4, 16, 14) + + def test_ema_training(self): + pass + + def test_training(self): + pass + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "training_horizon": 128, + "dim": 32, + "dim_mults": [1, 4, 8], + "predict_epsilon": False, + "clip_denoised": True, + "transition_dim": 14, + "cond_dim": 3, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = TemporalUNet.from_pretrained( + "fusing/ddpm-unet-rl-hopper-hor128", output_loading_info=True + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") + model.eval() + + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + num_features = model.transition_dim + seq_len = 16 + noise = torch.randn((1, seq_len, num_features)) + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = model(noise, time_step).sample + + output_slice = output[0, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-0.2714, 0.1042, -0.0794, -0.2820, 0.0803, -0.0811, -0.2345, 0.0580, -0.0584]) + # fmt: on + + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + + def test_forward_with_norm_groups(self): + # Not implemented yet for this UNet + pass From 2dd514ea536468664c8ca07e9e3505da691507e4 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 3 Oct 2022 14:16:57 -0400 Subject: [PATCH 005/133] remove unused code, some comments --- src/diffusers/models/unet_rl.py | 75 ++------------------------------- 1 file changed, 3 insertions(+), 72 deletions(-) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index be668c9c02a3..fd04fa7d9fb9 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -17,7 +17,7 @@ class TemporalUNetOutput(BaseOutput): """ Args: - sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + sample (`torch.FloatTensor` of shape `(batch, horizon, obs_dimension)`): Hidden states output. Output of last layer of model. 
""" @@ -59,10 +59,8 @@ def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): self.block = nn.Sequential( nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), RearrangeDim(), - # Rearrange("batch channels horizon -> batch channels 1 horizon"), nn.GroupNorm(n_groups, out_channels), RearrangeDim(), - # Rearrange("batch channels 1 horizon -> batch channels horizon"), nn.Mish(), ) @@ -156,8 +154,8 @@ def forward( ) -> Union[TemporalUNetOutput, Tuple]: """r Args: - sample (`torch.FloatTensor`): TODO verify shape (batch, channel, height, width) noisy inputs tensor - timestep (`torch.FloatTensor` or `float` or `int): TODO verify batch (batch) timesteps + sample (`torch.FloatTensor`): (batch, horizon, obs_dimension) noisy inputs tensor + timestep (`torch.FloatTensor` or `float` or `int): batch (batch) timesteps return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple. @@ -165,7 +163,6 @@ def forward( [`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ - # x = sample sample = sample.permute(0, 2, 1) t = self.time_mlp(timestep) @@ -194,69 +191,3 @@ def forward( return (sample,) return TemporalUNetOutput(sample=sample) - - -class TemporalValue(nn.Module): - def __init__( - self, - horizon, - transition_dim, - cond_dim, - dim=32, - time_dim=None, - out_dim=1, - dim_mults=(1, 2, 4, 8), - ): - super().__init__() - - dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] - in_out = list(zip(dims[:-1], dims[1:])) - - time_dim = time_dim or dim - self.time_mlp = nn.Sequential( - SinusoidalPosEmb(dim), - nn.Linear(dim, dim * 4), - nn.Mish(), - nn.Linear(dim * 4, dim), - ) - - self.blocks = nn.ModuleList([]) - - print(in_out) - for dim_in, dim_out in in_out: - self.blocks.append( - nn.ModuleList( - [ - ResidualTemporalBlock(dim_in, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon), - ResidualTemporalBlock(dim_out, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon), - Downsample1D(dim_out), - ] - ) - ) - - horizon = horizon // 2 - - fc_dim = dims[-1] * max(horizon, 1) - - self.final_block = nn.Sequential( - nn.Linear(fc_dim + time_dim, fc_dim // 2), - nn.Mish(), - nn.Linear(fc_dim // 2, out_dim), - ) - - def forward(self, x, cond, time, *args): - """ - x : [ batch x horizon x transition ] - """ - x = x.permute(0, 2, 1) - - t = self.time_mlp(time) - - for resnet, resnet2, downsample in self.blocks: - x = resnet(x, t) - x = resnet2(x, t) - x = downsample(x) - - x = x.view(len(x), -1) - out = self.final_block(torch.cat([x, t], dim=-1)) - return out From b4c6188998773ca0563461521acbbb880427b50c Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 3 Oct 2022 14:17:54 -0400 Subject: [PATCH 006/133] add to docs --- docs/source/api/models.mdx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/source/api/models.mdx b/docs/source/api/models.mdx index c92fdccb8333..98687b5e7038 100644 --- a/docs/source/api/models.mdx +++ b/docs/source/api/models.mdx @@ -34,6 +34,9 @@ The models are built on the base class ['ModelMixin'] that is a `torch.nn.module ## DecoderOutput [[autodoc]] models.vae.DecoderOutput +## TemporalUNet +[[autodoc]] TemporalUNet + ## VQEncoderOutput [[autodoc]] models.vae.VQEncoderOutput From c53bba903626691c143ebb8e3a2a65ac65f5c129 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Thu, 6 Oct 2022 15:56:15 
-0700 Subject: [PATCH 007/133] remove extra embedding code --- src/diffusers/models/unet_rl.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index fd04fa7d9fb9..a8354cdb64ec 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -10,8 +10,7 @@ from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from ..utils import BaseOutput -from .embeddings import get_timestep_embedding - +from .embeddings import get_timestep_embedding, Timesteps @dataclass class TemporalUNetOutput(BaseOutput): @@ -24,14 +23,6 @@ class TemporalUNetOutput(BaseOutput): sample: torch.FloatTensor -class SinusoidalPosEmb(nn.Module): - def __init__(self, dim): - super().__init__() - self.dim = dim - - def forward(self, x): - return get_timestep_embedding(x, self.dim) - class RearrangeDim(nn.Module): def __init__(self): @@ -92,7 +83,7 @@ def __init__( time_dim = dim self.time_mlp = nn.Sequential( - SinusoidalPosEmb(dim), + Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1), nn.Linear(dim, dim * 4), nn.Mish(), nn.Linear(dim * 4, dim), From effcbdbe95182b7f414786e42db5c6e192e3c2f0 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Fri, 7 Oct 2022 16:47:56 -0700 Subject: [PATCH 008/133] unify time embedding --- src/diffusers/models/embeddings.py | 11 +++++-- src/diffusers/models/resnet.py | 27 +++++++++++----- src/diffusers/models/unet_rl.py | 49 ++++++++++++++++++------------ 3 files changed, 58 insertions(+), 29 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index 06b814e2bbcd..7d2e1b677a9f 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -62,14 +62,21 @@ def get_timestep_embedding( class TimestepEmbedding(nn.Module): - def __init__(self, channel: int, time_embed_dim: int, act_fn: str = "silu"): + def __init__(self, channel: int, time_embed_dim: int, act_fn: str = "silu", out_dim: int = None): super().__init__() self.linear_1 = nn.Linear(channel, time_embed_dim) self.act = None if act_fn == "silu": self.act = nn.SiLU() - self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim) + if act_fn == "mish": + self.act = nn.Mish() + + if out_dim is not None: + time_embed_dim_out = out_dim + else: + time_embed_dim_out = time_embed_dim + self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out) def forward(self, sample): sample = self.linear_1(sample) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 9b52681c3b99..7fd0f1db3d36 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -474,6 +474,16 @@ def forward(self, tensor): else: raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") +def rearrange_dims(tensor): + if len(tensor.shape) == 2: + return tensor[:, :, None] + if len(tensor.shape) == 3: + return tensor[:, :, None, :] + elif len(tensor.shape) == 4: + return tensor[:, :, 0, :] + else: + raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") + # unet_rl.py class ResidualTemporalBlock(nn.Module): @@ -486,13 +496,14 @@ def __init__(self, inp_channels, out_channels, embed_dim, horizon, kernel_size=5 Conv1dBlock(out_channels, out_channels, kernel_size), ] ) + self.time_emb_act = nn.Mish() + self.time_emb = nn.Linear(embed_dim, out_channels) - self.time_mlp = nn.Sequential( - nn.Mish(), - nn.Linear(embed_dim, out_channels), - 
RearrangeDim(), - # Rearrange("batch t -> batch t 1"), - ) + # self.time_mlp = nn.Sequential( + # nn.Mish(), + # nn.Linear(embed_dim, out_channels), + # RearrangeDim(), + # ) self.residual_conv = ( nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() @@ -503,7 +514,9 @@ def forward(self, x, t): x : [ batch_size x inp_channels x horizon ] t : [ batch_size x embed_dim ] returns: out : [ batch_size x out_channels x horizon ] """ - out = self.blocks[0](x) + self.time_mlp(t) + t = self.time_emb_act(t) + t = self.time_emb(t) + out = self.blocks[0](x) + rearrange_dims(t) out = self.blocks[1](out) return out + self.residual_conv(x) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index a8354cdb64ec..e9fcc4c8535b 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -10,7 +10,7 @@ from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from ..utils import BaseOutput -from .embeddings import get_timestep_embedding, Timesteps +from .embeddings import get_timestep_embedding, Timesteps, TimestepEmbedding @dataclass class TemporalUNetOutput(BaseOutput): @@ -78,17 +78,12 @@ def __init__( self.predict_epsilon = predict_epsilon self.clip_denoised = clip_denoised + self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1) + self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4*dim, act_fn="mish", out_dim=dim) + dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] in_out = list(zip(dims[:-1], dims[1:])) - time_dim = dim - self.time_mlp = nn.Sequential( - Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1), - nn.Linear(dim, dim * 4), - nn.Mish(), - nn.Linear(dim * 4, dim), - ) - self.downs = nn.ModuleList([]) self.ups = nn.ModuleList([]) num_resolutions = len(in_out) @@ -99,8 +94,8 @@ def __init__( self.downs.append( nn.ModuleList( [ - ResidualTemporalBlock(dim_in, dim_out, embed_dim=time_dim, horizon=training_horizon), - ResidualTemporalBlock(dim_out, dim_out, embed_dim=time_dim, horizon=training_horizon), + ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim, horizon=training_horizon), + ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim, horizon=training_horizon), Downsample1D(dim_out, use_conv=True) if not is_last else nn.Identity(), ] ) @@ -110,8 +105,8 @@ def __init__( training_horizon = training_horizon // 2 mid_dim = dims[-1] - self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=time_dim, horizon=training_horizon) - self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=time_dim, horizon=training_horizon) + self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim, horizon=training_horizon) + self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim, horizon=training_horizon) for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): is_last = ind >= (num_resolutions - 1) @@ -119,8 +114,8 @@ def __init__( self.ups.append( nn.ModuleList( [ - ResidualTemporalBlock(dim_out * 2, dim_in, embed_dim=time_dim, horizon=training_horizon), - ResidualTemporalBlock(dim_in, dim_in, embed_dim=time_dim, horizon=training_horizon), + ResidualTemporalBlock(dim_out * 2, dim_in, embed_dim=dim, horizon=training_horizon), + ResidualTemporalBlock(dim_in, dim_in, embed_dim=dim, horizon=training_horizon), Upsample1D(dim_in, use_conv_transpose=True) if not is_last else nn.Identity(), ] ) @@ -134,9 +129,6 @@ def __init__( nn.Conv1d(dim, 
transition_dim, 1), ) - # def forward(self, sample, timestep): - # """ - # x : [ batch x horizon x transition ] #""" def forward( self, sample: torch.FloatTensor, @@ -145,7 +137,7 @@ def forward( ) -> Union[TemporalUNetOutput, Tuple]: """r Args: - sample (`torch.FloatTensor`): (batch, horizon, obs_dimension) noisy inputs tensor + sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor timestep (`torch.FloatTensor` or `float` or `int): batch (batch) timesteps return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple. @@ -156,24 +148,41 @@ def forward( """ sample = sample.permute(0, 2, 1) - t = self.time_mlp(timestep) + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) + elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # t = self.time_mlp(timesteps) + t = self.time_proj(timesteps) + t = self.time_mlp(t) + # t = self.time_embedding(timesteps) + # t = self.time_emb_lin1(t) + # t = self.time_emb_act(t) + # t = self.time_emb_lin2(t) h = [] + # 2. down for resnet, resnet2, downsample in self.downs: sample = resnet(sample, t) sample = resnet2(sample, t) h.append(sample) sample = downsample(sample) + # 3. mid sample = self.mid_block1(sample, t) sample = self.mid_block2(sample, t) + # 4. up for resnet, resnet2, upsample in self.ups: sample = torch.cat((sample, h.pop()), dim=1) sample = resnet(sample, t) sample = resnet2(sample, t) sample = upsample(sample) + # 5. post-process sample = self.final_conv(sample) sample = sample.permute(0, 2, 1) From 78652313cff7f59f02997182f3f282c563d129bf Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Fri, 7 Oct 2022 17:16:27 -0700 Subject: [PATCH 009/133] remove conv1d output sequential --- src/diffusers/models/resnet.py | 40 ++++++++++++++++----------------- src/diffusers/models/unet_rl.py | 23 ++++++++++--------- 2 files changed, 31 insertions(+), 32 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 7fd0f1db3d36..728c662ffdf6 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -438,27 +438,6 @@ def forward(self, x): return x * torch.tanh(torch.nn.functional.softplus(x)) -class Conv1dBlock(nn.Module): - """ - Conv1d --> GroupNorm --> Mish - """ - - def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): - super().__init__() - - self.block = nn.Sequential( - nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), - RearrangeDim(), - # Rearrange("batch channels horizon -> batch channels 1 horizon"), - nn.GroupNorm(n_groups, out_channels), - RearrangeDim(), - # Rearrange("batch channels 1 horizon -> batch channels horizon"), - nn.Mish(), - ) - - def forward(self, x): - return self.block(x) - class RearrangeDim(nn.Module): def __init__(self): @@ -484,6 +463,25 @@ def rearrange_dims(tensor): else: raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") +class Conv1dBlock(nn.Module): + """ + Conv1d --> GroupNorm --> Mish + """ + + def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): + super().__init__() + + self.block = nn.Sequential( + nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), + RearrangeDim(), + nn.GroupNorm(n_groups, out_channels), + RearrangeDim(), + nn.Mish(), + ) + + def forward(self, x): + 
return self.block(x) + # unet_rl.py class ResidualTemporalBlock(nn.Module): diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index e9fcc4c8535b..cf6079af35df 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -11,6 +11,7 @@ from ..modeling_utils import ModelMixin from ..utils import BaseOutput from .embeddings import get_timestep_embedding, Timesteps, TimestepEmbedding +from .resnet import rearrange_dims @dataclass class TemporalUNetOutput(BaseOutput): @@ -59,7 +60,7 @@ def forward(self, x): return self.block(x) -class TemporalUNet(ModelMixin, ConfigMixin): # (nn.Module): +class TemporalUNet(ModelMixin, ConfigMixin): @register_to_config def __init__( self, @@ -124,10 +125,10 @@ def __init__( if not is_last: training_horizon = training_horizon * 2 - self.final_conv = nn.Sequential( - Conv1dBlock(dim, dim, kernel_size=5), - nn.Conv1d(dim, transition_dim, 1), - ) + self.final_conv1d_1 = nn.Conv1d(dim, dim, 5, padding=2) + self.final_conv1d_gn = nn.GroupNorm(8, dim) + self.final_conv1d_act = nn.Mish() + self.final_conv1d_2 = nn.Conv1d(dim, transition_dim, 1) def forward( self, @@ -155,13 +156,8 @@ def forward( elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) - # t = self.time_mlp(timesteps) t = self.time_proj(timesteps) t = self.time_mlp(t) - # t = self.time_embedding(timesteps) - # t = self.time_emb_lin1(t) - # t = self.time_emb_act(t) - # t = self.time_emb_lin2(t) h = [] # 2. down @@ -183,7 +179,12 @@ def forward( sample = upsample(sample) # 5. post-process - sample = self.final_conv(sample) + sample = self.final_conv1d_1(sample) + sample = rearrange_dims(sample) + sample = self.final_conv1d_gn(sample) + sample = rearrange_dims(sample) + sample = self.final_conv1d_act(sample) + sample = self.final_conv1d_2(sample) sample = sample.permute(0, 2, 1) From 35b0a43c6ff3a90bebefb7a85a0f3e28a09ff4ca Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Fri, 7 Oct 2022 17:43:13 -0700 Subject: [PATCH 010/133] remove sequential from conv1dblock --- src/diffusers/models/resnet.py | 26 +++++++++++--------------- src/diffusers/models/unet_rl.py | 17 ----------------- 2 files changed, 11 insertions(+), 32 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 728c662ffdf6..ed54635fc398 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -471,17 +471,19 @@ class Conv1dBlock(nn.Module): def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): super().__init__() - self.block = nn.Sequential( - nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), - RearrangeDim(), - nn.GroupNorm(n_groups, out_channels), - RearrangeDim(), - nn.Mish(), - ) - def forward(self, x): - return self.block(x) + self.conv1d = nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2) + self.group_norm = nn.GroupNorm(n_groups, out_channels) + self.mish = nn.Mish() + + def forward(self, x): + x = self.conv1d(x) + x = rearrange_dims(x) + x = self.group_norm(x) + x = rearrange_dims(x) + x = self.mish(x) + return x # unet_rl.py class ResidualTemporalBlock(nn.Module): @@ -497,12 +499,6 @@ def __init__(self, inp_channels, out_channels, embed_dim, horizon, kernel_size=5 self.time_emb_act = nn.Mish() self.time_emb = nn.Linear(embed_dim, out_channels) - # self.time_mlp = nn.Sequential( - # nn.Mish(), - # nn.Linear(embed_dim, out_channels), - # RearrangeDim(), - # ) - self.residual_conv = ( 
nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() ) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index cf6079af35df..85e459fe6e5d 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -40,24 +40,7 @@ def forward(self, tensor): raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") -class Conv1dBlock(nn.Module): - """ - Conv1d --> GroupNorm --> Mish - """ - - def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): - super().__init__() - - self.block = nn.Sequential( - nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), - RearrangeDim(), - nn.GroupNorm(n_groups, out_channels), - RearrangeDim(), - nn.Mish(), - ) - def forward(self, x): - return self.block(x) class TemporalUNet(ModelMixin, ConfigMixin): From 9b1379d40f91c5626aca91a27a62cfa7428282a5 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Fri, 7 Oct 2022 17:45:27 -0700 Subject: [PATCH 011/133] style and deleting duplicated code --- src/diffusers/models/resnet.py | 19 +++---------------- src/diffusers/models/unet_rl.py | 23 +++-------------------- 2 files changed, 6 insertions(+), 36 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index ed54635fc398..831bb02eb566 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -439,20 +439,7 @@ def forward(self, x): -class RearrangeDim(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, tensor): - if len(tensor.shape) == 2: - return tensor[:, :, None] - if len(tensor.shape) == 3: - return tensor[:, :, None, :] - elif len(tensor.shape) == 4: - return tensor[:, :, 0, :] - else: - raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") - +# unet_rl.py def rearrange_dims(tensor): if len(tensor.shape) == 2: return tensor[:, :, None] @@ -463,6 +450,7 @@ def rearrange_dims(tensor): else: raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") + class Conv1dBlock(nn.Module): """ Conv1d --> GroupNorm --> Mish @@ -471,12 +459,10 @@ class Conv1dBlock(nn.Module): def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): super().__init__() - self.conv1d = nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2) self.group_norm = nn.GroupNorm(n_groups, out_channels) self.mish = nn.Mish() - def forward(self, x): x = self.conv1d(x) x = rearrange_dims(x) @@ -485,6 +471,7 @@ def forward(self, x): x = self.mish(x) return x + # unet_rl.py class ResidualTemporalBlock(nn.Module): def __init__(self, inp_channels, out_channels, embed_dim, horizon, kernel_size=5): diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 85e459fe6e5d..69f4c4cd37ed 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -10,9 +10,10 @@ from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from ..utils import BaseOutput -from .embeddings import get_timestep_embedding, Timesteps, TimestepEmbedding +from .embeddings import TimestepEmbedding, Timesteps from .resnet import rearrange_dims + @dataclass class TemporalUNetOutput(BaseOutput): """ @@ -25,24 +26,6 @@ class TemporalUNetOutput(BaseOutput): -class RearrangeDim(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, tensor): - if len(tensor.shape) == 2: - return tensor[:, :, None] - if len(tensor.shape) == 3: - return tensor[:, :, 
None, :] - elif len(tensor.shape) == 4: - return tensor[:, :, 0, :] - else: - raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") - - - - - class TemporalUNet(ModelMixin, ConfigMixin): @register_to_config def __init__( @@ -63,7 +46,7 @@ def __init__( self.clip_denoised = clip_denoised self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1) - self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4*dim, act_fn="mish", out_dim=dim) + self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4 * dim, act_fn="mish", out_dim=dim) dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] in_out = list(zip(dims[:-1], dims[1:])) From e97a61066d4093a22c741aa2ca8ed7fd5300d8cc Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Sat, 8 Oct 2022 09:09:48 -0700 Subject: [PATCH 012/133] clean files --- src/diffusers/models/resnet.py | 11 +++++++---- src/diffusers/models/unet_rl.py | 31 ++++++++++++++++++++++++------- 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 831bb02eb566..c4649647dd41 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -438,7 +438,6 @@ def forward(self, x): return x * torch.tanh(torch.nn.functional.softplus(x)) - # unet_rl.py def rearrange_dims(tensor): if len(tensor.shape) == 2: @@ -474,7 +473,7 @@ def forward(self, x): # unet_rl.py class ResidualTemporalBlock(nn.Module): - def __init__(self, inp_channels, out_channels, embed_dim, horizon, kernel_size=5): + def __init__(self, inp_channels, out_channels, embed_dim, kernel_size=5): super().__init__() self.blocks = nn.ModuleList( @@ -492,8 +491,12 @@ def __init__(self, inp_channels, out_channels, embed_dim, horizon, kernel_size=5 def forward(self, x, t): """ - x : [ batch_size x inp_channels x horizon ] t : [ batch_size x embed_dim ] returns: out : [ batch_size x - out_channels x horizon ] + Args: + x : [ batch_size x inp_channels x horizon ] + t : [ batch_size x embed_dim ] + + returns: + out : [ batch_size x out_channels x horizon ] """ t = self.time_emb_act(t) t = self.time_emb(t) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 69f4c4cd37ed..420a1661d526 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -25,8 +25,20 @@ class TemporalUNetOutput(BaseOutput): sample: torch.FloatTensor - class TemporalUNet(ModelMixin, ConfigMixin): + """ + A UNet for multi-dimensional temporal data. This model takes the batch over the `training_horizon`. + + Parameters: + training_horizon: horizon of training samples used for diffusion process. + transition_dim: state-dimension of samples to predict over + cond_dim: held dimension in input (e.g. 
for actions) -- TODO remove from pretrained
+        predict_epsilon: TODO remove from pretrained
+        clip_denoised: TODO remove from pretrained
+        dim: embedding dimension of model
+        dim_mults: dimension multiples of the up/down blocks
+    """
+
     @register_to_config
     def __init__(
         self,
@@ -45,6 +57,7 @@ def __init__(
         self.predict_epsilon = predict_epsilon
         self.clip_denoised = clip_denoised
 
+        # time
         self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1)
         self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4 * dim, act_fn="mish", out_dim=dim)
 
@@ -55,14 +68,15 @@ def __init__(
         self.ups = nn.ModuleList([])
         num_resolutions = len(in_out)
 
+        # down
         for ind, (dim_in, dim_out) in enumerate(in_out):
             is_last = ind >= (num_resolutions - 1)
 
             self.downs.append(
                 nn.ModuleList(
                     [
-                        ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim, horizon=training_horizon),
-                        ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim, horizon=training_horizon),
+                        ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim),
+                        ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim),
                         Downsample1D(dim_out, use_conv=True) if not is_last else nn.Identity(),
                     ]
                 )
@@ -71,18 +85,20 @@ def __init__(
         if not is_last:
             training_horizon = training_horizon // 2
 
+        # mid
         mid_dim = dims[-1]
-        self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim, horizon=training_horizon)
-        self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim, horizon=training_horizon)
+        self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim)
+        self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim)
 
+        # up
         for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])):
             is_last = ind >= (num_resolutions - 1)
 
             self.ups.append(
                 nn.ModuleList(
                     [
-                        ResidualTemporalBlock(dim_out * 2, dim_in, embed_dim=dim, horizon=training_horizon),
-                        ResidualTemporalBlock(dim_in, dim_in, embed_dim=dim, horizon=training_horizon),
+                        ResidualTemporalBlock(dim_out * 2, dim_in, embed_dim=dim),
+                        ResidualTemporalBlock(dim_in, dim_in, embed_dim=dim),
                         Upsample1D(dim_in, use_conv_transpose=True) if not is_last else nn.Identity(),
                     ]
                 )
@@ -91,6 +107,7 @@ def __init__(
         if not is_last:
             training_horizon = training_horizon * 2
 
+        # out
         self.final_conv1d_1 = nn.Conv1d(dim, dim, 5, padding=2)
         self.final_conv1d_gn = nn.GroupNorm(8, dim)
         self.final_conv1d_act = nn.Mish()

From f29ace484b6950daecc1e75578117676f23e0e4e Mon Sep 17 00:00:00 2001
From: Ben Glickenhaus
Date: Sat, 8 Oct 2022 12:15:04 -0400
Subject: [PATCH 013/133] valuefunction code

---
 scripts/run_diffuser.py         | 121 ++++++++++++++++++++++++++++++++
 src/diffusers/models/unet_rl.py | 117 ++++++++++++++++++++++++++++++
 2 files changed, 238 insertions(+)
 create mode 100644 scripts/run_diffuser.py

diff --git a/scripts/run_diffuser.py b/scripts/run_diffuser.py
new file mode 100644
index 000000000000..c672421c210c
--- /dev/null
+++ b/scripts/run_diffuser.py
@@ -0,0 +1,121 @@
+import d4rl
+
+import torch
+import tqdm
+import numpy as np
+import gym
+
+env_name = "hopper-medium-expert-v2"
+env = gym.make(env_name)
+data = env.get_dataset() # dataset is only used for normalization in this colab
+
+# Cuda settings for colab
+# torch.cuda.get_device_name(0)
+DEVICE = 'cpu'
+DTYPE = torch.float
+
+# diffusion model settings
+n_samples = 4 # number of trajectories planned via diffusion
+horizon = 128 # length of sampled trajectories
+state_dim = env.observation_space.shape[0]
+action_dim = env.action_space.shape[0]
+num_inference_steps = 100 # number of diffusion steps
+
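For orientation: once the loop at the end of this script finishes, the de-noised `x` holds `n_samples` candidate plans laid out as `(n_samples, horizon, action_dim + state_dim)`, with the action stored first in every transition (this is what `reset_x0` below relies on when it writes observations into `x_in[:, t, act_dim:]`). A minimal, hypothetical sketch of pulling the first executable action out of such a plan, assuming the d4rl dataset's `actions` key and the `de_normalize` helper defined next:

```python
# Hedged sketch -- `plan` is an assumed de-noised sample of shape
# (n_samples, horizon, action_dim + state_dim); not part of the original script.
def first_action(plan, data):
    action = plan[0, 0, :action_dim]  # first candidate plan, first timestep
    return de_normalize(action.cpu().numpy(), data, "actions")
```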
+def normalize(x_in, data, key):
+    upper = np.max(data[key], axis=0)
+    lower = np.min(data[key], axis=0)
+    x_out = 2*(x_in - lower)/(upper-lower) - 1
+    return x_out
+
+def de_normalize(x_in, data, key):
+    upper = np.max(data[key], axis=0)
+    lower = np.min(data[key], axis=0)
+    x_out = lower + (upper - lower)*(1 + x_in) /2
+    return x_out
+
+def to_torch(x_in, dtype=None, device=None):
+    dtype = dtype or DTYPE
+    device = device or DEVICE
+    if type(x_in) is dict:
+        return {k: to_torch(v, dtype, device) for k, v in x_in.items()}
+    elif torch.is_tensor(x_in):
+        return x_in.to(device).type(dtype)
+    return torch.tensor(x_in, dtype=dtype, device=device)
+
+obs = env.reset()
+obs_raw = obs
+
+# normalize observations for forward passes
+obs = normalize(obs, data, 'observations')
+
+from diffusers import DDPMScheduler, TemporalUNet
+
+# Two generators for different parts of the diffusion loop to work in colab
+generator = torch.Generator(device='cuda')
+generator_cpu = torch.Generator(device='cpu')
+
+scheduler = DDPMScheduler(num_train_timesteps=100,beta_schedule="squaredcos_cap_v2")
+
+# 3 different pretrained models are available for this task.
+# The horizon represents the length of trajectories used in training.
+network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE)
+# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE)
+# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE)
+def reset_x0(x_in, cond, act_dim):
+    for key, val in cond.items():
+        x_in[:, key, act_dim:] = val.clone()
+    return x_in
+
+# network specific constants for inference
+clip_denoised = network.clip_denoised
+predict_epsilon = network.predict_epsilon
+
+## add a batch dimension and repeat for multiple samples
+## [ observation_dim ] --> [ n_samples x observation_dim ]
+obs = obs[None].repeat(n_samples, axis=0)
+conditions = {
+    0: to_torch(obs, device=DEVICE)
+    }
+
+# constants for inference
+batch_size = len(conditions[0])
+shape = (batch_size, horizon, state_dim+action_dim)
+
+# sample random initial noise vector
+x1 = torch.randn(shape, device=DEVICE, generator=generator)
+
+# this model is conditioned from an initial state, so you will see this function
+# multiple times to change the initial state of generated data to the state
+# generated via env.reset() above or env.step() below
+x = reset_x0(x1, conditions, action_dim)
+
+# convert a np observation to torch for model forward pass
+x = to_torch(x)
+
+eta = 1.0 # noise factor for sampling reconstructed state
+
+# run the diffusion process
+# for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
+for i in tqdm.tqdm(scheduler.timesteps):
+
+    # create batch of timesteps to pass into model
+    timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long)
+
+    # 1. generate prediction from model
+    with torch.no_grad():
+        residual = network(x, timesteps).sample
+
+    # 2. use the model prediction to reconstruct an observation (de-noise)
+    obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"]
+
+    # 3.
[optional] add posterior noise to the sample + if eta > 0: + noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device) + posterior_variance = scheduler._get_variance(i) # * noise + # no noise when t == 0 + # NOTE: original implementation missing sqrt on posterior_variance + obs_reconstruct = obs_reconstruct + int(i>0) * (0.5 * posterior_variance) * eta* noise # MJ had as log var, exponentiated + + # 4. apply conditions to the trajectory + obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) + x = to_torch(obs_reconstruct_postcond) \ No newline at end of file diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 420a1661d526..a14489a5734e 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -175,3 +175,120 @@ def forward( return (sample,) return TemporalUNetOutput(sample=sample) + + +class ValueFunction(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + training_horizon=128, + transition_dim=14, + cond_dim=3, + predict_epsilon=False, + clip_denoised=True, + dim=32, + dim_mults=(1, 4, 8), + out_dim=1, + ): + super().__init__() + + self.transition_dim = transition_dim + self.cond_dim = cond_dim + self.predict_epsilon = predict_epsilon + self.clip_denoised = clip_denoised + + self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1) + self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4 * dim, act_fn="mish", out_dim=dim) + + dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] + in_out = list(zip(dims[:-1], dims[1:])) + + self.blocks = nn.ModuleList([]) + num_resolutions = len(in_out) + + for ind, (dim_in, dim_out) in enumerate(in_out): + is_last = ind >= (num_resolutions - 1) + + self.blocks.append( + nn.ModuleList( + [ + ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim, horizon=training_horizon), + ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim, horizon=training_horizon), + Downsample1D(dim_out, use_conv=True), + ] + ) + ) + + if not is_last: + training_horizon = training_horizon // 2 + + mid_dim = dims[-1] + mid_dim_2 = mid_dim // 2 + mid_dim_3 = mid_dim // 4 + ## + self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim_2, embed_dim=dim, horizon=training_horizon) + self.mid_down1 = Downsample1D(mid_dim_2, use_conv=True) + training_horizon = training_horizon // 2 + ## + self.mid_block2 = ResidualTemporalBlock(mid_dim_2, mid_dim_3, embed_dim=dim, horizon=training_horizon) + self.mid_down2 = Downsample1D(mid_dim_3, use_conv=True) + training_horizon = training_horizon // 2 + ## + fc_dim = mid_dim_3 * max(training_horizon, 1) + self.final_block = nn.ModuleList( + nn.Linear(fc_dim + dim, fc_dim // 2), + nn.Mish(), + nn.Linear(fc_dim // 2, out_dim), + ) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + return_dict: bool = True, + ) -> Union[TemporalUNetOutput, Tuple]: + """r + Args: + sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor + timestep (`torch.FloatTensor` or `float` or `int): batch (batch) timesteps + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple. + + Returns: + [`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
+ """ + sample = sample.permute(0, 2, 1) + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) + elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + t = self.time_proj(timesteps) + t = self.time_mlp(t) + h = [] + + # 2. down + for resnet, resnet2, downsample in self.blocks: + sample = resnet(sample, t) + sample = resnet2(sample, t) + h.append(sample) + sample = downsample(sample) + + # 3. mid + sample = self.mid_block1(sample, t) + sample = self.mid_down1(sample) + sample = self.mid_block2(sample, t) + sample = self.mid_down2(sample) + + sample = sample.view(sample.shape[0], -1) + sample = torch.cat((sample, t), dim=1) + sample = self.final_block(sample) + + if not return_dict: + return (sample,) + + return TemporalUNetOutput(sample=sample) From 1684e8b870549cc04f9596036a97476ab9e4e123 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Sat, 8 Oct 2022 12:26:25 -0400 Subject: [PATCH 014/133] start example scripts --- .../diffuser}/run_diffuser.py | 0 examples/diffuser/train_diffuser.py | 75 +++++++++++++++++++ 2 files changed, 75 insertions(+) rename {scripts => examples/diffuser}/run_diffuser.py (100%) create mode 100644 examples/diffuser/train_diffuser.py diff --git a/scripts/run_diffuser.py b/examples/diffuser/run_diffuser.py similarity index 100% rename from scripts/run_diffuser.py rename to examples/diffuser/run_diffuser.py diff --git a/examples/diffuser/train_diffuser.py b/examples/diffuser/train_diffuser.py new file mode 100644 index 000000000000..902f5ec7357c --- /dev/null +++ b/examples/diffuser/train_diffuser.py @@ -0,0 +1,75 @@ +import d4rl + +import torch +import tqdm +import numpy as np +import gym +from accelerate import Accelerator +env_name = "hopper-medium-expert-v2" +env = gym.make(env_name) +data = env.get_dataset() # dataset is only used for normalization in this colab + +# Cuda settings for colab +# torch.cuda.get_device_name(0) +DEVICE = 'cpu' +DTYPE = torch.float + +# diffusion model settings +n_samples = 4 # number of trajectories planned via diffusion +horizon = 128 # length of sampled trajectories +state_dim = env.observation_space.shape[0] +action_dim = env.action_space.shape[0] +num_inference_steps = 100 # number of difusion steps + +def normalize(x_in, data, key): + upper = np.max(data[key], axis=0) + lower = np.min(data[key], axis=0) + x_out = 2*(x_in - lower)/(upper-lower) - 1 + return x_out + +def de_normalize(x_in, data, key): + upper = np.max(data[key], axis=0) + lower = np.min(data[key], axis=0) + x_out = lower + (upper - lower)*(1 + x_in) /2 + return x_out + +def to_torch(x_in, dtype=None, device=None): + dtype = dtype or DTYPE + device = device or DEVICE + if type(x_in) is dict: + return {k: to_torch(v, dtype, device) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(device).type(dtype) + return torch.tensor(x_in, dtype=dtype, device=device) + +obs = env.reset() +obs_raw = obs + +# normalize observations for forward passes +obs = normalize(obs, data, 'observations') + +from diffusers import DDPMScheduler, TemporalUNet + +# Two generators for different parts of the diffusion loop to work in colab +generator = torch.Generator(device='cuda') +generator_cpu = torch.Generator(device='cpu') +network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) + +scheduler = DDPMScheduler(num_train_timesteps=100,beta_schedule="squaredcos_cap_v2") 
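The training loop itself is still the TODO at the bottom of this file; the sketch below shows the denoising objective such a loop would minimize. It is a minimal sketch only: it assumes a hypothetical `trajectory_loader` that yields normalized `(batch, horizon, state_dim + action_dim)` tensors, uses the optimizer defined just below, and picks the regression target from the network's `predict_epsilon` flag.

```python
# Hedged sketch of one training pass; `trajectory_loader` is an assumed
# DataLoader over normalized trajectories, not defined in this script.
for clean_trajectories in trajectory_loader:
    optimizer.zero_grad()
    noise = torch.randn(clean_trajectories.shape)
    steps = torch.randint(0, scheduler.config.num_train_timesteps, (clean_trajectories.shape[0],))
    # forward-diffuse the clean trajectories to a random noise level
    noisy_trajectories = scheduler.add_noise(clean_trajectories, noise, steps)
    prediction = network(noisy_trajectories, steps).sample
    # the model regresses the added noise when predict_epsilon, else x0 itself
    target = noise if network.predict_epsilon else clean_trajectories
    loss = torch.nn.functional.mse_loss(prediction, target)
    loss.backward()
    optimizer.step()
```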
+optimizer = torch.optim.AdamW( + network.parameters(), + lr=0.001, + betas=(0.95, 0.99), + weight_decay=1e-6, + eps=1e-8, + ) +# 3 different pretrained models are available for this task. +# The horizion represents the length of trajectories used in training. +# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) +# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) +def reset_x0(x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + +# TODO: Flesh this out using accelerate library (a la other examples) \ No newline at end of file From c7579858272b57c564b8fedc94cbc8b31ff91752 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Sat, 8 Oct 2022 12:46:34 -0400 Subject: [PATCH 015/133] missing imports --- src/diffusers/__init__.py | 2 +- src/diffusers/models/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index e6b920a31b4c..d3419860d48d 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .modeling_utils import ModelMixin - from .models import AutoencoderKL, TemporalUNet, UNet2DConditionModel, UNet2DModel, VQModel + from .models import AutoencoderKL, TemporalUNet, UNet2DConditionModel, UNet2DModel, VQModel, ValueFunction from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index 47f7fa71682b..4bedc43e9007 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel - from .unet_rl import TemporalUNet + from .unet_rl import TemporalUNet, ValueFunction from .vae import AutoencoderKL, VQModel if is_flax_available(): From b3159182e97790c54baae5c5f6d4d45cf95b9c18 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Sat, 8 Oct 2022 12:58:37 -0400 Subject: [PATCH 016/133] bug fixes and placeholder example script --- .../diffuser/run_diffuser_value_guided.py | 121 ++++++++++++++++++ src/diffusers/models/unet_rl.py | 12 +- 2 files changed, 127 insertions(+), 6 deletions(-) create mode 100644 examples/diffuser/run_diffuser_value_guided.py diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py new file mode 100644 index 000000000000..a18678ee4b9a --- /dev/null +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -0,0 +1,121 @@ +import d4rl + +import torch +import tqdm +import numpy as np +import gym + +env_name = "hopper-medium-expert-v2" +env = gym.make(env_name) +data = env.get_dataset() # dataset is only used for normalization in this colab + +# Cuda settings for colab +# torch.cuda.get_device_name(0) +DEVICE = 'cpu' +DTYPE = torch.float + +# diffusion model settings +n_samples = 4 # number of trajectories planned via diffusion +horizon = 128 # length of sampled trajectories +state_dim = env.observation_space.shape[0] +action_dim = env.action_space.shape[0] +num_inference_steps = 100 # number of difusion steps + +def normalize(x_in, data, key): + upper = np.max(data[key], axis=0) + lower = np.min(data[key], axis=0) + x_out = 2*(x_in - lower)/(upper-lower) - 1 + return x_out + +def de_normalize(x_in, data, key): + upper = np.max(data[key], axis=0) + lower = np.min(data[key], axis=0) + x_out = 
lower + (upper - lower)*(1 + x_in) /2
+    return x_out
+
+def to_torch(x_in, dtype=None, device=None):
+    dtype = dtype or DTYPE
+    device = device or DEVICE
+    if type(x_in) is dict:
+        return {k: to_torch(v, dtype, device) for k, v in x_in.items()}
+    elif torch.is_tensor(x_in):
+        return x_in.to(device).type(dtype)
+    return torch.tensor(x_in, dtype=dtype, device=device)
+
+obs = env.reset()
+obs_raw = obs
+
+# normalize observations for forward passes
+obs = normalize(obs, data, 'observations')
+
+from diffusers import DDPMScheduler, TemporalUNet, ValueFunction
+
+# Two generators for different parts of the diffusion loop to work in colab
+# generator = torch.Generator(device='cuda')
+generator_cpu = torch.Generator(device='cpu')
+
+scheduler = DDPMScheduler(num_train_timesteps=100,beta_schedule="squaredcos_cap_v2")
+
+# 3 different pretrained models are available for this task.
+# The horizon represents the length of trajectories used in training.
+network = ValueFunction.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE)
+# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE)
+# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE)
+def reset_x0(x_in, cond, act_dim):
+    for key, val in cond.items():
+        x_in[:, key, act_dim:] = val.clone()
+    return x_in
+
+# network specific constants for inference
+clip_denoised = network.clip_denoised
+predict_epsilon = network.predict_epsilon
+
+## add a batch dimension and repeat for multiple samples
+## [ observation_dim ] --> [ n_samples x observation_dim ]
+obs = obs[None].repeat(n_samples, axis=0)
+conditions = {
+    0: to_torch(obs, device=DEVICE)
+    }
+
+# constants for inference
+batch_size = len(conditions[0])
+shape = (batch_size, horizon, state_dim+action_dim)
+
+# sample random initial noise vector
+x1 = torch.randn(shape, device=DEVICE, generator=generator)
+
+# this model is conditioned from an initial state, so you will see this function
+# multiple times to change the initial state of generated data to the state
+# generated via env.reset() above or env.step() below
+x = reset_x0(x1, conditions, action_dim)
+
+# convert a np observation to torch for model forward pass
+x = to_torch(x)
+
+eta = 1.0 # noise factor for sampling reconstructed state
+
+# run the diffusion process
+# for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
+for i in tqdm.tqdm(scheduler.timesteps):
+
+    # create batch of timesteps to pass into model
+    timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long)
+
+    # 1. generate prediction from model
+    with torch.no_grad():
+        residual = network(x, timesteps).sample
+
+    # 2. use the model prediction to reconstruct an observation (de-noise)
+    obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"]
+
+    # 3. [optional] add posterior noise to the sample
+    if eta > 0:
+        noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device)
+        posterior_variance = scheduler._get_variance(i) # * noise
+        # no noise when t == 0
+        # NOTE: original implementation missing sqrt on posterior_variance
+        obs_reconstruct = obs_reconstruct + int(i>0) * (0.5 * posterior_variance) * eta* noise # MJ had as log var, exponentiated
+
+    # 4.
apply conditions to the trajectory + obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) + x = to_torch(obs_reconstruct_postcond) \ No newline at end of file diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index a14489a5734e..7fcade5b79c1 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -212,8 +212,8 @@ def __init__( self.blocks.append( nn.ModuleList( [ - ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim, horizon=training_horizon), - ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim, horizon=training_horizon), + ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim), + ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim), Downsample1D(dim_out, use_conv=True), ] ) @@ -226,19 +226,19 @@ def __init__( mid_dim_2 = mid_dim // 2 mid_dim_3 = mid_dim // 4 ## - self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim_2, embed_dim=dim, horizon=training_horizon) + self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim_2, embed_dim=dim) self.mid_down1 = Downsample1D(mid_dim_2, use_conv=True) training_horizon = training_horizon // 2 ## - self.mid_block2 = ResidualTemporalBlock(mid_dim_2, mid_dim_3, embed_dim=dim, horizon=training_horizon) + self.mid_block2 = ResidualTemporalBlock(mid_dim_2, mid_dim_3, embed_dim=dim) self.mid_down2 = Downsample1D(mid_dim_3, use_conv=True) training_horizon = training_horizon // 2 ## fc_dim = mid_dim_3 * max(training_horizon, 1) - self.final_block = nn.ModuleList( + self.final_block = nn.ModuleList([ nn.Linear(fc_dim + dim, fc_dim // 2), nn.Mish(), - nn.Linear(fc_dim // 2, out_dim), + nn.Linear(fc_dim // 2, out_dim),] ) def forward( From f01c014f837f77587c51fe6f4783cf4b142e7943 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Sun, 9 Oct 2022 14:15:20 -0400 Subject: [PATCH 017/133] add value function scheduler --- .../diffuser/run_diffuser_value_guided.py | 29 +- src/diffusers/__init__.py | 1 + src/diffusers/models/unet_rl.py | 5 +- src/diffusers/schedulers/__init__.py | 1 + .../schedulers/scheduling_value_function.py | 299 ++++++++++++++++++ 5 files changed, 325 insertions(+), 10 deletions(-) create mode 100644 src/diffusers/schedulers/scheduling_value_function.py diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index a18678ee4b9a..3093abffb55e 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -4,6 +4,13 @@ import tqdm import numpy as np import gym +from diffusers import DDPMScheduler, TemporalUNet, ValueFunction, ValueFunctionScheduler + + +# model = torch.load("../diffuser/test.torch") +# hf_value_function = ValueFunction(training_horizon=32, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) +# hf_value_function.load_state_dict(model.state_dict()) +# hf_value_function.to_hub("bglick13/hf_value_function") env_name = "hopper-medium-expert-v2" env = gym.make(env_name) @@ -16,7 +23,7 @@ # diffusion model settings n_samples = 4 # number of trajectories planned via diffusion -horizon = 128 # length of sampled trajectories +horizon = 32 # length of sampled trajectories state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] num_inference_steps = 100 # number of difusion steps @@ -48,17 +55,18 @@ def to_torch(x_in, dtype=None, device=None): # normalize observations for forward passes obs = normalize(obs, data, 'observations') -from diffusers import DDPMScheduler, TemporalUNet, ValueFunction # Two generators 
for different parts of the diffusion loop to work in colab
 # generator = torch.Generator(device='cuda')
 generator_cpu = torch.Generator(device='cpu')

-scheduler = DDPMScheduler(num_train_timesteps=100,beta_schedule="squaredcos_cap_v2")
+scheduler = ValueFunctionScheduler(num_train_timesteps=100,beta_schedule="squaredcos_cap_v2", clip_sample=False)

 # 3 different pretrained models are available for this task.
 # The horizon represents the length of trajectories used in training.
-network = ValueFunction.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE)
+network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11)
+
+# network = ValueFunction.from_pretrained("/Users/bglickenhaus/Documents/diffuser/logs/hopper-medium-v2/values/defaults_H32_T20_d0.997").to(device=DEVICE)
 # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE)
 # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE)
 def reset_x0(x_in, cond, act_dim):
@@ -82,7 +90,7 @@ def reset_x0(x_in, cond, act_dim):
 shape = (batch_size, horizon, state_dim+action_dim)

 # sample random initial noise vector
-x1 = torch.randn(shape, device=DEVICE, generator=generator)
+x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu)

 # this model is conditioned from an initial state, so you will see this function
 # multiple times to change the initial state of generated data to the state
@@ -102,11 +110,16 @@ def reset_x0(x_in, cond, act_dim):
     timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long)

     # 1. generate prediction from model
-    with torch.no_grad():
-        residual = network(x, timesteps).sample
+    with torch.enable_grad():
+        x.requires_grad_()
+        y = network(x, timesteps).sample
+        grad = torch.autograd.grad([y.sum()], [x])[0]
+        # tile to (batch_size, 128, 14)
+        x.detach()
+        pass

     # 2. use the model prediction to reconstruct an observation (de-noise)
-    obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"]
+    obs_reconstruct = scheduler.step(grad, i, x, predict_epsilon=predict_epsilon)["prev_sample"]

     # 3. 
[optional] add posterior noise to the sample if eta > 0: diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index d3419860d48d..664b62dd29c9 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -37,6 +37,7 @@ PNDMScheduler, SchedulerMixin, ScoreSdeVeScheduler, + ValueFunctionScheduler ) from .training_utils import EMAModel else: diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 7fcade5b79c1..8b0ee02966c2 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -285,8 +285,9 @@ def forward( sample = self.mid_down2(sample) sample = sample.view(sample.shape[0], -1) - sample = torch.cat((sample, t), dim=1) - sample = self.final_block(sample) + sample = torch.cat((sample, t), dim=-1) + for layer in self.final_block: + sample = layer(sample) if not return_dict: return (sample,) diff --git a/src/diffusers/schedulers/__init__.py b/src/diffusers/schedulers/__init__.py index a906c39eb24c..c4770de538cc 100644 --- a/src/diffusers/schedulers/__init__.py +++ b/src/diffusers/schedulers/__init__.py @@ -24,6 +24,7 @@ from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_utils import SchedulerMixin + from .scheduling_value_function import ValueFunctionScheduler else: from ..utils.dummy_pt_objects import * # noqa F403 diff --git a/src/diffusers/schedulers/scheduling_value_function.py b/src/diffusers/schedulers/scheduling_value_function.py new file mode 100644 index 000000000000..bf2fe3ec0412 --- /dev/null +++ b/src/diffusers/schedulers/scheduling_value_function.py @@ -0,0 +1,299 @@ +# Copyright 2022 UC Berkeley Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +import math +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils import SchedulerMixin + + +@dataclass +class ValueFunctionSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample (x_{0}) based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. 
+ """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + + def alpha_bar(time_step): + return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class ValueFunctionScheduler(SchedulerMixin, ConfigMixin): + """ + Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and + Langevin dynamics sampling. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and + [`~ConfigMixin.from_config`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2006.11239 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + variance_type (`str`): + options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, + `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. + clip_sample (`bool`, default `True`): + option to clip predicted sample between -1 and 1 for numerical stability. + + """ + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[np.ndarray] = None, + variance_type: str = "fixed_small", + clip_sample: bool = True, + **kwargs, + ): + if "tensor_format" in kwargs: + warnings.warn( + "`tensor_format` is deprecated as an argument and will be removed in version `0.5.0`." + "If you're running your code in PyTorch, you can safely remove this argument.", + DeprecationWarning, + ) + + if trained_betas is not None: + self.betas = torch.from_numpy(trained_betas) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+            self.betas = (
+                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
+            )
+        elif beta_schedule == "squaredcos_cap_v2":
+            # Glide cosine schedule
+            self.betas = betas_for_alpha_bar(num_train_timesteps)
+        else:
+            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
+
+        self.alphas = 1.0 - self.betas
+        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
+        self.one = torch.tensor(1.0)
+
+        # setable values
+        self.num_inference_steps = None
+        self.timesteps = np.arange(0, num_train_timesteps)[::-1]
+
+        self.variance_type = variance_type
+
+    def set_timesteps(self, num_inference_steps: int):
+        """
+        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
+
+        Args:
+            num_inference_steps (`int`):
+                the number of diffusion steps used when generating samples with a pre-trained model.
+        """
+        num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps)
+        self.num_inference_steps = num_inference_steps
+        self.timesteps = np.arange(
+            0, self.config.num_train_timesteps, self.config.num_train_timesteps // self.num_inference_steps
+        )[::-1]
+
+    def _get_variance(self, t, predicted_variance=None, variance_type=None):
+        alpha_prod_t = self.alphas_cumprod[t]
+        alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
+
+        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
+        # and sample from it to get previous sample
+        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
+        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t]
+
+        if variance_type is None:
+            variance_type = self.config.variance_type
+
+        # hacks - were probably added for training stability
+        if variance_type == "fixed_small":
+            variance = torch.clamp(variance, min=1e-20)
+        # for rl-diffuser https://arxiv.org/abs/2205.09991
+        elif variance_type == "fixed_small_log":
+            variance = torch.log(torch.clamp(variance, min=1e-20))
+        elif variance_type == "fixed_large":
+            variance = self.betas[t]
+        elif variance_type == "fixed_large_log":
+            # Glide max_log
+            variance = torch.log(self.betas[t])
+        elif variance_type == "learned":
+            return predicted_variance
+        elif variance_type == "learned_range":
+            min_log = variance
+            max_log = self.betas[t]
+            frac = (predicted_variance + 1) / 2
+            variance = frac * max_log + (1 - frac) * min_log
+
+        return variance
+
+    def step(
+        self,
+        model_output: torch.FloatTensor,
+        timestep: int,
+        sample: torch.FloatTensor,
+        predict_epsilon=True,
+        generator=None,
+        scale=0.001,
+        return_dict: bool = True,
+    ) -> Union[ValueFunctionSchedulerOutput, Tuple]:
+        """
+        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
+        process from the learned model outputs (most often the predicted noise).
+
+        Args:
+            model_output (`torch.FloatTensor`): direct output from learned diffusion model.
+            timestep (`int`): current discrete timestep in the diffusion chain.
+            sample (`torch.FloatTensor`):
+                current instance of sample being created by diffusion process.
+            predict_epsilon (`bool`):
+                optional flag to use when model predicts the samples directly instead of the noise, epsilon.
+            generator: random number generator. 
+ return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class + + Returns: + [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`: + [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. + + """ + t = timestep + + if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: + model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) + else: + predicted_variance = None + + # 1. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + # 2. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + + pred_original_sample = sample + scale * model_output + + # 3. Clip "predicted x_0" + if self.config.clip_sample: + pred_original_sample = torch.clamp(pred_original_sample, -1, 1) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t + current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample µ_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + # 6. Add noise + variance = 0 + if t > 0: + noise = torch.randn( + model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator + ).to(model_output.device) + variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise + + pred_prev_sample = pred_prev_sample + variance + + if not return_dict: + return (pred_prev_sample,) + + return ValueFunctionSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.IntTensor, + ) -> torch.FloatTensor: + if self.alphas_cumprod.device != original_samples.device: + self.alphas_cumprod = self.alphas_cumprod.to(original_samples.device) + + if timesteps.device != original_samples.device: + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps From 7b60c9322e12304bc4109b4ba9a4d6d35bd84ff7 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Sun, 9 Oct 2022 18:47:50 -0400 Subject: [PATCH 018/133] load value function from hub and get best actions in example --- examples/diffuser/run_diffuser_value_guided.py | 13 +++++++++---- 
.../schedulers/scheduling_value_function.py | 1 - 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 3093abffb55e..91ff0f2bced9 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -64,9 +64,9 @@ def to_torch(x_in, dtype=None, device=None): # 3 different pretrained models are available for this task. # The horizion represents the length of trajectories used in training. -network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) +# network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) -# network = ValueFunction.from_pretrained("/Users/bglickenhaus/Documents/diffuser/logs/hopper-medium-v2/values/defaults_H32_T20_d0.997").to(device=DEVICE) +network = ValueFunction.from_pretrained("bglick13/hopper-medium-expert-v2-value-function-hor32").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) def reset_x0(x_in, cond, act_dim): @@ -119,7 +119,7 @@ def reset_x0(x_in, cond, act_dim): pass # 2. use the model prediction to reconstruct an observation (de-noise) - obs_reconstruct = scheduler.step(grad, i, x, predict_epsilon=predict_epsilon)["prev_sample"] + obs_reconstruct = scheduler.step(grad, i, x)["prev_sample"] # 3. [optional] add posterior noise to the sample if eta > 0: @@ -131,4 +131,9 @@ def reset_x0(x_in, cond, act_dim): # 4. apply conditions to the trajectory obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) - x = to_torch(obs_reconstruct_postcond) \ No newline at end of file + x = to_torch(obs_reconstruct_postcond) +sorted_idx = y.argsort(-1, descending=True).squeeze() +sorted_values = x[sorted_idx] +actions = sorted_values[:, :, :action_dim] +actions = de_normalize(actions[0, 0].detach().numpy(), data, key='actions') +obs, reward, is_done, info = env.step(actions) diff --git a/src/diffusers/schedulers/scheduling_value_function.py b/src/diffusers/schedulers/scheduling_value_function.py index bf2fe3ec0412..fc56a2b18a9f 100644 --- a/src/diffusers/schedulers/scheduling_value_function.py +++ b/src/diffusers/schedulers/scheduling_value_function.py @@ -199,7 +199,6 @@ def step( model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, - predict_epsilon=True, generator=None, scale=0.001, return_dict: bool = True, From 8642560db0223e1a42d270d584e94e2cadbdf4ed Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 10 Oct 2022 11:10:37 -0700 Subject: [PATCH 019/133] remove unused variables --- src/diffusers/models/unet_rl.py | 11 +---------- tests/test_models_unet.py | 4 ---- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 420a1661d526..c926ef3a67c5 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -32,9 +32,6 @@ class TemporalUNet(ModelMixin, ConfigMixin): Parameters: training_horizon: horizon of training samples used for diffusion process. transition_dim: state-dimension of samples to predict over - cond_dim: held dimension in input (e.g. 
for actions) -- TODO remove from pretrained - predict_epsilon: TODO remove from pretrained - clip_denoised: TODO remove from pretrained dim: embedding dimension of model dim_mults: dimension multiples of the up/down blocks """ @@ -44,18 +41,12 @@ def __init__( self, training_horizon=128, transition_dim=14, - cond_dim=3, - predict_epsilon=False, - clip_denoised=True, dim=32, dim_mults=(1, 4, 8), ): super().__init__() self.transition_dim = transition_dim - self.cond_dim = cond_dim - self.predict_epsilon = predict_epsilon - self.clip_denoised = clip_denoised # time self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1) @@ -119,7 +110,7 @@ def forward( timestep: Union[torch.Tensor, float, int], return_dict: bool = True, ) -> Union[TemporalUNetOutput, Tuple]: - """r + r""" Args: sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor timestep (`torch.FloatTensor` or `float` or `int): batch (batch) timesteps diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 12f38ab4e557..f9390bcbdc33 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -407,13 +407,9 @@ def test_training(self): def prepare_init_args_and_inputs_for_common(self): init_dict = { - "training_horizon": 128, "dim": 32, "dim_mults": [1, 4, 8], - "predict_epsilon": False, - "clip_denoised": True, "transition_dim": 14, - "cond_dim": 3, } inputs_dict = self.dummy_input return init_dict, inputs_dict From f58c91529c431946c53b82aa0a67366f2f3ddc2c Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 10 Oct 2022 11:26:19 -0700 Subject: [PATCH 020/133] clean variables --- src/diffusers/models/unet_rl.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index c926ef3a67c5..5560bd90371e 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -30,7 +30,6 @@ class TemporalUNet(ModelMixin, ConfigMixin): A UNet for multi-dimensional temporal data. This model takes the batch over the `training_horizon`. Parameters: - training_horizon: horizon of training samples used for diffusion process. 
transition_dim: state-dimension of samples to predict over dim: embedding dimension of model dim_mults: dimension multiples of the up/down blocks @@ -39,7 +38,6 @@ class TemporalUNet(ModelMixin, ConfigMixin): @register_to_config def __init__( self, - training_horizon=128, transition_dim=14, dim=32, dim_mults=(1, 4, 8), @@ -55,15 +53,15 @@ def __init__( dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] in_out = list(zip(dims[:-1], dims[1:])) - self.downs = nn.ModuleList([]) - self.ups = nn.ModuleList([]) + self.down_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) num_resolutions = len(in_out) # down for ind, (dim_in, dim_out) in enumerate(in_out): is_last = ind >= (num_resolutions - 1) - self.downs.append( + self.down_blocks.append( nn.ModuleList( [ ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim), @@ -73,9 +71,6 @@ def __init__( ) ) - if not is_last: - training_horizon = training_horizon // 2 - # mid mid_dim = dims[-1] self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim) @@ -85,7 +80,7 @@ def __init__( for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): is_last = ind >= (num_resolutions - 1) - self.ups.append( + self.up_blocks.append( nn.ModuleList( [ ResidualTemporalBlock(dim_out * 2, dim_in, embed_dim=dim), @@ -95,9 +90,6 @@ def __init__( ) ) - if not is_last: - training_horizon = training_horizon * 2 - # out self.final_conv1d_1 = nn.Conv1d(dim, dim, 5, padding=2) self.final_conv1d_gn = nn.GroupNorm(8, dim) @@ -135,7 +127,7 @@ def forward( h = [] # 2. down - for resnet, resnet2, downsample in self.downs: + for resnet, resnet2, downsample in self.down_blocks: sample = resnet(sample, t) sample = resnet2(sample, t) h.append(sample) @@ -146,7 +138,7 @@ def forward( sample = self.mid_block2(sample, t) # 4. 
up - for resnet, resnet2, upsample in self.ups: + for resnet, resnet2, upsample in self.up_blocks: sample = torch.cat((sample, h.pop()), dim=1) sample = resnet(sample, t) sample = resnet2(sample, t) From 0de435e0a12e8500474e4803748b215188ff0f14 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 10 Oct 2022 15:20:09 -0400 Subject: [PATCH 021/133] very close to working example --- examples/diffuser/helpers.py | 186 ++++++++++++++++++ .../diffuser/run_diffuser_value_guided.py | 170 +++++++++------- .../schedulers/scheduling_value_function.py | 4 +- 3 files changed, 292 insertions(+), 68 deletions(-) create mode 100644 examples/diffuser/helpers.py diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py new file mode 100644 index 000000000000..ef853d3a6039 --- /dev/null +++ b/examples/diffuser/helpers.py @@ -0,0 +1,186 @@ +import os +import mediapy as media +import numpy as np +import torch +import gym +import warnings +def to_np(x_in): + if torch.is_tensor(x_in): + x_in = x_in.detach().cpu().numpy() + return x_in + +# from MJ's Diffuser code +# https://github.com/jannerm/diffuser/blob/76ae49ae85ba1c833bf78438faffdc63b8b4d55d/diffuser/utils/colab.py#L79 +def mkdir(savepath): + """ + returns `True` iff `savepath` is created + """ + if not os.path.exists(savepath): + os.makedirs(savepath) + return True + else: + return False + + +def show_sample(renderer, observations, filename='sample.mp4', savebase='videos'): + ''' + observations : [ batch_size x horizon x observation_dim ] + ''' + + mkdir(savebase) + savepath = os.path.join(savebase, filename) + + images = [] + for rollout in observations: + ## [ horizon x height x width x channels ] + img = renderer._renders(rollout, partial=True) + images.append(img) + + ## [ horizon x height x (batch_size * width) x channels ] + images = np.concatenate(images, axis=2) + media.write_video(savepath, images, fps=60) + media.show_video(images, codec='h264', fps=60) + +# Code adapted from Michael Janner +# source: https://github.com/jannerm/diffuser/blob/main/diffuser/utils/rendering.py +import mujoco_py as mjc + +def env_map(env_name): + ''' + map D4RL dataset names to custom fully-observed + variants for rendering + ''' + if 'halfcheetah' in env_name: + return 'HalfCheetahFullObs-v2' + elif 'hopper' in env_name: + return 'HopperFullObs-v2' + elif 'walker2d' in env_name: + return 'Walker2dFullObs-v2' + else: + return env_name + +def get_image_mask(img): + background = (img == 255).all(axis=-1, keepdims=True) + mask = ~background.repeat(3, axis=-1) + return mask + +def atmost_2d(x): + while x.ndim > 2: + x = x.squeeze(0) + return x + +def set_state(env, state): + qpos_dim = env.sim.data.qpos.size + qvel_dim = env.sim.data.qvel.size + if not state.size == qpos_dim + qvel_dim: + warnings.warn( + f'[ utils/rendering ] Expected state of size {qpos_dim + qvel_dim}, ' + f'but got state of size {state.size}') + state = state[:qpos_dim + qvel_dim] + + env.set_state(state[:qpos_dim], state[qpos_dim:]) + +class MuJoCoRenderer: + ''' + default mujoco renderer + ''' + + def __init__(self, env): + if type(env) is str: + env = env_map(env) + self.env = gym.make(env) + else: + self.env = env + ## - 1 because the envs in renderer are fully-observed + ## @TODO : clean up + self.observation_dim = np.prod(self.env.observation_space.shape) - 1 + self.action_dim = np.prod(self.env.action_space.shape) + try: + self.viewer = mjc.MjRenderContextOffscreen(self.env.sim) + except: + print('[ utils/rendering ] Warning: could not initialize offscreen renderer') + 
self.viewer = None
+
+    def pad_observation(self, observation):
+        state = np.concatenate([
+            np.zeros(1),
+            observation,
+        ])
+        return state
+
+    def pad_observations(self, observations):
+        qpos_dim = self.env.sim.data.qpos.size
+        ## xpos is hidden
+        xvel_dim = qpos_dim - 1
+        xvel = observations[:, xvel_dim]
+        xpos = np.cumsum(xvel) * self.env.dt
+        states = np.concatenate([
+            xpos[:,None],
+            observations,
+        ], axis=-1)
+        return states
+
+    def render(self, observation, dim=256, partial=False, qvel=True, render_kwargs=None, conditions=None):
+
+        if type(dim) == int:
+            dim = (dim, dim)
+
+        if self.viewer is None:
+            return np.zeros((*dim, 3), np.uint8)
+
+        if render_kwargs is None:
+            xpos = observation[0] if not partial else 0
+            render_kwargs = {
+                'trackbodyid': 2,
+                'distance': 3,
+                'lookat': [xpos, -0.5, 1],
+                'elevation': -20
+            }
+
+        for key, val in render_kwargs.items():
+            if key == 'lookat':
+                self.viewer.cam.lookat[:] = val[:]
+            else:
+                setattr(self.viewer.cam, key, val)
+
+        if partial:
+            state = self.pad_observation(observation)
+        else:
+            state = observation
+
+        qpos_dim = self.env.sim.data.qpos.size
+        if not qvel or state.shape[-1] == qpos_dim:
+            qvel_dim = self.env.sim.data.qvel.size
+            state = np.concatenate([state, np.zeros(qvel_dim)])
+
+        set_state(self.env, state)
+
+        self.viewer.render(*dim)
+        data = self.viewer.read_pixels(*dim, depth=False)
+        data = data[::-1, :, :]
+        return data
+
+    def _renders(self, observations, **kwargs):
+        images = []
+        for observation in observations:
+            img = self.render(observation, **kwargs)
+            images.append(img)
+        return np.stack(images, axis=0)
+
+    def renders(self, samples, partial=False, **kwargs):
+        if partial:
+            samples = self.pad_observations(samples)
+            partial = False
+
+        sample_images = self._renders(samples, partial=partial, **kwargs)
+
+        composite = np.ones_like(sample_images[0]) * 255
+
+        for img in sample_images:
+            mask = get_image_mask(img)
+            composite[mask] = img[mask]
+
+        return composite
+
+    def __call__(self, *args, **kwargs):
+        return self.renders(*args, **kwargs)
\ No newline at end of file
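With helpers.py in place, the renderer can be smoke-tested on its own before wiring it into the planning loop. A hypothetical usage sketch, assuming a working mujoco_py installation for offscreen rendering; the all-zeros batch and output filename are placeholders, not part of the example script:

    import d4rl  # noqa: F401 -- importing d4rl registers the hopper envs with gym
    import gym
    import numpy as np
    from helpers import MuJoCoRenderer, show_sample

    # pass an env instance directly so the string-based env_map() lookup is bypassed
    env = gym.make("hopper-medium-expert-v2")
    render = MuJoCoRenderer(env)

    # [ batch_size x horizon x observation_dim ] placeholder trajectory batch
    observations = np.zeros((1, 16, env.observation_space.shape[0]))
    show_sample(render, observations, filename="smoke_test.mp4")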
diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py
index 91ff0f2bced9..41bc977a06f4 100644
--- a/examples/diffuser/run_diffuser_value_guided.py
+++ b/examples/diffuser/run_diffuser_value_guided.py
@@ -5,6 +5,7 @@
 import numpy as np
 import gym
 from diffusers import DDPMScheduler, TemporalUNet, ValueFunction, ValueFunctionScheduler
+from helpers import MuJoCoRenderer, show_sample


 # model = torch.load("../diffuser/test.torch")
@@ -49,24 +50,20 @@ def to_torch(x_in, dtype=None, device=None):
         return x_in.to(device).type(dtype)
     return torch.tensor(x_in, dtype=dtype, device=device)

-obs = env.reset()
-obs_raw = obs
-
-# normalize observations for forward passes
-obs = normalize(obs, data, 'observations')

 # Two generators for different parts of the diffusion loop to work in colab
 # generator = torch.Generator(device='cuda')
 generator_cpu = torch.Generator(device='cpu')

-scheduler = ValueFunctionScheduler(num_train_timesteps=100,beta_schedule="squaredcos_cap_v2", clip_sample=False)
+scheduler = ValueFunctionScheduler(num_train_timesteps=20,beta_schedule="squaredcos_cap_v2", clip_sample=False)

 # 3 different pretrained models are available for this task.
 # The horizon represents the length of trajectories used in training.
 # network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11)

 network = ValueFunction.from_pretrained("bglick13/hopper-medium-expert-v2-value-function-hor32").to(device=DEVICE)
+unet = TemporalUNet.from_pretrained("bglick13/hopper-medium-expert-v2-unet-hor32").to(device=DEVICE)
 # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE)
 # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE)
 def reset_x0(x_in, cond, act_dim):
@@ -75,65 +72,106 @@ def reset_x0(x_in, cond, act_dim):
     return x_in

 # network specific constants for inference
-clip_denoised = network.clip_denoised
-predict_epsilon = network.predict_epsilon
-
+clip_denoised = False
+predict_epsilon = False
+n_guide_steps = 2
+scale_grad_by_std = True
+scale = 0.001
 ## add a batch dimension and repeat for multiple samples
 ## [ observation_dim ] --> [ n_samples x observation_dim ]
+obs = env.reset()
+total_reward = 0
+done = False
+T = 300
+rollout = [obs.copy()]
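The loop that follows is the heart of value guidance: before each denoising step, the noisy trajectory batch is pushed a few small steps up the gradient of the learned value network, then re-conditioned and denoised. Stripped of the environment plumbing, the guidance update alone looks roughly like this sketch, not the script's exact code; `value_fn` stands in for `network(...).sample`, and the `scale`/`n_guide_steps` defaults mirror the constants defined above:

    import torch

    def guide(x, timesteps, value_fn, scale=0.001, n_guide_steps=2):
        # gradient ascent on the predicted value of the noisy trajectory batch
        for _ in range(n_guide_steps):
            x = x.detach().requires_grad_()
            y = value_fn(x, timesteps)             # [ batch_size ] predicted values
            grad = torch.autograd.grad(y.sum(), x)[0]
            x = (x + scale * grad).detach()        # nudge trajectories toward higher value
        return x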
+try:
+    for t in tqdm.tqdm(range(T)):
+        obs_raw = obs
+        # 1. Call the policy
+        # normalize observations for forward passes
+        obs = normalize(obs, data, 'observations')
+
+        obs = obs[None].repeat(n_samples, axis=0)
+        conditions = {
+            0: to_torch(obs, device=DEVICE)
+        }
+
+        # 2. Call the diffusion model
+        # constants for inference
+        batch_size = len(conditions[0])
+        shape = (batch_size, horizon, state_dim+action_dim)
+
+        # sample random initial noise vector
+        x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu)
+
+        # this model is conditioned from an initial state, so you will see this function
+        # multiple times to change the initial state of generated data to the state
+        # generated via env.reset() above or env.step() below
+        x = reset_x0(x1, conditions, action_dim)
+
+        # convert a np observation to torch for model forward pass
+        x = to_torch(x)
+
+        eta = 1.0 # noise factor for sampling reconstructed state
+
+        # run the diffusion process
+        # for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):
+        for i in tqdm.tqdm(scheduler.timesteps):
+
+            # create batch of timesteps to pass into model
+            timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long)
+
+            # 3. call the sample function
+            for _ in range(n_guide_steps):
+                with torch.enable_grad():
+                    x.requires_grad_()
+                    y = network(x, timesteps).sample
+                    grad = torch.autograd.grad([y.sum()], [x])[0]
+                if scale_grad_by_std:
+                    posterior_variance = scheduler._get_variance(i)
+                    grad = posterior_variance * 0.5 * grad
+                grad[i < 4] = 0
+                x = x.detach()
+                x = x + scale * grad
+                x = reset_x0(x, conditions, action_dim)
+            prev_x = unet(x, timesteps).sample
+            # TODO: This should really be a TemporalUNet that predicts the previous state given x
+            x = scheduler.step(prev_x, i, x)["prev_sample"]
+            x = reset_x0(x, conditions, action_dim)
+            if clip_denoised:
+                x.clamp_(-1., 1.)
+            # 2. use the model prediction to reconstruct an observation (de-noise)
+
+
+            # # 3. [optional] add posterior noise to the sample
+            # if eta > 0:
+            #     noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device)
+            #     posterior_variance = scheduler._get_variance(i) # * noise
+            #     # no noise when t == 0
+            #     # NOTE: original implementation missing sqrt on posterior_variance
+            #     obs_reconstruct = obs_reconstruct + int(i>0) * (0.5 * posterior_variance) * eta* noise  # MJ had as log var, exponentiated
+
+            # 4. 
apply conditions to the trajectory + # obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) + x = to_torch(x) + sorted_idx = y.argsort(0, descending=True).squeeze() + sorted_values = x[sorted_idx] + actions = sorted_values[:, :, :action_dim] + actions = actions[0, 0].detach().cpu().numpy() + actions = de_normalize(actions, data, key='actions') + ## execute action in environment + next_observation, reward, terminal, _ = env.step(actions) + + ## update return + total_reward += reward + print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") + # save observations for rendering + rollout.append(next_observation.copy()) + + obs = next_observation +except KeyboardInterrupt: + pass + +print(f"Total reward: {total_reward}") +render = MuJoCoRenderer(env) +show_sample(render, np.expand_dims(np.stack(rollout),axis=0)) \ No newline at end of file diff --git a/src/diffusers/schedulers/scheduling_value_function.py b/src/diffusers/schedulers/scheduling_value_function.py index fc56a2b18a9f..246d65758522 100644 --- a/src/diffusers/schedulers/scheduling_value_function.py +++ b/src/diffusers/schedulers/scheduling_value_function.py @@ -239,7 +239,7 @@ def step( # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf - pred_original_sample = sample + scale * model_output + pred_original_sample = model_output # 3. Clip "predicted x_0" if self.config.clip_sample: @@ -262,7 +262,7 @@ def step( ).to(model_output.device) variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise - pred_prev_sample = pred_prev_sample + variance + pred_prev_sample = pred_prev_sample + variance * noise if not return_dict: return (pred_prev_sample,) From a39652986661c962463e98d3da67d3132ed813e1 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 10 Oct 2022 15:41:35 -0400 Subject: [PATCH 022/133] larger batch size for planning --- examples/diffuser/run_diffuser_value_guided.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 41bc977a06f4..6083232a8773 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -23,7 +23,7 @@ DTYPE = torch.float # diffusion model settings -n_samples = 4 # number of trajectories planned via diffusion +n_samples = 64 # number of trajectories planned via diffusion horizon = 32 # length of sampled trajectories state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] From 3b08bea39ff8a754842aed6c6f729ea1b9710a9c Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 10 Oct 2022 15:14:55 -0700 Subject: [PATCH 023/133] add 1d resnet block structure for downsample --- src/diffusers/models/resnet.py | 29 ++++++++----- src/diffusers/models/unet_blocks.py | 66 ++++++++++++++++++++++++++++- src/diffusers/models/unet_rl.py | 33 +++++++++------ 3 files changed, 104 insertions(+), 24 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index b83c4367e7bf..363fed372cbc 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -7,10 +7,13 @@ class Upsample1D(nn.Module): """ - An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param - use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. 
- If 3D, then - upsampling occurs in the inner-two dimensions. + An upsampling layer with an optional convolution. + + Parameters: + channels: channels in the inputs and outputs. + use_conv: a bool determining if a convolution is applied. + use_conv_transpose: + out_channels: """ def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): @@ -21,7 +24,6 @@ def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_chann self.use_conv_transpose = use_conv_transpose self.name = name - # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed self.conv = None if use_conv_transpose: self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) @@ -43,10 +45,13 @@ def forward(self, x): class Downsample1D(nn.Module): """ - A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param - use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. - If 3D, then - downsampling occurs in the inner-two dimensions. + A downsampling layer with an optional convolution. + + Parameters: + channels: channels in the inputs and outputs. + use_conv: a bool determining if a convolution is applied. + out_channels: + padding: """ def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): @@ -76,7 +81,8 @@ class Upsample2D(nn.Module): Parameters: channels: channels in the inputs and outputs. use_conv: a bool determining if a convolution is applied. - dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. + use_conv_transpose: + out_channels: """ def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): @@ -129,7 +135,8 @@ class Downsample2D(nn.Module): Parameters: channels: channels in the inputs and outputs. use_conv: a bool determining if a convolution is applied. - dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. + out_channels: + padding: """ def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): diff --git a/src/diffusers/models/unet_blocks.py b/src/diffusers/models/unet_blocks.py index a17b1d2a5333..a5d3fa499dc7 100644 --- a/src/diffusers/models/unet_blocks.py +++ b/src/diffusers/models/unet_blocks.py @@ -15,10 +15,19 @@ # limitations under the License. 
import torch +import torch.nn.functional as F from torch import nn from .attention import AttentionBlock, SpatialTransformer -from .resnet import Downsample2D, FirDownsample2D, FirUpsample2D, ResnetBlock2D, Upsample2D +from .resnet import ( + Downsample1D, + Downsample2D, + FirDownsample2D, + FirUpsample2D, + ResidualTemporalBlock, + ResnetBlock2D, + Upsample2D, +) def get_down_block( @@ -460,6 +469,61 @@ def forward(self, hidden_states, temb=None): return hidden_states, output_states +class DownResnetBlock1D(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + conv_shortcut=False, + temb_channels=32, + groups=32, + groups_out=None, + non_linearity=None, + time_embedding_norm="default", + output_scale_factor=1.0, + add_downsample=True, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + self.time_embedding_norm = time_embedding_norm + self.add_downsample = add_downsample + self.output_scale_factor = output_scale_factor + + if groups_out is None: + groups_out = groups + + self.resnet1 = ResidualTemporalBlock(in_channels, out_channels, embed_dim=temb_channels) + self.resnet2 = ResidualTemporalBlock(out_channels, out_channels, embed_dim=temb_channels) + + if non_linearity == "swish": + self.nonlinearity = lambda x: F.silu(x) + elif non_linearity == "mish": + self.nonlinearity = nn.Mish() + elif non_linearity == "silu": + self.nonlinearity = nn.SiLU() + + self.downsample = None + if add_downsample: + self.downsample = Downsample1D(out_channels, use_conv=True, padding=1) + + def forward(self, hidden_states, temb=None): + output_states = () + + hidden_states = self.resnet1(hidden_states, temb) + hidden_states = self.resnet2(hidden_states, temb) + output_states += (hidden_states,) + + if self.downsample is not None: + hidden_states = self.downsample(hidden_states) + + return hidden_states, output_states + + class CrossAttnDownBlock2D(nn.Module): def __init__( self, diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 5560bd90371e..d4432d2f562f 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -5,7 +5,8 @@ import torch import torch.nn as nn -from diffusers.models.resnet import Downsample1D, ResidualTemporalBlock, Upsample1D +from diffusers.models.resnet import ResidualTemporalBlock, Upsample1D +from diffusers.models.unet_blocks import DownResnetBlock1D from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin @@ -61,13 +62,18 @@ def __init__( for ind, (dim_in, dim_out) in enumerate(in_out): is_last = ind >= (num_resolutions - 1) + # self.down_blocks.append( + # nn.ModuleList( + # [ + # ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim), + # ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim), + # Downsample1D(dim_out, use_conv=True) if not is_last else nn.Identity(), + # ] + # ) + # ) self.down_blocks.append( - nn.ModuleList( - [ - ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim), - ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim), - Downsample1D(dim_out, use_conv=True) if not is_last else nn.Identity(), - ] + DownResnetBlock1D( + in_channels=dim_in, out_channels=dim_out, temb_channels=dim, add_downsample=(not is_last) ) ) @@ -127,11 +133,14 @@ def forward( h = [] # 2. 
down - for resnet, resnet2, downsample in self.down_blocks: - sample = resnet(sample, t) - sample = resnet2(sample, t) - h.append(sample) - sample = downsample(sample) + # for resnet, resnet2, downsample in self.down_blocks: + # sample = resnet(sample, t) + # sample = resnet2(sample, t) + # h.append(sample) + # sample = downsample(sample) + for downsample_block in self.down_blocks: + sample, res_samples = downsample_block(hidden_states=sample, temb=t) + h.append(res_samples[0]) # 3. mid sample = self.mid_block1(sample, t) From aae2a9a69f329e9f54496c2f2b472ae1597a2fc8 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 10 Oct 2022 15:38:52 -0700 Subject: [PATCH 024/133] rename as unet1d --- src/diffusers/__init__.py | 2 +- src/diffusers/models/__init__.py | 2 +- src/diffusers/models/unet_blocks.py | 53 ++++++++++++++++++++++++++++- src/diffusers/models/unet_rl.py | 51 +++++++-------------------- tests/test_models_unet.py | 8 ++--- 5 files changed, 71 insertions(+), 45 deletions(-) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 8267aaff73fe..fa97effaaf0a 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .modeling_utils import ModelMixin - from .models import AutoencoderKL, TemporalUNet, UNet2DConditionModel, UNet2DModel, VQModel + from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, VQModel from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index 47f7fa71682b..dc0946cf4d54 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel - from .unet_rl import TemporalUNet + from .unet_rl import UNet1DModel from .vae import AutoencoderKL, VQModel if is_flax_available(): diff --git a/src/diffusers/models/unet_blocks.py b/src/diffusers/models/unet_blocks.py index a5d3fa499dc7..64dcac75c232 100644 --- a/src/diffusers/models/unet_blocks.py +++ b/src/diffusers/models/unet_blocks.py @@ -26,6 +26,7 @@ FirUpsample2D, ResidualTemporalBlock, ResnetBlock2D, + Upsample1D, Upsample2D, ) @@ -488,7 +489,6 @@ def __init__( self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels - self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.time_embedding_norm = time_embedding_norm self.add_downsample = add_downsample @@ -523,6 +523,57 @@ def forward(self, hidden_states, temb=None): return hidden_states, output_states +class UpResnetBlock1D(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + temb_channels=32, + groups=32, + groups_out=None, + non_linearity=None, + time_embedding_norm="default", + output_scale_factor=1.0, + add_upsample=True, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.time_embedding_norm = time_embedding_norm + self.add_upsample = add_upsample + self.output_scale_factor = output_scale_factor + + if groups_out is None: + groups_out = groups + + self.resnet1 = ResidualTemporalBlock(in_channels, out_channels, embed_dim=temb_channels) + self.resnet2 = ResidualTemporalBlock(out_channels, out_channels, embed_dim=temb_channels) + + if non_linearity == "swish": + 
self.nonlinearity = lambda x: F.silu(x) + elif non_linearity == "mish": + self.nonlinearity = nn.Mish() + elif non_linearity == "silu": + self.nonlinearity = nn.SiLU() + + self.upsample = None + if add_upsample: + self.upsample = Upsample1D(out_channels, use_conv_transpose=True) + + def forward(self, hidden_states, res_hidden_states=None, temb=None): + if res_hidden_states is not None: + hidden_states = torch.cat((hidden_states, res_hidden_states), dim=1) + + hidden_states = self.resnet1(hidden_states, temb) + hidden_states = self.resnet2(hidden_states, temb) + + if self.upsample is not None: + hidden_states = self.upsample(hidden_states) + + return hidden_states + class CrossAttnDownBlock2D(nn.Module): def __init__( diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index d4432d2f562f..b62a76ff27c1 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -6,7 +6,7 @@ import torch.nn as nn from diffusers.models.resnet import ResidualTemporalBlock, Upsample1D -from diffusers.models.unet_blocks import DownResnetBlock1D +from diffusers.models.unet_blocks import DownResnetBlock1D, UpResnetBlock1D from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin @@ -16,7 +16,7 @@ @dataclass -class TemporalUNetOutput(BaseOutput): +class UNet1DOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch, horizon, obs_dimension)`): @@ -26,7 +26,7 @@ class TemporalUNetOutput(BaseOutput): sample: torch.FloatTensor -class TemporalUNet(ModelMixin, ConfigMixin): +class UNet1DModel(ModelMixin, ConfigMixin): """ A UNet for multi-dimensional temporal data. This model takes the batch over the `training_horizon`. @@ -62,15 +62,6 @@ def __init__( for ind, (dim_in, dim_out) in enumerate(in_out): is_last = ind >= (num_resolutions - 1) - # self.down_blocks.append( - # nn.ModuleList( - # [ - # ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim), - # ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim), - # Downsample1D(dim_out, use_conv=True) if not is_last else nn.Identity(), - # ] - # ) - # ) self.down_blocks.append( DownResnetBlock1D( in_channels=dim_in, out_channels=dim_out, temb_channels=dim, add_downsample=(not is_last) @@ -86,15 +77,7 @@ def __init__( for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): is_last = ind >= (num_resolutions - 1) - self.up_blocks.append( - nn.ModuleList( - [ - ResidualTemporalBlock(dim_out * 2, dim_in, embed_dim=dim), - ResidualTemporalBlock(dim_in, dim_in, embed_dim=dim), - Upsample1D(dim_in, use_conv_transpose=True) if not is_last else nn.Identity(), - ] - ) - ) + self.up_blocks.append(UpResnetBlock1D(in_channels=dim_out*2, out_channels=dim_in, temb_channels=dim, add_upsample=(not is_last))) # out self.final_conv1d_1 = nn.Conv1d(dim, dim, 5, padding=2) @@ -128,30 +111,22 @@ def forward( elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) - t = self.time_proj(timesteps) - t = self.time_mlp(t) - h = [] + temb = self.time_proj(timesteps) + temb = self.time_mlp(temb) + down_block_res_samples = [] # 2. 
down - # for resnet, resnet2, downsample in self.down_blocks: - # sample = resnet(sample, t) - # sample = resnet2(sample, t) - # h.append(sample) - # sample = downsample(sample) for downsample_block in self.down_blocks: - sample, res_samples = downsample_block(hidden_states=sample, temb=t) - h.append(res_samples[0]) + sample, res_samples = downsample_block(hidden_states=sample, temb=temb) + down_block_res_samples.append(res_samples[0]) # 3. mid - sample = self.mid_block1(sample, t) - sample = self.mid_block2(sample, t) + sample = self.mid_block1(sample, temb) + sample = self.mid_block2(sample, temb) # 4. up - for resnet, resnet2, upsample in self.up_blocks: - sample = torch.cat((sample, h.pop()), dim=1) - sample = resnet(sample, t) - sample = resnet2(sample, t) - sample = upsample(sample) + for up_block in self.up_blocks: + sample = up_block(hidden_states=sample, res_hidden_states=down_block_res_samples.pop(), temb=temb) # 5. post-process sample = self.final_conv1d_1(sample) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index e78cca2b7537..4ff1ebc6d241 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -20,7 +20,7 @@ import torch -from diffusers import TemporalUNet, UNet2DConditionModel, UNet2DModel +from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel from diffusers.utils import floats_tensor, slow, torch_device from .test_modeling_common import ModelTesterMixin @@ -451,7 +451,7 @@ def test_forward_with_norm_groups(self): class TemporalUNetModelTests(ModelTesterMixin, unittest.TestCase): - model_class = TemporalUNet + model_class = UNet1DModel @property def dummy_input(self): @@ -488,7 +488,7 @@ def prepare_init_args_and_inputs_for_common(self): return init_dict, inputs_dict def test_from_pretrained_hub(self): - model, loading_info = TemporalUNet.from_pretrained( + model, loading_info = UNet1DModel.from_pretrained( "fusing/ddpm-unet-rl-hopper-hor128", output_loading_info=True ) self.assertIsNotNone(model) @@ -500,7 +500,7 @@ def test_from_pretrained_hub(self): assert image is not None, "Make sure output is not None" def test_output_pretrained(self): - model = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") + model = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") model.eval() torch.manual_seed(0) From dd872aff4d5d1980c8f7c4d29202d4299b36c05d Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 10 Oct 2022 15:41:49 -0700 Subject: [PATCH 025/133] fix renaming --- src/diffusers/models/unet_blocks.py | 1 + src/diffusers/models/unet_rl.py | 12 ++++++++---- tests/test_models_unet.py | 2 +- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/diffusers/models/unet_blocks.py b/src/diffusers/models/unet_blocks.py index 64dcac75c232..413500338d48 100644 --- a/src/diffusers/models/unet_blocks.py +++ b/src/diffusers/models/unet_blocks.py @@ -523,6 +523,7 @@ def forward(self, hidden_states, temb=None): return hidden_states, output_states + class UpResnetBlock1D(nn.Module): def __init__( self, diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index b62a76ff27c1..aeaabc1b3719 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -5,7 +5,7 @@ import torch import torch.nn as nn -from diffusers.models.resnet import ResidualTemporalBlock, Upsample1D +from diffusers.models.resnet import ResidualTemporalBlock from diffusers.models.unet_blocks import DownResnetBlock1D, UpResnetBlock1D from ..configuration_utils import ConfigMixin, 
register_to_config @@ -77,7 +77,11 @@ def __init__( for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): is_last = ind >= (num_resolutions - 1) - self.up_blocks.append(UpResnetBlock1D(in_channels=dim_out*2, out_channels=dim_in, temb_channels=dim, add_upsample=(not is_last))) + self.up_blocks.append( + UpResnetBlock1D( + in_channels=dim_out * 2, out_channels=dim_in, temb_channels=dim, add_upsample=(not is_last) + ) + ) # out self.final_conv1d_1 = nn.Conv1d(dim, dim, 5, padding=2) @@ -90,7 +94,7 @@ def forward( sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True, - ) -> Union[TemporalUNetOutput, Tuple]: + ) -> Union[UNet1DOutput, Tuple]: r""" Args: sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor @@ -141,4 +145,4 @@ def forward( if not return_dict: return (sample,) - return TemporalUNetOutput(sample=sample) + return UNet1DOutput(sample=sample) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 4ff1ebc6d241..d822153cd44c 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -450,7 +450,7 @@ def test_forward_with_norm_groups(self): pass -class TemporalUNetModelTests(ModelTesterMixin, unittest.TestCase): +class UNet1DModelTests(ModelTesterMixin, unittest.TestCase): model_class = UNet1DModel @property From 713bd80a5244a0f6106cc796ceab788c7c60c4c3 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 11 Oct 2022 11:37:49 -0400 Subject: [PATCH 026/133] more tests --- .gitignore | 4 +- convert_model.py | 15 ++ examples/diffuser/run_diffuser.py | 134 +++++++++++------- .../diffuser/run_diffuser_value_guided.py | 47 ++++-- 4 files changed, 132 insertions(+), 68 deletions(-) create mode 100644 convert_model.py diff --git a/.gitignore b/.gitignore index cf8183463613..f066e7f84299 100644 --- a/.gitignore +++ b/.gitignore @@ -163,4 +163,6 @@ tags *.lock # DS_Store (MacOS) -.DS_Store \ No newline at end of file +.DS_Store +*.mp4 +hub/* \ No newline at end of file diff --git a/convert_model.py b/convert_model.py new file mode 100644 index 000000000000..d7504cb38828 --- /dev/null +++ b/convert_model.py @@ -0,0 +1,15 @@ + +import torch +from diffusers import DDPMScheduler, TemporalUNet, ValueFunction, ValueFunctionScheduler + + + +model = torch.load("/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-hor32.torch") +state_dict = model.state_dict() +hf_value_function = TemporalUNet(training_horizon=32, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) +mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) +for k, v in mapping.items(): + state_dict[v] = state_dict.pop(k) +hf_value_function.load_state_dict(state_dict) + +torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2-unet/diffusion_pytorch_model.bin") diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser.py index c672421c210c..97f38fd3f261 100644 --- a/examples/diffuser/run_diffuser.py +++ b/examples/diffuser/run_diffuser.py @@ -4,6 +4,7 @@ import tqdm import numpy as np import gym +import helpers env_name = "hopper-medium-expert-v2" env = gym.make(env_name) @@ -42,16 +43,11 @@ def to_torch(x_in, dtype=None, device=None): return x_in.to(device).type(dtype) return torch.tensor(x_in, dtype=dtype, device=device) -obs = env.reset() -obs_raw = obs -# normalize observations for forward passes -obs = normalize(obs, data, 'observations') from diffusers import DDPMScheduler, TemporalUNet # Two 
generators for different parts of the diffusion loop to work in colab -generator = torch.Generator(device='cuda') generator_cpu = torch.Generator(device='cpu') scheduler = DDPMScheduler(num_train_timesteps=100,beta_schedule="squaredcos_cap_v2") @@ -72,50 +68,84 @@ def reset_x0(x_in, cond, act_dim): ## add a batch dimension and repeat for multiple samples ## [ observation_dim ] --> [ n_samples x observation_dim ] -obs = obs[None].repeat(n_samples, axis=0) -conditions = { - 0: to_torch(obs, device=DEVICE) - } - -# constants for inference -batch_size = len(conditions[0]) -shape = (batch_size, horizon, state_dim+action_dim) - -# sample random initial noise vector -x1 = torch.randn(shape, device=DEVICE, generator=generator) - -# this model is conditioned from an initial state, so you will see this function -# multiple times to change the initial state of generated data to the state -# generated via env.reset() above or env.step() below -x = reset_x0(x1, conditions, action_dim) - -# convert a np observation to torch for model forward pass -x = to_torch(x) - -eta = 1.0 # noise factor for sampling reconstructed state - -# run the diffusion process -# for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps): -for i in tqdm.tqdm(scheduler.timesteps): - - # create batch of timesteps to pass into model - timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long) - - # 1. generate prediction from model - with torch.no_grad(): - residual = network(x, timesteps).sample - - # 2. use the model prediction to reconstruct an observation (de-noise) - obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"] - - # 3. [optional] add posterior noise to the sample - if eta > 0: - noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device) - posterior_variance = scheduler._get_variance(i) # * noise - # no noise when t == 0 - # NOTE: original implementation missing sqrt on posterior_variance - obs_reconstruct = obs_reconstruct + int(i>0) * (0.5 * posterior_variance) * eta* noise # MJ had as log var, exponentiated - - # 4. 
apply conditions to the trajectory - obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) - x = to_torch(obs_reconstruct_postcond) \ No newline at end of file +obs = env.reset() +total_reward = 0 +done = False +T = 300 +rollout = [obs.copy()] + +try: + for t in tqdm.tqdm(range(T)): + obs_raw = obs + + # normalize observations for forward passes + obs = normalize(obs, data, 'observations') + obs = obs[None].repeat(n_samples, axis=0) + conditions = { + 0: to_torch(obs, device=DEVICE) + } + + # constants for inference + batch_size = len(conditions[0]) + shape = (batch_size, horizon, state_dim+action_dim) + + # sample random initial noise vector + x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu) + + # this model is conditioned from an initial state, so you will see this function + # multiple times to change the initial state of generated data to the state + # generated via env.reset() above or env.step() below + x = reset_x0(x1, conditions, action_dim) + + # convert a np observation to torch for model forward pass + x = to_torch(x) + + eta = 1.0 # noise factor for sampling reconstructed state + + # run the diffusion process + # for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps): + for i in tqdm.tqdm(scheduler.timesteps): + + # create batch of timesteps to pass into model + timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long) + + # 1. generate prediction from model + with torch.no_grad(): + residual = network(x, timesteps).sample + + # 2. use the model prediction to reconstruct an observation (de-noise) + obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"] + + # 3. [optional] add posterior noise to the sample + if eta > 0: + noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device) + posterior_variance = scheduler._get_variance(i) # * noise + # no noise when t == 0 + # NOTE: original implementation missing sqrt on posterior_variance + obs_reconstruct = obs_reconstruct + int(i>0) * (0.5 * posterior_variance) * eta* noise # MJ had as log var, exponentiated + + # 4. 
apply conditions to the trajectory + obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) + x = to_torch(obs_reconstruct_postcond) + plans = helpers.to_np(x[:,:,:action_dim]) + # select random plan + idx = np.random.randint(plans.shape[0]) + # select action at correct time + action = plans[idx, 0, :] + actions = de_normalize(action, data, 'actions') + ## execute the de-normalized action in the environment + next_observation, reward, terminal, _ = env.step(actions) + + ## update return + total_reward += reward + print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") + + # save observations for rendering + rollout.append(next_observation.copy()) + obs = next_observation +except KeyboardInterrupt: + pass + +print(f"Total reward: {total_reward}") +render = helpers.MuJoCoRenderer(env) +helpers.show_sample(render, np.expand_dims(np.stack(rollout),axis=0)) \ No newline at end of file diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 6083232a8773..2fae766bcbff 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -16,6 +16,7 @@ env_name = "hopper-medium-expert-v2" env = gym.make(env_name) data = env.get_dataset() # dataset is only used for normalization in this colab +render = MuJoCoRenderer(env) # Cuda settings for colab # torch.cuda.get_device_name(0) @@ -23,11 +24,11 @@ DTYPE = torch.float # diffusion model settings -n_samples = 64 # number of trajectories planned via diffusion +n_samples = 4 # number of trajectories planned via diffusion horizon = 32 # length of sampled trajectories state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] -num_inference_steps = 100 # number of diffusion steps +num_inference_steps = 20 # number of diffusion steps def normalize(x_in, data, key): upper = np.max(data[key], axis=0) lower = np.min(data[key], axis=0) x_out = 2*(x_in - lower)/(upper-lower) - 1 return x_out @@ -56,7 +57,7 @@ def to_torch(x_in, dtype=None, device=None): # generator = torch.Generator(device='cuda') generator_cpu = torch.Generator(device='cpu') -scheduler = ValueFunctionScheduler(num_train_timesteps=20,beta_schedule="squaredcos_cap_v2", clip_sample=False) +scheduler = ValueFunctionScheduler(num_train_timesteps=num_inference_steps,beta_schedule="squaredcos_cap_v2", clip_sample=False) # 3 different pretrained models are available for this task. # The horizon represents the length of trajectories used in training.
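# An illustrative sanity check (a sketch, not part of the committed patch) of the
# min-max helpers these scripts rely on: `normalize` maps each feature of
# data[key] onto [-1, 1] and `de_normalize` inverts it, so a round trip recovers
# the input. `toy_data` here is a made-up stand-in for the D4RL dataset:
#
#   toy_data = {"observations": np.array([[0.0], [10.0]])}
#   x = normalize(np.array([5.0]), toy_data, "observations")   # -> [0.0]
#   assert np.allclose(de_normalize(x, toy_data, "observations"), np.array([5.0]))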
@@ -77,13 +78,24 @@ def reset_x0(x_in, cond, act_dim): n_guide_steps = 2 scale_grad_by_std = True scale = 0.001 +eta = 1.0 # noise factor for sampling reconstructed state + ## add a batch dimension and repeat for multiple samples ## [ observation_dim ] --> [ n_samples x observation_dim ] obs = env.reset() +# start_idx = 340 +# obs = data['observations'][start_idx] +# qpos = data['infos/qpos'][start_idx] +# qvel = data['infos/qvel'][start_idx] + +# env.set_state(qpos, qvel) total_reward = 0 done = False T = 300 rollout = [obs.copy()] +trajectories = [] +y_maxes = [] +t_grad_cutoff = 0 try: for t in tqdm.tqdm(range(T)): obs_raw = obs @@ -112,7 +124,6 @@ def reset_x0(x_in, cond, act_dim): # convert a np observation to torch for model forward pass x = to_torch(x) - eta = 1.0 # noise factor for sampling reconstructed state # run the diffusion process # for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps): @@ -130,12 +141,11 @@ def reset_x0(x_in, cond, act_dim): if scale_grad_by_std: posterior_variance = scheduler._get_variance(i) grad = posterior_variance * 0.5 * grad - grad[i < 4] = 0 + grad[timesteps < t_grad_cutoff] = 0 x = x.detach() x = x + scale * grad x = reset_x0(x, conditions, action_dim) prev_x = unet(x, timesteps).sample - # TODO: This should really be a TemporalUnet that predicts previos state given x x = scheduler.step(prev_x, i, x)["prev_sample"] x = reset_x0(x, conditions, action_dim) if clip_denoised: @@ -143,20 +153,25 @@ def reset_x0(x_in, cond, act_dim): # 2. use the model prediction to reconstruct an observation (de-noise) - # # 3. [optional] add posterior noise to the sample - # if eta > 0: - # noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device) - # posterior_variance = scheduler._get_variance(i) # * noise - # # no noise when t == 0 - # # NOTE: original implementation missing sqrt on posterior_variance - # obs_reconstruct = obs_reconstruct + int(i>0) * (0.5 * posterior_variance) * eta* noise # MJ had as log var, exponentiated + # 3. [optional] add posterior noise to the sample + if eta > 0: + noise = torch.randn(x.shape, generator=generator_cpu).to(x.device) + posterior_variance = scheduler._get_variance(i) # * noise + # no noise when t == 0 + # NOTE: original implementation missing sqrt on posterior_variance + x = x + int(i>0) * (0.5 * posterior_variance) * eta * noise # MJ had as log var, exponentiated # 4. 
apply conditions to the trajectory - # obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) + x = reset_x0(x, conditions, action_dim) x = to_torch(x) sorted_idx = y.argsort(0, descending=True).squeeze() + y_maxes.append(y[sorted_idx[0]]) sorted_values = x[sorted_idx] actions = sorted_values[:, :, :action_dim] + if t % 10 == 0: + trajectory = sorted_values[:, :, action_dim:][0].unsqueeze(0).detach().numpy() + trajectory = de_normalize(trajectory, data, 'observations') + trajectories.append(trajectory) actions = actions[0, 0].detach().cpu().numpy() actions = de_normalize(actions, data, key='actions') ## execute action in environment @@ -173,5 +188,7 @@ def reset_x0(x_in, cond, act_dim): pass print(f"Total reward: {total_reward}") -render = MuJoCoRenderer(env) +for i, trajectory in enumerate(trajectories): + show_sample(render, trajectory, f"trajectory_{i}.mp4") + show_sample(render, np.expand_dims(np.stack(rollout),axis=0)) \ No newline at end of file From d9384ffeb3805190e54b74dc0dc5f17ac0d7d64f Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 11 Oct 2022 15:24:53 -0400 Subject: [PATCH 027/133] merge unet1d changes --- convert_model.py | 47 +++++++++++++++---- .../diffuser/run_diffuser_value_guided.py | 12 ++--- src/diffusers/__init__.py | 2 +- src/diffusers/models/__init__.py | 2 +- src/diffusers/models/unet_rl.py | 41 +++++----------- 5 files changed, 57 insertions(+), 47 deletions(-) diff --git a/convert_model.py b/convert_model.py index d7504cb38828..b44b4d390725 100644 --- a/convert_model.py +++ b/convert_model.py @@ -1,15 +1,44 @@ import torch -from diffusers import DDPMScheduler, TemporalUNet, ValueFunction, ValueFunctionScheduler +from diffusers import DDPMScheduler, UNet1DModel, ValueFunction, ValueFunctionScheduler +import os +import json +os.makedirs("hub/hopper-medium-v2/unet", exist_ok=True) +os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) +def unet(): + model = torch.load("/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-hor32.torch") + state_dict = model.state_dict() + hf_value_function = UNet1DModel(dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14) + mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) + for k, v in mapping.items(): + state_dict[v] = state_dict.pop(k) + hf_value_function.load_state_dict(state_dict) + torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/unet/diffusion_pytorch_model.bin") + config = dict(dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14) + with open("hub/hopper-medium-v2/unet/config.json", "w") as f: + json.dump(config, f) -model = torch.load("/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-hor32.torch") -state_dict = model.state_dict() -hf_value_function = TemporalUNet(training_horizon=32, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) -mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) -for k, v in mapping.items(): - state_dict[v] = state_dict.pop(k) -hf_value_function.load_state_dict(state_dict) +def value_function(): + model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-hor32.torch") + state_dict = model.state_dict() + hf_value_function = ValueFunction(dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14) + print(f"length of state dict: {len(state_dict.keys())}") + print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") -torch.save(hf_value_function.state_dict(), 
"hub/hopper-medium-v2-unet/diffusion_pytorch_model.bin") + mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) + for k, v in mapping.items(): + state_dict[v] = state_dict.pop(k) + + hf_value_function.load_state_dict(state_dict) + + torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin") + config = dict(dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14) + with open("hub/hopper-medium-v2/value_function/config.json", "w") as f: + json.dump(config, f) + + +if __name__ == "__main__": + unet() + value_function() \ No newline at end of file diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 2fae766bcbff..ccbcb9204cfd 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -4,7 +4,7 @@ import tqdm import numpy as np import gym -from diffusers import DDPMScheduler, TemporalUNet, ValueFunction, ValueFunctionScheduler +from diffusers import DDPMScheduler, UNet1DModel, ValueFunction, ValueFunctionScheduler from helpers import MuJoCoRenderer, show_sample @@ -24,8 +24,8 @@ DTYPE = torch.float # diffusion model settings -n_samples = 4 # number of trajectories planned via diffusion -horizon = 32 # length of sampled trajectories +n_samples = 64 # number of trajectories planned via diffusion +horizon = 64 # length of sampled trajectories state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] num_inference_steps = 20 # number of difusion steps @@ -64,7 +64,7 @@ def to_torch(x_in, dtype=None, device=None): # network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) network = ValueFunction.from_pretrained("bglick13/hopper-medium-expert-v2-value-function-hor32").to(device=DEVICE) -unet = TemporalUNet.from_pretrained("bglick13/hopper-medium-expert-v2-unet-hor32").to(device=DEVICE) +unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-expert-v2-unet-hor32").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) def reset_x0(x_in, cond, act_dim): @@ -78,7 +78,7 @@ def reset_x0(x_in, cond, act_dim): n_guide_steps = 2 scale_grad_by_std = True scale = 0.001 -eta = 1.0 # noise factor for sampling reconstructed state +eta = 0.0 # noise factor for sampling reconstructed state ## add a batch dimension and repeat for multiple samples ## [ observation_dim ] --> [ n_samples x observation_dim ] @@ -95,7 +95,7 @@ def reset_x0(x_in, cond, act_dim): rollout = [obs.copy()] trajectories = [] y_maxes = [] -t_grad_cutoff = 0 +t_grad_cutoff = 4 try: for t in tqdm.tqdm(range(T)): obs_raw = obs diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 528eb396fbb1..c21ba3c7c3c2 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .modeling_utils import ModelMixin - from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, VQModel + from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, VQModel, ValueFunction from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index dc0946cf4d54..f7f2b77ecdf3 100644 --- 
a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel - from .unet_rl import UNet1DModel + from .unet_rl import UNet1DModel, ValueFunction from .vae import AutoencoderKL, VQModel if is_flax_available(): diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 73b81870b670..129d18f330a8 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -6,7 +6,7 @@ import torch.nn as nn from diffusers.models.resnet import ResidualTemporalBlock -from diffusers.models.unet_blocks import DownResnetBlock1D, UpResnetBlock1D +from diffusers.models.unet_blocks import DownResnetBlock1D, UpResnetBlock1D, Downsample1D from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin @@ -152,22 +152,13 @@ class ValueFunction(ModelMixin, ConfigMixin): @register_to_config def __init__( self, - training_horizon=128, transition_dim=14, - cond_dim=3, - predict_epsilon=False, - clip_denoised=True, dim=32, dim_mults=(1, 4, 8), - out_dim=1, ): super().__init__() self.transition_dim = transition_dim - self.cond_dim = cond_dim - self.predict_epsilon = predict_epsilon - self.clip_denoised = clip_denoised - self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1) self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4 * dim, act_fn="mish", out_dim=dim) @@ -181,17 +172,11 @@ def __init__( is_last = ind >= (num_resolutions - 1) self.blocks.append( - nn.ModuleList( - [ - ResidualTemporalBlock(dim_in, dim_out, embed_dim=dim), - ResidualTemporalBlock(dim_out, dim_out, embed_dim=dim), - Downsample1D(dim_out, use_conv=True), - ] + DownResnetBlock1D( + in_channels=dim_in, out_channels=dim_out, temb_channels=dim, add_downsample=True ) ) - if not is_last: - training_horizon = training_horizon // 2 mid_dim = dims[-1] mid_dim_2 = mid_dim // 2 @@ -199,17 +184,15 @@ def __init__( ## self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim_2, embed_dim=dim) self.mid_down1 = Downsample1D(mid_dim_2, use_conv=True) - training_horizon = training_horizon // 2 ## self.mid_block2 = ResidualTemporalBlock(mid_dim_2, mid_dim_3, embed_dim=dim) self.mid_down2 = Downsample1D(mid_dim_3, use_conv=True) - training_horizon = training_horizon // 2 ## - fc_dim = mid_dim_3 * max(training_horizon, 1) + fc_dim = mid_dim_3 self.final_block = nn.ModuleList([ nn.Linear(fc_dim + dim, fc_dim // 2), nn.Mish(), - nn.Linear(fc_dim // 2, out_dim),] + nn.Linear(fc_dim // 2, 1),] ) def forward( @@ -217,7 +200,7 @@ def forward( sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True, - ) -> Union[TemporalUNetOutput, Tuple]: + ) -> Union[UNet1DOutput, Tuple]: """r Args: sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor @@ -240,14 +223,12 @@ def forward( t = self.time_proj(timesteps) t = self.time_mlp(t) - h = [] + down_block_res_samples = [] # 2. down - for resnet, resnet2, downsample in self.blocks: - sample = resnet(sample, t) - sample = resnet2(sample, t) - h.append(sample) - sample = downsample(sample) + for downsample_block in self.blocks: + sample, res_samples = downsample_block(hidden_states=sample, temb=t) + down_block_res_samples.append(res_samples[0]) # 3. 
mid sample = self.mid_block1(sample, t) @@ -263,4 +244,4 @@ def forward( if not return_dict: return (sample,) - return TemporalUNetOutput(sample=sample) + return UNet1DOutput(sample=sample) From 52e26680dfc888a31ea82dca83e18035db4e3883 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 11 Oct 2022 17:36:12 -0400 Subject: [PATCH 028/133] wandb for debugging, use newer models --- examples/diffuser/helpers.py | 1 + .../diffuser/run_diffuser_value_guided.py | 61 +++++++++---------- 2 files changed, 30 insertions(+), 32 deletions(-) diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py index ef853d3a6039..947c4d6cb216 100644 --- a/examples/diffuser/helpers.py +++ b/examples/diffuser/helpers.py @@ -40,6 +40,7 @@ def show_sample(renderer, observations, filename='sample.mp4', savebase='videos' images = np.concatenate(images, axis=2) media.write_video(savepath, images, fps=60) media.show_video(images, codec='h264', fps=60) + return images # Code adapted from Michael Janner # source: https://github.com/jannerm/diffuser/blob/main/diffuser/utils/rendering.py diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index ccbcb9204cfd..cdbc3fc84cb4 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -6,7 +6,19 @@ import gym from diffusers import DDPMScheduler, UNet1DModel, ValueFunction, ValueFunctionScheduler from helpers import MuJoCoRenderer, show_sample - +import wandb +wandb.init(project="diffusers-value-guided-rl") + +config = dict( + n_samples=64, + horizon=32, + num_inference_steps=20, + n_guide_steps=2, + scale_grad_by_std=True, + scale=0.001, + eta=0.0, + t_grad_cutoff=4 +) # model = torch.load("../diffuser/test.torch") # hf_value_function = ValueFunction(training_horizon=32, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) @@ -24,11 +36,8 @@ DTYPE = torch.float # diffusion model settings -n_samples = 64 # number of trajectories planned via diffusion -horizon = 64 # length of sampled trajectories state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] -num_inference_steps = 20 # number of diffusion steps def normalize(x_in, data, key): upper = np.max(data[key], axis=0) @@ -57,29 +66,20 @@ def to_torch(x_in, dtype=None, device=None): # generator = torch.Generator(device='cuda') generator_cpu = torch.Generator(device='cpu') -scheduler = ValueFunctionScheduler(num_train_timesteps=num_inference_steps,beta_schedule="squaredcos_cap_v2", clip_sample=False) +scheduler = ValueFunctionScheduler(num_train_timesteps=config['num_inference_steps'],beta_schedule="squaredcos_cap_v2", clip_sample=False) # 3 different pretrained models are available for this task. # The horizon represents the length of trajectories used in training.
# network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) network = ValueFunction.from_pretrained("bglick13/hopper-medium-expert-v2-value-function-hor32").to(device=DEVICE) -unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-expert-v2-unet-hor32").to(device=DEVICE) +unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) def reset_x0(x_in, cond, act_dim): for key, val in cond.items(): x_in[:, key, act_dim:] = val.clone() return x_in - -# network specific constants for inference -clip_denoised = False -predict_epsilon = False -n_guide_steps = 2 -scale_grad_by_std = True -scale = 0.001 -eta = 0.0 # noise factor for sampling reconstructed state - ## add a batch dimension and repeat for multiple samples ## [ observation_dim ] --> [ n_samples x observation_dim ] obs = env.reset() @@ -91,11 +91,10 @@ def reset_x0(x_in, cond, act_dim): # env.set_state(qpos, qvel) total_reward = 0 done = False -T = 300 +T = 200 rollout = [obs.copy()] trajectories = [] y_maxes = [] -t_grad_cutoff = 4 try: for t in tqdm.tqdm(range(T)): obs_raw = obs @@ -103,7 +102,7 @@ def reset_x0(x_in, cond, act_dim): # normalize observations for forward passes obs = normalize(obs, data, 'observations') - obs = obs[None].repeat(n_samples, axis=0) + obs = obs[None].repeat(config['n_samples'], axis=0) conditions = { 0: to_torch(obs, device=DEVICE) } @@ -111,7 +110,7 @@ def reset_x0(x_in, cond, act_dim): # 2. Call the diffusion model # constants for inference batch_size = len(conditions[0]) - shape = (batch_size, horizon, state_dim+action_dim) + shape = (batch_size, config['horizon'], state_dim+action_dim) # sample random initial noise vector x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu) @@ -133,39 +132,37 @@ def reset_x0(x_in, cond, act_dim): timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long) # 3. call the sample function - for _ in range(n_guide_steps): + for _ in range(config['n_guide_steps']): with torch.enable_grad(): x.requires_grad_() y = network(x, timesteps).sample grad = torch.autograd.grad([y.sum()], [x])[0] - if scale_grad_by_std: + if config['scale_grad_by_std']: posterior_variance = scheduler._get_variance(i) grad = posterior_variance * 0.5 * grad - grad[timesteps < t_grad_cutoff] = 0 + grad[timesteps < config['t_grad_cutoff']] = 0 x = x.detach() - x = x + scale * grad + x = x + config['scale'] * grad x = reset_x0(x, conditions, action_dim) + y = network(x, timesteps).sample prev_x = unet(x, timesteps).sample x = scheduler.step(prev_x, i, x)["prev_sample"] x = reset_x0(x, conditions, action_dim) - if clip_denoised: - x.clamp_(-1., 1.) - # 2. use the model prediction to reconstruct an observation (de-noise) # 3. [optional] add posterior noise to the sample - if eta > 0: + if config['eta'] > 0: noise = torch.randn(x.shape, generator=generator_cpu).to(x.device) posterior_variance = scheduler._get_variance(i) # * noise # no noise when t == 0 # NOTE: original implementation missing sqrt on posterior_variance - x = x + int(i>0) * (0.5 * posterior_variance) * eta * noise # MJ had as log var, exponentiated + x = x + int(i>0) * (0.5 * posterior_variance) * config['eta'] * noise # MJ had as log var, exponentiated # 4. 
apply conditions to the trajectory x = reset_x0(x, conditions, action_dim) x = to_torch(x) sorted_idx = y.argsort(0, descending=True).squeeze() - y_maxes.append(y[sorted_idx[0]]) + y_maxes.append(y[sorted_idx[0]].detach().cpu().numpy()) sorted_values = x[sorted_idx] actions = sorted_values[:, :, :action_dim] if t % 10 == 0: @@ -179,6 +176,7 @@ def reset_x0(x_in, cond, act_dim): ## update return total_reward += reward + wandb.log({"total_reward": total_reward, "reward": reward, "y_max": y_maxes[-1], "diff_from_expert_reward": reward - data['rewards'][t]}) print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") # save observations for rendering rollout.append(next_observation.copy()) @@ -188,7 +186,6 @@ def reset_x0(x_in, cond, act_dim): pass print(f"Total reward: {total_reward}") -for i, trajectory in enumerate(trajectories): - show_sample(render, trajectory, f"trajectory_{i}.mp4") -show_sample(render, np.expand_dims(np.stack(rollout),axis=0)) \ No newline at end of file +images = show_sample(render, np.expand_dims(np.stack(rollout),axis=0)) +wandb.log({"rollout": wandb.Video('videos/sample.mp4', fps=60, format='mp4')}) \ No newline at end of file From 75fe8b463a7d42dda78462ee3efba3220c1009c0 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 11 Oct 2022 18:01:39 -0400 Subject: [PATCH 029/133] success! --- examples/diffuser/run_diffuser_value_guided.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index cdbc3fc84cb4..c0ae2e1ea4b9 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -10,14 +10,14 @@ wandb.init(project="diffusers-value-guided-rl") config = dict( - n_samples=64, + n_samples=4, horizon=32, - num_inference_steps=20, - n_guide_steps=2, + num_inference_steps=100, + n_guide_steps=0, scale_grad_by_std=True, scale=0.001, eta=0.0, - t_grad_cutoff=4 + t_grad_cutoff=0 ) # model = torch.load("../diffuser/test.torch") @@ -73,7 +73,7 @@ def to_torch(x_in, dtype=None, device=None): # network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) network = ValueFunction.from_pretrained("bglick13/hopper-medium-expert-v2-value-function-hor32").to(device=DEVICE) -unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) +unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-expert-v2-unet-hor32").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) def reset_x0(x_in, cond, act_dim): @@ -91,7 +91,7 @@ def reset_x0(x_in, cond, act_dim): # env.set_state(qpos, qvel) total_reward = 0 done = False -T = 200 +T = 400 rollout = [obs.copy()] trajectories = [] y_maxes = [] From 9b67bb77375431742aaa3fca86cdae531816e223 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 11 Oct 2022 19:18:24 -0700 Subject: [PATCH 030/133] rename files --- docs/source/api/models.mdx | 7 +- src/diffusers/models/__init__.py | 2 +- .../models/{unet_rl.py => unet_1d.py} | 21 ++- src/diffusers/models/unet_1d_blocks.py | 126 ++++++++++++++++++ src/diffusers/models/unet_2d.py | 2 +- .../{unet_blocks.py => unet_2d_blocks.py} | 121 +---------------- src/diffusers/models/unet_2d_condition.py | 2 +- src/diffusers/models/vae.py | 2 +- 8 files changed, 154 insertions(+), 129 
deletions(-) rename src/diffusers/models/{unet_rl.py => unet_1d.py} (85%) create mode 100644 src/diffusers/models/unet_1d_blocks.py rename src/diffusers/models/{unet_blocks.py => unet_2d_blocks.py} (93%) diff --git a/docs/source/api/models.mdx b/docs/source/api/models.mdx index 98687b5e7038..b944a1d13089 100644 --- a/docs/source/api/models.mdx +++ b/docs/source/api/models.mdx @@ -34,8 +34,11 @@ The models are built on the base class ['ModelMixin'] that is a `torch.nn.module ## DecoderOutput [[autodoc]] models.vae.DecoderOutput -## TemporalUNet -[[autodoc]] TemporalUNet +## UNet1DModel +[[autodoc]] UNet1DModel + +## UNet1DOutput +[[autodoc]] UNet1DOutput ## VQEncoderOutput [[autodoc]] models.vae.VQEncoderOutput diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index dc0946cf4d54..c5d53b2feb4b 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -16,9 +16,9 @@ if is_torch_available(): + from .unet_1d import UNet1DModel from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel - from .unet_rl import UNet1DModel from .vae import AutoencoderKL, VQModel if is_flax_available(): diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_1d.py similarity index 85% rename from src/diffusers/models/unet_rl.py rename to src/diffusers/models/unet_1d.py index aeaabc1b3719..c32f2de62091 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_1d.py @@ -1,4 +1,17 @@ -# model adapted from diffuser https://github.com/jannerm/diffuser/blob/main/diffuser/models/temporal.py +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from dataclasses import dataclass from typing import Tuple, Union @@ -6,7 +19,7 @@ import torch.nn as nn from diffusers.models.resnet import ResidualTemporalBlock -from diffusers.models.unet_blocks import DownResnetBlock1D, UpResnetBlock1D +from diffusers.models.unet_1d_blocks import DownResnetBlock1D, UpResnetBlock1D from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin @@ -120,8 +133,8 @@ def forward( down_block_res_samples = [] # 2. down - for downsample_block in self.down_blocks: - sample, res_samples = downsample_block(hidden_states=sample, temb=temb) + for down_block in self.down_blocks: + sample, res_samples = down_block(hidden_states=sample, temb=temb) down_block_res_samples.append(res_samples[0]) # 3. mid diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py new file mode 100644 index 000000000000..8df7e81b5fd2 --- /dev/null +++ b/src/diffusers/models/unet_1d_blocks.py @@ -0,0 +1,126 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import torch +import torch.nn.functional as F +from torch import nn + +from .resnet import Downsample1D, ResidualTemporalBlock, Upsample1D + + +class DownResnetBlock1D(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + conv_shortcut=False, + temb_channels=32, + groups=32, + groups_out=None, + non_linearity=None, + time_embedding_norm="default", + output_scale_factor=1.0, + add_downsample=True, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + self.time_embedding_norm = time_embedding_norm + self.add_downsample = add_downsample + self.output_scale_factor = output_scale_factor + + if groups_out is None: + groups_out = groups + + self.resnet1 = ResidualTemporalBlock(in_channels, out_channels, embed_dim=temb_channels) + self.resnet2 = ResidualTemporalBlock(out_channels, out_channels, embed_dim=temb_channels) + + if non_linearity == "swish": + self.nonlinearity = lambda x: F.silu(x) + elif non_linearity == "mish": + self.nonlinearity = nn.Mish() + elif non_linearity == "silu": + self.nonlinearity = nn.SiLU() + + self.downsample = None + if add_downsample: + self.downsample = Downsample1D(out_channels, use_conv=True, padding=1) + + def forward(self, hidden_states, temb=None): + output_states = () + + hidden_states = self.resnet1(hidden_states, temb) + hidden_states = self.resnet2(hidden_states, temb) + output_states += (hidden_states,) + + if self.downsample is not None: + hidden_states = self.downsample(hidden_states) + + return hidden_states, output_states + + +class UpResnetBlock1D(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + temb_channels=32, + groups=32, + groups_out=None, + non_linearity=None, + time_embedding_norm="default", + output_scale_factor=1.0, + add_upsample=True, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.time_embedding_norm = time_embedding_norm + self.add_upsample = add_upsample + self.output_scale_factor = output_scale_factor + + if groups_out is None: + groups_out = groups + + self.resnet1 = ResidualTemporalBlock(in_channels, out_channels, embed_dim=temb_channels) + self.resnet2 = ResidualTemporalBlock(out_channels, out_channels, embed_dim=temb_channels) + + if non_linearity == "swish": + self.nonlinearity = lambda x: F.silu(x) + elif non_linearity == "mish": + self.nonlinearity = nn.Mish() + elif non_linearity == "silu": + self.nonlinearity = nn.SiLU() + + self.upsample = None + if add_upsample: + self.upsample = Upsample1D(out_channels, use_conv_transpose=True) + + def forward(self, hidden_states, res_hidden_states=None, temb=None): + if res_hidden_states is not None: + hidden_states = torch.cat((hidden_states, res_hidden_states), dim=1) + + hidden_states = self.resnet1(hidden_states, temb) + hidden_states = self.resnet2(hidden_states, temb) + + if self.upsample is not None: + hidden_states = 
self.upsample(hidden_states) + + return hidden_states diff --git a/src/diffusers/models/unet_2d.py b/src/diffusers/models/unet_2d.py index 2415bf4ee78d..d423cbc02fae 100644 --- a/src/diffusers/models/unet_2d.py +++ b/src/diffusers/models/unet_2d.py @@ -8,7 +8,7 @@ from ..modeling_utils import ModelMixin from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps -from .unet_blocks import UNetMidBlock2D, get_down_block, get_up_block +from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block @dataclass diff --git a/src/diffusers/models/unet_blocks.py b/src/diffusers/models/unet_2d_blocks.py similarity index 93% rename from src/diffusers/models/unet_blocks.py rename to src/diffusers/models/unet_2d_blocks.py index 413500338d48..4aec1ede0275 100644 --- a/src/diffusers/models/unet_blocks.py +++ b/src/diffusers/models/unet_2d_blocks.py @@ -10,25 +10,14 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and +# limitations under the License. import numpy as np - -# limitations under the License. import torch -import torch.nn.functional as F from torch import nn from .attention import AttentionBlock, SpatialTransformer -from .resnet import ( - Downsample1D, - Downsample2D, - FirDownsample2D, - FirUpsample2D, - ResidualTemporalBlock, - ResnetBlock2D, - Upsample1D, - Upsample2D, -) +from .resnet import Downsample2D, FirDownsample2D, FirUpsample2D, ResnetBlock2D, Upsample2D def get_down_block( @@ -470,112 +459,6 @@ def forward(self, hidden_states, temb=None): return hidden_states, output_states -class DownResnetBlock1D(nn.Module): - def __init__( - self, - *, - in_channels, - out_channels=None, - conv_shortcut=False, - temb_channels=32, - groups=32, - groups_out=None, - non_linearity=None, - time_embedding_norm="default", - output_scale_factor=1.0, - add_downsample=True, - ): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - self.time_embedding_norm = time_embedding_norm - self.add_downsample = add_downsample - self.output_scale_factor = output_scale_factor - - if groups_out is None: - groups_out = groups - - self.resnet1 = ResidualTemporalBlock(in_channels, out_channels, embed_dim=temb_channels) - self.resnet2 = ResidualTemporalBlock(out_channels, out_channels, embed_dim=temb_channels) - - if non_linearity == "swish": - self.nonlinearity = lambda x: F.silu(x) - elif non_linearity == "mish": - self.nonlinearity = nn.Mish() - elif non_linearity == "silu": - self.nonlinearity = nn.SiLU() - - self.downsample = None - if add_downsample: - self.downsample = Downsample1D(out_channels, use_conv=True, padding=1) - - def forward(self, hidden_states, temb=None): - output_states = () - - hidden_states = self.resnet1(hidden_states, temb) - hidden_states = self.resnet2(hidden_states, temb) - output_states += (hidden_states,) - - if self.downsample is not None: - hidden_states = self.downsample(hidden_states) - - return hidden_states, output_states - - -class UpResnetBlock1D(nn.Module): - def __init__( - self, - *, - in_channels, - out_channels=None, - temb_channels=32, - groups=32, - groups_out=None, - non_linearity=None, - time_embedding_norm="default", - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - 
self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.time_embedding_norm = time_embedding_norm - self.add_upsample = add_upsample - self.output_scale_factor = output_scale_factor - - if groups_out is None: - groups_out = groups - - self.resnet1 = ResidualTemporalBlock(in_channels, out_channels, embed_dim=temb_channels) - self.resnet2 = ResidualTemporalBlock(out_channels, out_channels, embed_dim=temb_channels) - - if non_linearity == "swish": - self.nonlinearity = lambda x: F.silu(x) - elif non_linearity == "mish": - self.nonlinearity = nn.Mish() - elif non_linearity == "silu": - self.nonlinearity = nn.SiLU() - - self.upsample = None - if add_upsample: - self.upsample = Upsample1D(out_channels, use_conv_transpose=True) - - def forward(self, hidden_states, res_hidden_states=None, temb=None): - if res_hidden_states is not None: - hidden_states = torch.cat((hidden_states, res_hidden_states), dim=1) - - hidden_states = self.resnet1(hidden_states, temb) - hidden_states = self.resnet2(hidden_states, temb) - - if self.upsample is not None: - hidden_states = self.upsample(hidden_states) - - return hidden_states - - class CrossAttnDownBlock2D(nn.Module): def __init__( self, diff --git a/src/diffusers/models/unet_2d_condition.py b/src/diffusers/models/unet_2d_condition.py index 4e4eaddf5dfe..7f7d6d541f9e 100644 --- a/src/diffusers/models/unet_2d_condition.py +++ b/src/diffusers/models/unet_2d_condition.py @@ -9,7 +9,7 @@ from ..modeling_utils import ModelMixin from ..utils import BaseOutput, logging from .embeddings import TimestepEmbedding, Timesteps -from .unet_blocks import ( +from .unet_2d_blocks import ( CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, diff --git a/src/diffusers/models/vae.py b/src/diffusers/models/vae.py index 7ce2f98eee27..515c9f17a4c9 100644 --- a/src/diffusers/models/vae.py +++ b/src/diffusers/models/vae.py @@ -8,7 +8,7 @@ from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from ..utils import BaseOutput -from .unet_blocks import UNetMidBlock2D, get_down_block, get_up_block +from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block @dataclass From c7fe1dc4768fce7cee311fcaef31f8788367bcc6 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 11 Oct 2022 22:59:17 -0400 Subject: [PATCH 031/133] turns out we just need more diffusion steps --- examples/diffuser/helpers.py | 68 +++++++++++++ .../diffuser/run_diffuser_value_guided.py | 97 ++++--------------- 2 files changed, 86 insertions(+), 79 deletions(-) diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py index 947c4d6cb216..7d103ee7eba8 100644 --- a/examples/diffuser/helpers.py +++ b/examples/diffuser/helpers.py @@ -4,6 +4,74 @@ import torch import gym import warnings +import tqdm + +DEVICE = 'cpu' + +DTYPE = torch.float +def normalize(x_in, data, key): + upper = np.max(data[key], axis=0) + lower = np.min(data[key], axis=0) + x_out = 2*(x_in - lower)/(upper-lower) - 1 + return x_out + +def de_normalize(x_in, data, key): + upper = np.max(data[key], axis=0) + lower = np.min(data[key], axis=0) + x_out = lower + (upper - lower)*(1 + x_in) /2 + return x_out + +def to_torch(x_in, dtype=None, device=None): + dtype = dtype or DTYPE + device = device or DEVICE + if type(x_in) is dict: + return {k: to_torch(v, dtype, device) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(device).type(dtype) + return torch.tensor(x_in, 
dtype=dtype, device=device) + + +def reset_x0(x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + +def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config): + for i in tqdm.tqdm(scheduler.timesteps): + + # create batch of timesteps to pass into model + timesteps = torch.full((config['n_samples'],), i, device=DEVICE, dtype=torch.long) + + # 3. call the sample function + for _ in range(config['n_guide_steps']): + with torch.enable_grad(): + x.requires_grad_() + y = network(x, timesteps).sample + grad = torch.autograd.grad([y.sum()], [x])[0] + if config['scale_grad_by_std']: + posterior_variance = scheduler._get_variance(i) + grad = posterior_variance * 0.5 * grad + grad[timesteps < config['t_grad_cutoff']] = 0 + x = x.detach() + x = x + config['scale'] * grad + x = reset_x0(x, conditions, action_dim) + y = network(x, timesteps).sample + prev_x = unet(x, timesteps).sample + x = scheduler.step(prev_x, i, x)["prev_sample"] + + # 3. [optional] add posterior noise to the sample + if config['eta'] > 0: + noise = torch.randn(x.shape, generator=generator).to(x.device) + posterior_variance = scheduler._get_variance(i) # * noise + # no noise when t == 0 + # NOTE: original implementation missing sqrt on posterior_variance + x = x + int(i>0) * (0.5 * posterior_variance) * config['eta'] * noise # MJ had as log var, exponentiated + + # 4. apply conditions to the trajectory + x = reset_x0(x, conditions, action_dim) + x = to_torch(x) + return x, y + def to_np(x_in): if torch.is_tensor(x_in): x_in = x_in.detach().cpu().numpy() diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index c0ae2e1ea4b9..41d2632c2202 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -6,18 +6,19 @@ import gym from diffusers import DDPMScheduler, UNet1DModel, ValueFunction, ValueFunctionScheduler from helpers import MuJoCoRenderer, show_sample +import helpers import wandb wandb.init(project="diffusers-value-guided-rl") config = dict( n_samples=4, horizon=32, - num_inference_steps=100, + num_inference_steps=200, n_guide_steps=0, scale_grad_by_std=True, scale=0.001, eta=0.0, - t_grad_cutoff=0 + t_grad_cutoff=4 ) # model = torch.load("../diffuser/test.torch") @@ -39,26 +40,6 @@ state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] -def normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = 2*(x_in - lower)/(upper-lower) - 1 - return x_out - -def de_normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = lower + (upper - lower)*(1 + x_in) /2 - return x_out - -def to_torch(x_in, dtype=None, device=None): - dtype = dtype or DTYPE - device = device or DEVICE - if type(x_in) is dict: - return {k: to_torch(v, dtype, device) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(device).type(dtype) - return torch.tensor(x_in, dtype=dtype, device=device) @@ -76,19 +57,10 @@ def to_torch(x_in, dtype=None, device=None): unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-expert-v2-unet-hor32").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) -def reset_x0(x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, 
key, act_dim:] = val.clone() - return x_in + ## add a batch dimension and repeat for multiple samples ## [ observation_dim ] --> [ n_samples x observation_dim ] obs = env.reset() -# start_idx = 340 -# obs = data['observations'][start_idx] -# qpos = data['infos/qpos'][start_idx] -# qvel = data['infos/qvel'][start_idx] - -# env.set_state(qpos, qvel) total_reward = 0 done = False T = 400 @@ -100,11 +72,11 @@ def reset_x0(x_in, cond, act_dim): obs_raw = obs # 1. Call the policy # normalize observations for forward passes - obs = normalize(obs, data, 'observations') + obs = helpers.normalize(obs, data, 'observations') obs = obs[None].repeat(config['n_samples'], axis=0) conditions = { - 0: to_torch(obs, device=DEVICE) + 0: helpers.to_torch(obs, device=DEVICE) } # 2. Call the diffusion model @@ -118,61 +90,28 @@ def reset_x0(x_in, cond, act_dim): # this model is conditioned from an initial state, so you will see this function # multiple times to change the initial state of generated data to the state # generated via env.reset() above or env.step() below - x = reset_x0(x1, conditions, action_dim) + x = helpers.reset_x0(x1, conditions, action_dim) # convert a np observation to torch for model forward pass - x = to_torch(x) - - - # run the diffusion process - # for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps): - for i in tqdm.tqdm(scheduler.timesteps): - - # create batch of timesteps to pass into model - timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long) - - # 3. call the sample function - for _ in range(config['n_guide_steps']): - with torch.enable_grad(): - x.requires_grad_() - y = network(x, timesteps).sample - grad = torch.autograd.grad([y.sum()], [x])[0] - if config['scale_grad_by_std']: - posterior_variance = scheduler._get_variance(i) - grad = posterior_variance * 0.5 * grad - grad[timesteps < config['t_grad_cutoff']] = 0 - x = x.detach() - x = x + config['scale'] * grad - x = reset_x0(x, conditions, action_dim) - y = network(x, timesteps).sample - prev_x = unet(x, timesteps).sample - x = scheduler.step(prev_x, i, x)["prev_sample"] - x = reset_x0(x, conditions, action_dim) - - - # 3. [optional] add posterior noise to the sample - if config['eta'] > 0: - noise = torch.randn(x.shape, generator=generator_cpu).to(x.device) - posterior_variance = scheduler._get_variance(i) # * noise - # no noise when t == 0 - # NOTE: original implementation missing sqrt on posterior_variance - x = x + int(i>0) * (0.5 * posterior_variance) * config['eta'] * noise # MJ had as log var, exponentiated - - # 4. 
apply conditions to the trajectory - x = reset_x0(x, conditions, action_dim) - x = to_torch(x) + x = helpers.to_torch(x) + x, y = helpers.run_diffusion(x, scheduler, generator_cpu, network, unet, conditions, action_dim, config) sorted_idx = y.argsort(0, descending=True).squeeze() y_maxes.append(y[sorted_idx[0]].detach().cpu().numpy()) sorted_values = x[sorted_idx] actions = sorted_values[:, :, :action_dim] if t % 10 == 0: trajectory = sorted_values[:, :, action_dim:][0].unsqueeze(0).detach().numpy() - trajectory = de_normalize(trajectory, data, 'observations') + trajectory = helpers.de_normalize(trajectory, data, 'observations') trajectories.append(trajectory) - actions = actions[0, 0].detach().cpu().numpy() - actions = de_normalize(actions, data, key='actions') + + actions = actions.detach().cpu().numpy() + denorm_actions = helpers.de_normalize(actions, data, key='actions') + # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] + denorm_actions = denorm_actions[0, 0] + + ## execute action in environment - next_observation, reward, terminal, _ = env.step(actions) + next_observation, reward, terminal, _ = env.step(denorm_actions) ## update return total_reward += reward From db012ebe9f065d31a3e3033502cdbcf43e64a39b Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 11 Oct 2022 20:15:08 -0700 Subject: [PATCH 032/133] add get_block(...) api --- src/diffusers/models/unet_1d.py | 60 ++++++++++++++++---------- src/diffusers/models/unet_1d_blocks.py | 47 +++++++++++++++++++- 2 files changed, 82 insertions(+), 25 deletions(-) diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index c32f2de62091..c5d6c8afbe3d 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from dataclasses import dataclass from typing import Tuple, Union @@ -19,7 +18,7 @@ import torch.nn as nn from diffusers.models.resnet import ResidualTemporalBlock -from diffusers.models.unet_1d_blocks import DownResnetBlock1D, UpResnetBlock1D +from diffusers.models.unet_1d_blocks import get_down_block, get_up_block from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin @@ -55,52 +54,67 @@ def __init__( transition_dim=14, dim=32, dim_mults=(1, 4, 8), + in_channels: int = 14, + out_channels: int = 14, + down_block_types: Tuple[str] = ["DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"], + up_block_types: Tuple[str] = ["UpResnetBlock1D", "UpResnetBlock1D"], + block_out_channels: Tuple[int] = [32, 128, 256], ): super().__init__() - self.transition_dim = transition_dim + self.transition_dim = in_channels # time self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1) self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4 * dim, act_fn="mish", out_dim=dim) - dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] - in_out = list(zip(dims[:-1], dims[1:])) - self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) - num_resolutions = len(in_out) + mid_dim = block_out_channels[-1] # down - for ind, (dim_in, dim_out) in enumerate(in_out): - is_last = ind >= (num_resolutions - 1) - - self.down_blocks.append( - DownResnetBlock1D( - in_channels=dim_in, out_channels=dim_out, temb_channels=dim, add_downsample=(not is_last) - ) + output_channel = in_channels + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block_type = down_block_types[i] + down_block = get_down_block( + down_block_type, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=dim, + add_downsample=not is_final_block, ) + self.down_blocks.append(down_block) # mid - mid_dim = dims[-1] self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim) self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim) # up - for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): - is_last = ind >= (num_resolutions - 1) - - self.up_blocks.append( - UpResnetBlock1D( - in_channels=dim_out * 2, out_channels=dim_in, temb_channels=dim, add_upsample=(not is_last) - ) + reversed_block_out_channels = list(reversed(block_out_channels)) + for i, up_block_type in enumerate(up_block_types): + input_channel = reversed_block_out_channels[i] + output_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + is_final_block = i == len(block_out_channels) - 1 + + up_block = get_up_block( + up_block_type, + in_channels=input_channel * 2, + out_channels=output_channel, + temb_channels=dim, + add_upsample=not is_final_block, ) + self.up_blocks.append(up_block) # out self.final_conv1d_1 = nn.Conv1d(dim, dim, 5, padding=2) self.final_conv1d_gn = nn.GroupNorm(8, dim) self.final_conv1d_act = nn.Mish() - self.final_conv1d_2 = nn.Conv1d(dim, transition_dim, 1) + self.final_conv1d_2 = nn.Conv1d(dim, out_channels, 1) def forward( self, diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 8df7e81b5fd2..cfdc1f762d47 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -23,7 +23,6 @@ class DownResnetBlock1D(nn.Module): def __init__( self, - *, 
 in_channels,
 out_channels=None,
 conv_shortcut=False,
@@ -77,7 +76,6 @@ def forward(self, hidden_states, temb=None):
 class UpResnetBlock1D(nn.Module):
 def __init__(
 self,
- *,
 in_channels,
 out_channels=None,
 temb_channels=32,
@@ -124,3 +122,48 @@ def forward(self, hidden_states, res_hidden_states=None, temb=None):
 hidden_states = self.upsample(hidden_states)

 return hidden_states
+
+
+class DownBlock1D(nn.Module):
+    pass
+
+
+class AttnDownBlock1D(nn.Module):
+    pass
+
+
+class DownBlock1DNoSkip(nn.Module):
+    pass
+
+
+class UpBlock1D(nn.Module):
+    pass
+
+
+class AttnUpBlock1D(nn.Module):
+    pass
+
+
+class UpBlock1DNoSkip(nn.Module):
+    pass
+
+
+def get_down_block(down_block_type, in_channels, out_channels, temb_channels, add_downsample):
+    if down_block_type == "DownResnetBlock1D":
+        return DownResnetBlock1D(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            temb_channels=temb_channels,
+            add_downsample=add_downsample,
+        )
+
+    raise ValueError(f"{down_block_type} does not exist.")
+
+
+def get_up_block(up_block_type, in_channels, out_channels, temb_channels, add_upsample):
+    if up_block_type == "UpResnetBlock1D":
+        return UpResnetBlock1D(
+            in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_upsample=add_upsample
+        )
+
+    raise ValueError(f"{up_block_type} does not exist.")

From 4db6e0b552d158e5258fc2e3d1eb01dee680ca69 Mon Sep 17 00:00:00 2001
From: Nathan Lambert
Date: Wed, 12 Oct 2022 09:59:23 -0700
Subject: [PATCH 033/133] unify args for model1d like model2d

---
 src/diffusers/models/unet_1d.py | 34 ++++++++++++++------------
 src/diffusers/models/unet_1d_blocks.py | 10 ++++++++
 2 files changed, 28 insertions(+), 16 deletions(-)

diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py
index c5d6c8afbe3d..96de7a101d84 100644
--- a/src/diffusers/models/unet_1d.py
+++ b/src/diffusers/models/unet_1d.py
@@ -40,7 +40,10 @@ class UNet1DOutput(BaseOutput):

 class UNet1DModel(ModelMixin, ConfigMixin):
 """
- A UNet for multi-dimensional temporal data. This model takes the batch over the `training_horizon`.
+ UNet1DModel is a 1D UNet model that takes in a noisy sample and a timestep and returns a sample-shaped output.
+
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
+ implements for all models (such as downloading or saving, etc.)
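+
+    For illustration, a minimal construction sketch; the argument values simply mirror the
+    defaults introduced in the signature below:
+
+        model = UNet1DModel(
+            in_channels=14,
+            out_channels=14,
+            down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
+            up_block_types=("UpResnetBlock1D", "UpResnetBlock1D"),
+            block_out_channels=(32, 128, 256),
+        )
+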
Parameters: transition_dim: state-dimension of samples to predict over @@ -51,22 +54,21 @@ class UNet1DModel(ModelMixin, ConfigMixin): @register_to_config def __init__( self, - transition_dim=14, - dim=32, - dim_mults=(1, 4, 8), in_channels: int = 14, out_channels: int = 14, - down_block_types: Tuple[str] = ["DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"], - up_block_types: Tuple[str] = ["UpResnetBlock1D", "UpResnetBlock1D"], - block_out_channels: Tuple[int] = [32, 128, 256], + down_block_types: Tuple[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), + up_block_types: Tuple[str] = ("UpResnetBlock1D", "UpResnetBlock1D"), + block_out_channels: Tuple[int] = (32, 128, 256), + act_fn: str = "mish", ): super().__init__() self.transition_dim = in_channels + time_embed_dim = block_out_channels[0] * 4 # time - self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1) - self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4 * dim, act_fn="mish", out_dim=dim) + self.time_proj = Timesteps(num_channels=block_out_channels[0], flip_sin_to_cos=False, downscale_freq_shift=1) + self.time_mlp = TimestepEmbedding(channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0]) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) @@ -84,14 +86,14 @@ def __init__( down_block_type, in_channels=input_channel, out_channels=output_channel, - temb_channels=dim, + temb_channels=block_out_channels[0], add_downsample=not is_final_block, ) self.down_blocks.append(down_block) # mid - self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim) - self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim) + self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=block_out_channels[0]) + self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=block_out_channels[0]) # up reversed_block_out_channels = list(reversed(block_out_channels)) @@ -105,16 +107,16 @@ def __init__( up_block_type, in_channels=input_channel * 2, out_channels=output_channel, - temb_channels=dim, + temb_channels=block_out_channels[0], add_upsample=not is_final_block, ) self.up_blocks.append(up_block) # out - self.final_conv1d_1 = nn.Conv1d(dim, dim, 5, padding=2) - self.final_conv1d_gn = nn.GroupNorm(8, dim) + self.final_conv1d_1 = nn.Conv1d(block_out_channels[0], block_out_channels[0], 5, padding=2) + self.final_conv1d_gn = nn.GroupNorm(8, block_out_channels[0]) self.final_conv1d_act = nn.Mish() - self.final_conv1d_2 = nn.Conv1d(dim, out_channels, 1) + self.final_conv1d_2 = nn.Conv1d(block_out_channels[0], out_channels, 1) def forward( self, diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index cfdc1f762d47..c65ce221f3dc 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -55,6 +55,8 @@ def __init__( self.nonlinearity = nn.Mish() elif non_linearity == "silu": self.nonlinearity = nn.SiLU() + else: + self.nonlinearity = None self.downsample = None if add_downsample: @@ -67,6 +69,9 @@ def forward(self, hidden_states, temb=None): hidden_states = self.resnet2(hidden_states, temb) output_states += (hidden_states,) + if self.nonlinearity is not None: + hidden_states = self.nonlinearity(hidden_states) + if self.downsample is not None: hidden_states = self.downsample(hidden_states) @@ -106,6 +111,8 @@ def __init__( self.nonlinearity = nn.Mish() elif non_linearity == "silu": 
 self.nonlinearity = nn.SiLU()
+ else:
+ self.nonlinearity = None

 self.upsample = None
 if add_upsample:
@@ -118,6 +125,9 @@ def forward(self, hidden_states, res_hidden_states=None, temb=None):
 hidden_states = self.resnet1(hidden_states, temb)
 hidden_states = self.resnet2(hidden_states, temb)

+ if self.nonlinearity is not None:
+ hidden_states = self.nonlinearity(hidden_states)
+
 if self.upsample is not None:
 hidden_states = self.upsample(hidden_states)

From 634a526ff7e230194cfff7c74e25d745dcc2509f Mon Sep 17 00:00:00 2001
From: Nathan Lambert
Date: Wed, 12 Oct 2022 10:16:34 -0700
Subject: [PATCH 034/133] minor cleaning

---
 src/diffusers/models/resnet.py | 2 +-
 src/diffusers/models/unet_1d.py | 27 ++++++++++++++----------
 src/diffusers/models/unet_1d_blocks.py | 10 +++++-----
 3 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py
index 363fed372cbc..4b1be8c13c51 100644
--- a/src/diffusers/models/resnet.py
+++ b/src/diffusers/models/resnet.py
@@ -501,7 +501,7 @@ def forward(self, x):

 # unet_rl.py
-class ResidualTemporalBlock(nn.Module):
+class ResidualTemporalBlock1D(nn.Module):
 def __init__(self, inp_channels, out_channels, embed_dim, kernel_size=5):
 super().__init__()

diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py
index 96de7a101d84..c8b14ad8fc95 100644
--- a/src/diffusers/models/unet_1d.py
+++ b/src/diffusers/models/unet_1d.py
@@ -17,7 +17,7 @@
 import torch
 import torch.nn as nn

-from diffusers.models.resnet import ResidualTemporalBlock
+from diffusers.models.resnet import ResidualTemporalBlock1D
 from diffusers.models.unet_1d_blocks import get_down_block, get_up_block

 from ..configuration_utils import ConfigMixin, register_to_config
 from ..modeling_utils import ModelMixin
@@ -46,9 +46,13 @@ class UNet1DModel(ModelMixin, ConfigMixin):
 implements for all models (such as downloading or saving, etc.)

 Parameters:
- transition_dim: state-dimension of samples to predict over
- dim: embedding dimension of model
- dim_mults: dimension multiples of the up/down blocks
+ in_channels: number of channels in the input sample
+ out_channels: number of channels in the output sample
+ down_block_types: tuple of down-block class names to use
+ up_block_types: tuple of up-block class names to use
+ block_out_channels: tuple of output channels for each block
+ act_fn: activation function used in the time embedding
+ norm_num_groups: number of groups for the output GroupNorm
 """

 @register_to_config
 def __init__(
@@ -60,15 +64,17 @@ def __init__(
 up_block_types: Tuple[str] = ("UpResnetBlock1D", "UpResnetBlock1D"),
 block_out_channels: Tuple[int] = (32, 128, 256),
 act_fn: str = "mish",
+ norm_num_groups: int = 8,
 ):
 super().__init__()
- self.transition_dim = in_channels

 time_embed_dim = block_out_channels[0] * 4

 # time
 self.time_proj = Timesteps(num_channels=block_out_channels[0], flip_sin_to_cos=False, downscale_freq_shift=1)
- self.time_mlp = TimestepEmbedding(channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0])
+ self.time_mlp = TimestepEmbedding(
+ channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0]
+ )

 self.down_blocks = nn.ModuleList([])
 self.up_blocks = nn.ModuleList([])
@@ -92,8 +98,8 @@ def __init__(
 self.down_blocks.append(down_block)

 # mid
- self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=block_out_channels[0])
- self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=block_out_channels[0])
+ self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim, embed_dim=block_out_channels[0])
+ self.mid_block2 = ResidualTemporalBlock1D(mid_dim, mid_dim, embed_dim=block_out_channels[0])

 # up
 reversed_block_out_channels = list(reversed(block_out_channels))
@@ -105,7 +111,7 @@ def __init__(

 up_block = get_up_block(
 up_block_type,
- in_channels=input_channel * 2,
+ in_channels=input_channel,
 out_channels=output_channel,
 temb_channels=block_out_channels[0],
 add_upsample=not is_final_block,
@@ -113,8 +119,9 @@ def __init__(
 self.up_blocks.append(up_block)

 # out
+ num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
 self.final_conv1d_1 = nn.Conv1d(block_out_channels[0], block_out_channels[0], 5, padding=2)
- self.final_conv1d_gn = nn.GroupNorm(8, block_out_channels[0])
+ self.final_conv1d_gn = nn.GroupNorm(num_groups_out, block_out_channels[0])
 self.final_conv1d_act = nn.Mish()
 self.final_conv1d_2 = nn.Conv1d(block_out_channels[0], out_channels, 1)

 def forward(
 self,
diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py
index c65ce221f3dc..8ed258ee4f2a 100644
--- a/src/diffusers/models/unet_1d_blocks.py
+++ b/src/diffusers/models/unet_1d_blocks.py
@@ -17,7 +17,7 @@
 import torch.nn.functional as F
 from torch import nn

-from .resnet import Downsample1D, ResidualTemporalBlock, Upsample1D
+from .resnet import Downsample1D, ResidualTemporalBlock1D, Upsample1D


 class DownResnetBlock1D(nn.Module):
@@ -46,8 +46,8 @@ def __init__(
 if groups_out is None:
 groups_out = groups

- self.resnet1 = ResidualTemporalBlock(in_channels, out_channels, embed_dim=temb_channels)
- self.resnet2 = ResidualTemporalBlock(out_channels, out_channels, embed_dim=temb_channels)
+ self.resnet1 = ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=temb_channels)
+ self.resnet2 = ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)

 if non_linearity == "swish":
 self.nonlinearity = lambda x: F.silu(x)
@@ -102,8 +102,8 @@ def __init__(
 if groups_out is None:
 groups_out = groups

- self.resnet1 = ResidualTemporalBlock(in_channels, out_channels, embed_dim=temb_channels)
- self.resnet2 = ResidualTemporalBlock(out_channels, out_channels, embed_dim=temb_channels)
+ self.resnet1 = ResidualTemporalBlock1D(2 * in_channels, out_channels, embed_dim=temb_channels)
+ self.resnet2 = ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)

 if non_linearity == "swish":
 self.nonlinearity = lambda x: F.silu(x)

From aebf547329546aa0ab72f2b96f208ffceef5ec58 Mon Sep 17 00:00:00 2001
From: Nathan Lambert
Date: Wed, 12 Oct 2022 10:17:52 -0700
Subject: [PATCH 035/133] fix docs

---
 docs/source/api/models.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/api/models.mdx b/docs/source/api/models.mdx
index b944a1d13089..b8fa056dc5be 100644
--- a/docs/source/api/models.mdx
+++ b/docs/source/api/models.mdx
@@ -38,7 +38,7 @@ The models are built on the base class ['ModelMixin'] that is a `torch.nn.module
 [[autodoc]] UNet1DModel

 ## UNet1DOutput
-[[autodoc]] UNet1DOutput
+[[autodoc]] models.unet_1d.UNet1DOutput

 ## VQEncoderOutput
 [[autodoc]] models.vae.VQEncoderOutput

From 305ecd891be91329fe2ee984c947702d8f7a3d18 Mon Sep 17 00:00:00 2001
From: Nathan Lambert
Date: Wed, 12 Oct 2022 10:33:21 -0700
Subject: [PATCH 036/133] improve 1d resnet blocks

---
 src/diffusers/models/unet_1d.py | 3 ++
 src/diffusers/models/unet_1d_blocks.py | 42 +++++++++++++++++++-------
 2 files changed, 34 insertions(+), 11 deletions(-)

diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py
index c8b14ad8fc95..9fa1b145a7d5 100644
--- a/src/diffusers/models/unet_1d.py
+++ b/src/diffusers/models/unet_1d.py
@@ -65,6 +65,7 @@ def __init__(
 block_out_channels: Tuple[int] = (32, 128, 256),
 act_fn: str = "mish",
 norm_num_groups: int = 8,
+ layers_per_block: int = 1,
 ):
 super().__init__()

@@ -90,6 +91,7 @@ def __init__(
 down_block_type = down_block_types[i]
 down_block = get_down_block(
 down_block_type,
+ num_layers=layers_per_block,
 in_channels=input_channel,
 out_channels=output_channel,
 temb_channels=block_out_channels[0],
@@ -111,6 +113,7 @@ def __init__(

 up_block = get_up_block(
 up_block_type,
+ num_layers=layers_per_block,
 in_channels=input_channel,
 out_channels=output_channel,
 temb_channels=block_out_channels[0],
diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py
index 8ed258ee4f2a..40e25fb43afb 100644
--- a/src/diffusers/models/unet_1d_blocks.py
+++ b/src/diffusers/models/unet_1d_blocks.py
@@ -25,6 +25,7 @@ def __init__(
 self,
 in_channels,
 out_channels=None,
+ num_layers=1,
 conv_shortcut=False,
 temb_channels=32,
 groups=32,
@@ -46,8 +47,13 @@ def __init__(
 if groups_out is None:
 groups_out = groups

- self.resnet1 = ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=temb_channels)
- self.resnet2 = ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)
+ # there will always be at least one resnet
+ resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=temb_channels)]
+
+ for _ in range(num_layers):
+ resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels))
+
+ self.resnets = nn.ModuleList(resnets)

 if non_linearity == "swish":
 self.nonlinearity = lambda x: F.silu(x)
@@ -65,8 +71,10 @@ def __init__(
 def forward(self, hidden_states, temb=None):
 output_states = ()

- hidden_states = self.resnet1(hidden_states, temb)
- hidden_states = self.resnet2(hidden_states, temb)
+ hidden_states = self.resnets[0](hidden_states, temb)
+ for resnet in self.resnets[1:]:
+ hidden_states = resnet(hidden_states, temb)
+
 output_states += (hidden_states,)

 if self.nonlinearity is not None:
@@ -83,6 +91,7 @@ def __init__(
 self,
 in_channels,
 out_channels=None,
+ num_layers=1,
 temb_channels=32,
 groups=32,
 groups_out=None,
@@ -102,8 +111,13 @@ def __init__(
 if groups_out is None:
 groups_out = groups

- self.resnet1 = ResidualTemporalBlock1D(2 * in_channels, out_channels, embed_dim=temb_channels)
- self.resnet2 = ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)
+ # there will always be at least one resnet
+ resnets = [ResidualTemporalBlock1D(2 * in_channels, out_channels, embed_dim=temb_channels)]
+
+ for _ in range(num_layers):
+ resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels))
+
+ self.resnets = nn.ModuleList(resnets)

 if non_linearity == "swish":
 self.nonlinearity = lambda x: F.silu(x)
@@ -122,8 +136,9 @@ def forward(self, hidden_states, res_hidden_states=None, temb=None):
 if res_hidden_states is not None:
 hidden_states = torch.cat((hidden_states, res_hidden_states), dim=1)

- hidden_states = self.resnet1(hidden_states, temb)
- hidden_states = self.resnet2(hidden_states, temb)
+ hidden_states = self.resnets[0](hidden_states, temb)
+ for resnet in self.resnets[1:]:
+ hidden_states = resnet(hidden_states, temb)

 if self.nonlinearity is not None:
 hidden_states = self.nonlinearity(hidden_states)
@@ -158,10 +173,11 @@ class UpBlock1DNoSkip(nn.Module):
 pass


-def get_down_block(down_block_type, in_channels, out_channels, temb_channels, add_downsample):
+def get_down_block(down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample):
 if down_block_type == "DownResnetBlock1D":
 return DownResnetBlock1D(
 in_channels=in_channels,
+ num_layers=num_layers,
 out_channels=out_channels,
 temb_channels=temb_channels,
 add_downsample=add_downsample,
@@ -170,10 +186,14 @@ def get_down_block(down_block_type, in_channels, out_channels, temb_channels, ad
 raise ValueError(f"{down_block_type} does not exist.")


-def get_up_block(up_block_type, in_channels, out_channels, temb_channels, add_upsample):
+def get_up_block(up_block_type, num_layers, in_channels, out_channels, temb_channels, add_upsample):
 if up_block_type == "UpResnetBlock1D":
 return UpResnetBlock1D(
- in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_upsample=add_upsample
+ in_channels=in_channels,
+ num_layers=num_layers,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ add_upsample=add_upsample,
 )

 raise ValueError(f"{up_block_type} does not exist.")

From 95d3a1c267fff39e7202c2b0fc1fd46ea404c3a1 Mon Sep 17 00:00:00 2001
From: Nathan Lambert
Date: Wed, 12 Oct 2022 10:59:47 -0700
Subject: [PATCH 037/133] fix tests, remove permuts

---
 src/diffusers/models/unet_1d.py | 2 --
 tests/test_models_unet.py | 22 ++++++++++------------
 2 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py
index 9fa1b145a7d5..3f58a682d449 100644
--- a/src/diffusers/models/unet_1d.py
+++ b/src/diffusers/models/unet_1d.py
@@ -145,7 +145,6 @@ def forward(
 [`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True,
 otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
 """
- sample = sample.permute(0, 2, 1)

 # 1.
time timesteps = timestep @@ -179,7 +178,6 @@ def forward( sample = self.final_conv1d_act(sample) sample = self.final_conv1d_2(sample) - sample = sample.permute(0, 2, 1) if not return_dict: return (sample,) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index d822153cd44c..6683ff97099e 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -15,6 +15,7 @@ import gc import math +import pdb import tracemalloc import unittest @@ -459,18 +460,18 @@ def dummy_input(self): num_features = 14 seq_len = 16 - noise = floats_tensor((batch_size, seq_len, num_features)).to(torch_device) + noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) time_step = torch.tensor([10] * batch_size).to(torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): - return (4, 16, 14) + return (4, 14, 16) @property def output_shape(self): - return (4, 16, 14) + return (4, 14, 16) def test_ema_training(self): pass @@ -480,9 +481,9 @@ def test_training(self): def prepare_init_args_and_inputs_for_common(self): init_dict = { - "dim": 32, - "dim_mults": [1, 4, 8], - "transition_dim": 14, + "block_out_channels": (32, 128, 256), + "in_channels": 14, + "out_channels": 14, } inputs_dict = self.dummy_input return init_dict, inputs_dict @@ -501,25 +502,22 @@ def test_from_pretrained_hub(self): def test_output_pretrained(self): model = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") - model.eval() - torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) - num_features = model.transition_dim + num_features = model.in_channels seq_len = 16 - noise = torch.randn((1, seq_len, num_features)) + noise = torch.randn((1, seq_len, num_features)).permute(0, 2, 1) # match original, we can update values and remove time_step = torch.full((num_features,), 0) with torch.no_grad(): - output = model(noise, time_step).sample + output = model(noise, time_step).sample.permute(0, 2, 1) output_slice = output[0, -3:, -3:].flatten() # fmt: off expected_output_slice = torch.tensor([-0.2714, 0.1042, -0.0794, -0.2820, 0.0803, -0.0811, -0.2345, 0.0580, -0.0584]) # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) def test_forward_with_norm_groups(self): From 6cbb73b57e6b679d0de8ae47e80d50874ad5fb14 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Wed, 12 Oct 2022 11:00:38 -0700 Subject: [PATCH 038/133] fix style --- src/diffusers/models/unet_1d.py | 1 - tests/test_models_unet.py | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index 3f58a682d449..2e20cacb64f1 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -178,7 +178,6 @@ def forward( sample = self.final_conv1d_act(sample) sample = self.final_conv1d_2(sample) - if not return_dict: return (sample,) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 6683ff97099e..e1dbdfaa4611 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -15,7 +15,6 @@ import gc import math -import pdb import tracemalloc import unittest @@ -508,7 +507,9 @@ def test_output_pretrained(self): num_features = model.in_channels seq_len = 16 - noise = torch.randn((1, seq_len, num_features)).permute(0, 2, 1) # match original, we can update values and remove + noise = torch.randn((1, seq_len, num_features)).permute( + 0, 2, 1 + ) # match original, we can update values and remove time_step = 
torch.full((num_features,), 0) with torch.no_grad(): From a6871b1bfd82f000ac9dd369b5ba1b185a5b7021 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 12 Oct 2022 16:47:19 -0400 Subject: [PATCH 039/133] run on modal --- convert_model.py | 6 +- examples/diffuser/helpers.py | 14 +- .../diffuser/run_diffuser_value_guided.py | 261 ++++++++++-------- 3 files changed, 160 insertions(+), 121 deletions(-) diff --git a/convert_model.py b/convert_model.py index b44b4d390725..4323854faadd 100644 --- a/convert_model.py +++ b/convert_model.py @@ -7,16 +7,16 @@ os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) def unet(): - model = torch.load("/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-hor32.torch") + model = torch.load("/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-hor128.torch") state_dict = model.state_dict() - hf_value_function = UNet1DModel(dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14) + hf_value_function = UNet1DModel(dim=32, dim_mults=(1, 4, 8), transition_dim=14) mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) for k, v in mapping.items(): state_dict[v] = state_dict.pop(k) hf_value_function.load_state_dict(state_dict) torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/unet/diffusion_pytorch_model.bin") - config = dict(dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14) + config = dict(dim=32, dim_mults=(1, 4, 8), transition_dim=14) with open("hub/hopper-medium-v2/unet/config.json", "w") as f: json.dump(config, f) diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py index 7d103ee7eba8..d8dc50e77388 100644 --- a/examples/diffuser/helpers.py +++ b/examples/diffuser/helpers.py @@ -6,7 +6,6 @@ import warnings import tqdm -DEVICE = 'cpu' DTYPE = torch.float def normalize(x_in, data, key): @@ -21,9 +20,9 @@ def de_normalize(x_in, data, key): x_out = lower + (upper - lower)*(1 + x_in) /2 return x_out -def to_torch(x_in, dtype=None, device=None): +def to_torch(x_in, dtype=None, device='cuda'): dtype = dtype or DTYPE - device = device or DEVICE + device = device if type(x_in) is dict: return {k: to_torch(v, dtype, device) for k, v in x_in.items()} elif torch.is_tensor(x_in): @@ -37,11 +36,11 @@ def reset_x0(x_in, cond, act_dim): return x_in def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config): + y = None for i in tqdm.tqdm(scheduler.timesteps): # create batch of timesteps to pass into model - timesteps = torch.full((config['n_samples'],), i, device=DEVICE, dtype=torch.long) - + timesteps = torch.full((config['n_samples'],), i, device=config['device'], dtype=torch.long) # 3. call the sample function for _ in range(config['n_guide_steps']): with torch.enable_grad(): @@ -55,8 +54,8 @@ def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim x = x.detach() x = x + config['scale'] * grad x = reset_x0(x, conditions, action_dim) - y = network(x, timesteps).sample - prev_x = unet(x, timesteps).sample + with torch.no_grad(): + prev_x = unet(x, timesteps).sample x = scheduler.step(prev_x, i, x)["prev_sample"] # 3. [optional] add posterior noise to the sample @@ -70,6 +69,7 @@ def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim # 4. 
apply conditions to the trajectory
 x = reset_x0(x, conditions, action_dim)
 x = to_torch(x)
+ # y = network(x, timesteps).sample
 return x, y

 def to_np(x_in):
diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py
index 41d2632c2202..535b647a64c9 100644
--- a/examples/diffuser/run_diffuser_value_guided.py
+++ b/examples/diffuser/run_diffuser_value_guided.py
@@ -8,123 +8,162 @@
 from helpers import MuJoCoRenderer, show_sample
 import helpers
 import wandb
-wandb.init(project="diffusers-value-guided-rl")
+import modal
+import os
+
+stub = modal.Stub("diffusers-value-guided")
+image = modal.Image.debian_slim().apt_install([
+    "libgl1-mesa-dev",
+    "libgl1-mesa-glx",
+    "libglew-dev",
+    "libosmesa6-dev",
+    "software-properties-common",
+    "patchelf",
+    "git",
+    "ffmpeg",
+]).pip_install([
+    "torch",
+    "datasets",
+    "transformers",
+    "free-mujoco-py",
+    "einops",
+    "gym",
+    "protobuf==3.20.1",
+    "git+https://github.com/rail-berkeley/d4rl.git",
+    "wandb",
+    "mediapy",
+    "Pillow==9.0.0",
+    "moviepy",
+    "imageio"
+    ])

 config = dict(
-    n_samples=4,
-    horizon=32,
+    n_samples=64,
+    horizon=128,
     num_inference_steps=200,
     n_guide_steps=0,
     scale_grad_by_std=True,
     scale=0.001,
     eta=0.0,
-    t_grad_cutoff=4
+    t_grad_cutoff=4,
+    device='cuda'
 )

-# model = torch.load("../diffuser/test.torch")
-# hf_value_function = ValueFunction(training_horizon=32, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11)
-# hf_value_function.load_state_dict(model.state_dict())
-# hf_value_function.to_hub("bglick13/hf_value_function")
-
-env_name = "hopper-medium-expert-v2"
-env = gym.make(env_name)
-data = env.get_dataset()  # dataset is only used for normalization in this colab
-render = MuJoCoRenderer(env)
-
-# Cuda settings for colab
-# torch.cuda.get_device_name(0)
-DEVICE = 'cpu'
-DTYPE = torch.float
-
-# diffusion model settings
-state_dim = env.observation_space.shape[0]
-action_dim = env.action_space.shape[0]
-
-
-
-
-# Two generators for different parts of the diffusion loop to work in colab
-# generator = torch.Generator(device='cuda')
-generator_cpu = torch.Generator(device='cpu')
-
-scheduler = ValueFunctionScheduler(num_train_timesteps=config['num_inference_steps'],beta_schedule="squaredcos_cap_v2", clip_sample=False)
-
-# 3 different pretrained models are available for this task.
-# The horizon represents the length of trajectories used in training.
-# network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11)
-
-network = ValueFunction.from_pretrained("bglick13/hopper-medium-expert-v2-value-function-hor32").to(device=DEVICE)
-unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-expert-v2-unet-hor32").to(device=DEVICE)
-# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE)
-# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE)
-
-## add a batch dimension and repeat for multiple samples
-## [ observation_dim ] --> [ n_samples x observation_dim ]
-obs = env.reset()
-total_reward = 0
-done = False
-T = 400
-rollout = [obs.copy()]
-trajectories = []
-y_maxes = []
-try:
-    for t in tqdm.tqdm(range(T)):
-        obs_raw = obs
-        # 1. Call the policy
-        # normalize observations for forward passes
-        obs = helpers.normalize(obs, data, 'observations')
-
-        obs = obs[None].repeat(config['n_samples'], axis=0)
-        conditions = {
-            0: helpers.to_torch(obs, device=DEVICE)
-        }
-
-        # 2. Call the diffusion model
-        # constants for inference
-        batch_size = len(conditions[0])
-        shape = (batch_size, config['horizon'], state_dim+action_dim)
-
-        # sample random initial noise vector
-        x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu)
-
-        # this model is conditioned from an initial state, so you will see this function
-        # multiple times to change the initial state of generated data to the state
-        # generated via env.reset() above or env.step() below
-        x = helpers.reset_x0(x1, conditions, action_dim)
-
-        # convert a np observation to torch for model forward pass
-        x = helpers.to_torch(x)
-        x, y = helpers.run_diffusion(x, scheduler, generator_cpu, network, unet, conditions, action_dim, config)
-        sorted_idx = y.argsort(0, descending=True).squeeze()
-        y_maxes.append(y[sorted_idx[0]].detach().cpu().numpy())
-        sorted_values = x[sorted_idx]
-        actions = sorted_values[:, :, :action_dim]
-        if t % 10 == 0:
-            trajectory = sorted_values[:, :, action_dim:][0].unsqueeze(0).detach().numpy()
-            trajectory = helpers.de_normalize(trajectory, data, 'observations')
-            trajectories.append(trajectory)
-
-        actions = actions.detach().cpu().numpy()
-        denorm_actions = helpers.de_normalize(actions, data, key='actions')
-        # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0]
-        denorm_actions = denorm_actions[0, 0]
-
-
-        ## execute action in environment
-        next_observation, reward, terminal, _ = env.step(denorm_actions)
-
-        ## update return
-        total_reward += reward
-        wandb.log({"total_reward": total_reward, "reward": reward, "y_max": y_maxes[-1], "diff_from_expert_reward": reward - data['rewards'][t]})
-        print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}")
-        # save observations for rendering
-        rollout.append(next_observation.copy())
-
-        obs = next_observation
-except KeyboardInterrupt:
-    pass
-
-print(f"Total reward: {total_reward}")
-
-images = show_sample(render, np.expand_dims(np.stack(rollout),axis=0))
-wandb.log({"rollout": wandb.Video('videos/sample.mp4', fps=60, format='mp4')})
\ No newline at end of file
+@stub.function(
+    image=image,
+    secret=modal.Secret.from_name("wandb-api-key"),
+    mounts=modal.create_package_mounts(["diffusers"]),
+    gpu=True
+)
+def run():
+    wandb.login(key=os.environ["WANDB_API_KEY"])
+    wandb.init(project="diffusers-value-guided-rl")
+
+    env_name = "hopper-medium-expert-v2"
+    env = gym.make(env_name)
+    data = env.get_dataset()  # dataset is only used for normalization in this colab
+    render = MuJoCoRenderer(env)
+
+    # Cuda settings for colab
+    # torch.cuda.get_device_name(0)
+    DEVICE = config['device']
+    DTYPE = torch.float
+
+    # diffusion model settings
+    state_dim = env.observation_space.shape[0]
+    action_dim = env.action_space.shape[0]
+
+    # Two generators for different parts of the diffusion loop to work in colab
+    # generator = torch.Generator(device='cuda')
+    generator = torch.Generator(device=DEVICE)
+
+    scheduler = ValueFunctionScheduler(num_train_timesteps=config['num_inference_steps'],beta_schedule="squaredcos_cap_v2", clip_sample=False)
+
+    # 3 different pretrained models are available for this task.
+    # The horizon represents the length of trajectories used in training.
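+    # As a rough pairing (this mirrors the conversion script used elsewhere in this
+    # series, and is illustrative only): a horizon-128 UNet corresponds to
+    # block_out_channels=(32, 128, 256) with three down blocks, while a horizon-32
+    # UNet corresponds to block_out_channels=(32, 64, 128, 256) with four, e.g.
+    # unet = UNet1DModel(in_channels=14, out_channels=14, block_out_channels=(32, 128, 256))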
+ # network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) + + network = ValueFunction.from_pretrained("bglick13/hopper-medium-expert-v2-value-function-hor32").to(device=DEVICE).eval() + unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-expert-v2-unet-hor128").to(device=DEVICE).eval() + # unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) + # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) + + ## add a batch dimension and repeat for multiple samples + ## [ observation_dim ] --> [ n_samples x observation_dim ] + obs = env.reset() + total_reward = 0 + done = False + T = 200 + rollout = [obs.copy()] + trajectories = [] + y_maxes = [0] + try: + for t in tqdm.tqdm(range(T)): + obs_raw = obs + # 1. Call the policy + # normalize observations for forward passes + obs = helpers.normalize(obs, data, 'observations') + + obs = obs[None].repeat(config['n_samples'], axis=0) + conditions = { + 0: helpers.to_torch(obs, device=DEVICE) + } + + # 2. Call the diffusion model + # constants for inference + batch_size = len(conditions[0]) + shape = (batch_size, config['horizon'], state_dim+action_dim) + + # sample random initial noise vector + x1 = torch.randn(shape, device=DEVICE, generator=generator) + + # this model is conditioned from an initial state, so you will see this function + # multiple times to change the initial state of generated data to the state + # generated via env.reset() above or env.step() below + x = helpers.reset_x0(x1, conditions, action_dim) + + # convert a np observation to torch for model forward pass + x = helpers.to_torch(x, device=DEVICE) + x, y = helpers.run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config) + if y: + sorted_idx = y.argsort(0, descending=True).squeeze() + y_maxes.append(y[sorted_idx[0]].detach().cpu().numpy()) + sorted_values = x[sorted_idx] + else: + sorted_values = x + actions = sorted_values[:, :, :action_dim] + if t % 10 == 0: + trajectory = sorted_values[:, :, action_dim:][0].unsqueeze(0).detach().cpu().numpy() + trajectory = helpers.de_normalize(trajectory, data, 'observations') + trajectories.append(trajectory) + + actions = actions.detach().cpu().numpy() + denorm_actions = helpers.de_normalize(actions, data, key='actions') + denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] + # denorm_actions = denorm_actions[0, 0] + + + ## execute action in environment + next_observation, reward, terminal, _ = env.step(denorm_actions) + + ## update return + total_reward += reward + wandb.log({"total_reward": total_reward, "reward": reward, "y_max": y_maxes[-1], "diff_from_expert_reward": reward - data['rewards'][t]}) + print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") + # save observations for rendering + rollout.append(next_observation.copy()) + + obs = next_observation + except KeyboardInterrupt: + pass + + print(f"Total reward: {total_reward}") + + images = show_sample(render, np.expand_dims(np.stack(rollout),axis=0)) + wandb.log({"rollout": wandb.Video("videos/sample.mp4", fps=60, format='mp4')}) + + +if __name__ == "__main__": + # run() + with stub.run(): + run() From 38616cf7a6e971ae8def8a5449d97bc096a98af6 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 12 Oct 2022 16:57:41 -0400 Subject: [PATCH 040/133] merge and code cleanup --- convert_model.py | 18 +- examples/diffuser/helpers.py | 2 +- .../diffuser/run_diffuser_value_guided.py 
| 2 +-
 src/diffusers/models/__init__.py | 1 +
 src/diffusers/models/unet_rl.py | 136 +-------
 .../schedulers/scheduling_value_function.py | 298 ------------------
 6 files changed, 22 insertions(+), 435 deletions(-)
 delete mode 100644 src/diffusers/schedulers/scheduling_value_function.py

diff --git a/convert_model.py b/convert_model.py
index 4323854faadd..cfb5db24570c 100644
--- a/convert_model.py
+++ b/convert_model.py
@@ -3,11 +3,13 @@
 from diffusers import DDPMScheduler, UNet1DModel, ValueFunction, ValueFunctionScheduler
 import os
 import json
-os.makedirs("hub/hopper-medium-v2/unet", exist_ok=True)
+os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
+os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
+
 os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)

-def unet():
-    model = torch.load("/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-hor128.torch")
+def unet(hor):
+    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-hor{hor}.torch")
     state_dict = model.state_dict()
     hf_value_function = UNet1DModel(dim=32, dim_mults=(1, 4, 8), transition_dim=14)
     mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
@@ -15,9 +17,13 @@ def unet():
     state_dict[v] = state_dict.pop(k)

     hf_value_function.load_state_dict(state_dict)
-    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/unet/diffusion_pytorch_model.bin")
-    config = dict(dim=32, dim_mults=(1, 4, 8), transition_dim=14)
-    with open("hub/hopper-medium-v2/unet/config.json", "w") as f:
+    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
+    if hor == 128:
+        dim_mults = (1, 4, 8)
+    elif hor == 32:
+        dim_mults = (1, 2, 4, 8)
+    config = dict(dim=32, dim_mults=dim_mults, transition_dim=14)
+    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
     json.dump(config, f)

 def value_function():
diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py
index d8dc50e77388..6866c415f4e4 100644
--- a/examples/diffuser/helpers.py
+++ b/examples/diffuser/helpers.py
@@ -56,7 +56,7 @@ def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim
 x = reset_x0(x, conditions, action_dim)
 with torch.no_grad():
 prev_x = unet(x, timesteps).sample
- x = scheduler.step(prev_x, i, x)["prev_sample"]
+ x = scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

 # 3. [optional] add posterior noise to the sample
 if config['eta'] > 0:
diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py
index 535b647a64c9..6c29acb62b7c 100644
--- a/examples/diffuser/run_diffuser_value_guided.py
+++ b/examples/diffuser/run_diffuser_value_guided.py
@@ -77,7 +77,7 @@ def run():
 # generator = torch.Generator(device='cuda')
 generator = torch.Generator(device=DEVICE)

- scheduler = ValueFunctionScheduler(num_train_timesteps=config['num_inference_steps'],beta_schedule="squaredcos_cap_v2", clip_sample=False)
+ scheduler = DDPMScheduler(num_train_timesteps=config['num_inference_steps'],beta_schedule="squaredcos_cap_v2", clip_sample=False, )

 # 3 different pretrained models are available for this task.
 # The horizon represents the length of trajectories used in training.
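A hedged sketch of the guided denoising step this change targets, simplified from
helpers.run_diffusion above: `unet`, `scheduler`, `x`, `i` and `timesteps` are the objects
already used in that function, and the wrapper name `denoise_step` is hypothetical.
predict_epsilon=False matters here because the 1D UNet predicts the denoised sample
directly rather than the noise.

    import torch

    def denoise_step(unet, scheduler, x, i, timesteps):
        # predict the clean sample directly; the scheduler then computes x_{t-1}
        with torch.no_grad():
            prev_x = unet(x, timesteps).sample
        return scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]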
diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index c5d53b2feb4b..a1a5722ef5f8 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -20,6 +20,7 @@ from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel from .vae import AutoencoderKL, VQModel + from .unet_rl import UNetRLModel if is_flax_available(): from .unet_2d_condition_flax import FlaxUNet2DConditionModel diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 129d18f330a8..fe33259cda40 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -5,8 +5,8 @@ import torch import torch.nn as nn -from diffusers.models.resnet import ResidualTemporalBlock -from diffusers.models.unet_blocks import DownResnetBlock1D, UpResnetBlock1D, Downsample1D +from diffusers.models.resnet import ResidualTemporalBlock1D +from diffusers.models.unet_1d_blocks import DownResnetBlock1D, UpResnetBlock1D, Downsample1D from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin @@ -16,7 +16,7 @@ @dataclass -class UNet1DOutput(BaseOutput): +class ValueFunctionOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch, horizon, obs_dimension)`): @@ -26,128 +26,6 @@ class UNet1DOutput(BaseOutput): sample: torch.FloatTensor -class UNet1DModel(ModelMixin, ConfigMixin): - """ - A UNet for multi-dimensional temporal data. This model takes the batch over the `training_horizon`. - - Parameters: - transition_dim: state-dimension of samples to predict over - dim: embedding dimension of model - dim_mults: dimension multiples of the up/down blocks - """ - - @register_to_config - def __init__( - self, - transition_dim=14, - dim=32, - dim_mults=(1, 4, 8), - ): - super().__init__() - - self.transition_dim = transition_dim - - # time - self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1) - self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4 * dim, act_fn="mish", out_dim=dim) - - dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] - in_out = list(zip(dims[:-1], dims[1:])) - - self.down_blocks = nn.ModuleList([]) - self.up_blocks = nn.ModuleList([]) - num_resolutions = len(in_out) - - # down - for ind, (dim_in, dim_out) in enumerate(in_out): - is_last = ind >= (num_resolutions - 1) - - self.down_blocks.append( - DownResnetBlock1D( - in_channels=dim_in, out_channels=dim_out, temb_channels=dim, add_downsample=(not is_last) - ) - ) - - # mid - mid_dim = dims[-1] - self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim) - self.mid_block2 = ResidualTemporalBlock(mid_dim, mid_dim, embed_dim=dim) - - # up - for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): - is_last = ind >= (num_resolutions - 1) - - self.up_blocks.append( - UpResnetBlock1D( - in_channels=dim_out * 2, out_channels=dim_in, temb_channels=dim, add_upsample=(not is_last) - ) - ) - - # out - self.final_conv1d_1 = nn.Conv1d(dim, dim, 5, padding=2) - self.final_conv1d_gn = nn.GroupNorm(8, dim) - self.final_conv1d_act = nn.Mish() - self.final_conv1d_2 = nn.Conv1d(dim, transition_dim, 1) - - def forward( - self, - sample: torch.FloatTensor, - timestep: Union[torch.Tensor, float, int], - return_dict: bool = True, - ) -> Union[UNet1DOutput, Tuple]: - r""" - Args: - sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor - timestep (`torch.FloatTensor` or `float` or `int): batch (batch) 
timesteps - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple. - - Returns: - [`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True, - otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - """ - sample = sample.permute(0, 2, 1) - - # 1. time - timesteps = timestep - if not torch.is_tensor(timesteps): - timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) - elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: - timesteps = timesteps[None].to(sample.device) - - temb = self.time_proj(timesteps) - temb = self.time_mlp(temb) - down_block_res_samples = [] - - # 2. down - for downsample_block in self.down_blocks: - sample, res_samples = downsample_block(hidden_states=sample, temb=temb) - down_block_res_samples.append(res_samples[0]) - - # 3. mid - sample = self.mid_block1(sample, temb) - sample = self.mid_block2(sample, temb) - - # 4. up - for up_block in self.up_blocks: - sample = up_block(hidden_states=sample, res_hidden_states=down_block_res_samples.pop(), temb=temb) - - # 5. post-process - sample = self.final_conv1d_1(sample) - sample = rearrange_dims(sample) - sample = self.final_conv1d_gn(sample) - sample = rearrange_dims(sample) - sample = self.final_conv1d_act(sample) - sample = self.final_conv1d_2(sample) - - sample = sample.permute(0, 2, 1) - - if not return_dict: - return (sample,) - - return UNet1DOutput(sample=sample) - - class ValueFunction(ModelMixin, ConfigMixin): @register_to_config def __init__( @@ -182,10 +60,10 @@ def __init__( mid_dim_2 = mid_dim // 2 mid_dim_3 = mid_dim // 4 ## - self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim_2, embed_dim=dim) + self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim_2, embed_dim=dim) self.mid_down1 = Downsample1D(mid_dim_2, use_conv=True) ## - self.mid_block2 = ResidualTemporalBlock(mid_dim_2, mid_dim_3, embed_dim=dim) + self.mid_block2 = ResidualTemporalBlock1D(mid_dim_2, mid_dim_3, embed_dim=dim) self.mid_down2 = Downsample1D(mid_dim_3, use_conv=True) ## fc_dim = mid_dim_3 @@ -200,7 +78,7 @@ def forward( sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True, - ) -> Union[UNet1DOutput, Tuple]: + ) -> Union[ValueFunctionOutput, Tuple]: """r Args: sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor @@ -244,4 +122,4 @@ def forward( if not return_dict: return (sample,) - return UNet1DOutput(sample=sample) + return ValueFunctionOutput(sample=sample) diff --git a/src/diffusers/schedulers/scheduling_value_function.py b/src/diffusers/schedulers/scheduling_value_function.py deleted file mode 100644 index 246d65758522..000000000000 --- a/src/diffusers/schedulers/scheduling_value_function.py +++ /dev/null @@ -1,298 +0,0 @@ -# Copyright 2022 UC Berkeley Team and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim - -import math -import warnings -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput -from .scheduling_utils import SchedulerMixin - - -@dataclass -class ValueFunctionSchedulerOutput(BaseOutput): - """ - Output class for the scheduler's step function output. - - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - The predicted denoised sample (x_{0}) based on the model output from the current timestep. - `pred_original_sample` can be used to preview progress or for guidance. - """ - - prev_sample: torch.FloatTensor - pred_original_sample: Optional[torch.FloatTensor] = None - - -def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. - - - Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to - prevent singularities. - - Returns: - betas (`np.ndarray`): the betas used by the scheduler to step the model outputs - """ - - def alpha_bar(time_step): - return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 - - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return torch.tensor(betas, dtype=torch.float32) - - -class ValueFunctionScheduler(SchedulerMixin, ConfigMixin): - """ - Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and - Langevin dynamics sampling. - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and - [`~ConfigMixin.from_config`] functions. - - For more details, see the original paper: https://arxiv.org/abs/2006.11239 - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear`, `scaled_linear`, or `squaredcos_cap_v2`. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - variance_type (`str`): - options to clip the variance used when adding noise to the denoised sample. 
Choose from `fixed_small`, - `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. - clip_sample (`bool`, default `True`): - option to clip predicted sample between -1 and 1 for numerical stability. - - """ - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[np.ndarray] = None, - variance_type: str = "fixed_small", - clip_sample: bool = True, - **kwargs, - ): - if "tensor_format" in kwargs: - warnings.warn( - "`tensor_format` is deprecated as an argument and will be removed in version `0.5.0`." - "If you're running your code in PyTorch, you can safely remove this argument.", - DeprecationWarning, - ) - - if trained_betas is not None: - self.betas = torch.from_numpy(trained_betas) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. - self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - elif beta_schedule == "squaredcos_cap_v2": - # Glide cosine schedule - self.betas = betas_for_alpha_bar(num_train_timesteps) - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - self.one = torch.tensor(1.0) - - # setable values - self.num_inference_steps = None - self.timesteps = np.arange(0, num_train_timesteps)[::-1] - - self.variance_type = variance_type - - def set_timesteps(self, num_inference_steps: int): - """ - Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. 
- """ - num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps) - self.num_inference_steps = num_inference_steps - self.timesteps = np.arange( - 0, self.config.num_train_timesteps, self.config.num_train_timesteps // self.num_inference_steps - )[::-1] - - def _get_variance(self, t, predicted_variance=None, variance_type=None): - alpha_prod_t = self.alphas_cumprod[t] - alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one - - # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) - # and sample from it to get previous sample - # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample - variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t] - - if variance_type is None: - variance_type = self.config.variance_type - - # hacks - were probably added for training stability - if variance_type == "fixed_small": - variance = torch.clamp(variance, min=1e-20) - # for rl-diffuser https://arxiv.org/abs/2205.09991 - elif variance_type == "fixed_small_log": - variance = torch.log(torch.clamp(variance, min=1e-20)) - elif variance_type == "fixed_large": - variance = self.betas[t] - elif variance_type == "fixed_large_log": - # Glide max_log - variance = torch.log(self.betas[t]) - elif variance_type == "learned": - return predicted_variance - elif variance_type == "learned_range": - min_log = variance - max_log = self.betas[t] - frac = (predicted_variance + 1) / 2 - variance = frac * max_log + (1 - frac) * min_log - - return variance - - def step( - self, - model_output: torch.FloatTensor, - timestep: int, - sample: torch.FloatTensor, - generator=None, - scale=0.001, - return_dict: bool = True, - ) -> Union[ValueFunctionSchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - predict_epsilon (`bool`): - optional flag to use when model predicts the samples directly instead of the noise, epsilon. - generator: random number generator. - return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class - - Returns: - [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - - """ - t = timestep - - if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: - model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) - else: - predicted_variance = None - - # 1. compute alphas, betas - alpha_prod_t = self.alphas_cumprod[t] - alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one - beta_prod_t = 1 - alpha_prod_t - beta_prod_t_prev = 1 - alpha_prod_t_prev - - # 2. compute predicted original sample from predicted noise also called - # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf - - pred_original_sample = model_output - - # 3. Clip "predicted x_0" - if self.config.clip_sample: - pred_original_sample = torch.clamp(pred_original_sample, -1, 1) - - # 4. 
Compute coefficients for pred_original_sample x_0 and current sample x_t - # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf - pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t - current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t - - # 5. Compute predicted previous sample µ_t - # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf - pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample - - # 6. Add noise - variance = 0 - if t > 0: - noise = torch.randn( - model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator - ).to(model_output.device) - variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise - - pred_prev_sample = pred_prev_sample + variance * noise - - if not return_dict: - return (pred_prev_sample,) - - return ValueFunctionSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.IntTensor, - ) -> torch.FloatTensor: - if self.alphas_cumprod.device != original_samples.device: - self.alphas_cumprod = self.alphas_cumprod.to(original_samples.device) - - if timesteps.device != original_samples.device: - timesteps = timesteps.to(original_samples.device) - - sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5 - sqrt_alpha_prod = sqrt_alpha_prod.flatten() - while len(sqrt_alpha_prod.shape) < len(original_samples.shape): - sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) - - sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5 - sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() - while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): - sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) - - noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps From d37b472a6cd4bb7f0fe7098e065618337d75b62f Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 12 Oct 2022 18:02:32 -0400 Subject: [PATCH 041/133] use same api for rl model --- convert_model.py | 31 +++++++++----- src/diffusers/__init__.py | 1 - src/diffusers/models/__init__.py | 2 +- src/diffusers/models/unet_rl.py | 64 +++++++++++++++------------- src/diffusers/schedulers/__init__.py | 1 - 5 files changed, 55 insertions(+), 44 deletions(-) diff --git a/convert_model.py b/convert_model.py index cfb5db24570c..216c46cf3003 100644 --- a/convert_model.py +++ b/convert_model.py @@ -1,6 +1,6 @@ import torch -from diffusers import DDPMScheduler, UNet1DModel, ValueFunction, ValueFunctionScheduler +from diffusers import DDPMScheduler, UNet1DModel, ValueFunction import os import json os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True) @@ -9,27 +9,36 @@ os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) def unet(hor): + if hor == 128: + down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") + block_out_channels = (32, 128, 256) + up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D") + + elif hor == 32: + down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") + block_out_channels = (32, 64, 128, 256) + up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") model = 
torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-hor{hor}.torch") state_dict = model.state_dict() - hf_value_function = UNet1DModel(dim=32, dim_mults=(1, 4, 8), transition_dim=14) + config = dict(down_block_types=down_block_types, block_out_channels=block_out_channels, up_block_types=up_block_types, layers_per_block=1) + hf_value_function = UNet1DModel(**config) + print(f"length of state dict: {len(state_dict.keys())}") + print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) for k, v in mapping.items(): state_dict[v] = state_dict.pop(k) hf_value_function.load_state_dict(state_dict) torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin") - if hor == 128: - dim_mults = (1, 4, 8) - elif hor == 32: - dim_mults = (1, 2, 4, 8) - config = dict(dim=32, dim_mults=dim_mults, transition_dim=14) with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f: json.dump(config, f) def value_function(): + config = dict(in_channels=14, down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), block_out_channels=(32, 64, 128, 256), layers_per_block=1) + model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-hor32.torch") state_dict = model.state_dict() - hf_value_function = ValueFunction(dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14) + hf_value_function = ValueFunction(**config) print(f"length of state dict: {len(state_dict.keys())}") print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") @@ -40,11 +49,11 @@ def value_function(): hf_value_function.load_state_dict(state_dict) torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin") - config = dict(dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14) with open("hub/hopper-medium-v2/value_function/config.json", "w") as f: json.dump(config, f) if __name__ == "__main__": - unet() - value_function() \ No newline at end of file + # unet(32) + unet(128) + # value_function() \ No newline at end of file diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index c21ba3c7c3c2..645ba4604cea 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -37,7 +37,6 @@ PNDMScheduler, SchedulerMixin, ScoreSdeVeScheduler, - ValueFunctionScheduler ) from .training_utils import EMAModel else: diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index a1a5722ef5f8..355999f76688 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -20,7 +20,7 @@ from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel from .vae import AutoencoderKL, VQModel - from .unet_rl import UNetRLModel + from .unet_rl import ValueFunction if is_flax_available(): from .unet_2d_condition_flax import FlaxUNet2DConditionModel diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index fe33259cda40..6584bd13eda3 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -5,8 +5,8 @@ import torch import torch.nn as nn -from diffusers.models.resnet import ResidualTemporalBlock1D -from diffusers.models.unet_1d_blocks import DownResnetBlock1D, UpResnetBlock1D, Downsample1D +from diffusers.models.resnet import ResidualTemporalBlock1D, Downsample1D +from 
diffusers.models.unet_1d_blocks import get_down_block from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin @@ -30,45 +30,49 @@ class ValueFunction(ModelMixin, ConfigMixin): @register_to_config def __init__( self, - transition_dim=14, - dim=32, - dim_mults=(1, 4, 8), + in_channels=14, + down_block_types: Tuple[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), + block_out_channels: Tuple[int] = (32, 64, 128, 256), + act_fn: str = "mish", + norm_num_groups: int = 8, + layers_per_block: int = 1, ): super().__init__() + time_embed_dim = block_out_channels[0] * 4 + self.time_proj = Timesteps(num_channels=block_out_channels[0], flip_sin_to_cos=False, downscale_freq_shift=1) + self.time_mlp = TimestepEmbedding(channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn="mish", out_dim=block_out_channels[0]) - self.transition_dim = transition_dim - self.time_proj = Timesteps(num_channels=dim, flip_sin_to_cos=False, downscale_freq_shift=1) - self.time_mlp = TimestepEmbedding(channel=dim, time_embed_dim=4 * dim, act_fn="mish", out_dim=dim) - - dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] - in_out = list(zip(dims[:-1], dims[1:])) self.blocks = nn.ModuleList([]) - num_resolutions = len(in_out) - - for ind, (dim_in, dim_out) in enumerate(in_out): - is_last = ind >= (num_resolutions - 1) - - self.blocks.append( - DownResnetBlock1D( - in_channels=dim_in, out_channels=dim_out, temb_channels=dim, add_downsample=True - ) + mid_dim = block_out_channels[-1] + + output_channel = in_channels + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block_type = down_block_types[i] + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=block_out_channels[0], + add_downsample=True, ) + self.blocks.append(down_block) - - mid_dim = dims[-1] - mid_dim_2 = mid_dim // 2 - mid_dim_3 = mid_dim // 4 ## - self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim_2, embed_dim=dim) - self.mid_down1 = Downsample1D(mid_dim_2, use_conv=True) + self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim // 2, embed_dim=block_out_channels[0]) + self.mid_down1 = Downsample1D(mid_dim // 2, use_conv=True) ## - self.mid_block2 = ResidualTemporalBlock1D(mid_dim_2, mid_dim_3, embed_dim=dim) - self.mid_down2 = Downsample1D(mid_dim_3, use_conv=True) + self.mid_block2 = ResidualTemporalBlock1D(mid_dim //2, mid_dim // 4, embed_dim=block_out_channels[0]) + self.mid_down2 = Downsample1D(mid_dim // 4, use_conv=True) ## - fc_dim = mid_dim_3 + fc_dim = mid_dim // 4 self.final_block = nn.ModuleList([ - nn.Linear(fc_dim + dim, fc_dim // 2), + nn.Linear(fc_dim + block_out_channels[0], fc_dim // 2), nn.Mish(), nn.Linear(fc_dim // 2, 1),] ) diff --git a/src/diffusers/schedulers/__init__.py b/src/diffusers/schedulers/__init__.py index c4770de538cc..a906c39eb24c 100644 --- a/src/diffusers/schedulers/__init__.py +++ b/src/diffusers/schedulers/__init__.py @@ -24,7 +24,6 @@ from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_utils import SchedulerMixin - from .scheduling_value_function import ValueFunctionScheduler else: from ..utils.dummy_pt_objects import * # noqa F403 From 798263f6292db82b586fd14dcd5f5e665eb29004 Mon Sep 17 00:00:00 
2001 From: Nathan Lambert Date: Wed, 12 Oct 2022 17:24:36 -0700 Subject: [PATCH 042/133] init v-pred pr --- src/diffusers/schedulers/scheduling_ddpm.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 04c92904a660..020850d680fe 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -220,7 +220,7 @@ def step( model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, - predict_epsilon=True, + prediction_type: str = "epsilon", generator=None, return_dict: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: @@ -233,8 +233,10 @@ def step( timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. - predict_epsilon (`bool`): - optional flag to use when model predicts the samples directly instead of the noise, epsilon. + prediction_type (`str`): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample), or `v` (see section 2.4 + https://imagen.research.google/video/paper.pdf) generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class @@ -259,10 +261,15 @@ def step( # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf - if predict_epsilon: + if prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) - else: + elif prediction_type == "sample": pred_original_sample = model_output + elif prediction_type == "v": + # v_t = alpha_t * epsilon - sigma_t * x + raise NotImplementedError(f"v prediction not yet implemented") + else: + raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") # 3. 
Clip "predicted x_0" if self.config.clip_sample: From b7d0c1e84aa9be151f577a31de17cbb8c15b65d8 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Wed, 12 Oct 2022 17:32:52 -0700 Subject: [PATCH 043/133] placeholder code --- src/diffusers/schedulers/scheduling_ddpm.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 020850d680fe..74dfc57bd017 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -267,7 +267,10 @@ def step( pred_original_sample = model_output elif prediction_type == "v": # v_t = alpha_t * epsilon - sigma_t * x - raise NotImplementedError(f"v prediction not yet implemented") + # need to merge the PRs for sigma to be available in DDPM + # pred_original_sample = sample*self.alphas[t] - model_output * self.sigmas[t] + # eps = model_output*self.alphas[t] - sample * self.sigmas[t] + raise NotImplementedError(f"v prediction not yet implemented for DDPM") else: raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") From 7eb4bfae6c663ac3974406a65fd8160a809ace8e Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Wed, 12 Oct 2022 17:39:48 -0700 Subject: [PATCH 044/133] up --- src/diffusers/schedulers/scheduling_ddpm.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 74dfc57bd017..ded5fea168e5 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -142,6 +142,7 @@ def __init__( self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.sigmas = 1 - self.alphas ** 2 self.one = torch.tensor(1.0) # standard deviation of the initial noise distribution @@ -268,8 +269,8 @@ def step( elif prediction_type == "v": # v_t = alpha_t * epsilon - sigma_t * x # need to merge the PRs for sigma to be available in DDPM - # pred_original_sample = sample*self.alphas[t] - model_output * self.sigmas[t] - # eps = model_output*self.alphas[t] - sample * self.sigmas[t] + pred_original_sample = sample*self.alphas[t] - model_output * self.sigmas[t] + eps = model_output*self.alphas[t] - sample * self.sigmas[t] raise NotImplementedError(f"v prediction not yet implemented for DDPM") else: raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") From 3eb2593d9a48f0c0861bd9d4b1089ef9843e57be Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Wed, 12 Oct 2022 20:10:03 -0700 Subject: [PATCH 045/133] a few more additions --- src/diffusers/schedulers/scheduling_ddpm.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index ded5fea168e5..6ab2498956d7 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -142,7 +142,7 @@ def __init__( self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - self.sigmas = 1 - self.alphas ** 2 + self.sigmas = 1 - self.alphas**2 self.one = torch.tensor(1.0) # standard deviation of the initial noise distribution @@ -269,8 +269,8 @@ def step( elif prediction_type == "v": # v_t = alpha_t * epsilon - sigma_t * x # need to merge the PRs for sigma to be available in DDPM - pred_original_sample = sample*self.alphas[t] - 
model_output * self.sigmas[t] - eps = model_output*self.alphas[t] - sample * self.sigmas[t] + pred_original_sample = sample * self.alphas[t] - model_output * self.sigmas[t] + eps = model_output * self.alphas[t] - sample * self.sigmas[t] raise NotImplementedError(f"v prediction not yet implemented for DDPM") else: raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") From aa19286a6fccc5c7fdd5d1332c113592bca40a76 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 13 Oct 2022 12:53:44 -0400 Subject: [PATCH 046/133] fix variance type --- convert_model.py | 6 +- examples/diffuser/helpers.py | 9 +-- .../diffuser/run_diffuser_value_guided.py | 65 ++++++++++--------- src/diffusers/schedulers/scheduling_ddpm.py | 8 ++- 4 files changed, 50 insertions(+), 38 deletions(-) diff --git a/convert_model.py b/convert_model.py index 216c46cf3003..85f96f0a743b 100644 --- a/convert_model.py +++ b/convert_model.py @@ -54,6 +54,6 @@ def value_function(): if __name__ == "__main__": - # unet(32) - unet(128) - # value_function() \ No newline at end of file + unet(32) + # unet(128) + value_function() \ No newline at end of file diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py index 6866c415f4e4..a088635d7d37 100644 --- a/examples/diffuser/helpers.py +++ b/examples/diffuser/helpers.py @@ -49,13 +49,14 @@ def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim grad = torch.autograd.grad([y.sum()], [x])[0] if config['scale_grad_by_std']: posterior_variance = scheduler._get_variance(i) - grad = posterior_variance * 0.5 * grad + model_std = torch.exp(0.5 * posterior_variance) + grad = model_std * grad grad[timesteps < config['t_grad_cutoff']] = 0 x = x.detach() x = x + config['scale'] * grad x = reset_x0(x, conditions, action_dim) - with torch.no_grad(): - prev_x = unet(x, timesteps).sample + # with torch.no_grad(): + prev_x = unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) x = scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] # 3. [optional] add posterior noise to the sample @@ -68,7 +69,7 @@ def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim # 4. 
apply conditions to the trajectory x = reset_x0(x, conditions, action_dim) - x = to_torch(x) + x = to_torch(x, device=config['device']) # y = network(x, timesteps).sample return x, y diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 6c29acb62b7c..7d5588149fa0 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -4,7 +4,7 @@ import tqdm import numpy as np import gym -from diffusers import DDPMScheduler, UNet1DModel, ValueFunction, ValueFunctionScheduler +from diffusers import DDPMScheduler, UNet1DModel, ValueFunction from helpers import MuJoCoRenderer, show_sample import helpers import wandb @@ -39,27 +39,20 @@ config = dict( n_samples=64, - horizon=128, - num_inference_steps=200, - n_guide_steps=0, + horizon=32, + num_inference_steps=20, + n_guide_steps=2, scale_grad_by_std=True, - scale=0.001, + scale=0.1, eta=0.0, - t_grad_cutoff=4, - device='cuda' + t_grad_cutoff=2, + device='cpu' ) -@stub.function( - image=image, - secret=modal.Secret.from_name("wandb-api-key"), - mounts=modal.create_package_mounts(["diffusers"]), - gpu=True -) -def run(): - wandb.login(key=os.environ["WANDB_API_KEY"]) +def _run(): wandb.init(project="diffusers-value-guided-rl") - - env_name = "hopper-medium-expert-v2" + wandb.config.update(config) + env_name = "hopper-medium-v2" env = gym.make(env_name) data = env.get_dataset() # dataset is only used for normalization in this colab render = MuJoCoRenderer(env) @@ -77,14 +70,14 @@ def run(): # generator = torch.Generator(device='cuda') generator = torch.Generator(device=DEVICE) - scheduler = DDPMScheduler(num_train_timesteps=config['num_inference_steps'],beta_schedule="squaredcos_cap_v2", clip_sample=False, ) + scheduler = DDPMScheduler(num_train_timesteps=config['num_inference_steps'],beta_schedule="squaredcos_cap_v2", clip_sample=False, variance_type="fixed_small_log") # 3 different pretrained models are available for this task. # The horizon represents the length of trajectories used in training. 
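# [editor's note] The value-guided sampling this script drives through helpers.run_diffusion
# can be hard to follow from the diff alone. Below is a minimal sketch of the core step, not
# part of the patch: at each denoising step the value function scores the noisy trajectory,
# the gradient of that score nudges the sample toward higher predicted return, and only then
# does the scheduler step back. `value_fn`, `denoiser` and `sched` are stand-ins for the
# ValueFunction, UNet1DModel and DDPMScheduler loaded below; the script's scale_grad_by_std
# and t_grad_cutoff refinements are omitted for brevity.
import torch

def guided_step(x, i, timesteps, value_fn, denoiser, sched, scale=0.1):
    with torch.enable_grad():
        x = x.detach().requires_grad_()
        y = value_fn(x, timesteps).sample  # predicted return of the noisy plan
        grad = torch.autograd.grad(y.sum(), x)[0]  # d(return) / d(trajectory)
    x = (x + scale * grad).detach()  # gradient ascent on the value estimate
    prev_x = denoiser(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
    return sched.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]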
# network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) - network = ValueFunction.from_pretrained("bglick13/hopper-medium-expert-v2-value-function-hor32").to(device=DEVICE).eval() - unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-expert-v2-unet-hor128").to(device=DEVICE).eval() + network = ValueFunction.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() + unet = UNet1DModel.from_pretrained(f"bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval() # unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) @@ -92,8 +85,9 @@ def run(): ## [ observation_dim ] --> [ n_samples x observation_dim ] obs = env.reset() total_reward = 0 + total_score = 0 done = False - T = 200 + T = 1000 rollout = [obs.copy()] trajectories = [] y_maxes = [0] @@ -125,7 +119,7 @@ def run(): # convert a np observation to torch for model forward pass x = helpers.to_torch(x, device=DEVICE) x, y = helpers.run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config) - if y: + if y is not None: sorted_idx = y.argsort(0, descending=True).squeeze() y_maxes.append(y[sorted_idx[0]].detach().cpu().numpy()) sorted_values = x[sorted_idx] @@ -139,17 +133,18 @@ def run(): actions = actions.detach().cpu().numpy() denorm_actions = helpers.de_normalize(actions, data, key='actions') - denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] - # denorm_actions = denorm_actions[0, 0] + # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] + denorm_actions = denorm_actions[0, 0] ## execute action in environment next_observation, reward, terminal, _ = env.step(denorm_actions) - + score = env.get_normalized_score(total_reward) ## update return total_reward += reward - wandb.log({"total_reward": total_reward, "reward": reward, "y_max": y_maxes[-1], "diff_from_expert_reward": reward - data['rewards'][t]}) - print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") + total_score += score + wandb.log({"total_reward": total_reward, "reward": reward, "score": score, "total_score": total_score, "y_max": y_maxes[-1], "diff_from_expert_reward": reward - data['rewards'][t]}) + print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score: {total_score}") # save observations for rendering rollout.append(next_observation.copy()) @@ -162,8 +157,18 @@ def run(): images = show_sample(render, np.expand_dims(np.stack(rollout),axis=0)) wandb.log({"rollout": wandb.Video("videos/sample.mp4", fps=60, format='mp4')}) +@stub.function( + image=image, + secret=modal.Secret.from_name("wandb-api-key"), + mounts=modal.create_package_mounts(["diffusers"]), + gpu=True +) +def run(): + wandb.login(key=os.environ["WANDB_API_KEY"]) + _run() + if __name__ == "__main__": - # run() - with stub.run(): - run() + _run() + # with stub.run(): + # run() diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 04c92904a660..98f15f8dbb64 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -283,7 +283,13 @@ def step( noise = torch.randn( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) - variance = (self._get_variance(t, predicted_variance=predicted_variance) 
** 0.5) * noise + if self.variance_type == "fixed_small_log": + variance = (self._get_variance(t, predicted_variance=predicted_variance)) + variance = torch.exp(0.5 * variance) + variance = variance * noise + + else: + variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise pred_prev_sample = pred_prev_sample + variance From 02293e2d9b686ee064c4c56511528f98202354bf Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 13 Oct 2022 18:52:22 -0400 Subject: [PATCH 047/133] wrong normalization function --- convert_model.py | 8 ++++---- examples/diffuser/helpers.py | 16 +++++++--------- examples/diffuser/run_diffuser_value_guided.py | 17 +++++++++++------ 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/convert_model.py b/convert_model.py index 85f96f0a743b..4691c69239d7 100644 --- a/convert_model.py +++ b/convert_model.py @@ -18,7 +18,7 @@ def unet(hor): down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") block_out_channels = (32, 64, 128, 256) up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") - model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-hor{hor}.torch") + model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch") state_dict = model.state_dict() config = dict(down_block_types=down_block_types, block_out_channels=block_out_channels, up_block_types=up_block_types, layers_per_block=1) hf_value_function = UNet1DModel(**config) @@ -36,13 +36,13 @@ def unet(hor): def value_function(): config = dict(in_channels=14, down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), block_out_channels=(32, 64, 128, 256), layers_per_block=1) - model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-hor32.torch") - state_dict = model.state_dict() + model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch") + state_dict = model hf_value_function = ValueFunction(**config) print(f"length of state dict: {len(state_dict.keys())}") print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") - mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) + mapping = dict((k, hfk) for k, hfk in zip(state_dict.keys(), hf_value_function.state_dict().keys())) for k, v in mapping.items(): state_dict[v] = state_dict.pop(k) diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py index a088635d7d37..633a7eedfce8 100644 --- a/examples/diffuser/helpers.py +++ b/examples/diffuser/helpers.py @@ -9,16 +9,14 @@ DTYPE = torch.float def normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = 2*(x_in - lower)/(upper-lower) - 1 - return x_out + means = data[key].mean(axis=0) + stds = data[key].std(axis=0) + return (x_in - means) / stds def de_normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = lower + (upper - lower)*(1 + x_in) /2 - return x_out + means = data[key].mean(axis=0) + stds = data[key].std(axis=0) + return x_in * stds + means def to_torch(x_in, dtype=None, device='cuda'): dtype = dtype or DTYPE @@ -61,7 +59,7 @@ def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim # 3. 
[optional] add posterior noise to the sample if config['eta'] > 0: - noise = torch.randn(x.shape, generator=generator).to(x.device) + noise = torch.randn(x.shape).to(x.device) posterior_variance = scheduler._get_variance(i) # * noise # no noise when t == 0 # NOTE: original implementation missing sqrt on posterior_variance diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 7d5588149fa0..529f4bbb66ac 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -10,6 +10,9 @@ import wandb import modal import os +from pytorch_lightning import seed_everything + +seed_everything(0) stub = modal.Stub("diffusers-value-guided") image = modal.Image.debian_slim().apt_install([ @@ -34,7 +37,8 @@ "mediapy", "Pillow==9.0.0", "moviepy", - "imageio" + "imageio", + "pytorch-lightning", ]) config = dict( @@ -46,7 +50,7 @@ scale=0.1, eta=0.0, t_grad_cutoff=2, - device='cpu' + device='cuda' ) def _run(): @@ -83,6 +87,7 @@ def _run(): ## add a batch dimension and repeat for multiple samples ## [ observation_dim ] --> [ n_samples x observation_dim ] + env.seed(0) obs = env.reset() total_reward = 0 total_score = 0 @@ -109,7 +114,7 @@ def _run(): shape = (batch_size, config['horizon'], state_dim+action_dim) # sample random initial noise vector - x1 = torch.randn(shape, device=DEVICE, generator=generator) + x1 = torch.randn(shape, device=DEVICE) # this model is conditioned from an initial state, so you will see this function # multiple times to change the initial state of generated data to the state @@ -169,6 +174,6 @@ def run(): if __name__ == "__main__": - _run() - # with stub.run(): - # run() + # _run() + with stub.run(): + run() From 56818e58c6423d4eee4a415dcf49995fb652932c Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 17 Oct 2022 14:48:23 -0400 Subject: [PATCH 048/133] add tests --- tests/test_models_unet.py | 85 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index e1dbdfaa4611..55f373af8a9b 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -20,7 +20,7 @@ import torch -from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel +from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction from diffusers.utils import floats_tensor, slow, torch_device from .test_modeling_common import ModelTesterMixin @@ -524,3 +524,86 @@ def test_output_pretrained(self): def test_forward_with_norm_groups(self): # Not implemented yet for this UNet pass + + +class UNetRLModelTests(ModelTesterMixin, unittest.TestCase): + model_class = ValueFunction + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 14, 16) + + @property + def output_shape(self): + return (4, 14, 1) + + def test_ema_training(self): + pass + + def test_training(self): + pass + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (32, 64, 128, 256), + "in_channels": 14, + "out_channels": 14, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + unet, loading_info = UNet1DModel.from_pretrained( + 
"bglick13/hopper-medium-v2-unet-hor32", output_loading_info=True + ) + value_function, vf_loading_info = ValueFunction.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + ) + self.assertIsNotNone(unet) + self.assertEqual(len(loading_info["missing_keys"]), 0) + self.assertIsNotNone(value_function) + self.assertEqual(len(vf_loading_info["missing_keys"]), 0) + + unet.to(torch_device) + value_function.to(torch_device) + image = value_function(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + value_function, vf_loading_info = ValueFunction.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + ) + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + num_features = value_function.in_channels + seq_len = 14 + noise = torch.randn((1, seq_len, num_features)).permute( + 0, 2, 1 + ) # match original, we can update values and remove + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = value_function(noise, time_step).sample + + # fmt: off + expected_output_slice = torch.tensor([207.0272] * seq_len) + # fmt: on + self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) + + def test_forward_with_norm_groups(self): + # Not implemented yet for this UNet + pass From d085725ba387adc03067445a8dc81d5fa63dff7a Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 17 Oct 2022 14:53:09 -0400 Subject: [PATCH 049/133] style --- examples/diffuser/helpers.py | 149 +++++++------- examples/diffuser/run_diffuser.py | 214 ++++++++++---------- src/diffusers/__init__.py | 2 +- src/diffusers/models/__init__.py | 2 +- src/diffusers/models/unet_rl.py | 33 +-- src/diffusers/schedulers/scheduling_ddpm.py | 2 +- 6 files changed, 213 insertions(+), 189 deletions(-) diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py index 633a7eedfce8..c2ec457abad7 100644 --- a/examples/diffuser/helpers.py +++ b/examples/diffuser/helpers.py @@ -1,86 +1,94 @@ import os -import mediapy as media +import warnings + import numpy as np import torch + import gym -import warnings +import mediapy as media import tqdm DTYPE = torch.float + + def normalize(x_in, data, key): means = data[key].mean(axis=0) stds = data[key].std(axis=0) return (x_in - means) / stds + def de_normalize(x_in, data, key): means = data[key].mean(axis=0) stds = data[key].std(axis=0) return x_in * stds + means - -def to_torch(x_in, dtype=None, device='cuda'): - dtype = dtype or DTYPE - device = device - if type(x_in) is dict: - return {k: to_torch(v, dtype, device) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(device).type(dtype) - return torch.tensor(x_in, dtype=dtype, device=device) + + +def to_torch(x_in, dtype=None, device="cuda"): + dtype = dtype or DTYPE + device = device + if type(x_in) is dict: + return {k: to_torch(v, dtype, device) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(device).type(dtype) + return torch.tensor(x_in, dtype=dtype, device=device) def reset_x0(x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config): y = None for i in tqdm.tqdm(scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = 
torch.full((config['n_samples'],), i, device=config['device'], dtype=torch.long) + timesteps = torch.full((config["n_samples"],), i, device=config["device"], dtype=torch.long) # 3. call the sample function - for _ in range(config['n_guide_steps']): + for _ in range(config["n_guide_steps"]): with torch.enable_grad(): x.requires_grad_() y = network(x, timesteps).sample grad = torch.autograd.grad([y.sum()], [x])[0] - if config['scale_grad_by_std']: + if config["scale_grad_by_std"]: posterior_variance = scheduler._get_variance(i) model_std = torch.exp(0.5 * posterior_variance) grad = model_std * grad - grad[timesteps < config['t_grad_cutoff']] = 0 + grad[timesteps < config["t_grad_cutoff"]] = 0 x = x.detach() - x = x + config['scale'] * grad + x = x + config["scale"] * grad x = reset_x0(x, conditions, action_dim) # with torch.no_grad(): prev_x = unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) x = scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - + # 3. [optional] add posterior noise to the sample - if config['eta'] > 0: + if config["eta"] > 0: noise = torch.randn(x.shape).to(x.device) - posterior_variance = scheduler._get_variance(i) # * noise + posterior_variance = scheduler._get_variance(i) # * noise # no noise when t == 0 # NOTE: original implementation missing sqrt on posterior_variance - x = x + int(i>0) * (0.5 * posterior_variance) * config['eta'] * noise # MJ had as log var, exponentiated + x = x + int(i > 0) * (0.5 * posterior_variance) * config["eta"] * noise # MJ had as log var, exponentiated # 4. apply conditions to the trajectory x = reset_x0(x, conditions, action_dim) - x = to_torch(x, device=config['device']) + x = to_torch(x, device=config["device"]) # y = network(x, timesteps).sample return x, y + def to_np(x_in): - if torch.is_tensor(x_in): - x_in = x_in.detach().cpu().numpy() - return x_in + if torch.is_tensor(x_in): + x_in = x_in.detach().cpu().numpy() + return x_in -# from MJ's Diffuser code + +# from MJ's Diffuser code # https://github.com/jannerm/diffuser/blob/76ae49ae85ba1c833bf78438faffdc63b8b4d55d/diffuser/utils/colab.py#L79 def mkdir(savepath): """ - returns `True` iff `savepath` is created + returns `True` iff `savepath` is created """ if not os.path.exists(savepath): os.makedirs(savepath) @@ -89,10 +97,10 @@ def mkdir(savepath): return False -def show_sample(renderer, observations, filename='sample.mp4', savebase='videos'): - ''' +def show_sample(renderer, observations, filename="sample.mp4", savebase="videos"): + """ observations : [ batch_size x horizon x observation_dim ] - ''' + """ mkdir(savebase) savepath = os.path.join(savebase, filename) @@ -106,52 +114,58 @@ def show_sample(renderer, observations, filename='sample.mp4', savebase='videos' ## [ horizon x height x (batch_size * width) x channels ] images = np.concatenate(images, axis=2) media.write_video(savepath, images, fps=60) - media.show_video(images, codec='h264', fps=60) + media.show_video(images, codec="h264", fps=60) return images + # Code adapted from Michael Janner # source: https://github.com/jannerm/diffuser/blob/main/diffuser/utils/rendering.py import mujoco_py as mjc + def env_map(env_name): - ''' - map D4RL dataset names to custom fully-observed - variants for rendering - ''' - if 'halfcheetah' in env_name: - return 'HalfCheetahFullObs-v2' - elif 'hopper' in env_name: - return 'HopperFullObs-v2' - elif 'walker2d' in env_name: - return 'Walker2dFullObs-v2' + """ + map D4RL dataset names to custom fully-observed + variants for rendering + """ + if "halfcheetah" in 
env_name: + return "HalfCheetahFullObs-v2" + elif "hopper" in env_name: + return "HopperFullObs-v2" + elif "walker2d" in env_name: + return "Walker2dFullObs-v2" else: return env_name + def get_image_mask(img): background = (img == 255).all(axis=-1, keepdims=True) mask = ~background.repeat(3, axis=-1) return mask + def atmost_2d(x): while x.ndim > 2: x = x.squeeze(0) return x + def set_state(env, state): qpos_dim = env.sim.data.qpos.size qvel_dim = env.sim.data.qvel.size if not state.size == qpos_dim + qvel_dim: warnings.warn( - f'[ utils/rendering ] Expected state of size {qpos_dim + qvel_dim}, ' - f'but got state of size {state.size}') - state = state[:qpos_dim + qvel_dim] + f"[ utils/rendering ] Expected state of size {qpos_dim + qvel_dim}, but got state of size {state.size}" + ) + state = state[: qpos_dim + qvel_dim] env.set_state(state[:qpos_dim], state[qpos_dim:]) + class MuJoCoRenderer: - ''' - default mujoco renderer - ''' + """ + default mujoco renderer + """ def __init__(self, env): if type(env) is str: @@ -166,14 +180,16 @@ def __init__(self, env): try: self.viewer = mjc.MjRenderContextOffscreen(self.env.sim) except: - print('[ utils/rendering ] Warning: could not initialize offscreen renderer') + print("[ utils/rendering ] Warning: could not initialize offscreen renderer") self.viewer = None def pad_observation(self, observation): - state = np.concatenate([ - np.zeros(1), - observation, - ]) + state = np.concatenate( + [ + np.zeros(1), + observation, + ] + ) return state def pad_observations(self, observations): @@ -182,14 +198,16 @@ def pad_observations(self, observations): xvel_dim = qpos_dim - 1 xvel = observations[:, xvel_dim] xpos = np.cumsum(xvel) * self.env.dt - states = np.concatenate([ - xpos[:,None], - observations, - ], axis=-1) + states = np.concatenate( + [ + xpos[:, None], + observations, + ], + axis=-1, + ) return states def render(self, observation, dim=256, partial=False, qvel=True, render_kwargs=None, conditions=None): - if type(dim) == int: dim = (dim, dim) @@ -198,15 +216,10 @@ def render(self, observation, dim=256, partial=False, qvel=True, render_kwargs=N if render_kwargs is None: xpos = observation[0] if not partial else 0 - render_kwargs = { - 'trackbodyid': 2, - 'distance': 3, - 'lookat': [xpos, -0.5, 1], - 'elevation': -20 - } + render_kwargs = {"trackbodyid": 2, "distance": 3, "lookat": [xpos, -0.5, 1], "elevation": -20} for key, val in render_kwargs.items(): - if key == 'lookat': + if key == "lookat": self.viewer.cam.lookat[:] = val[:] else: setattr(self.viewer.cam, key, val) @@ -251,4 +264,4 @@ def renders(self, samples, partial=False, **kwargs): return composite def __call__(self, *args, **kwargs): - return self.renders(*args, **kwargs) \ No newline at end of file + return self.renders(*args, **kwargs) diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser.py index 97f38fd3f261..e69ffc117e3e 100644 --- a/examples/diffuser/run_diffuser.py +++ b/examples/diffuser/run_diffuser.py @@ -1,66 +1,71 @@ -import d4rl - -import torch -import tqdm import numpy as np -import gym +import torch + +import d4rl +import gym import helpers +import tqdm + env_name = "hopper-medium-expert-v2" env = gym.make(env_name) -data = env.get_dataset() # dataset is only used for normalization in this colab +data = env.get_dataset() # dataset is only used for normalization in this colab # Cuda settings for colab # torch.cuda.get_device_name(0) -DEVICE = 'cpu' +DEVICE = "cpu" DTYPE = torch.float # diffusion model settings -n_samples = 4 # number of 
trajectories planned via diffusion -horizon = 128 # length of sampled trajectories -state_dim = env.observation_space.shape[0] +n_samples = 4 # number of trajectories planned via diffusion +horizon = 128 # length of sampled trajectories +state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] -num_inference_steps = 100 # number of diffusion steps +num_inference_steps = 100 # number of diffusion steps + def normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = 2*(x_in - lower)/(upper-lower) - 1 - return x_out + upper = np.max(data[key], axis=0) + lower = np.min(data[key], axis=0) + x_out = 2 * (x_in - lower) / (upper - lower) - 1 + return x_out + def de_normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = lower + (upper - lower)*(1 + x_in) /2 - return x_out - -def to_torch(x_in, dtype=None, device=None): - dtype = dtype or DTYPE - device = device or DEVICE - if type(x_in) is dict: - return {k: to_torch(v, dtype, device) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(device).type(dtype) - return torch.tensor(x_in, dtype=dtype, device=device) + upper = np.max(data[key], axis=0) + lower = np.min(data[key], axis=0) + x_out = lower + (upper - lower) * (1 + x_in) / 2 + return x_out -def to_torch(x_in, dtype=None, device=None): - dtype = dtype or DTYPE - device = device or DEVICE - if type(x_in) is dict: - return {k: to_torch(v, dtype, device) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(device).type(dtype) - return torch.tensor(x_in, dtype=dtype, device=device) from diffusers import DDPMScheduler, TemporalUNet + # Two generators for different parts of the diffusion loop to work in colab generator_cpu = torch.Generator(device="cpu") scheduler = DDPMScheduler(num_train_timesteps=100, beta_schedule="squaredcos_cap_v2") # 3 different pretrained models are available for this task. # The horizon represents the length of trajectories used in training. 
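# [editor's note] A quick sanity check of the min-max normalize / de_normalize pair defined
# above: de_normalize algebraically inverts normalize, since
# lower + (upper - lower) * (1 + (2 * (x - lower) / (upper - lower) - 1)) / 2 == x.
# The snippet is illustrative only and not part of the patch; `data_check` is a hypothetical
# stand-in for the D4RL dataset dict `data` used by this script.
import numpy as np

rng = np.random.default_rng(0)
data_check = {"observations": rng.normal(size=(100, 11))}
x_raw = data_check["observations"][0]
x_norm = normalize(x_raw, data_check, "observations")  # per-dimension map into [-1, 1]
x_back = de_normalize(x_norm, data_check, "observations")
assert np.allclose(x_raw, x_back)  # exact round trip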
network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) def reset_x0(x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + # network specific constants for inference clip_denoised = network.clip_denoised @@ -75,77 +80,76 @@ def reset_x0(x_in, cond, act_dim): rollout = [obs.copy()] try: - for t in tqdm.tqdm(range(T)): - obs_raw = obs - - # normalize observations for forward passes - obs = normalize(obs, data, 'observations') - obs = obs[None].repeat(n_samples, axis=0) - conditions = { - 0: to_torch(obs, device=DEVICE) - } - - # constants for inference - batch_size = len(conditions[0]) - shape = (batch_size, horizon, state_dim+action_dim) - - # sample random initial noise vector - x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu) - - # this model is conditioned from an initial state, so you will see this function - # multiple times to change the initial state of generated data to the state - # generated via env.reset() above or env.step() below - x = reset_x0(x1, conditions, action_dim) - - # convert a np observation to torch for model forward pass - x = to_torch(x) - - eta = 1.0 # noise factor for sampling reconstructed state - - # run the diffusion process - # for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps): - for i in tqdm.tqdm(scheduler.timesteps): - - # create batch of timesteps to pass into model - timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long) - - # 1. generate prediction from model - with torch.no_grad(): - residual = network(x, timesteps).sample - - # 2. use the model prediction to reconstruct an observation (de-noise) - obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"] - - # 3. [optional] add posterior noise to the sample - if eta > 0: - noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device) - posterior_variance = scheduler._get_variance(i) # * noise - # no noise when t == 0 - # NOTE: original implementation missing sqrt on posterior_variance - obs_reconstruct = obs_reconstruct + int(i>0) * (0.5 * posterior_variance) * eta* noise # MJ had as log var, exponentiated - - # 4. 
apply conditions to the trajectory - obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) - x = to_torch(obs_reconstruct_postcond) - plans = helpers.to_np(x[:,:,:action_dim]) - # select random plan - idx = np.random.randint(plans.shape[0]) - # select action at correct time - action = plans[idx, 0, :] - actions= de_normalize(action, data, 'actions') - ## execute action in environment - next_observation, reward, terminal, _ = env.step(action) - - ## update return - total_reward += reward - print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") - - # save observations for rendering - rollout.append(next_observation.copy()) - obs = next_observation + for t in tqdm.tqdm(range(T)): + obs_raw = obs + + # normalize observations for forward passes + obs = normalize(obs, data, "observations") + obs = obs[None].repeat(n_samples, axis=0) + conditions = {0: to_torch(obs, device=DEVICE)} + + # constants for inference + batch_size = len(conditions[0]) + shape = (batch_size, horizon, state_dim + action_dim) + + # sample random initial noise vector + x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu) + + # this model is conditioned from an initial state, so you will see this function + # multiple times to change the initial state of generated data to the state + # generated via env.reset() above or env.step() below + x = reset_x0(x1, conditions, action_dim) + + # convert a np observation to torch for model forward pass + x = to_torch(x) + + eta = 1.0 # noise factor for sampling reconstructed state + + # run the diffusion process + # for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps): + for i in tqdm.tqdm(scheduler.timesteps): + # create batch of timesteps to pass into model + timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long) + + # 1. generate prediction from model + with torch.no_grad(): + residual = network(x, timesteps).sample + + # 2. use the model prediction to reconstruct an observation (de-noise) + obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"] + + # 3. [optional] add posterior noise to the sample + if eta > 0: + noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device) + posterior_variance = scheduler._get_variance(i) # * noise + # no noise when t == 0 + # NOTE: original implementation missing sqrt on posterior_variance + obs_reconstruct = ( + obs_reconstruct + int(i > 0) * (0.5 * posterior_variance) * eta * noise + ) # MJ had as log var, exponentiated + + # 4. 
apply conditions to the trajectory + obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) + x = to_torch(obs_reconstruct_postcond) + plans = helpers.to_np(x[:, :, :action_dim]) + # select random plan + idx = np.random.randint(plans.shape[0]) + # select action at correct time + action = plans[idx, 0, :] + actions = de_normalize(action, data, "actions") + ## execute action in environment + next_observation, reward, terminal, _ = env.step(action) + + ## update return + total_reward += reward + print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") + + # save observations for rendering + rollout.append(next_observation.copy()) + obs = next_observation except KeyboardInterrupt: - pass + pass print(f"Total reward: {total_reward}") -render =helpers.MuJoCoRenderer(env) -helpers.show_sample(render, np.expand_dims(np.stack(rollout),axis=0)) \ No newline at end of file +render = helpers.MuJoCoRenderer(env) +helpers.show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 645ba4604cea..7088e560dd66 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .modeling_utils import ModelMixin - from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, VQModel, ValueFunction + from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction, VQModel from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index 355999f76688..b771aaac8467 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -19,8 +19,8 @@ from .unet_1d import UNet1DModel from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel - from .vae import AutoencoderKL, VQModel from .unet_rl import ValueFunction + from .vae import AutoencoderKL, VQModel if is_flax_available(): from .unet_2d_condition_flax import FlaxUNet2DConditionModel diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index 6584bd13eda3..b6e052c8922f 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -5,21 +5,20 @@ import torch import torch.nn as nn -from diffusers.models.resnet import ResidualTemporalBlock1D, Downsample1D +from diffusers.models.resnet import Downsample1D, ResidualTemporalBlock1D from diffusers.models.unet_1d_blocks import get_down_block from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from ..utils import BaseOutput from .embeddings import TimestepEmbedding, Timesteps -from .resnet import rearrange_dims @dataclass class ValueFunctionOutput(BaseOutput): """ Args: - sample (`torch.FloatTensor` of shape `(batch, horizon, obs_dimension)`): + sample (`torch.FloatTensor` of shape `(batch, horizon, 1)`): Hidden states output. Output of last layer of model. 
""" @@ -31,7 +30,12 @@ class ValueFunction(ModelMixin, ConfigMixin): def __init__( self, in_channels=14, - down_block_types: Tuple[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), + down_block_types: Tuple[str] = ( + "DownResnetBlock1D", + "DownResnetBlock1D", + "DownResnetBlock1D", + "DownResnetBlock1D", + ), block_out_channels: Tuple[int] = (32, 64, 128, 256), act_fn: str = "mish", norm_num_groups: int = 8, @@ -40,8 +44,9 @@ def __init__( super().__init__() time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(num_channels=block_out_channels[0], flip_sin_to_cos=False, downscale_freq_shift=1) - self.time_mlp = TimestepEmbedding(channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn="mish", out_dim=block_out_channels[0]) - + self.time_mlp = TimestepEmbedding( + channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn="mish", out_dim=block_out_channels[0] + ) self.blocks = nn.ModuleList([]) mid_dim = block_out_channels[-1] @@ -67,14 +72,16 @@ def __init__( self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim // 2, embed_dim=block_out_channels[0]) self.mid_down1 = Downsample1D(mid_dim // 2, use_conv=True) ## - self.mid_block2 = ResidualTemporalBlock1D(mid_dim //2, mid_dim // 4, embed_dim=block_out_channels[0]) + self.mid_block2 = ResidualTemporalBlock1D(mid_dim // 2, mid_dim // 4, embed_dim=block_out_channels[0]) self.mid_down2 = Downsample1D(mid_dim // 4, use_conv=True) ## fc_dim = mid_dim // 4 - self.final_block = nn.ModuleList([ - nn.Linear(fc_dim + block_out_channels[0], fc_dim // 2), - nn.Mish(), - nn.Linear(fc_dim // 2, 1),] + self.final_block = nn.ModuleList( + [ + nn.Linear(fc_dim + block_out_channels[0], fc_dim // 2), + nn.Mish(), + nn.Linear(fc_dim // 2, 1), + ] ) def forward( @@ -88,10 +95,10 @@ def forward( sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor timestep (`torch.FloatTensor` or `float` or `int): batch (batch) timesteps return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple. + Whether or not to return a [`~models.unet_rl.ValueFunctionOutput`] instead of a plain tuple. Returns: - [`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True, + [`~models.unet_rl.ValueFunctionOutput`] or `tuple`: [`~models.unet_rl.ValueFunctionOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
""" sample = sample.permute(0, 2, 1) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 98f15f8dbb64..9072710886b8 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -284,7 +284,7 @@ def step( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) if self.variance_type == "fixed_small_log": - variance = (self._get_variance(t, predicted_variance=predicted_variance)) + variance = self._get_variance(t, predicted_variance=predicted_variance) variance = torch.exp(0.5 * variance) variance = variance * noise From 93fe3ef0ab23a25c66c77b6f5e988b360e54bb73 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 17 Oct 2022 15:03:11 -0400 Subject: [PATCH 050/133] style and quality --- examples/diffuser/helpers.py | 12 +- examples/diffuser/run_diffuser.py | 18 +-- .../diffuser/run_diffuser_value_guided.py | 115 ++++++------------ examples/diffuser/train_diffuser.py | 97 ++++++++------- src/diffusers/models/unet_rl.py | 5 +- 5 files changed, 106 insertions(+), 141 deletions(-) diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py index c2ec457abad7..3d873e4112dc 100644 --- a/examples/diffuser/helpers.py +++ b/examples/diffuser/helpers.py @@ -6,6 +6,7 @@ import gym import mediapy as media +import mujoco_py as mjc import tqdm @@ -107,11 +108,11 @@ def show_sample(renderer, observations, filename="sample.mp4", savebase="videos" images = [] for rollout in observations: - ## [ horizon x height x width x channels ] + # [ horizon x height x width x channels ] img = renderer._renders(rollout, partial=True) images.append(img) - ## [ horizon x height x (batch_size * width) x channels ] + # [ horizon x height x (batch_size * width) x channels ] images = np.concatenate(images, axis=2) media.write_video(savepath, images, fps=60) media.show_video(images, codec="h264", fps=60) @@ -120,7 +121,6 @@ def show_sample(renderer, observations, filename="sample.mp4", savebase="videos" # Code adapted from Michael Janner # source: https://github.com/jannerm/diffuser/blob/main/diffuser/utils/rendering.py -import mujoco_py as mjc def env_map(env_name): @@ -173,8 +173,8 @@ def __init__(self, env): self.env = gym.make(env) else: self.env = env - ## - 1 because the envs in renderer are fully-observed - ## @TODO : clean up + # - 1 because the envs in renderer are fully-observed + # @TODO : clean up self.observation_dim = np.prod(self.env.observation_space.shape) - 1 self.action_dim = np.prod(self.env.action_space.shape) try: @@ -194,7 +194,7 @@ def pad_observation(self, observation): def pad_observations(self, observations): qpos_dim = self.env.sim.data.qpos.size - ## xpos is hidden + # xpos is hidden xvel_dim = qpos_dim - 1 xvel = observations[:, xvel_dim] xpos = np.cumsum(xvel) * self.env.dt diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser.py index e69ffc117e3e..80eb8f20dadd 100644 --- a/examples/diffuser/run_diffuser.py +++ b/examples/diffuser/run_diffuser.py @@ -1,10 +1,11 @@ import numpy as np import torch -import d4rl +import d4rl # noqa import gym import helpers import tqdm +from diffusers import DDPMScheduler, UNet1DModel env_name = "hopper-medium-expert-v2" @@ -48,9 +49,6 @@ def to_torch(x_in, dtype=None, device=None): return torch.tensor(x_in, dtype=dtype, device=device) -from diffusers import DDPMScheduler, TemporalUNet - - # Two generators for different parts of the diffusion loop to work in 
colab generator_cpu = torch.Generator(device="cpu") @@ -58,9 +56,11 @@ def to_torch(x_in, dtype=None, device=None): # 3 different pretrained models are available for this task. # The horizion represents the length of trajectories used in training. -network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) +network = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) + + def reset_x0(x_in, cond, act_dim): for key, val in cond.items(): x_in[:, key, act_dim:] = val.clone() @@ -71,8 +71,8 @@ def reset_x0(x_in, cond, act_dim): clip_denoised = network.clip_denoised predict_epsilon = network.predict_epsilon -## add a batch dimension and repeat for multiple samples -## [ observation_dim ] --> [ n_samples x observation_dim ] +# add a batch dimension and repeat for multiple samples +# [ observation_dim ] --> [ n_samples x observation_dim ] obs = env.reset() total_reward = 0 done = False @@ -137,10 +137,10 @@ def reset_x0(x_in, cond, act_dim): # select action at correct time action = plans[idx, 0, :] actions = de_normalize(action, data, "actions") - ## execute action in environment + # execute action in environment next_observation, reward, terminal, _ = env.step(action) - ## update return + # update return total_reward += reward print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 529f4bbb66ac..31dbc8536f69 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -1,45 +1,13 @@ -import d4rl - +import numpy as np import torch + +import d4rl # noqa +import gym +import helpers import tqdm -import numpy as np -import gym from diffusers import DDPMScheduler, UNet1DModel, ValueFunction from helpers import MuJoCoRenderer, show_sample -import helpers -import wandb -import modal -import os -from pytorch_lightning import seed_everything - -seed_everything(0) - -stub = modal.Stub("diffusers-value-guided") -image = modal.Image.debian_slim().apt_install([ - "libgl1-mesa-dev", - "libgl1-mesa-glx", - "libglew-dev", - "libosmesa6-dev", - "software-properties-common", - "patchelf", - "git", - "ffmpeg", -]).pip_install([ - "torch", - "datasets", - "transformers", - "free-mujoco-py", - "einops", - "gym", - "protobuf==3.20.1", - "git+https://github.com/rail-berkeley/d4rl.git", - "wandb", - "mediapy", - "Pillow==9.0.0", - "moviepy", - "imageio", - "pytorch-lightning", - ]) + config = dict( n_samples=64, @@ -50,74 +18,73 @@ scale=0.1, eta=0.0, t_grad_cutoff=2, - device='cuda' + device="cpu", ) + def _run(): - wandb.init(project="diffusers-value-guided-rl") - wandb.config.update(config) env_name = "hopper-medium-v2" env = gym.make(env_name) - data = env.get_dataset() # dataset is only used for normalization in this colab + data = env.get_dataset() # dataset is only used for normalization in this colab render = MuJoCoRenderer(env) # Cuda settings for colab # torch.cuda.get_device_name(0) - DEVICE = config['device'] - DTYPE = torch.float + DEVICE = config["device"] # diffusion model settings - state_dim = env.observation_space.shape[0] + state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] # Two generators for different parts of the diffusion loop to work 
in colab
 # generator = torch.Generator(device='cuda')
 generator = torch.Generator(device=DEVICE)
- scheduler = DDPMScheduler(num_train_timesteps=config['num_inference_steps'],beta_schedule="squaredcos_cap_v2", clip_sample=False, variance_type="fixed_small_log")
+ scheduler = DDPMScheduler(
+ num_train_timesteps=config["num_inference_steps"],
+ beta_schedule="squaredcos_cap_v2",
+ clip_sample=False,
+ variance_type="fixed_small_log",
+ )

- # 3 different pretrained models are available for this task.
+ # 3 different pretrained models are available for this task.
 # The horizon represents the length of trajectories used in training.
 # network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11)
 network = ValueFunction.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval()
- unet = UNet1DModel.from_pretrained(f"bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval()
+ unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval()
 # unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE)
 # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE)
- ## add a batch dimension and repeat for multiple samples
- ## [ observation_dim ] --> [ n_samples x observation_dim ]
+ # add a batch dimension and repeat for multiple samples
+ # [ observation_dim ] --> [ n_samples x observation_dim ]
 env.seed(0)
 obs = env.reset()
 total_reward = 0
 total_score = 0
- done = False
 T = 1000
 rollout = [obs.copy()]
 trajectories = []
 y_maxes = [0]
 try:
 for t in tqdm.tqdm(range(T)):
- obs_raw = obs
 # 1. Call the policy
 # normalize observations for forward passes
- obs = helpers.normalize(obs, data, 'observations')
+ obs = helpers.normalize(obs, data, "observations")
- obs = obs[None].repeat(config['n_samples'], axis=0)
- conditions = {
- 0: helpers.to_torch(obs, device=DEVICE)
- }
+ obs = obs[None].repeat(config["n_samples"], axis=0)
+ conditions = {0: helpers.to_torch(obs, device=DEVICE)}
 # 2.
Call the diffusion model # constants for inference batch_size = len(conditions[0]) - shape = (batch_size, config['horizon'], state_dim+action_dim) + shape = (batch_size, config["horizon"], state_dim + action_dim) # sample random initial noise vector x1 = torch.randn(shape, device=DEVICE) # this model is conditioned from an initial state, so you will see this function - # multiple times to change the initial state of generated data to the state + # multiple times to change the initial state of generated data to the state # generated via env.reset() above or env.step() below x = helpers.reset_x0(x1, conditions, action_dim) @@ -133,23 +100,24 @@ def _run(): actions = sorted_values[:, :, :action_dim] if t % 10 == 0: trajectory = sorted_values[:, :, action_dim:][0].unsqueeze(0).detach().cpu().numpy() - trajectory = helpers.de_normalize(trajectory, data, 'observations') + trajectory = helpers.de_normalize(trajectory, data, "observations") trajectories.append(trajectory) actions = actions.detach().cpu().numpy() - denorm_actions = helpers.de_normalize(actions, data, key='actions') + denorm_actions = helpers.de_normalize(actions, data, key="actions") # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] denorm_actions = denorm_actions[0, 0] - - ## execute action in environment + # execute action in environment next_observation, reward, terminal, _ = env.step(denorm_actions) score = env.get_normalized_score(total_reward) - ## update return + # update return total_reward += reward total_score += score - wandb.log({"total_reward": total_reward, "reward": reward, "score": score, "total_score": total_score, "y_max": y_maxes[-1], "diff_from_expert_reward": reward - data['rewards'][t]}) - print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score: {total_score}") + print( + f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" + f" {total_score}" + ) # save observations for rendering rollout.append(next_observation.copy()) @@ -159,21 +127,12 @@ def _run(): print(f"Total reward: {total_reward}") - images = show_sample(render, np.expand_dims(np.stack(rollout),axis=0)) - wandb.log({"rollout": wandb.Video("videos/sample.mp4", fps=60, format='mp4')}) + show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) + -@stub.function( - image=image, - secret=modal.Secret.from_name("wandb-api-key"), - mounts=modal.create_package_mounts(["diffusers"]), - gpu=True -) def run(): - wandb.login(key=os.environ["WANDB_API_KEY"]) _run() if __name__ == "__main__": - # _run() - with stub.run(): - run() + run() diff --git a/examples/diffuser/train_diffuser.py b/examples/diffuser/train_diffuser.py index 902f5ec7357c..dd226fe3c813 100644 --- a/examples/diffuser/train_diffuser.py +++ b/examples/diffuser/train_diffuser.py @@ -1,75 +1,82 @@ -import d4rl - -import torch -import tqdm import numpy as np -import gym -from accelerate import Accelerator +import torch + +import d4rl # noqa +import gym +from diffusers import DDPMScheduler, UNet1DModel + + env_name = "hopper-medium-expert-v2" env = gym.make(env_name) -data = env.get_dataset() # dataset is only used for normalization in this colab +data = env.get_dataset() # dataset is only used for normalization in this colab # Cuda settings for colab # torch.cuda.get_device_name(0) -DEVICE = 'cpu' +DEVICE = "cpu" DTYPE = torch.float # diffusion model settings -n_samples = 4 # number of trajectories planned via diffusion -horizon = 128 # length of sampled trajectories -state_dim = 
env.observation_space.shape[0]
+n_samples = 4 # number of trajectories planned via diffusion
+horizon = 128 # length of sampled trajectories
+state_dim = env.observation_space.shape[0]
 action_dim = env.action_space.shape[0]
-num_inference_steps = 100 # number of diffusion steps
+num_inference_steps = 100 # number of diffusion steps
+

 def normalize(x_in, data, key):
- upper = np.max(data[key], axis=0)
- lower = np.min(data[key], axis=0)
- x_out = 2*(x_in - lower)/(upper-lower) - 1
- return x_out
+ upper = np.max(data[key], axis=0)
+ lower = np.min(data[key], axis=0)
+ x_out = 2 * (x_in - lower) / (upper - lower) - 1
+ return x_out
+

 def de_normalize(x_in, data, key):
- upper = np.max(data[key], axis=0)
- lower = np.min(data[key], axis=0)
- x_out = lower + (upper - lower)*(1 + x_in) /2
- return x_out
-
+ upper = np.max(data[key], axis=0)
+ lower = np.min(data[key], axis=0)
+ x_out = lower + (upper - lower) * (1 + x_in) / 2
+ return x_out
+
+
 def to_torch(x_in, dtype=None, device=None):
- dtype = dtype or DTYPE
- device = device or DEVICE
- if type(x_in) is dict:
- return {k: to_torch(v, dtype, device) for k, v in x_in.items()}
- elif torch.is_tensor(x_in):
- return x_in.to(device).type(dtype)
- return torch.tensor(x_in, dtype=dtype, device=device)
+ dtype = dtype or DTYPE
+ device = device or DEVICE
+ if type(x_in) is dict:
+ return {k: to_torch(v, dtype, device) for k, v in x_in.items()}
+ elif torch.is_tensor(x_in):
+ return x_in.to(device).type(dtype)
+ return torch.tensor(x_in, dtype=dtype, device=device)
+

 obs = env.reset()
 obs_raw = obs

 # normalize observations for forward passes
-obs = normalize(obs, data, 'observations')
+obs = normalize(obs, data, "observations")

-from diffusers import DDPMScheduler, TemporalUNet

 # Two generators for different parts of the diffusion loop to work in colab
-generator = torch.Generator(device='cuda')
-generator_cpu = torch.Generator(device='cpu')
-network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE)
+generator = torch.Generator(device="cuda")
+generator_cpu = torch.Generator(device="cpu")
+network = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE)

-scheduler = DDPMScheduler(num_train_timesteps=100,beta_schedule="squaredcos_cap_v2")
+scheduler = DDPMScheduler(num_train_timesteps=100, beta_schedule="squaredcos_cap_v2")
 optimizer = torch.optim.AdamW(
- network.parameters(),
- lr=0.001,
- betas=(0.95, 0.99),
- weight_decay=1e-6,
- eps=1e-8,
- )
+ network.parameters(),
+ lr=0.001,
+ betas=(0.95, 0.99),
+ weight_decay=1e-6,
+ eps=1e-8,
+)
-# 3 different pretrained models are available for this task.
+# 3 different pretrained models are available for this task.
 # The horizon represents the length of trajectories used in training.
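The `normalize`/`de_normalize` pair re-indented above is a plain min-max map onto [-1, 1] and its exact inverse. A quick round-trip check, illustrative only and using toy statistics in place of the real D4RL arrays:

```python
import numpy as np

# toy stand-in for data["observations"]; the scripts use the D4RL dataset here
data = {"observations": np.array([[0.0, 10.0], [4.0, 30.0]])}
x = np.array([1.0, 15.0])

upper = np.max(data["observations"], axis=0)
lower = np.min(data["observations"], axis=0)
x_norm = 2 * (x - lower) / (upper - lower) - 1       # normalize: [lower, upper] -> [-1, 1]
x_back = lower + (upper - lower) * (1 + x_norm) / 2  # de_normalize: the inverse map
assert np.allclose(x, x_back)
```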
# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) + + def reset_x0(x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + -# TODO: Flesh this out using accelerate library (a la other examples) \ No newline at end of file +# TODO: Flesh this out using accelerate library (a la other examples) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py index b6e052c8922f..66822f99b198 100644 --- a/src/diffusers/models/unet_rl.py +++ b/src/diffusers/models/unet_rl.py @@ -55,7 +55,6 @@ def __init__( for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] - is_final_block = i == len(block_out_channels) - 1 down_block_type = down_block_types[i] down_block = get_down_block( @@ -98,8 +97,8 @@ def forward( Whether or not to return a [`~models.unet_rl.ValueFunctionOutput`] instead of a plain tuple. Returns: - [`~models.unet_rl.ValueFunctionOutput`] or `tuple`: [`~models.unet_rl.ValueFunctionOutput`] if `return_dict` is True, - otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. + [`~models.unet_rl.ValueFunctionOutput`] or `tuple`: [`~models.unet_rl.ValueFunctionOutput`] if + `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ sample = sample.permute(0, 2, 1) From 4c6850473dfbdb7b6a792141d6683669e8a2e793 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 18 Oct 2022 11:22:46 -0700 Subject: [PATCH 051/133] add ddim --- src/diffusers/schedulers/scheduling_ddim.py | 29 ++++++++++++++--- src/diffusers/schedulers/scheduling_ddpm.py | 36 ++++++++++----------- 2 files changed, 41 insertions(+), 24 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py index 33d9bafb8aed..b12a565ee196 100644 --- a/src/diffusers/schedulers/scheduling_ddim.py +++ b/src/diffusers/schedulers/scheduling_ddim.py @@ -145,6 +145,7 @@ def __init__( self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.sigmas = 1 - self.alphas**2 # At every step in ddim, we are looking into the previous alphas_cumprod # For the final step, there is no previous alphas_cumprod because we are already at 0 @@ -209,6 +210,7 @@ def step( model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, + prediction_type: str = "epsilon", eta: float = 0.0, use_clipped_model_output: bool = False, generator=None, @@ -223,6 +225,10 @@ def step( timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. + prediction_type (`str`): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample), or `v` (see section 2.4 + https://imagen.research.google/video/paper.pdf) eta (`float`): weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`): TODO generator: random number generator. 
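The `prediction_type` options documented above are three parametrizations of the same denoiser; they differ only in how x_0 is reconstructed from the model output. A minimal sketch in terms of the cumulative product of alphas (a hypothetical helper, not the library API; note the patch itself still routes `v` through per-step `alphas`/`sigmas` and raises `NotImplementedError` until the sigma handling lands):

```python
import torch

def predict_x0(model_output, sample, alpha_prod_t, prediction_type="epsilon"):
    # alpha_prod_t is the cumulative product of alphas up to the current timestep
    beta_prod_t = 1 - alpha_prod_t
    if prediction_type == "epsilon":
        # model predicts the noise: x_0 = (x_t - sqrt(1 - a_bar) * eps) / sqrt(a_bar)
        return (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    if prediction_type == "sample":
        # model predicts x_0 directly
        return model_output
    if prediction_type == "v":
        # v = sqrt(a_bar) * eps - sqrt(1 - a_bar) * x_0 (Imagen Video, section 2.4)
        return alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output
    raise ValueError(f"prediction_type {prediction_type} must be `epsilon`, `sample`, or `v`")
```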
@@ -243,14 +249,14 @@ def step( # Ideally, read DDIM paper in-detail understanding # Notation ( -> - # - pred_noise_t -> e_theta(x_t, t) - # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - pred_noise_t -> e_theta(x_t, timestep) + # - pred_original_sample -> f_theta(x_t, timestep) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" - # 1. get previous step value (=t-1) + # 1. get previous step value (=timestep-1) prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas @@ -261,7 +267,20 @@ def step( # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + if prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + eps = torch.tensor(1) + elif prediction_type == "sample": + pred_original_sample = model_output + eps = torch.tensor(1) + elif prediction_type == "v": + # v_t = alpha_t * epsilon - sigma_t * x + # need to merge the PRs for sigma to be available in DDPM + pred_original_sample = sample * self.alphas[timestep] - model_output * self.sigmas[timestep] + eps = model_output * self.alphas[timestep] - sample * self.sigmas[timestep] + raise NotImplementedError(f"v prediction not yet implemented for DDPM") + else: + raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") # 4. Clip "predicted x_0" if self.config.clip_sample: @@ -280,7 +299,7 @@ def step( pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + eps * pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 6ab2498956d7..daf17a2de3e2 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -183,14 +183,14 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic )[::-1].copy() self.timesteps = torch.from_numpy(timesteps).to(device) - def _get_variance(self, t, predicted_variance=None, variance_type=None): - alpha_prod_t = self.alphas_cumprod[t] - alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one + def _get_variance(self, timestep, predicted_variance=None, variance_type=None): + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else self.one - # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # For timestep > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample - # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample - variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t] + # x_{timestep-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[timestep] if variance_type is None: variance_type = self.config.variance_type @@ -202,15 +202,15 @@ def _get_variance(self, t, predicted_variance=None, variance_type=None): elif variance_type == "fixed_small_log": variance = torch.log(torch.clamp(variance, min=1e-20)) elif variance_type == "fixed_large": - variance = self.betas[t] + variance = self.betas[timestep] elif variance_type == "fixed_large_log": # Glide max_log - variance = torch.log(self.betas[t]) + variance = torch.log(self.betas[timestep]) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": min_log = variance - max_log = self.betas[t] + max_log = self.betas[timestep] frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log @@ -247,16 +247,14 @@ def step( returning a tuple, the first element is the sample tensor. """ - t = timestep - if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None # 1. 
compute alphas, betas - alpha_prod_t = self.alphas_cumprod[t] - alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev @@ -269,8 +267,8 @@ def step( elif prediction_type == "v": # v_t = alpha_t * epsilon - sigma_t * x # need to merge the PRs for sigma to be available in DDPM - pred_original_sample = sample * self.alphas[t] - model_output * self.sigmas[t] - eps = model_output * self.alphas[t] - sample * self.sigmas[t] + pred = sample * self.alphas[timestep] - model_output * self.sigmas[timestep] + eps = model_output * self.alphas[timestep] - sample * self.sigmas[timestep] raise NotImplementedError(f"v prediction not yet implemented for DDPM") else: raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") @@ -281,8 +279,8 @@ def step( # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf - pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t - current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[timestep]) / beta_prod_t + current_sample_coeff = self.alphas[timestep] ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf @@ -290,11 +288,11 @@ def step( # 6. Add noise variance = 0 - if t > 0: + if timestep > 0: noise = torch.randn( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) - variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise + variance = (self._get_variance(timestep, predicted_variance=predicted_variance) ** 0.5) * noise pred_prev_sample = pred_prev_sample + variance From ac6be90a718d1dc3fe5c57e411082577a19454ac Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 18 Oct 2022 11:42:51 -0700 Subject: [PATCH 052/133] style --- src/diffusers/schedulers/scheduling_ddim.py | 1 - src/diffusers/schedulers/scheduling_ddpm.py | 1 - 2 files changed, 2 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py index b12a565ee196..abdcb3e81a58 100644 --- a/src/diffusers/schedulers/scheduling_ddim.py +++ b/src/diffusers/schedulers/scheduling_ddim.py @@ -278,7 +278,6 @@ def step( # need to merge the PRs for sigma to be available in DDPM pred_original_sample = sample * self.alphas[timestep] - model_output * self.sigmas[timestep] eps = model_output * self.alphas[timestep] - sample * self.sigmas[timestep] - raise NotImplementedError(f"v prediction not yet implemented for DDPM") else: raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index daf17a2de3e2..ee4f608e09aa 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -269,7 +269,6 @@ def step( # need to merge the PRs for sigma to be available in DDPM pred = sample * self.alphas[timestep] - model_output * self.sigmas[timestep] eps = model_output * self.alphas[timestep] - sample * self.sigmas[timestep] - 
raise NotImplementedError(f"v prediction not yet implemented for DDPM") else: raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") From ffb73552a39280b00c16672f0d53e903de5c2b46 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 18 Oct 2022 13:47:33 -0700 Subject: [PATCH 053/133] add output activation --- src/diffusers/models/unet_1d.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index 2e20cacb64f1..3ede756c9b3d 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -125,7 +125,10 @@ def __init__( num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) self.final_conv1d_1 = nn.Conv1d(block_out_channels[0], block_out_channels[0], 5, padding=2) self.final_conv1d_gn = nn.GroupNorm(num_groups_out, block_out_channels[0]) - self.final_conv1d_act = nn.Mish() + if act_fn == "silu": + self.final_conv1d_act = nn.SiLU() + if act_fn == "mish": + self.final_conv1d_act = nn.Mish() self.final_conv1d_2 = nn.Conv1d(block_out_channels[0], out_channels, 1) def forward( From a6314f67962ca95c40a5b8a41e51ed8dbeebf666 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 18 Oct 2022 14:38:51 -0700 Subject: [PATCH 054/133] rename flax blocks file --- .../models/{unet_blocks_flax.py => unet_2d_blocks_flax.py} | 0 src/diffusers/models/unet_2d_condition_flax.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename src/diffusers/models/{unet_blocks_flax.py => unet_2d_blocks_flax.py} (100%) diff --git a/src/diffusers/models/unet_blocks_flax.py b/src/diffusers/models/unet_2d_blocks_flax.py similarity index 100% rename from src/diffusers/models/unet_blocks_flax.py rename to src/diffusers/models/unet_2d_blocks_flax.py diff --git a/src/diffusers/models/unet_2d_condition_flax.py b/src/diffusers/models/unet_2d_condition_flax.py index 5411855f79d5..f0e721826bd7 100644 --- a/src/diffusers/models/unet_2d_condition_flax.py +++ b/src/diffusers/models/unet_2d_condition_flax.py @@ -23,7 +23,7 @@ from ..modeling_flax_utils import FlaxModelMixin from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps -from .unet_blocks_flax import ( +from .unet_2d_blocks_flax import ( FlaxCrossAttnDownBlock2D, FlaxCrossAttnUpBlock2D, FlaxDownBlock2D, From 4e378e9518e9a2fb81507bf83e0d7918b35aef34 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 18 Oct 2022 19:18:03 -0400 Subject: [PATCH 055/133] edits based on comments --- .gitignore | 4 +- examples/diffuser/helpers.py | 267 ---------------- examples/diffuser/run_diffuser.py | 52 +-- .../diffuser/run_diffuser_value_guided.py | 21 +- examples/diffuser/train_diffuser.py | 298 ++++++++++++++++-- .../convert_models_diffuser_to_diffusers.py | 0 src/diffusers/schedulers/scheduling_ddpm.py | 6 +- 7 files changed, 289 insertions(+), 359 deletions(-) delete mode 100644 examples/diffuser/helpers.py rename convert_model.py => scripts/convert_models_diffuser_to_diffusers.py (100%) diff --git a/.gitignore b/.gitignore index f066e7f84299..f018a111ea33 100644 --- a/.gitignore +++ b/.gitignore @@ -164,5 +164,5 @@ tags # DS_Store (MacOS) .DS_Store -*.mp4 -hub/* \ No newline at end of file +# RL pipelines may produce mp4 outputs +*.mp4 \ No newline at end of file diff --git a/examples/diffuser/helpers.py b/examples/diffuser/helpers.py deleted file mode 100644 index 3d873e4112dc..000000000000 --- a/examples/diffuser/helpers.py +++ 
/dev/null @@ -1,267 +0,0 @@ -import os -import warnings - -import numpy as np -import torch - -import gym -import mediapy as media -import mujoco_py as mjc -import tqdm - - -DTYPE = torch.float - - -def normalize(x_in, data, key): - means = data[key].mean(axis=0) - stds = data[key].std(axis=0) - return (x_in - means) / stds - - -def de_normalize(x_in, data, key): - means = data[key].mean(axis=0) - stds = data[key].std(axis=0) - return x_in * stds + means - - -def to_torch(x_in, dtype=None, device="cuda"): - dtype = dtype or DTYPE - device = device - if type(x_in) is dict: - return {k: to_torch(v, dtype, device) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(device).type(dtype) - return torch.tensor(x_in, dtype=dtype, device=device) - - -def reset_x0(x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in - - -def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config): - y = None - for i in tqdm.tqdm(scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = torch.full((config["n_samples"],), i, device=config["device"], dtype=torch.long) - # 3. call the sample function - for _ in range(config["n_guide_steps"]): - with torch.enable_grad(): - x.requires_grad_() - y = network(x, timesteps).sample - grad = torch.autograd.grad([y.sum()], [x])[0] - if config["scale_grad_by_std"]: - posterior_variance = scheduler._get_variance(i) - model_std = torch.exp(0.5 * posterior_variance) - grad = model_std * grad - grad[timesteps < config["t_grad_cutoff"]] = 0 - x = x.detach() - x = x + config["scale"] * grad - x = reset_x0(x, conditions, action_dim) - # with torch.no_grad(): - prev_x = unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) - x = scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - - # 3. [optional] add posterior noise to the sample - if config["eta"] > 0: - noise = torch.randn(x.shape).to(x.device) - posterior_variance = scheduler._get_variance(i) # * noise - # no noise when t == 0 - # NOTE: original implementation missing sqrt on posterior_variance - x = x + int(i > 0) * (0.5 * posterior_variance) * config["eta"] * noise # MJ had as log var, exponentiated - - # 4. 
apply conditions to the trajectory - x = reset_x0(x, conditions, action_dim) - x = to_torch(x, device=config["device"]) - # y = network(x, timesteps).sample - return x, y - - -def to_np(x_in): - if torch.is_tensor(x_in): - x_in = x_in.detach().cpu().numpy() - return x_in - - -# from MJ's Diffuser code -# https://github.com/jannerm/diffuser/blob/76ae49ae85ba1c833bf78438faffdc63b8b4d55d/diffuser/utils/colab.py#L79 -def mkdir(savepath): - """ - returns `True` iff `savepath` is created - """ - if not os.path.exists(savepath): - os.makedirs(savepath) - return True - else: - return False - - -def show_sample(renderer, observations, filename="sample.mp4", savebase="videos"): - """ - observations : [ batch_size x horizon x observation_dim ] - """ - - mkdir(savebase) - savepath = os.path.join(savebase, filename) - - images = [] - for rollout in observations: - # [ horizon x height x width x channels ] - img = renderer._renders(rollout, partial=True) - images.append(img) - - # [ horizon x height x (batch_size * width) x channels ] - images = np.concatenate(images, axis=2) - media.write_video(savepath, images, fps=60) - media.show_video(images, codec="h264", fps=60) - return images - - -# Code adapted from Michael Janner -# source: https://github.com/jannerm/diffuser/blob/main/diffuser/utils/rendering.py - - -def env_map(env_name): - """ - map D4RL dataset names to custom fully-observed - variants for rendering - """ - if "halfcheetah" in env_name: - return "HalfCheetahFullObs-v2" - elif "hopper" in env_name: - return "HopperFullObs-v2" - elif "walker2d" in env_name: - return "Walker2dFullObs-v2" - else: - return env_name - - -def get_image_mask(img): - background = (img == 255).all(axis=-1, keepdims=True) - mask = ~background.repeat(3, axis=-1) - return mask - - -def atmost_2d(x): - while x.ndim > 2: - x = x.squeeze(0) - return x - - -def set_state(env, state): - qpos_dim = env.sim.data.qpos.size - qvel_dim = env.sim.data.qvel.size - if not state.size == qpos_dim + qvel_dim: - warnings.warn( - f"[ utils/rendering ] Expected state of size {qpos_dim + qvel_dim}, but got state of size {state.size}" - ) - state = state[: qpos_dim + qvel_dim] - - env.set_state(state[:qpos_dim], state[qpos_dim:]) - - -class MuJoCoRenderer: - """ - default mujoco renderer - """ - - def __init__(self, env): - if type(env) is str: - env = env_map(env) - self.env = gym.make(env) - else: - self.env = env - # - 1 because the envs in renderer are fully-observed - # @TODO : clean up - self.observation_dim = np.prod(self.env.observation_space.shape) - 1 - self.action_dim = np.prod(self.env.action_space.shape) - try: - self.viewer = mjc.MjRenderContextOffscreen(self.env.sim) - except: - print("[ utils/rendering ] Warning: could not initialize offscreen renderer") - self.viewer = None - - def pad_observation(self, observation): - state = np.concatenate( - [ - np.zeros(1), - observation, - ] - ) - return state - - def pad_observations(self, observations): - qpos_dim = self.env.sim.data.qpos.size - # xpos is hidden - xvel_dim = qpos_dim - 1 - xvel = observations[:, xvel_dim] - xpos = np.cumsum(xvel) * self.env.dt - states = np.concatenate( - [ - xpos[:, None], - observations, - ], - axis=-1, - ) - return states - - def render(self, observation, dim=256, partial=False, qvel=True, render_kwargs=None, conditions=None): - if type(dim) == int: - dim = (dim, dim) - - if self.viewer is None: - return np.zeros((*dim, 3), np.uint8) - - if render_kwargs is None: - xpos = observation[0] if not partial else 0 - render_kwargs = {"trackbodyid": 2, 
"distance": 3, "lookat": [xpos, -0.5, 1], "elevation": -20} - - for key, val in render_kwargs.items(): - if key == "lookat": - self.viewer.cam.lookat[:] = val[:] - else: - setattr(self.viewer.cam, key, val) - - if partial: - state = self.pad_observation(observation) - else: - state = observation - - qpos_dim = self.env.sim.data.qpos.size - if not qvel or state.shape[-1] == qpos_dim: - qvel_dim = self.env.sim.data.qvel.size - state = np.concatenate([state, np.zeros(qvel_dim)]) - - set_state(self.env, state) - - self.viewer.render(*dim) - data = self.viewer.read_pixels(*dim, depth=False) - data = data[::-1, :, :] - return data - - def _renders(self, observations, **kwargs): - images = [] - for observation in observations: - img = self.render(observation, **kwargs) - images.append(img) - return np.stack(images, axis=0) - - def renders(self, samples, partial=False, **kwargs): - if partial: - samples = self.pad_observations(samples) - partial = False - - sample_images = self._renders(samples, partial=partial, **kwargs) - - composite = np.ones_like(sample_images[0]) * 255 - - for img in sample_images: - mask = get_image_mask(img) - composite[mask] = img[mask] - - return composite - - def __call__(self, *args, **kwargs): - return self.renders(*args, **kwargs) diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser.py index 80eb8f20dadd..ce11363bbbb9 100644 --- a/examples/diffuser/run_diffuser.py +++ b/examples/diffuser/run_diffuser.py @@ -3,7 +3,7 @@ import d4rl # noqa import gym -import helpers +import train_diffuser import tqdm from diffusers import DDPMScheduler, UNet1DModel @@ -25,30 +25,6 @@ num_inference_steps = 100 # number of difusion steps -def normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = 2 * (x_in - lower) / (upper - lower) - 1 - return x_out - - -def de_normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = lower + (upper - lower) * (1 + x_in) / 2 - return x_out - - -def to_torch(x_in, dtype=None, device=None): - dtype = dtype or DTYPE - device = device or DEVICE - if type(x_in) is dict: - return {k: to_torch(v, dtype, device) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(device).type(dtype) - return torch.tensor(x_in, dtype=dtype, device=device) - - # Two generators for different parts of the diffusion loop to work in colab generator_cpu = torch.Generator(device="cpu") @@ -61,12 +37,6 @@ def to_torch(x_in, dtype=None, device=None): # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) -def reset_x0(x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in - - # network specific constants for inference clip_denoised = network.clip_denoised predict_epsilon = network.predict_epsilon @@ -84,9 +54,9 @@ def reset_x0(x_in, cond, act_dim): obs_raw = obs # normalize observations for forward passes - obs = normalize(obs, data, "observations") + obs = train_diffuser.normalize(obs, data, "observations") obs = obs[None].repeat(n_samples, axis=0) - conditions = {0: to_torch(obs, device=DEVICE)} + conditions = {0: train_diffuser.to_torch(obs, device=DEVICE)} # constants for inference batch_size = len(conditions[0]) @@ -98,10 +68,10 @@ def reset_x0(x_in, cond, act_dim): # this model is conditioned from an initial state, so you will see this function # multiple times to change the initial state of generated data to the state # generated via 
env.reset() above or env.step() below - x = reset_x0(x1, conditions, action_dim) + x = train_diffuser.reset_x0(x1, conditions, action_dim) # convert a np observation to torch for model forward pass - x = to_torch(x) + x = train_diffuser.to_torch(x) eta = 1.0 # noise factor for sampling reconstructed state @@ -129,14 +99,14 @@ def reset_x0(x_in, cond, act_dim): ) # MJ had as log var, exponentiated # 4. apply conditions to the trajectory - obs_reconstruct_postcond = reset_x0(obs_reconstruct, conditions, action_dim) - x = to_torch(obs_reconstruct_postcond) - plans = helpers.to_np(x[:, :, :action_dim]) + obs_reconstruct_postcond = train_diffuser.reset_x0(obs_reconstruct, conditions, action_dim) + x = train_diffuser.to_torch(obs_reconstruct_postcond) + plans = train_diffuser.helpers.to_np(x[:, :, :action_dim]) # select random plan idx = np.random.randint(plans.shape[0]) # select action at correct time action = plans[idx, 0, :] - actions = de_normalize(action, data, "actions") + actions = train_diffuser.de_normalize(action, data, "actions") # execute action in environment next_observation, reward, terminal, _ = env.step(action) @@ -151,5 +121,5 @@ def reset_x0(x_in, cond, act_dim): pass print(f"Total reward: {total_reward}") -render = helpers.MuJoCoRenderer(env) -helpers.show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) +render = train_diffuser.MuJoCoRenderer(env) +train_diffuser.show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 31dbc8536f69..cc61650ddcdf 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -3,10 +3,9 @@ import d4rl # noqa import gym -import helpers +import train_diffuser import tqdm from diffusers import DDPMScheduler, UNet1DModel, ValueFunction -from helpers import MuJoCoRenderer, show_sample config = dict( @@ -26,7 +25,7 @@ def _run(): env_name = "hopper-medium-v2" env = gym.make(env_name) data = env.get_dataset() # dataset is only used for normalization in this colab - render = MuJoCoRenderer(env) + render = train_diffuser.MuJoCoRenderer(env) # Cuda settings for colab # torch.cuda.get_device_name(0) @@ -70,10 +69,10 @@ def _run(): for t in tqdm.tqdm(range(T)): # 1. Call the policy # normalize observations for forward passes - obs = helpers.normalize(obs, data, "observations") + obs = train_diffuser.normalize(obs, data, "observations") obs = obs[None].repeat(config["n_samples"], axis=0) - conditions = {0: helpers.to_torch(obs, device=DEVICE)} + conditions = {0: train_diffuser.to_torch(obs, device=DEVICE)} # 2. 
Call the diffusion model # constants for inference @@ -86,11 +85,11 @@ def _run(): # this model is conditioned from an initial state, so you will see this function # multiple times to change the initial state of generated data to the state # generated via env.reset() above or env.step() below - x = helpers.reset_x0(x1, conditions, action_dim) + x = train_diffuser.reset_x0(x1, conditions, action_dim) # convert a np observation to torch for model forward pass - x = helpers.to_torch(x, device=DEVICE) - x, y = helpers.run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config) + x = train_diffuser.to_torch(x, device=DEVICE) + x, y = train_diffuser.run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config) if y is not None: sorted_idx = y.argsort(0, descending=True).squeeze() y_maxes.append(y[sorted_idx[0]].detach().cpu().numpy()) @@ -100,11 +99,11 @@ def _run(): actions = sorted_values[:, :, :action_dim] if t % 10 == 0: trajectory = sorted_values[:, :, action_dim:][0].unsqueeze(0).detach().cpu().numpy() - trajectory = helpers.de_normalize(trajectory, data, "observations") + trajectory = train_diffuser.de_normalize(trajectory, data, "observations") trajectories.append(trajectory) actions = actions.detach().cpu().numpy() - denorm_actions = helpers.de_normalize(actions, data, key="actions") + denorm_actions = train_diffuser.de_normalize(actions, data, key="actions") # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] denorm_actions = denorm_actions[0, 0] @@ -127,7 +126,7 @@ def _run(): print(f"Total reward: {total_reward}") - show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) + train_diffuser.show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) def run(): diff --git a/examples/diffuser/train_diffuser.py b/examples/diffuser/train_diffuser.py index dd226fe3c813..784ba4d48d52 100644 --- a/examples/diffuser/train_diffuser.py +++ b/examples/diffuser/train_diffuser.py @@ -1,45 +1,38 @@ +import os +import warnings + import numpy as np import torch +import mediapy as media +import mujoco_py as mjc +import tqdm import d4rl # noqa import gym from diffusers import DDPMScheduler, UNet1DModel -env_name = "hopper-medium-expert-v2" -env = gym.make(env_name) -data = env.get_dataset() # dataset is only used for normalization in this colab +# Define some helper functions -# Cuda settings for colab -# torch.cuda.get_device_name(0) -DEVICE = "cpu" -DTYPE = torch.float -# diffusion model settings -n_samples = 4 # number of trajectories planned via diffusion -horizon = 128 # length of sampled trajectories -state_dim = env.observation_space.shape[0] -action_dim = env.action_space.shape[0] -num_inference_steps = 100 # number of difusion steps +DTYPE = torch.float def normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = 2 * (x_in - lower) / (upper - lower) - 1 - return x_out + means = data[key].mean(axis=0) + stds = data[key].std(axis=0) + return (x_in - means) / stds def de_normalize(x_in, data, key): - upper = np.max(data[key], axis=0) - lower = np.min(data[key], axis=0) - x_out = lower + (upper - lower) * (1 + x_in) / 2 - return x_out + means = data[key].mean(axis=0) + stds = data[key].std(axis=0) + return x_in * stds + means -def to_torch(x_in, dtype=None, device=None): +def to_torch(x_in, dtype=None, device="cuda"): dtype = dtype or DTYPE - device = device or DEVICE + device = device if type(x_in) is dict: return {k: to_torch(v, dtype, device) for k, v in 
x_in.items()} elif torch.is_tensor(x_in): @@ -47,6 +40,254 @@ def to_torch(x_in, dtype=None, device=None): return torch.tensor(x_in, dtype=dtype, device=device) +def reset_x0(x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + + +def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config): + y = None + for i in tqdm.tqdm(scheduler.timesteps): + # create batch of timesteps to pass into model + timesteps = torch.full((config["n_samples"],), i, device=config["device"], dtype=torch.long) + # 3. call the sample function + for _ in range(config["n_guide_steps"]): + with torch.enable_grad(): + x.requires_grad_() + y = network(x, timesteps).sample + grad = torch.autograd.grad([y.sum()], [x])[0] + if config["scale_grad_by_std"]: + posterior_variance = scheduler._get_variance(i) + model_std = torch.exp(0.5 * posterior_variance) + grad = model_std * grad + grad[timesteps < config["t_grad_cutoff"]] = 0 + x = x.detach() + x = x + config["scale"] * grad + x = reset_x0(x, conditions, action_dim) + # with torch.no_grad(): + prev_x = unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) + x = scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] + + # 3. [optional] add posterior noise to the sample + if config["eta"] > 0: + noise = torch.randn(x.shape).to(x.device) + posterior_variance = scheduler._get_variance(i) # * noise + # no noise when t == 0 + # NOTE: original implementation missing sqrt on posterior_variance + x = x + int(i > 0) * (0.5 * posterior_variance) * config["eta"] * noise # MJ had as log var, exponentiated + + # 4. apply conditions to the trajectory + x = reset_x0(x, conditions, action_dim) + x = to_torch(x, device=config["device"]) + # y = network(x, timesteps).sample + return x, y + + +def to_np(x_in): + if torch.is_tensor(x_in): + x_in = x_in.detach().cpu().numpy() + return x_in + + +# from MJ's Diffuser code +# https://github.com/jannerm/diffuser/blob/76ae49ae85ba1c833bf78438faffdc63b8b4d55d/diffuser/utils/colab.py#L79 +def mkdir(savepath): + """ + returns `True` iff `savepath` is created + """ + if not os.path.exists(savepath): + os.makedirs(savepath) + return True + else: + return False + + +def show_sample(renderer, observations, filename="sample.mp4", savebase="videos"): + """ + observations : [ batch_size x horizon x observation_dim ] + """ + + mkdir(savebase) + savepath = os.path.join(savebase, filename) + + images = [] + for rollout in observations: + # [ horizon x height x width x channels ] + img = renderer._renders(rollout, partial=True) + images.append(img) + + # [ horizon x height x (batch_size * width) x channels ] + images = np.concatenate(images, axis=2) + media.write_video(savepath, images, fps=60) + media.show_video(images, codec="h264", fps=60) + return images + + +# Code adapted from Michael Janner +# source: https://github.com/jannerm/diffuser/blob/main/diffuser/utils/rendering.py + + +def env_map(env_name): + """ + map D4RL dataset names to custom fully-observed + variants for rendering + """ + if "halfcheetah" in env_name: + return "HalfCheetahFullObs-v2" + elif "hopper" in env_name: + return "HopperFullObs-v2" + elif "walker2d" in env_name: + return "Walker2dFullObs-v2" + else: + return env_name + + +def get_image_mask(img): + background = (img == 255).all(axis=-1, keepdims=True) + mask = ~background.repeat(3, axis=-1) + return mask + + +def atmost_2d(x): + while x.ndim > 2: + x = x.squeeze(0) + return x + + +def set_state(env, state): + qpos_dim = 
env.sim.data.qpos.size + qvel_dim = env.sim.data.qvel.size + if not state.size == qpos_dim + qvel_dim: + warnings.warn( + f"[ utils/rendering ] Expected state of size {qpos_dim + qvel_dim}, but got state of size {state.size}" + ) + state = state[: qpos_dim + qvel_dim] + + env.set_state(state[:qpos_dim], state[qpos_dim:]) + + +class MuJoCoRenderer: + """ + default mujoco renderer + """ + + def __init__(self, env): + if type(env) is str: + env = env_map(env) + self.env = gym.make(env) + else: + self.env = env + # - 1 because the envs in renderer are fully-observed + # @TODO : clean up + self.observation_dim = np.prod(self.env.observation_space.shape) - 1 + self.action_dim = np.prod(self.env.action_space.shape) + try: + self.viewer = mjc.MjRenderContextOffscreen(self.env.sim) + except: + print("[ utils/rendering ] Warning: could not initialize offscreen renderer") + self.viewer = None + + def pad_observation(self, observation): + state = np.concatenate( + [ + np.zeros(1), + observation, + ] + ) + return state + + def pad_observations(self, observations): + qpos_dim = self.env.sim.data.qpos.size + # xpos is hidden + xvel_dim = qpos_dim - 1 + xvel = observations[:, xvel_dim] + xpos = np.cumsum(xvel) * self.env.dt + states = np.concatenate( + [ + xpos[:, None], + observations, + ], + axis=-1, + ) + return states + + def render(self, observation, dim=256, partial=False, qvel=True, render_kwargs=None, conditions=None): + if type(dim) == int: + dim = (dim, dim) + + if self.viewer is None: + return np.zeros((*dim, 3), np.uint8) + + if render_kwargs is None: + xpos = observation[0] if not partial else 0 + render_kwargs = {"trackbodyid": 2, "distance": 3, "lookat": [xpos, -0.5, 1], "elevation": -20} + + for key, val in render_kwargs.items(): + if key == "lookat": + self.viewer.cam.lookat[:] = val[:] + else: + setattr(self.viewer.cam, key, val) + + if partial: + state = self.pad_observation(observation) + else: + state = observation + + qpos_dim = self.env.sim.data.qpos.size + if not qvel or state.shape[-1] == qpos_dim: + qvel_dim = self.env.sim.data.qvel.size + state = np.concatenate([state, np.zeros(qvel_dim)]) + + set_state(self.env, state) + + self.viewer.render(*dim) + data = self.viewer.read_pixels(*dim, depth=False) + data = data[::-1, :, :] + return data + + def _renders(self, observations, **kwargs): + images = [] + for observation in observations: + img = self.render(observation, **kwargs) + images.append(img) + return np.stack(images, axis=0) + + def renders(self, samples, partial=False, **kwargs): + if partial: + samples = self.pad_observations(samples) + partial = False + + sample_images = self._renders(samples, partial=partial, **kwargs) + + composite = np.ones_like(sample_images[0]) * 255 + + for img in sample_images: + mask = get_image_mask(img) + composite[mask] = img[mask] + + return composite + + def __call__(self, *args, **kwargs): + return self.renders(*args, **kwargs) + + +env_name = "hopper-medium-expert-v2" +env = gym.make(env_name) +data = env.get_dataset() # dataset is only used for normalization in this colab + +# Cuda settings for colab +# torch.cuda.get_device_name(0) +DEVICE = "cpu" +DTYPE = torch.float + +# diffusion model settings +n_samples = 4 # number of trajectories planned via diffusion +horizon = 128 # length of sampled trajectories +state_dim = env.observation_space.shape[0] +action_dim = env.action_space.shape[0] +num_inference_steps = 100 # number of difusion steps + obs = env.reset() obs_raw = obs @@ -67,16 +308,5 @@ def to_torch(x_in, dtype=None, device=None): 
weight_decay=1e-6, eps=1e-8, ) -# 3 different pretrained models are available for this task. -# The horizion represents the length of trajectories used in training. -# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) -# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) - - -def reset_x0(x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in - # TODO: Flesh this out using accelerate library (a la other examples) diff --git a/convert_model.py b/scripts/convert_models_diffuser_to_diffusers.py similarity index 100% rename from convert_model.py rename to scripts/convert_models_diffuser_to_diffusers.py diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 9072710886b8..06596bd6091f 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -200,6 +200,7 @@ def _get_variance(self, t, predicted_variance=None, variance_type=None): # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": variance = torch.log(torch.clamp(variance, min=1e-20)) + variance = torch.exp(0.5 * variance) elif variance_type == "fixed_large": variance = self.betas[t] elif variance_type == "fixed_large_log": @@ -284,10 +285,7 @@ def step( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) if self.variance_type == "fixed_small_log": - variance = self._get_variance(t, predicted_variance=predicted_variance) - variance = torch.exp(0.5 * variance) - variance = variance * noise - + variance = self._get_variance(t, predicted_variance=predicted_variance) * noise else: variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise From e7e6963fdcf0dc761c709b018edbc699bf0454e6 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 18 Oct 2022 19:19:28 -0400 Subject: [PATCH 056/133] style and quality --- examples/diffuser/run_diffuser.py | 2 +- .../diffuser/run_diffuser_value_guided.py | 2 +- examples/diffuser/train_diffuser.py | 4 +-- .../convert_models_diffuser_to_diffusers.py | 27 ++++++++++++++----- 4 files changed, 25 insertions(+), 10 deletions(-) diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser.py index ce11363bbbb9..ad35b59d99a7 100644 --- a/examples/diffuser/run_diffuser.py +++ b/examples/diffuser/run_diffuser.py @@ -3,8 +3,8 @@ import d4rl # noqa import gym -import train_diffuser import tqdm +import train_diffuser from diffusers import DDPMScheduler, UNet1DModel diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index cc61650ddcdf..05c10be374c6 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -3,8 +3,8 @@ import d4rl # noqa import gym -import train_diffuser import tqdm +import train_diffuser from diffusers import DDPMScheduler, UNet1DModel, ValueFunction diff --git a/examples/diffuser/train_diffuser.py b/examples/diffuser/train_diffuser.py index 784ba4d48d52..4f5e7bd0b680 100644 --- a/examples/diffuser/train_diffuser.py +++ b/examples/diffuser/train_diffuser.py @@ -4,11 +4,11 @@ import numpy as np import torch +import d4rl # noqa +import gym import mediapy as media import mujoco_py as mjc import tqdm -import d4rl # noqa -import gym from diffusers import DDPMScheduler, UNet1DModel diff --git 
a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py index 4691c69239d7..821c6d51fb80 100644 --- a/scripts/convert_models_diffuser_to_diffusers.py +++ b/scripts/convert_models_diffuser_to_diffusers.py @@ -1,13 +1,17 @@ +import json +import os import torch -from diffusers import DDPMScheduler, UNet1DModel, ValueFunction -import os -import json + +from diffusers import UNet1DModel, ValueFunction + + os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True) os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True) os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) + def unet(hor): if hor == 128: down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") @@ -20,7 +24,12 @@ def unet(hor): up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch") state_dict = model.state_dict() - config = dict(down_block_types=down_block_types, block_out_channels=block_out_channels, up_block_types=up_block_types, layers_per_block=1) + config = dict( + down_block_types=down_block_types, + block_out_channels=block_out_channels, + up_block_types=up_block_types, + layers_per_block=1, + ) hf_value_function = UNet1DModel(**config) print(f"length of state dict: {len(state_dict.keys())}") print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") @@ -33,8 +42,14 @@ def unet(hor): with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f: json.dump(config, f) + def value_function(): - config = dict(in_channels=14, down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), block_out_channels=(32, 64, 128, 256), layers_per_block=1) + config = dict( + in_channels=14, + down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), + block_out_channels=(32, 64, 128, 256), + layers_per_block=1, + ) model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch") state_dict = model @@ -56,4 +71,4 @@ def value_function(): if __name__ == "__main__": unet(32) # unet(128) - value_function() \ No newline at end of file + value_function() From 4f77d892ab6690fd84fed069e01ddf211fed65f2 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 19 Oct 2022 13:39:34 -0400 Subject: [PATCH 057/133] remove unused var --- examples/diffuser/run_diffuser_value_guided.py | 5 +---- examples/diffuser/train_diffuser.py | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 05c10be374c6..9610a07c366f 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -36,9 +36,6 @@ def _run(): action_dim = env.action_space.shape[0] # Two generators for different parts of the diffusion loop to work in colab - # generator = torch.Generator(device='cuda') - generator = torch.Generator(device=DEVICE) - scheduler = DDPMScheduler( num_train_timesteps=config["num_inference_steps"], beta_schedule="squaredcos_cap_v2", @@ -89,7 +86,7 @@ def _run(): # convert a np observation to torch for model forward pass x = train_diffuser.to_torch(x, device=DEVICE) - x, y = train_diffuser.run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config) + x, y = train_diffuser.run_diffusion(x, scheduler, network, 
unet, conditions, action_dim, config) if y is not None: sorted_idx = y.argsort(0, descending=True).squeeze() y_maxes.append(y[sorted_idx[0]].detach().cpu().numpy()) diff --git a/examples/diffuser/train_diffuser.py b/examples/diffuser/train_diffuser.py index 4f5e7bd0b680..b063a0456d97 100644 --- a/examples/diffuser/train_diffuser.py +++ b/examples/diffuser/train_diffuser.py @@ -46,7 +46,7 @@ def reset_x0(x_in, cond, act_dim): return x_in -def run_diffusion(x, scheduler, generator, network, unet, conditions, action_dim, config): +def run_diffusion(x, scheduler, network, unet, conditions, action_dim, config): y = None for i in tqdm.tqdm(scheduler.timesteps): # create batch of timesteps to pass into model From 6bd8397e93b8db272295309ad709c0a8f4127843 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 14:51:15 -0400 Subject: [PATCH 058/133] hack unet1d into a value function --- .../convert_models_diffuser_to_diffusers.py | 9 ++- src/diffusers/models/unet_1d.py | 55 ++++++++----- src/diffusers/models/unet_1d_blocks.py | 79 ++++++++++++++++++- 3 files changed, 118 insertions(+), 25 deletions(-) diff --git a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py index 821c6d51fb80..e957f1204596 100644 --- a/scripts/convert_models_diffuser_to_diffusers.py +++ b/scripts/convert_models_diffuser_to_diffusers.py @@ -3,7 +3,7 @@ import torch -from diffusers import UNet1DModel, ValueFunction +from diffusers import UNet1DModel os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True) @@ -47,13 +47,16 @@ def value_function(): config = dict( in_channels=14, down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), + up_block_types=(), + out_block_type="ValueFunction", block_out_channels=(32, 64, 128, 256), layers_per_block=1, + always_downsample=True ) model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch") state_dict = model - hf_value_function = ValueFunction(**config) + hf_value_function = UNet1DModel(**config) print(f"length of state dict: {len(state_dict.keys())}") print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") @@ -69,6 +72,6 @@ def value_function(): if __name__ == "__main__": - unet(32) + # unet(32) # unet(128) value_function() diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index 3ede756c9b3d..d0483609fead 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -18,7 +18,7 @@ import torch.nn as nn from diffusers.models.resnet import ResidualTemporalBlock1D -from diffusers.models.unet_1d_blocks import get_down_block, get_up_block +from diffusers.models.unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin @@ -62,10 +62,13 @@ def __init__( out_channels: int = 14, down_block_types: Tuple[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), up_block_types: Tuple[str] = ("UpResnetBlock1D", "UpResnetBlock1D"), + mid_block_types: Tuple[str] = ("MidResTemporalBlock1D", "MidResTemporalBlock1D"), + out_block_type: str = "OutConv1DBlock", block_out_channels: Tuple[int] = (32, 128, 256), act_fn: str = "mish", norm_num_groups: int = 8, layers_per_block: int = 1, + always_downsample: bool = False, ): super().__init__() @@ -95,14 +98,30 @@ def __init__( in_channels=input_channel, out_channels=output_channel, 
temb_channels=block_out_channels[0], - add_downsample=not is_final_block, + add_downsample=not is_final_block or always_downsample, ) self.down_blocks.append(down_block) # mid - self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim, embed_dim=block_out_channels[0]) - self.mid_block2 = ResidualTemporalBlock1D(mid_dim, mid_dim, embed_dim=block_out_channels[0]) - + self.mid_blocks = nn.ModuleList([]) + for i, mid_block_type in enumerate(mid_block_types): + if always_downsample: + mid_block = get_mid_block( + mid_block_type, + in_channels=mid_dim // (i + 1), + out_channels=mid_dim // ((i + 1) * 2), + embed_dim=block_out_channels[0], + add_downsample=True, + ) + else: + mid_block = get_mid_block( + mid_block_type, + in_channels=mid_dim, + out_channels=mid_dim, + embed_dim=block_out_channels[0], + add_downsample=False, + ) + self.mid_blocks.append(mid_block) # up reversed_block_out_channels = list(reversed(block_out_channels)) for i, up_block_type in enumerate(up_block_types): @@ -123,13 +142,14 @@ def __init__( # out num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) - self.final_conv1d_1 = nn.Conv1d(block_out_channels[0], block_out_channels[0], 5, padding=2) - self.final_conv1d_gn = nn.GroupNorm(num_groups_out, block_out_channels[0]) - if act_fn == "silu": - self.final_conv1d_act = nn.SiLU() - if act_fn == "mish": - self.final_conv1d_act = nn.Mish() - self.final_conv1d_2 = nn.Conv1d(block_out_channels[0], out_channels, 1) + self.out_block = get_out_block( + out_block_type=out_block_type, + num_groups_out=num_groups_out, + embed_dim=block_out_channels[0], + out_channels=out_channels, + act_fn=act_fn, + fc_dim=mid_dim // 4, + ) def forward( self, @@ -166,20 +186,15 @@ def forward( down_block_res_samples.append(res_samples[0]) # 3. mid - sample = self.mid_block1(sample, temb) - sample = self.mid_block2(sample, temb) + for mid_block in self.mid_blocks: + sample = mid_block(sample, temb) # 4. up for up_block in self.up_blocks: sample = up_block(hidden_states=sample, res_hidden_states=down_block_res_samples.pop(), temb=temb) # 5. post-process - sample = self.final_conv1d_1(sample) - sample = rearrange_dims(sample) - sample = self.final_conv1d_gn(sample) - sample = rearrange_dims(sample) - sample = self.final_conv1d_act(sample) - sample = self.final_conv1d_2(sample) + sample = self.out_block(sample, temb) if not return_dict: return (sample,) diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 40e25fb43afb..65a4afbdfc68 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -13,11 +13,12 @@ # limitations under the License. 
+from turtle import forward import torch import torch.nn.functional as F from torch import nn -from .resnet import Downsample1D, ResidualTemporalBlock1D, Upsample1D +from .resnet import Downsample1D, ResidualTemporalBlock1D, Upsample1D, rearrange_dims class DownResnetBlock1D(nn.Module): @@ -173,6 +174,66 @@ class UpBlock1DNoSkip(nn.Module): pass +class MidResTemporalBlock1D(nn.Module): + def __init__(self, in_channels, out_channels, embed_dim, add_downsample): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.add_downsample = add_downsample + self.resnet = ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim) + + if add_downsample: + self.downsample = Downsample1D(out_channels, use_conv=True) + else: + self.downsample = nn.Identity() + + def forward(self, sample, temb): + sample = self.resnet(sample, temb) + sample = self.downsample(sample) + return sample + + +class OutConv1DBlock(nn.Module): + def __init__(self, num_groups_out, embed_dim, out_channels, act_fn): + super().__init__() + self.final_conv1d_1 = nn.Conv1d(embed_dim, embed_dim, 5, padding=2) + self.final_conv1d_gn = nn.GroupNorm(num_groups_out, embed_dim) + if act_fn == "silu": + self.final_conv1d_act = nn.SiLU() + if act_fn == "mish": + self.final_conv1d_act = nn.Mish() + self.final_conv1d_2 = nn.Conv1d(embed_dim, out_channels, 1) + + def forward(self, sample, t): + sample = self.final_conv1d_1(sample) + sample = rearrange_dims(sample) + sample = self.final_conv1d_gn(sample) + sample = rearrange_dims(sample) + sample = self.final_conv1d_act(sample) + sample = self.final_conv1d_2(sample) + return sample + + +class OutValueFunctionBlock(nn.Module): + def __init__(self, fc_dim, embed_dim): + super().__init__() + self.final_block = nn.ModuleList( + [ + nn.Linear(fc_dim + embed_dim, fc_dim // 2), + nn.Mish(), + nn.Linear(fc_dim // 2, 1), + ] + ) + + def forward(self, sample, t): + sample = sample.view(sample.shape[0], -1) + sample = torch.cat((sample, t), dim=-1) + for layer in self.final_block: + sample = layer(sample) + + return sample + + def get_down_block(down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample): if down_block_type == "DownResnetBlock1D": return DownResnetBlock1D( @@ -195,5 +256,19 @@ def get_up_block(up_block_type, num_layers, in_channels, out_channels, temb_chan temb_channels=temb_channels, add_upsample=add_upsample, ) - + elif up_block_type == "Identity": + return nn.Identity() raise ValueError(f"{up_block_type} does not exist.") + + +def get_mid_block(mid_block_type, in_channels, out_channels, embed_dim, add_downsample): + if mid_block_type == "MidResTemporalBlock1D": + return MidResTemporalBlock1D(in_channels, out_channels, embed_dim, add_downsample) + raise ValueError(f"{mid_block_type} does not exist.") + + +def get_out_block(*, out_block_type, num_groups_out, embed_dim, out_channels, act_fn, fc_dim): + if out_block_type == "OutConv1DBlock": + return OutConv1DBlock(num_groups_out, out_channels, embed_dim, act_fn) + elif out_block_type == "ValueFunction": + return OutValueFunctionBlock(fc_dim, embed_dim) From 435ad266bae7ac1a1646c1bba392733f035c8f73 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 15:10:18 -0400 Subject: [PATCH 059/133] add pipeline --- examples/community/value_guided_diffuser.py | 88 +++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 examples/community/value_guided_diffuser.py diff --git a/examples/community/value_guided_diffuser.py 
b/examples/community/value_guided_diffuser.py new file mode 100644 index 000000000000..a90ff63c509e --- /dev/null +++ b/examples/community/value_guided_diffuser.py @@ -0,0 +1,88 @@ +import torch +from diffusers import DiffusionPipeline +import tqdm + +from diffusers.models.unet_1d import UNet1DModel +from diffusers.utils.dummy_pt_objects import DDPMScheduler + + +class ValueGuidedDiffuserPipeline(DiffusionPipeline): + def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env, *args, **kwargs): + super().__init__(*args, **kwargs) + self.value_function = value_function + self.unet = unet + self.scheduler = scheduler + self.env = env + self.data = env.get_dataset() + self.means = dict((key, val.mean(axis=0)) for key, val in self.data.items()) + self.stds = dict((key, val.std(axis=0)) for key, val in self.data.items()) + self.device = self.unet.device + self.state_dim = env.observation_space.shape[0] + self.action_dim = env.action_space.shape[0] + + def normalize(self, x_in, key): + return (x_in - self.means[key]) / self.stds[key] + + def de_normalize(self, x_in, key): + return x_in * self.stds[key] + self.means[key] + + def to_torch(self, x_in): + + if type(x_in) is dict: + return {k: self.to_torch(v) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(self.device) + return torch.tensor(x_in, device=self.device) + + def reset_x0(self, x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + + def run_diffusion(self, x, conditions, n_guide_steps, scale): + batch_size = x.shape[0] + y = None + for i in tqdm.tqdm(self.scheduler.timesteps): + # create batch of timesteps to pass into model + timesteps = torch.full((batch_size,), i, device=self.device, dtype=torch.long) + # 3. call the sample function + for _ in range(n_guide_steps): + with torch.enable_grad(): + x.requires_grad_() + y = self.value_function(x, timesteps).sample + grad = torch.autograd.grad([y.sum()], [x])[0] + + posterior_variance = self.scheduler._get_variance(i) + model_std = torch.exp(0.5 * posterior_variance) + grad = model_std * grad + grad[timesteps < 2] = 0 + x = x.detach() + x = x + scale * grad + x = self.reset_x0(x, conditions, self.action_dim) + # with torch.no_grad(): + prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) + x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] + + # 4. 
apply conditions to the trajectory + x = self.reset_x0(x, conditions, self.action_dim) + x = self.to_torch(x, device=self.device) + # y = network(x, timesteps).sample + return x, y + + def __call__(self, obs, batch_size=64, planning_horizon=20, n_guide_steps=2, scale=0.1): + obs = self.normalize(obs, "observations") + obs = obs[None].repeat(batch_size, axis=0) + conditions = {0: self.to_torch(obs)} + shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) + x1 = torch.randn(shape, device=self.device) + x = self.reset_x0(x1, conditions, self.action_dim) + x = self.to_torch(x) + x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) + sorted_idx = y.argsort(0, descending=True).squeeze() + sorted_values = x[sorted_idx] + actions = sorted_values[:, :, : self.action_dim] + actions = actions.detach().cpu().numpy() + denorm_actions = self.de_normalize(actions, key="actions") + # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] + denorm_actions = denorm_actions[0, 0] + return denorm_actions From 56534088386ffc2c0e94fa125c10ee37494216a1 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 15:30:50 -0400 Subject: [PATCH 060/133] fix arg order --- scripts/convert_models_diffuser_to_diffusers.py | 2 +- src/diffusers/models/unet_1d_blocks.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py index e957f1204596..61e05d261396 100644 --- a/scripts/convert_models_diffuser_to_diffusers.py +++ b/scripts/convert_models_diffuser_to_diffusers.py @@ -72,6 +72,6 @@ def value_function(): if __name__ == "__main__": - # unet(32) + unet(32) # unet(128) value_function() diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 65a4afbdfc68..1981a34754c3 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -194,7 +194,7 @@ def forward(self, sample, temb): class OutConv1DBlock(nn.Module): - def __init__(self, num_groups_out, embed_dim, out_channels, act_fn): + def __init__(self, num_groups_out, out_channels, embed_dim, act_fn): super().__init__() self.final_conv1d_1 = nn.Conv1d(embed_dim, embed_dim, 5, padding=2) self.final_conv1d_gn = nn.GroupNorm(num_groups_out, embed_dim) From 149193259cc2f6b71da575aa81c29d563d15a208 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 15:43:01 -0400 Subject: [PATCH 061/133] add pipeline to core library --- .../diffuser/run_diffuser_value_guided.py | 16 ++- src/diffusers/__init__.py | 2 +- src/diffusers/pipelines/__init__.py | 1 + src/diffusers/pipelines/diffuser/__init__.py | 1 + .../diffuser/pipeline_value_guided.py | 98 +++++++++++++++++++ 5 files changed, 113 insertions(+), 5 deletions(-) create mode 100644 src/diffusers/pipelines/diffuser/__init__.py create mode 100644 src/diffusers/pipelines/diffuser/pipeline_value_guided.py diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 9610a07c366f..a114d9d2917a 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -4,8 +4,9 @@ import d4rl # noqa import gym import tqdm -import train_diffuser -from diffusers import DDPMScheduler, UNet1DModel, ValueFunction + +# import train_diffuser +from diffusers import DDPMScheduler, UNet1DModel, DiffusionPipeline config = dict( @@ -25,7 +26,7 @@ def _run(): env_name = "hopper-medium-v2" env = 
gym.make(env_name) data = env.get_dataset() # dataset is only used for normalization in this colab - render = train_diffuser.MuJoCoRenderer(env) + # render = train_diffuser.MuJoCoRenderer(env) # Cuda settings for colab # torch.cuda.get_device_name(0) @@ -47,8 +48,15 @@ def _run(): # The horizon represents the length of trajectories used in training. # network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) - network = ValueFunction.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() + network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval() + pipeline = DiffusionPipeline.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", + value_function=network, + unet=unet, + scheduler=scheduler, + env=env, + ) # unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 7088e560dd66..edc97563c707 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -29,7 +29,7 @@ get_scheduler, ) from .pipeline_utils import DiffusionPipeline - from .pipelines import DDIMPipeline, DDPMPipeline, KarrasVePipeline, LDMPipeline, PNDMPipeline, ScoreSdeVePipeline + from .pipelines import DDIMPipeline, DDPMPipeline, KarrasVePipeline, LDMPipeline, PNDMPipeline, ScoreSdeVePipeline, ValueGuidedDiffuserPipeline from .schedulers import ( DDIMScheduler, DDPMScheduler, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 1c31595fb0cf..8edec55188fa 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -8,6 +8,7 @@ from .pndm import PNDMPipeline from .score_sde_ve import ScoreSdeVePipeline from .stochastic_karras_ve import KarrasVePipeline + from .diffuser import ValueGuidedDiffuserPipeline else: from ..utils.dummy_pt_objects import * # noqa F403 diff --git a/src/diffusers/pipelines/diffuser/__init__.py b/src/diffusers/pipelines/diffuser/__init__.py new file mode 100644 index 000000000000..23d3e16c48d3 --- /dev/null +++ b/src/diffusers/pipelines/diffuser/__init__.py @@ -0,0 +1 @@ +from .pipeline_value_guided import ValueGuidedDiffuserPipeline diff --git a/src/diffusers/pipelines/diffuser/pipeline_value_guided.py b/src/diffusers/pipelines/diffuser/pipeline_value_guided.py new file mode 100644 index 000000000000..622ddb7cf608 --- /dev/null +++ b/src/diffusers/pipelines/diffuser/pipeline_value_guided.py @@ -0,0 +1,98 @@ +from numpy import AxisError +import torch +from diffusers import DiffusionPipeline +import tqdm + +from diffusers.models.unet_1d import UNet1DModel +from diffusers.utils.dummy_pt_objects import DDPMScheduler + + +class ValueGuidedDiffuserPipeline(DiffusionPipeline): + def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env): + super().__init__() + self.value_function = value_function + self.unet = unet + self.scheduler = scheduler + self.env = env + self.data = env.get_dataset() + self.means = dict() + for key, val in self.data.items(): + try: + self.means[key] = val.mean(axis=0) + except AxisError: # Not everything in the dataset is an array + pass + self.stds = dict() + for key, val in self.data.items(): + try:
self.stds[key] = val.std(axis=0) + except AxisError: + pass + self.state_dim = env.observation_space.shape[0] + self.action_dim = env.action_space.shape[0] + + def normalize(self, x_in, key): + return (x_in - self.means[key]) / self.stds[key] + + def de_normalize(self, x_in, key): + return x_in * self.stds[key] + self.means[key] + + def to_torch(self, x_in): + + if type(x_in) is dict: + return {k: self.to_torch(v) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(self.unet.device) + return torch.tensor(x_in, device=self.unet.device) + + def reset_x0(self, x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + + def run_diffusion(self, x, conditions, n_guide_steps, scale): + batch_size = x.shape[0] + y = None + for i in tqdm.tqdm(self.scheduler.timesteps): + # create batch of timesteps to pass into model + timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) + # 3. call the sample function + for _ in range(n_guide_steps): + with torch.enable_grad(): + x.requires_grad_() + y = self.value_function(x, timesteps).sample + grad = torch.autograd.grad([y.sum()], [x])[0] + + posterior_variance = self.scheduler._get_variance(i) + model_std = torch.exp(0.5 * posterior_variance) + grad = model_std * grad + grad[timesteps < 2] = 0 + x = x.detach() + x = x + scale * grad + x = self.reset_x0(x, conditions, self.action_dim) + # with torch.no_grad(): + prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) + x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] + + # 4. apply conditions to the trajectory + x = self.reset_x0(x, conditions, self.action_dim) + x = self.to_torch(x, device=self.unet.device) + # y = network(x, timesteps).sample + return x, y + + def __call__(self, obs, batch_size=64, planning_horizon=20, n_guide_steps=2, scale=0.1): + obs = self.normalize(obs, "observations") + obs = obs[None].repeat(batch_size, axis=0) + conditions = {0: self.to_torch(obs)} + shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) + x1 = torch.randn(shape, device=self.unet.device) + x = self.reset_x0(x1, conditions, self.action_dim) + x = self.to_torch(x) + x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) + sorted_idx = y.argsort(0, descending=True).squeeze() + sorted_values = x[sorted_idx] + actions = sorted_values[:, :, : self.action_dim] + actions = actions.detach().cpu().numpy() + denorm_actions = self.de_normalize(actions, key="actions") + # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] + denorm_actions = denorm_actions[0, 0] + return denorm_actions From 1a8098ed403d20c220e7b163881e422e0e225ceb Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 19:52:11 -0400 Subject: [PATCH 062/133] community pipeline --- .../community/pipeline.py | 23 +++++++----- examples/community/value_guided_diffuser.py | 37 +++++++++++++------ .../diffuser/run_diffuser_value_guided.py | 3 +- src/diffusers/__init__.py | 2 +- src/diffusers/pipelines/__init__.py | 1 - src/diffusers/pipelines/diffuser/__init__.py | 1 - 6 files changed, 43 insertions(+), 24 deletions(-) rename src/diffusers/pipelines/diffuser/pipeline_value_guided.py => examples/community/pipeline.py (90%) delete mode 100644 src/diffusers/pipelines/diffuser/__init__.py diff --git a/src/diffusers/pipelines/diffuser/pipeline_value_guided.py b/examples/community/pipeline.py similarity index 90% rename from 
src/diffusers/pipelines/diffuser/pipeline_value_guided.py rename to examples/community/pipeline.py index 622ddb7cf608..5a28d6c222e6 100644 --- a/src/diffusers/pipelines/diffuser/pipeline_value_guided.py +++ b/examples/community/pipeline.py @@ -1,14 +1,19 @@ -from numpy import AxisError import torch from diffusers import DiffusionPipeline import tqdm - +from numpy import AxisError from diffusers.models.unet_1d import UNet1DModel from diffusers.utils.dummy_pt_objects import DDPMScheduler class ValueGuidedDiffuserPipeline(DiffusionPipeline): - def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env): + def __init__( + self, + value_function: UNet1DModel, + unet: UNet1DModel, + scheduler: DDPMScheduler, + env, + ): super().__init__() self.value_function = value_function self.unet = unet @@ -16,16 +21,16 @@ def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DD self.env = env self.data = env.get_dataset() self.means = dict() - for key, val in self.data.items(): + for key in self.data.keys(): try: - self.means[key] = val.mean(axis=0) - except AxisError: # Not everything in the dataset is an array + self.means[key] = self.data[key].mean() + except: pass self.stds = dict() - for key, val in self.data.items(): + for key in self.data.keys(): try: - self.stds[key] = val.std(axis=0) - except AxisError: + self.stds[key] = self.data[key].std() + except: pass self.state_dim = env.observation_space.shape[0] self.action_dim = env.action_space.shape[0] diff --git a/examples/community/value_guided_diffuser.py b/examples/community/value_guided_diffuser.py index a90ff63c509e..5a28d6c222e6 100644 --- a/examples/community/value_guided_diffuser.py +++ b/examples/community/value_guided_diffuser.py @@ -1,22 +1,37 @@ import torch from diffusers import DiffusionPipeline import tqdm - +from numpy import AxisError from diffusers.models.unet_1d import UNet1DModel from diffusers.utils.dummy_pt_objects import DDPMScheduler class ValueGuidedDiffuserPipeline(DiffusionPipeline): - def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env, *args, **kwargs): - super().__init__(*args, **kwargs) + def __init__( + self, + value_function: UNet1DModel, + unet: UNet1DModel, + scheduler: DDPMScheduler, + env, + ): + super().__init__() self.value_function = value_function self.unet = unet self.scheduler = scheduler self.env = env self.data = env.get_dataset() - self.means = dict((key, val.mean(axis=0)) for key, val in self.data.items()) - self.stds = dict((key, val.std(axis=0)) for key, val in self.data.items()) - self.device = self.unet.device + self.means = dict() + for key in self.data.keys(): + try: + self.means[key] = self.data[key].mean() + except: + pass + self.stds = dict() + for key in self.data.keys(): + try: + self.stds[key] = self.data[key].std() + except: + pass self.state_dim = env.observation_space.shape[0] self.action_dim = env.action_space.shape[0] @@ -31,8 +46,8 @@ def to_torch(self, x_in): if type(x_in) is dict: return {k: self.to_torch(v) for k, v in x_in.items()} elif torch.is_tensor(x_in): - return x_in.to(self.device) - return torch.tensor(x_in, device=self.device) + return x_in.to(self.unet.device) + return torch.tensor(x_in, device=self.unet.device) def reset_x0(self, x_in, cond, act_dim): for key, val in cond.items(): @@ -44,7 +59,7 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): y = None for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model - 
timesteps = torch.full((batch_size,), i, device=self.device, dtype=torch.long) + timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) # 3. call the sample function for _ in range(n_guide_steps): with torch.enable_grad(): @@ -65,7 +80,7 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): # 4. apply conditions to the trajectory x = self.reset_x0(x, conditions, self.action_dim) - x = self.to_torch(x, device=self.device) + x = self.to_torch(x, device=self.unet.device) # y = network(x, timesteps).sample return x, y @@ -74,7 +89,7 @@ def __call__(self, obs, batch_size=64, planning_horizon=20, n_guide_steps=2, sca obs = obs[None].repeat(batch_size, axis=0) conditions = {0: self.to_torch(obs)} shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) - x1 = torch.randn(shape, device=self.device) + x1 = torch.randn(shape, device=self.unet.device) x = self.reset_x0(x1, conditions, self.action_dim) x = self.to_torch(x) x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index a114d9d2917a..8b8b708ee968 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -25,7 +25,7 @@ def _run(): env_name = "hopper-medium-v2" env = gym.make(env_name) - data = env.get_dataset() # dataset is only used for normalization in this colab + # data = env.get_dataset() # dataset is only used for normalization in this colab # render = train_diffuser.MuJoCoRenderer(env) # Cuda settings for colab @@ -56,6 +56,7 @@ def _run(): unet=unet, scheduler=scheduler, env=env, + custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", ) # unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index edc97563c707..7088e560dd66 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -29,7 +29,7 @@ get_scheduler, ) from .pipeline_utils import DiffusionPipeline - from .pipelines import DDIMPipeline, DDPMPipeline, KarrasVePipeline, LDMPipeline, PNDMPipeline, ScoreSdeVePipeline, ValueGuidedDiffuserPipeline + from .pipelines import DDIMPipeline, DDPMPipeline, KarrasVePipeline, LDMPipeline, PNDMPipeline, ScoreSdeVePipeline from .schedulers import ( DDIMScheduler, DDPMScheduler, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 8edec55188fa..1c31595fb0cf 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -8,7 +8,6 @@ from .pndm import PNDMPipeline from .score_sde_ve import ScoreSdeVePipeline from .stochastic_karras_ve import KarrasVePipeline - from .diffuser import ValueGuidedDiffuserPipeline else: from ..utils.dummy_pt_objects import * # noqa F403 diff --git a/src/diffusers/pipelines/diffuser/__init__.py b/src/diffusers/pipelines/diffuser/__init__.py deleted file mode 100644 index 23d3e16c48d3..000000000000 --- a/src/diffusers/pipelines/diffuser/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .pipeline_value_guided import ValueGuidedDiffuserPipeline From 0e4be7560befe50e6bf1c876081687bc382c9364 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 20:17:43 -0400 Subject: [PATCH 063/133] fix couple shape bugs --- examples/community/pipeline.py | 6 +-- 
examples/community/value_guided_diffuser.py | 6 +-- .../diffuser/run_diffuser_value_guided.py | 42 +------------------ 3 files changed, 8 insertions(+), 46 deletions(-) diff --git a/examples/community/pipeline.py b/examples/community/pipeline.py index 5a28d6c222e6..0f0b505ce7b2 100644 --- a/examples/community/pipeline.py +++ b/examples/community/pipeline.py @@ -64,7 +64,7 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): for _ in range(n_guide_steps): with torch.enable_grad(): x.requires_grad_() - y = self.value_function(x, timesteps).sample + y = self.value_function(x.permute(0, 2, 1), timesteps).sample grad = torch.autograd.grad([y.sum()], [x])[0] posterior_variance = self.scheduler._get_variance(i) @@ -80,11 +80,11 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): # 4. apply conditions to the trajectory x = self.reset_x0(x, conditions, self.action_dim) - x = self.to_torch(x, device=self.unet.device) + x = self.to_torch(x) # y = network(x, timesteps).sample return x, y - def __call__(self, obs, batch_size=64, planning_horizon=20, n_guide_steps=2, scale=0.1): + def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): obs = self.normalize(obs, "observations") obs = obs[None].repeat(batch_size, axis=0) conditions = {0: self.to_torch(obs)} diff --git a/examples/community/value_guided_diffuser.py b/examples/community/value_guided_diffuser.py index 5a28d6c222e6..0f0b505ce7b2 100644 --- a/examples/community/value_guided_diffuser.py +++ b/examples/community/value_guided_diffuser.py @@ -64,7 +64,7 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): for _ in range(n_guide_steps): with torch.enable_grad(): x.requires_grad_() - y = self.value_function(x, timesteps).sample + y = self.value_function(x.permute(0, 2, 1), timesteps).sample grad = torch.autograd.grad([y.sum()], [x])[0] posterior_variance = self.scheduler._get_variance(i) @@ -80,11 +80,11 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): # 4. apply conditions to the trajectory x = self.reset_x0(x, conditions, self.action_dim) - x = self.to_torch(x, device=self.unet.device) + x = self.to_torch(x) # y = network(x, timesteps).sample return x, y - def __call__(self, obs, batch_size=64, planning_horizon=20, n_guide_steps=2, scale=0.1): + def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): obs = self.normalize(obs, "observations") obs = obs[None].repeat(batch_size, axis=0) conditions = {0: self.to_torch(obs)} diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 8b8b708ee968..aec2d826d1a2 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -6,7 +6,7 @@ import tqdm # import train_diffuser -from diffusers import DDPMScheduler, UNet1DModel, DiffusionPipeline +from diffusers import DDPMScheduler, UNet1DModel, DiffusionPipeline, UNet1DModel config = dict( @@ -75,43 +75,7 @@ def _run(): for t in tqdm.tqdm(range(T)): # 1. Call the policy # normalize observations for forward passes - obs = train_diffuser.normalize(obs, data, "observations") - - obs = obs[None].repeat(config["n_samples"], axis=0) - conditions = {0: train_diffuser.to_torch(obs, device=DEVICE)} - - # 2. 
Call the diffusion model - # constants for inference - batch_size = len(conditions[0]) - shape = (batch_size, config["horizon"], state_dim + action_dim) - - # sample random initial noise vector - x1 = torch.randn(shape, device=DEVICE) - - # this model is conditioned from an initial state, so you will see this function - # multiple times to change the initial state of generated data to the state - # generated via env.reset() above or env.step() below - x = train_diffuser.reset_x0(x1, conditions, action_dim) - - # convert a np observation to torch for model forward pass - x = train_diffuser.to_torch(x, device=DEVICE) - x, y = train_diffuser.run_diffusion(x, scheduler, network, unet, conditions, action_dim, config) - if y is not None: - sorted_idx = y.argsort(0, descending=True).squeeze() - y_maxes.append(y[sorted_idx[0]].detach().cpu().numpy()) - sorted_values = x[sorted_idx] - else: - sorted_values = x - actions = sorted_values[:, :, :action_dim] - if t % 10 == 0: - trajectory = sorted_values[:, :, action_dim:][0].unsqueeze(0).detach().cpu().numpy() - trajectory = train_diffuser.de_normalize(trajectory, data, "observations") - trajectories.append(trajectory) - - actions = actions.detach().cpu().numpy() - denorm_actions = train_diffuser.de_normalize(actions, data, key="actions") - # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] - denorm_actions = denorm_actions[0, 0] + denorm_actions = pipeline(obs, planning_horizon=32) # execute action in environment next_observation, reward, terminal, _ = env.step(denorm_actions) @@ -132,8 +96,6 @@ def _run(): print(f"Total reward: {total_reward}") - train_diffuser.show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) - def run(): _run() From 5ef88ef56862816ab909eefc33eee59248c73f58 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 20:19:35 -0400 Subject: [PATCH 064/133] style --- examples/community/pipeline.py | 5 ++--- examples/community/value_guided_diffuser.py | 5 ++--- examples/diffuser/run_diffuser_value_guided.py | 11 +---------- scripts/convert_models_diffuser_to_diffusers.py | 2 +- src/diffusers/models/unet_1d.py | 2 -- src/diffusers/models/unet_1d_blocks.py | 1 - 6 files changed, 6 insertions(+), 20 deletions(-) diff --git a/examples/community/pipeline.py b/examples/community/pipeline.py index 0f0b505ce7b2..5159de402b3a 100644 --- a/examples/community/pipeline.py +++ b/examples/community/pipeline.py @@ -1,7 +1,7 @@ import torch -from diffusers import DiffusionPipeline + import tqdm -from numpy import AxisError +from diffusers import DiffusionPipeline from diffusers.models.unet_1d import UNet1DModel from diffusers.utils.dummy_pt_objects import DDPMScheduler @@ -42,7 +42,6 @@ def de_normalize(self, x_in, key): return x_in * self.stds[key] + self.means[key] def to_torch(self, x_in): - if type(x_in) is dict: return {k: self.to_torch(v) for k, v in x_in.items()} elif torch.is_tensor(x_in): diff --git a/examples/community/value_guided_diffuser.py b/examples/community/value_guided_diffuser.py index 0f0b505ce7b2..5159de402b3a 100644 --- a/examples/community/value_guided_diffuser.py +++ b/examples/community/value_guided_diffuser.py @@ -1,7 +1,7 @@ import torch -from diffusers import DiffusionPipeline + import tqdm -from numpy import AxisError +from diffusers import DiffusionPipeline from diffusers.models.unet_1d import UNet1DModel from diffusers.utils.dummy_pt_objects import DDPMScheduler @@ -42,7 +42,6 @@ def de_normalize(self, x_in, key): return x_in * self.stds[key] + self.means[key] def 
to_torch(self, x_in): - if type(x_in) is dict: return {k: self.to_torch(v) for k, v in x_in.items()} elif torch.is_tensor(x_in): diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index aec2d826d1a2..11f36d1ada13 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -1,12 +1,9 @@ -import numpy as np -import torch - import d4rl # noqa import gym import tqdm # import train_diffuser -from diffusers import DDPMScheduler, UNet1DModel, DiffusionPipeline, UNet1DModel +from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel config = dict( @@ -32,10 +29,6 @@ def _run(): # torch.cuda.get_device_name(0) DEVICE = config["device"] - # diffusion model settings - state_dim = env.observation_space.shape[0] - action_dim = env.action_space.shape[0] - # Two generators for different parts of the diffusion loop to work in colab scheduler = DDPMScheduler( num_train_timesteps=config["num_inference_steps"], @@ -69,8 +62,6 @@ def _run(): total_score = 0 T = 1000 rollout = [obs.copy()] - trajectories = [] - y_maxes = [0] try: for t in tqdm.tqdm(range(T)): # 1. Call the policy diff --git a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py index 61e05d261396..b154295e9726 100644 --- a/scripts/convert_models_diffuser_to_diffusers.py +++ b/scripts/convert_models_diffuser_to_diffusers.py @@ -51,7 +51,7 @@ def value_function(): out_block_type="ValueFunction", block_out_channels=(32, 64, 128, 256), layers_per_block=1, - always_downsample=True + always_downsample=True, ) model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch") diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index d0483609fead..b720c78b8833 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -17,14 +17,12 @@ import torch import torch.nn as nn -from diffusers.models.resnet import ResidualTemporalBlock1D from diffusers.models.unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from ..utils import BaseOutput from .embeddings import TimestepEmbedding, Timesteps -from .resnet import rearrange_dims @dataclass diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 1981a34754c3..a00372faf7d9 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -13,7 +13,6 @@ # limitations under the License. 
-from turtle import forward import torch import torch.nn.functional as F from torch import nn From c6d94cef50b17f1f4626965204c97cee733a7221 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Thu, 20 Oct 2022 19:59:46 -0700 Subject: [PATCH 065/133] Apply suggestions from code review --- examples/community/pipeline.py | 3 --- examples/community/value_guided_diffuser.py | 3 --- examples/diffuser/run_diffuser.py | 3 --- examples/diffuser/run_diffuser_value_guided.py | 2 -- 4 files changed, 11 deletions(-) diff --git a/examples/community/pipeline.py b/examples/community/pipeline.py index 5159de402b3a..7e3f2b832b1f 100644 --- a/examples/community/pipeline.py +++ b/examples/community/pipeline.py @@ -73,14 +73,12 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): x = x.detach() x = x + scale * grad x = self.reset_x0(x, conditions, self.action_dim) - # with torch.no_grad(): prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] # 4. apply conditions to the trajectory x = self.reset_x0(x, conditions, self.action_dim) x = self.to_torch(x) - # y = network(x, timesteps).sample return x, y def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): @@ -97,6 +95,5 @@ def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, sca actions = sorted_values[:, :, : self.action_dim] actions = actions.detach().cpu().numpy() denorm_actions = self.de_normalize(actions, key="actions") - # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] denorm_actions = denorm_actions[0, 0] return denorm_actions diff --git a/examples/community/value_guided_diffuser.py b/examples/community/value_guided_diffuser.py index 5159de402b3a..7e3f2b832b1f 100644 --- a/examples/community/value_guided_diffuser.py +++ b/examples/community/value_guided_diffuser.py @@ -73,14 +73,12 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): x = x.detach() x = x + scale * grad x = self.reset_x0(x, conditions, self.action_dim) - # with torch.no_grad(): prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] # 4. 
apply conditions to the trajectory x = self.reset_x0(x, conditions, self.action_dim) x = self.to_torch(x) - # y = network(x, timesteps).sample return x, y def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): @@ -97,6 +95,5 @@ def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, sca actions = sorted_values[:, :, : self.action_dim] actions = actions.detach().cpu().numpy() denorm_actions = self.de_normalize(actions, key="actions") - # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] denorm_actions = denorm_actions[0, 0] return denorm_actions diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser.py index ad35b59d99a7..b29d89992dfc 100644 --- a/examples/diffuser/run_diffuser.py +++ b/examples/diffuser/run_diffuser.py @@ -12,8 +12,6 @@ env = gym.make(env_name) data = env.get_dataset() # dataset is only used for normalization in this colab -# Cuda settings for colab -# torch.cuda.get_device_name(0) DEVICE = "cpu" DTYPE = torch.float @@ -41,7 +39,6 @@ clip_denoised = network.clip_denoised predict_epsilon = network.predict_epsilon -# add a batch dimension and repeat for multiple samples # [ observation_dim ] --> [ n_samples x observation_dim ] obs = env.reset() total_reward = 0 diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 11f36d1ada13..4272ec2c3106 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -22,8 +22,6 @@ def _run(): env_name = "hopper-medium-v2" env = gym.make(env_name) - # data = env.get_dataset() # dataset is only used for normalization in this colab - # render = train_diffuser.MuJoCoRenderer(env) # Cuda settings for colab # torch.cuda.get_device_name(0) From 48a74147233ca48aa6c77530ed244de4b5d59dfe Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 23:02:00 -0400 Subject: [PATCH 066/133] Add Value Function and corresponding example script to Diffuser implementation (#884) * valuefunction code * start example scripts * missing imports * bug fixes and placeholder example script * add value function scheduler * load value function from hub and get best actions in example * very close to working example * larger batch size for planning * more tests * merge unet1d changes * wandb for debugging, use newer models * success! 
* turns out we just need more diffusion steps * run on modal * merge and code cleanup * use same api for rl model * fix variance type * wrong normalization function * add tests * style * style and quality * edits based on comments * style and quality * remove unused var * hack unet1d into a value function * add pipeline * fix arg order * add pipeline to core library * community pipeline * fix couple shape bugs * style * Apply suggestions from code review Co-authored-by: Nathan Lambert --- .gitignore | 4 +- examples/community/pipeline.py | 99 ++++++ examples/community/value_guided_diffuser.py | 99 ++++++ examples/diffuser/run_diffuser.py | 122 +++++++ .../diffuser/run_diffuser_value_guided.py | 94 ++++++ examples/diffuser/train_diffuser.py | 312 ++++++++++++++++++ .../convert_models_diffuser_to_diffusers.py | 77 +++++ src/diffusers/__init__.py | 2 +- src/diffusers/models/__init__.py | 1 + src/diffusers/models/unet_1d.py | 57 ++-- src/diffusers/models/unet_1d_blocks.py | 78 ++++- src/diffusers/models/unet_rl.py | 135 ++++++++ src/diffusers/schedulers/scheduling_ddpm.py | 6 +- tests/test_models_unet.py | 85 ++++- 14 files changed, 1143 insertions(+), 28 deletions(-) create mode 100644 examples/community/pipeline.py create mode 100644 examples/community/value_guided_diffuser.py create mode 100644 examples/diffuser/run_diffuser.py create mode 100644 examples/diffuser/run_diffuser_value_guided.py create mode 100644 examples/diffuser/train_diffuser.py create mode 100644 scripts/convert_models_diffuser_to_diffusers.py create mode 100644 src/diffusers/models/unet_rl.py diff --git a/.gitignore b/.gitignore index cf8183463613..f018a111ea33 100644 --- a/.gitignore +++ b/.gitignore @@ -163,4 +163,6 @@ tags *.lock # DS_Store (MacOS) -.DS_Store \ No newline at end of file +.DS_Store +# RL pipelines may produce mp4 outputs +*.mp4 \ No newline at end of file diff --git a/examples/community/pipeline.py b/examples/community/pipeline.py new file mode 100644 index 000000000000..7e3f2b832b1f --- /dev/null +++ b/examples/community/pipeline.py @@ -0,0 +1,99 @@ +import torch + +import tqdm +from diffusers import DiffusionPipeline +from diffusers.models.unet_1d import UNet1DModel +from diffusers.utils.dummy_pt_objects import DDPMScheduler + + +class ValueGuidedDiffuserPipeline(DiffusionPipeline): + def __init__( + self, + value_function: UNet1DModel, + unet: UNet1DModel, + scheduler: DDPMScheduler, + env, + ): + super().__init__() + self.value_function = value_function + self.unet = unet + self.scheduler = scheduler + self.env = env + self.data = env.get_dataset() + self.means = dict() + for key in self.data.keys(): + try: + self.means[key] = self.data[key].mean() + except: + pass + self.stds = dict() + for key in self.data.keys(): + try: + self.stds[key] = self.data[key].std() + except: + pass + self.state_dim = env.observation_space.shape[0] + self.action_dim = env.action_space.shape[0] + + def normalize(self, x_in, key): + return (x_in - self.means[key]) / self.stds[key] + + def de_normalize(self, x_in, key): + return x_in * self.stds[key] + self.means[key] + + def to_torch(self, x_in): + if type(x_in) is dict: + return {k: self.to_torch(v) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(self.unet.device) + return torch.tensor(x_in, device=self.unet.device) + + def reset_x0(self, x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + + def run_diffusion(self, x, conditions, n_guide_steps, scale): + batch_size = x.shape[0] + y = None + 
for i in tqdm.tqdm(self.scheduler.timesteps): + # create batch of timesteps to pass into model + timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) + # 3. call the sample function + for _ in range(n_guide_steps): + with torch.enable_grad(): + x.requires_grad_() + y = self.value_function(x.permute(0, 2, 1), timesteps).sample + grad = torch.autograd.grad([y.sum()], [x])[0] + + posterior_variance = self.scheduler._get_variance(i) + model_std = torch.exp(0.5 * posterior_variance) + grad = model_std * grad + grad[timesteps < 2] = 0 + x = x.detach() + x = x + scale * grad + x = self.reset_x0(x, conditions, self.action_dim) + prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) + x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] + + # 4. apply conditions to the trajectory + x = self.reset_x0(x, conditions, self.action_dim) + x = self.to_torch(x) + return x, y + + def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): + obs = self.normalize(obs, "observations") + obs = obs[None].repeat(batch_size, axis=0) + conditions = {0: self.to_torch(obs)} + shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) + x1 = torch.randn(shape, device=self.unet.device) + x = self.reset_x0(x1, conditions, self.action_dim) + x = self.to_torch(x) + x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) + sorted_idx = y.argsort(0, descending=True).squeeze() + sorted_values = x[sorted_idx] + actions = sorted_values[:, :, : self.action_dim] + actions = actions.detach().cpu().numpy() + denorm_actions = self.de_normalize(actions, key="actions") + denorm_actions = denorm_actions[0, 0] + return denorm_actions diff --git a/examples/community/value_guided_diffuser.py b/examples/community/value_guided_diffuser.py new file mode 100644 index 000000000000..7e3f2b832b1f --- /dev/null +++ b/examples/community/value_guided_diffuser.py @@ -0,0 +1,99 @@ +import torch + +import tqdm +from diffusers import DiffusionPipeline +from diffusers.models.unet_1d import UNet1DModel +from diffusers.utils.dummy_pt_objects import DDPMScheduler + + +class ValueGuidedDiffuserPipeline(DiffusionPipeline): + def __init__( + self, + value_function: UNet1DModel, + unet: UNet1DModel, + scheduler: DDPMScheduler, + env, + ): + super().__init__() + self.value_function = value_function + self.unet = unet + self.scheduler = scheduler + self.env = env + self.data = env.get_dataset() + self.means = dict() + for key in self.data.keys(): + try: + self.means[key] = self.data[key].mean() + except: + pass + self.stds = dict() + for key in self.data.keys(): + try: + self.stds[key] = self.data[key].std() + except: + pass + self.state_dim = env.observation_space.shape[0] + self.action_dim = env.action_space.shape[0] + + def normalize(self, x_in, key): + return (x_in - self.means[key]) / self.stds[key] + + def de_normalize(self, x_in, key): + return x_in * self.stds[key] + self.means[key] + + def to_torch(self, x_in): + if type(x_in) is dict: + return {k: self.to_torch(v) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(self.unet.device) + return torch.tensor(x_in, device=self.unet.device) + + def reset_x0(self, x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + + def run_diffusion(self, x, conditions, n_guide_steps, scale): + batch_size = x.shape[0] + y = None + for i in tqdm.tqdm(self.scheduler.timesteps): + # create batch of timesteps to pass into model 
+ timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) + # 3. call the sample function + for _ in range(n_guide_steps): + with torch.enable_grad(): + x.requires_grad_() + y = self.value_function(x.permute(0, 2, 1), timesteps).sample + grad = torch.autograd.grad([y.sum()], [x])[0] + + posterior_variance = self.scheduler._get_variance(i) + model_std = torch.exp(0.5 * posterior_variance) + grad = model_std * grad + grad[timesteps < 2] = 0 + x = x.detach() + x = x + scale * grad + x = self.reset_x0(x, conditions, self.action_dim) + prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) + x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] + + # 4. apply conditions to the trajectory + x = self.reset_x0(x, conditions, self.action_dim) + x = self.to_torch(x) + return x, y + + def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): + obs = self.normalize(obs, "observations") + obs = obs[None].repeat(batch_size, axis=0) + conditions = {0: self.to_torch(obs)} + shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) + x1 = torch.randn(shape, device=self.unet.device) + x = self.reset_x0(x1, conditions, self.action_dim) + x = self.to_torch(x) + x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) + sorted_idx = y.argsort(0, descending=True).squeeze() + sorted_values = x[sorted_idx] + actions = sorted_values[:, :, : self.action_dim] + actions = actions.detach().cpu().numpy() + denorm_actions = self.de_normalize(actions, key="actions") + denorm_actions = denorm_actions[0, 0] + return denorm_actions diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser.py new file mode 100644 index 000000000000..b29d89992dfc --- /dev/null +++ b/examples/diffuser/run_diffuser.py @@ -0,0 +1,122 @@ +import numpy as np +import torch + +import d4rl # noqa +import gym +import tqdm +import train_diffuser +from diffusers import DDPMScheduler, UNet1DModel + + +env_name = "hopper-medium-expert-v2" +env = gym.make(env_name) +data = env.get_dataset() # dataset is only used for normalization in this colab + +DEVICE = "cpu" +DTYPE = torch.float + +# diffusion model settings +n_samples = 4 # number of trajectories planned via diffusion +horizon = 128 # length of sampled trajectories +state_dim = env.observation_space.shape[0] +action_dim = env.action_space.shape[0] +num_inference_steps = 100 # number of diffusion steps + + +# Two generators for different parts of the diffusion loop to work in colab +generator_cpu = torch.Generator(device="cpu") + +scheduler = DDPMScheduler(num_train_timesteps=100, beta_schedule="squaredcos_cap_v2") + +# 3 different pretrained models are available for this task.
# The horizon represents the length of trajectories used in training.
+network = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) +# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) +# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) + + +# network specific constants for inference +clip_denoised = network.clip_denoised +predict_epsilon = network.predict_epsilon + +# [ observation_dim ] --> [ n_samples x observation_dim ] +obs = env.reset() +total_reward = 0 +done = False +T = 300 +rollout = [obs.copy()] + +try: + for t in tqdm.tqdm(range(T)): + obs_raw = obs + + # normalize observations for forward passes + obs = train_diffuser.normalize(obs, data, "observations") + obs = obs[None].repeat(n_samples, axis=0) + conditions = {0: train_diffuser.to_torch(obs, device=DEVICE)} + + # constants for inference + batch_size = len(conditions[0]) + shape = (batch_size, horizon, state_dim + action_dim) + + # sample random initial noise vector + x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu) + + # this model is conditioned from an initial state, so you will see this function + # multiple times to change the initial state of generated data to the state + # generated via env.reset() above or env.step() below + x = train_diffuser.reset_x0(x1, conditions, action_dim) + + # convert a np observation to torch for model forward pass + x = train_diffuser.to_torch(x) + + eta = 1.0 # noise factor for sampling reconstructed state + + # run the diffusion process + # for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps): + for i in tqdm.tqdm(scheduler.timesteps): + # create batch of timesteps to pass into model + timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long) + + # 1. generate prediction from model + with torch.no_grad(): + residual = network(x, timesteps).sample + + # 2. use the model prediction to reconstruct an observation (de-noise) + obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"] + + # 3. [optional] add posterior noise to the sample + if eta > 0: + noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device) + posterior_variance = scheduler._get_variance(i) # * noise + # no noise when t == 0 + # NOTE: original implementation missing sqrt on posterior_variance + obs_reconstruct = ( + obs_reconstruct + int(i > 0) * (0.5 * posterior_variance) * eta * noise + ) # MJ had as log var, exponentiated + + # 4. 
apply conditions to the trajectory + obs_reconstruct_postcond = train_diffuser.reset_x0(obs_reconstruct, conditions, action_dim) + x = train_diffuser.to_torch(obs_reconstruct_postcond) + plans = train_diffuser.helpers.to_np(x[:, :, :action_dim]) + # select random plan + idx = np.random.randint(plans.shape[0]) + # select action at correct time + action = plans[idx, 0, :] + actions = train_diffuser.de_normalize(action, data, "actions") + # execute action in environment + next_observation, reward, terminal, _ = env.step(action) + + # update return + total_reward += reward + print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") + + # save observations for rendering + rollout.append(next_observation.copy()) + obs = next_observation +except KeyboardInterrupt: + pass + +print(f"Total reward: {total_reward}") +render = train_diffuser.MuJoCoRenderer(env) +train_diffuser.show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py new file mode 100644 index 000000000000..4272ec2c3106 --- /dev/null +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -0,0 +1,94 @@ +import d4rl # noqa +import gym +import tqdm + +# import train_diffuser +from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel + + +config = dict( + n_samples=64, + horizon=32, + num_inference_steps=20, + n_guide_steps=2, + scale_grad_by_std=True, + scale=0.1, + eta=0.0, + t_grad_cutoff=2, + device="cpu", +) + + +def _run(): + env_name = "hopper-medium-v2" + env = gym.make(env_name) + + # Cuda settings for colab + # torch.cuda.get_device_name(0) + DEVICE = config["device"] + + # Two generators for different parts of the diffusion loop to work in colab + scheduler = DDPMScheduler( + num_train_timesteps=config["num_inference_steps"], + beta_schedule="squaredcos_cap_v2", + clip_sample=False, + variance_type="fixed_small_log", + ) + + # 3 different pretrained models are available for this task. + # The horizon represents the length of trajectories used in training. + # network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) + + network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() + unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval() + pipeline = DiffusionPipeline.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", + value_function=network, + unet=unet, + scheduler=scheduler, + env=env, + custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", + ) + # unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) + # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) + + # add a batch dimension and repeat for multiple samples + # [ observation_dim ] --> [ n_samples x observation_dim ] + env.seed(0) + obs = env.reset() + total_reward = 0 + total_score = 0 + T = 1000 + rollout = [obs.copy()] + try: + for t in tqdm.tqdm(range(T)): + # 1.
Call the policy + # normalize observations for forward passes + denorm_actions = pipeline(obs, planning_horizon=32) + + # execute action in environment + next_observation, reward, terminal, _ = env.step(denorm_actions) + score = env.get_normalized_score(total_reward) + # update return + total_reward += reward + total_score += score + print( + f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" + f" {total_score}" + ) + # save observations for rendering + rollout.append(next_observation.copy()) + + obs = next_observation + except KeyboardInterrupt: + pass + + print(f"Total reward: {total_reward}") + + +def run(): + _run() + + +if __name__ == "__main__": + run() diff --git a/examples/diffuser/train_diffuser.py b/examples/diffuser/train_diffuser.py new file mode 100644 index 000000000000..b063a0456d97 --- /dev/null +++ b/examples/diffuser/train_diffuser.py @@ -0,0 +1,312 @@ +import os +import warnings + +import numpy as np +import torch + +import d4rl # noqa +import gym +import mediapy as media +import mujoco_py as mjc +import tqdm +from diffusers import DDPMScheduler, UNet1DModel + + +# Define some helper functions + + +DTYPE = torch.float + + +def normalize(x_in, data, key): + means = data[key].mean(axis=0) + stds = data[key].std(axis=0) + return (x_in - means) / stds + + +def de_normalize(x_in, data, key): + means = data[key].mean(axis=0) + stds = data[key].std(axis=0) + return x_in * stds + means + + +def to_torch(x_in, dtype=None, device="cuda"): + dtype = dtype or DTYPE + device = device + if type(x_in) is dict: + return {k: to_torch(v, dtype, device) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(device).type(dtype) + return torch.tensor(x_in, dtype=dtype, device=device) + + +def reset_x0(x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + + +def run_diffusion(x, scheduler, network, unet, conditions, action_dim, config): + y = None + for i in tqdm.tqdm(scheduler.timesteps): + # create batch of timesteps to pass into model + timesteps = torch.full((config["n_samples"],), i, device=config["device"], dtype=torch.long) + # 3. call the sample function + for _ in range(config["n_guide_steps"]): + with torch.enable_grad(): + x.requires_grad_() + y = network(x, timesteps).sample + grad = torch.autograd.grad([y.sum()], [x])[0] + if config["scale_grad_by_std"]: + posterior_variance = scheduler._get_variance(i) + model_std = torch.exp(0.5 * posterior_variance) + grad = model_std * grad + grad[timesteps < config["t_grad_cutoff"]] = 0 + x = x.detach() + x = x + config["scale"] * grad + x = reset_x0(x, conditions, action_dim) + # with torch.no_grad(): + prev_x = unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) + x = scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] + + # 3. [optional] add posterior noise to the sample + if config["eta"] > 0: + noise = torch.randn(x.shape).to(x.device) + posterior_variance = scheduler._get_variance(i) # * noise + # no noise when t == 0 + # NOTE: original implementation missing sqrt on posterior_variance + x = x + int(i > 0) * (0.5 * posterior_variance) * config["eta"] * noise # MJ had as log var, exponentiated + + # 4. 
apply conditions to the trajectory + x = reset_x0(x, conditions, action_dim) + x = to_torch(x, device=config["device"]) + # y = network(x, timesteps).sample + return x, y + + +def to_np(x_in): + if torch.is_tensor(x_in): + x_in = x_in.detach().cpu().numpy() + return x_in + + +# from MJ's Diffuser code +# https://github.com/jannerm/diffuser/blob/76ae49ae85ba1c833bf78438faffdc63b8b4d55d/diffuser/utils/colab.py#L79 +def mkdir(savepath): + """ + returns `True` iff `savepath` is created + """ + if not os.path.exists(savepath): + os.makedirs(savepath) + return True + else: + return False + + +def show_sample(renderer, observations, filename="sample.mp4", savebase="videos"): + """ + observations : [ batch_size x horizon x observation_dim ] + """ + + mkdir(savebase) + savepath = os.path.join(savebase, filename) + + images = [] + for rollout in observations: + # [ horizon x height x width x channels ] + img = renderer._renders(rollout, partial=True) + images.append(img) + + # [ horizon x height x (batch_size * width) x channels ] + images = np.concatenate(images, axis=2) + media.write_video(savepath, images, fps=60) + media.show_video(images, codec="h264", fps=60) + return images + + +# Code adapted from Michael Janner +# source: https://github.com/jannerm/diffuser/blob/main/diffuser/utils/rendering.py + + +def env_map(env_name): + """ + map D4RL dataset names to custom fully-observed + variants for rendering + """ + if "halfcheetah" in env_name: + return "HalfCheetahFullObs-v2" + elif "hopper" in env_name: + return "HopperFullObs-v2" + elif "walker2d" in env_name: + return "Walker2dFullObs-v2" + else: + return env_name + + +def get_image_mask(img): + background = (img == 255).all(axis=-1, keepdims=True) + mask = ~background.repeat(3, axis=-1) + return mask + + +def atmost_2d(x): + while x.ndim > 2: + x = x.squeeze(0) + return x + + +def set_state(env, state): + qpos_dim = env.sim.data.qpos.size + qvel_dim = env.sim.data.qvel.size + if not state.size == qpos_dim + qvel_dim: + warnings.warn( + f"[ utils/rendering ] Expected state of size {qpos_dim + qvel_dim}, but got state of size {state.size}" + ) + state = state[: qpos_dim + qvel_dim] + + env.set_state(state[:qpos_dim], state[qpos_dim:]) + + +class MuJoCoRenderer: + """ + default mujoco renderer + """ + + def __init__(self, env): + if type(env) is str: + env = env_map(env) + self.env = gym.make(env) + else: + self.env = env + # - 1 because the envs in renderer are fully-observed + # @TODO : clean up + self.observation_dim = np.prod(self.env.observation_space.shape) - 1 + self.action_dim = np.prod(self.env.action_space.shape) + try: + self.viewer = mjc.MjRenderContextOffscreen(self.env.sim) + except: + print("[ utils/rendering ] Warning: could not initialize offscreen renderer") + self.viewer = None + + def pad_observation(self, observation): + state = np.concatenate( + [ + np.zeros(1), + observation, + ] + ) + return state + + def pad_observations(self, observations): + qpos_dim = self.env.sim.data.qpos.size + # xpos is hidden + xvel_dim = qpos_dim - 1 + xvel = observations[:, xvel_dim] + xpos = np.cumsum(xvel) * self.env.dt + states = np.concatenate( + [ + xpos[:, None], + observations, + ], + axis=-1, + ) + return states + + def render(self, observation, dim=256, partial=False, qvel=True, render_kwargs=None, conditions=None): + if type(dim) == int: + dim = (dim, dim) + + if self.viewer is None: + return np.zeros((*dim, 3), np.uint8) + + if render_kwargs is None: + xpos = observation[0] if not partial else 0 + render_kwargs = {"trackbodyid": 2, 
"distance": 3, "lookat": [xpos, -0.5, 1], "elevation": -20} + + for key, val in render_kwargs.items(): + if key == "lookat": + self.viewer.cam.lookat[:] = val[:] + else: + setattr(self.viewer.cam, key, val) + + if partial: + state = self.pad_observation(observation) + else: + state = observation + + qpos_dim = self.env.sim.data.qpos.size + if not qvel or state.shape[-1] == qpos_dim: + qvel_dim = self.env.sim.data.qvel.size + state = np.concatenate([state, np.zeros(qvel_dim)]) + + set_state(self.env, state) + + self.viewer.render(*dim) + data = self.viewer.read_pixels(*dim, depth=False) + data = data[::-1, :, :] + return data + + def _renders(self, observations, **kwargs): + images = [] + for observation in observations: + img = self.render(observation, **kwargs) + images.append(img) + return np.stack(images, axis=0) + + def renders(self, samples, partial=False, **kwargs): + if partial: + samples = self.pad_observations(samples) + partial = False + + sample_images = self._renders(samples, partial=partial, **kwargs) + + composite = np.ones_like(sample_images[0]) * 255 + + for img in sample_images: + mask = get_image_mask(img) + composite[mask] = img[mask] + + return composite + + def __call__(self, *args, **kwargs): + return self.renders(*args, **kwargs) + + +env_name = "hopper-medium-expert-v2" +env = gym.make(env_name) +data = env.get_dataset() # dataset is only used for normalization in this colab + +# Cuda settings for colab +# torch.cuda.get_device_name(0) +DEVICE = "cpu" +DTYPE = torch.float + +# diffusion model settings +n_samples = 4 # number of trajectories planned via diffusion +horizon = 128 # length of sampled trajectories +state_dim = env.observation_space.shape[0] +action_dim = env.action_space.shape[0] +num_inference_steps = 100 # number of difusion steps + +obs = env.reset() +obs_raw = obs + +# normalize observations for forward passes +obs = normalize(obs, data, "observations") + + +# Two generators for different parts of the diffusion loop to work in colab +generator = torch.Generator(device="cuda") +generator_cpu = torch.Generator(device="cpu") +network = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) + +scheduler = DDPMScheduler(num_train_timesteps=100, beta_schedule="squaredcos_cap_v2") +optimizer = torch.optim.AdamW( + network.parameters(), + lr=0.001, + betas=(0.95, 0.99), + weight_decay=1e-6, + eps=1e-8, +) + +# TODO: Flesh this out using accelerate library (a la other examples) diff --git a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py new file mode 100644 index 000000000000..b154295e9726 --- /dev/null +++ b/scripts/convert_models_diffuser_to_diffusers.py @@ -0,0 +1,77 @@ +import json +import os + +import torch + +from diffusers import UNet1DModel + + +os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True) +os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True) + +os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) + + +def unet(hor): + if hor == 128: + down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") + block_out_channels = (32, 128, 256) + up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D") + + elif hor == 32: + down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") + block_out_channels = (32, 64, 128, 256) + up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") + model = 
torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch") + state_dict = model.state_dict() + config = dict( + down_block_types=down_block_types, + block_out_channels=block_out_channels, + up_block_types=up_block_types, + layers_per_block=1, + ) + hf_value_function = UNet1DModel(**config) + print(f"length of state dict: {len(state_dict.keys())}") + print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") + mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) + for k, v in mapping.items(): + state_dict[v] = state_dict.pop(k) + hf_value_function.load_state_dict(state_dict) + + torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin") + with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f: + json.dump(config, f) + + +def value_function(): + config = dict( + in_channels=14, + down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), + up_block_types=(), + out_block_type="ValueFunction", + block_out_channels=(32, 64, 128, 256), + layers_per_block=1, + always_downsample=True, + ) + + model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch") + state_dict = model + hf_value_function = UNet1DModel(**config) + print(f"length of state dict: {len(state_dict.keys())}") + print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") + + mapping = dict((k, hfk) for k, hfk in zip(state_dict.keys(), hf_value_function.state_dict().keys())) + for k, v in mapping.items(): + state_dict[v] = state_dict.pop(k) + + hf_value_function.load_state_dict(state_dict) + + torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin") + with open("hub/hopper-medium-v2/value_function/config.json", "w") as f: + json.dump(config, f) + + +if __name__ == "__main__": + unet(32) + # unet(128) + value_function() diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index fa97effaaf0a..7088e560dd66 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .modeling_utils import ModelMixin - from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, VQModel + from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction, VQModel from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index c5d53b2feb4b..b771aaac8467 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -19,6 +19,7 @@ from .unet_1d import UNet1DModel from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel + from .unet_rl import ValueFunction from .vae import AutoencoderKL, VQModel if is_flax_available(): diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index 3ede756c9b3d..b720c78b8833 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -17,14 +17,12 @@ import torch import torch.nn as nn -from diffusers.models.resnet import ResidualTemporalBlock1D -from diffusers.models.unet_1d_blocks import get_down_block, get_up_block +from diffusers.models.unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block from ..configuration_utils 
import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from ..utils import BaseOutput from .embeddings import TimestepEmbedding, Timesteps -from .resnet import rearrange_dims @dataclass @@ -62,10 +60,13 @@ def __init__( out_channels: int = 14, down_block_types: Tuple[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), up_block_types: Tuple[str] = ("UpResnetBlock1D", "UpResnetBlock1D"), + mid_block_types: Tuple[str] = ("MidResTemporalBlock1D", "MidResTemporalBlock1D"), + out_block_type: str = "OutConv1DBlock", block_out_channels: Tuple[int] = (32, 128, 256), act_fn: str = "mish", norm_num_groups: int = 8, layers_per_block: int = 1, + always_downsample: bool = False, ): super().__init__() @@ -95,14 +96,30 @@ def __init__( in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], - add_downsample=not is_final_block, + add_downsample=not is_final_block or always_downsample, ) self.down_blocks.append(down_block) # mid - self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim, embed_dim=block_out_channels[0]) - self.mid_block2 = ResidualTemporalBlock1D(mid_dim, mid_dim, embed_dim=block_out_channels[0]) - + self.mid_blocks = nn.ModuleList([]) + for i, mid_block_type in enumerate(mid_block_types): + if always_downsample: + mid_block = get_mid_block( + mid_block_type, + in_channels=mid_dim // (i + 1), + out_channels=mid_dim // ((i + 1) * 2), + embed_dim=block_out_channels[0], + add_downsample=True, + ) + else: + mid_block = get_mid_block( + mid_block_type, + in_channels=mid_dim, + out_channels=mid_dim, + embed_dim=block_out_channels[0], + add_downsample=False, + ) + self.mid_blocks.append(mid_block) # up reversed_block_out_channels = list(reversed(block_out_channels)) for i, up_block_type in enumerate(up_block_types): @@ -123,13 +140,14 @@ def __init__( # out num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) - self.final_conv1d_1 = nn.Conv1d(block_out_channels[0], block_out_channels[0], 5, padding=2) - self.final_conv1d_gn = nn.GroupNorm(num_groups_out, block_out_channels[0]) - if act_fn == "silu": - self.final_conv1d_act = nn.SiLU() - if act_fn == "mish": - self.final_conv1d_act = nn.Mish() - self.final_conv1d_2 = nn.Conv1d(block_out_channels[0], out_channels, 1) + self.out_block = get_out_block( + out_block_type=out_block_type, + num_groups_out=num_groups_out, + embed_dim=block_out_channels[0], + out_channels=out_channels, + act_fn=act_fn, + fc_dim=mid_dim // 4, + ) def forward( self, @@ -166,20 +184,15 @@ def forward( down_block_res_samples.append(res_samples[0]) # 3. mid - sample = self.mid_block1(sample, temb) - sample = self.mid_block2(sample, temb) + for mid_block in self.mid_blocks: + sample = mid_block(sample, temb) # 4. up for up_block in self.up_blocks: sample = up_block(hidden_states=sample, res_hidden_states=down_block_res_samples.pop(), temb=temb) # 5. 
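+        # (the fixed Conv1d/GroupNorm head removed below is replaced by a
+        # configurable out_block, so the same UNet1DModel can end in either a
+        # convolutional head or the value function's fully connected head)
        # 5.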
post-process - sample = self.final_conv1d_1(sample) - sample = rearrange_dims(sample) - sample = self.final_conv1d_gn(sample) - sample = rearrange_dims(sample) - sample = self.final_conv1d_act(sample) - sample = self.final_conv1d_2(sample) + sample = self.out_block(sample, temb) if not return_dict: return (sample,) diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 40e25fb43afb..a00372faf7d9 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -17,7 +17,7 @@ import torch.nn.functional as F from torch import nn -from .resnet import Downsample1D, ResidualTemporalBlock1D, Upsample1D +from .resnet import Downsample1D, ResidualTemporalBlock1D, Upsample1D, rearrange_dims class DownResnetBlock1D(nn.Module): @@ -173,6 +173,66 @@ class UpBlock1DNoSkip(nn.Module): pass +class MidResTemporalBlock1D(nn.Module): + def __init__(self, in_channels, out_channels, embed_dim, add_downsample): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.add_downsample = add_downsample + self.resnet = ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim) + + if add_downsample: + self.downsample = Downsample1D(out_channels, use_conv=True) + else: + self.downsample = nn.Identity() + + def forward(self, sample, temb): + sample = self.resnet(sample, temb) + sample = self.downsample(sample) + return sample + + +class OutConv1DBlock(nn.Module): + def __init__(self, num_groups_out, out_channels, embed_dim, act_fn): + super().__init__() + self.final_conv1d_1 = nn.Conv1d(embed_dim, embed_dim, 5, padding=2) + self.final_conv1d_gn = nn.GroupNorm(num_groups_out, embed_dim) + if act_fn == "silu": + self.final_conv1d_act = nn.SiLU() + if act_fn == "mish": + self.final_conv1d_act = nn.Mish() + self.final_conv1d_2 = nn.Conv1d(embed_dim, out_channels, 1) + + def forward(self, sample, t): + sample = self.final_conv1d_1(sample) + sample = rearrange_dims(sample) + sample = self.final_conv1d_gn(sample) + sample = rearrange_dims(sample) + sample = self.final_conv1d_act(sample) + sample = self.final_conv1d_2(sample) + return sample + + +class OutValueFunctionBlock(nn.Module): + def __init__(self, fc_dim, embed_dim): + super().__init__() + self.final_block = nn.ModuleList( + [ + nn.Linear(fc_dim + embed_dim, fc_dim // 2), + nn.Mish(), + nn.Linear(fc_dim // 2, 1), + ] + ) + + def forward(self, sample, t): + sample = sample.view(sample.shape[0], -1) + sample = torch.cat((sample, t), dim=-1) + for layer in self.final_block: + sample = layer(sample) + + return sample + + def get_down_block(down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample): if down_block_type == "DownResnetBlock1D": return DownResnetBlock1D( @@ -195,5 +255,19 @@ def get_up_block(up_block_type, num_layers, in_channels, out_channels, temb_chan temb_channels=temb_channels, add_upsample=add_upsample, ) - + elif up_block_type == "Identity": + return nn.Identity() raise ValueError(f"{up_block_type} does not exist.") + + +def get_mid_block(mid_block_type, in_channels, out_channels, embed_dim, add_downsample): + if mid_block_type == "MidResTemporalBlock1D": + return MidResTemporalBlock1D(in_channels, out_channels, embed_dim, add_downsample) + raise ValueError(f"{mid_block_type} does not exist.") + + +def get_out_block(*, out_block_type, num_groups_out, embed_dim, out_channels, act_fn, fc_dim): + if out_block_type == "OutConv1DBlock": + return OutConv1DBlock(num_groups_out, out_channels, embed_dim, 
act_fn) + elif out_block_type == "ValueFunction": + return OutValueFunctionBlock(fc_dim, embed_dim) diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py new file mode 100644 index 000000000000..66822f99b198 --- /dev/null +++ b/src/diffusers/models/unet_rl.py @@ -0,0 +1,135 @@ +# model adapted from diffuser https://github.com/jannerm/diffuser/blob/main/diffuser/models/temporal.py +from dataclasses import dataclass +from typing import Tuple, Union + +import torch +import torch.nn as nn + +from diffusers.models.resnet import Downsample1D, ResidualTemporalBlock1D +from diffusers.models.unet_1d_blocks import get_down_block + +from ..configuration_utils import ConfigMixin, register_to_config +from ..modeling_utils import ModelMixin +from ..utils import BaseOutput +from .embeddings import TimestepEmbedding, Timesteps + + +@dataclass +class ValueFunctionOutput(BaseOutput): + """ + Args: + sample (`torch.FloatTensor` of shape `(batch, horizon, 1)`): + Hidden states output. Output of last layer of model. + """ + + sample: torch.FloatTensor + + +class ValueFunction(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + in_channels=14, + down_block_types: Tuple[str] = ( + "DownResnetBlock1D", + "DownResnetBlock1D", + "DownResnetBlock1D", + "DownResnetBlock1D", + ), + block_out_channels: Tuple[int] = (32, 64, 128, 256), + act_fn: str = "mish", + norm_num_groups: int = 8, + layers_per_block: int = 1, + ): + super().__init__() + time_embed_dim = block_out_channels[0] * 4 + self.time_proj = Timesteps(num_channels=block_out_channels[0], flip_sin_to_cos=False, downscale_freq_shift=1) + self.time_mlp = TimestepEmbedding( + channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn="mish", out_dim=block_out_channels[0] + ) + + self.blocks = nn.ModuleList([]) + mid_dim = block_out_channels[-1] + + output_channel = in_channels + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + + down_block_type = down_block_types[i] + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=block_out_channels[0], + add_downsample=True, + ) + self.blocks.append(down_block) + + ## + self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim // 2, embed_dim=block_out_channels[0]) + self.mid_down1 = Downsample1D(mid_dim // 2, use_conv=True) + ## + self.mid_block2 = ResidualTemporalBlock1D(mid_dim // 2, mid_dim // 4, embed_dim=block_out_channels[0]) + self.mid_down2 = Downsample1D(mid_dim // 4, use_conv=True) + ## + fc_dim = mid_dim // 4 + self.final_block = nn.ModuleList( + [ + nn.Linear(fc_dim + block_out_channels[0], fc_dim // 2), + nn.Mish(), + nn.Linear(fc_dim // 2, 1), + ] + ) + + def forward( + self, + sample: torch.FloatTensor, + timestep: Union[torch.Tensor, float, int], + return_dict: bool = True, + ) -> Union[ValueFunctionOutput, Tuple]: + """r + Args: + sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor + timestep (`torch.FloatTensor` or `float` or `int): batch (batch) timesteps + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unet_rl.ValueFunctionOutput`] instead of a plain tuple. + + Returns: + [`~models.unet_rl.ValueFunctionOutput`] or `tuple`: [`~models.unet_rl.ValueFunctionOutput`] if + `return_dict` is True, otherwise a `tuple`. 
When returning a tuple, the first element is the sample tensor. + """ + sample = sample.permute(0, 2, 1) + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) + elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + t = self.time_proj(timesteps) + t = self.time_mlp(t) + down_block_res_samples = [] + + # 2. down + for downsample_block in self.blocks: + sample, res_samples = downsample_block(hidden_states=sample, temb=t) + down_block_res_samples.append(res_samples[0]) + + # 3. mid + sample = self.mid_block1(sample, t) + sample = self.mid_down1(sample) + sample = self.mid_block2(sample, t) + sample = self.mid_down2(sample) + + sample = sample.view(sample.shape[0], -1) + sample = torch.cat((sample, t), dim=-1) + for layer in self.final_block: + sample = layer(sample) + + if not return_dict: + return (sample,) + + return ValueFunctionOutput(sample=sample) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 04c92904a660..06596bd6091f 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -200,6 +200,7 @@ def _get_variance(self, t, predicted_variance=None, variance_type=None): # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": variance = torch.log(torch.clamp(variance, min=1e-20)) + variance = torch.exp(0.5 * variance) elif variance_type == "fixed_large": variance = self.betas[t] elif variance_type == "fixed_large_log": @@ -283,7 +284,10 @@ def step( noise = torch.randn( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) - variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise + if self.variance_type == "fixed_small_log": + variance = self._get_variance(t, predicted_variance=predicted_variance) * noise + else: + variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise pred_prev_sample = pred_prev_sample + variance diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index e1dbdfaa4611..55f373af8a9b 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -20,7 +20,7 @@ import torch -from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel +from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction from diffusers.utils import floats_tensor, slow, torch_device from .test_modeling_common import ModelTesterMixin @@ -524,3 +524,86 @@ def test_output_pretrained(self): def test_forward_with_norm_groups(self): # Not implemented yet for this UNet pass + + +class UNetRLModelTests(ModelTesterMixin, unittest.TestCase): + model_class = ValueFunction + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 14, 16) + + @property + def output_shape(self): + return (4, 14, 1) + + def test_ema_training(self): + pass + + def test_training(self): + pass + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (32, 64, 128, 256), + "in_channels": 14, + "out_channels": 14, + } + inputs_dict = 
self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + unet, loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-unet-hor32", output_loading_info=True + ) + value_function, vf_loading_info = ValueFunction.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + ) + self.assertIsNotNone(unet) + self.assertEqual(len(loading_info["missing_keys"]), 0) + self.assertIsNotNone(value_function) + self.assertEqual(len(vf_loading_info["missing_keys"]), 0) + + unet.to(torch_device) + value_function.to(torch_device) + image = value_function(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + value_function, vf_loading_info = ValueFunction.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + ) + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + num_features = value_function.in_channels + seq_len = 14 + noise = torch.randn((1, seq_len, num_features)).permute( + 0, 2, 1 + ) # match original, we can update values and remove + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = value_function(noise, time_step).sample + + # fmt: off + expected_output_slice = torch.tensor([207.0272] * seq_len) + # fmt: on + self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) + + def test_forward_with_norm_groups(self): + # Not implemented yet for this UNet + pass From 3acddb56e67e1fbcbac3e243a651daad93df3239 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Thu, 20 Oct 2022 20:05:20 -0700 Subject: [PATCH 067/133] update post merge of scripts --- src/diffusers/__init__.py | 2 +- src/diffusers/models/__init__.py | 1 - src/diffusers/models/unet_rl.py | 135 ------------------------------- tests/test_models_unet.py | 85 +------------------ 4 files changed, 2 insertions(+), 221 deletions(-) delete mode 100644 src/diffusers/models/unet_rl.py diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 7088e560dd66..fa97effaaf0a 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .modeling_utils import ModelMixin - from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction, VQModel + from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, VQModel from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index b771aaac8467..c5d53b2feb4b 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -19,7 +19,6 @@ from .unet_1d import UNet1DModel from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel - from .unet_rl import ValueFunction from .vae import AutoencoderKL, VQModel if is_flax_available(): diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py deleted file mode 100644 index 66822f99b198..000000000000 --- a/src/diffusers/models/unet_rl.py +++ /dev/null @@ -1,135 +0,0 @@ -# model adapted from diffuser https://github.com/jannerm/diffuser/blob/main/diffuser/models/temporal.py -from dataclasses import dataclass -from typing import Tuple, Union - -import torch -import torch.nn as nn - -from diffusers.models.resnet import Downsample1D, ResidualTemporalBlock1D -from diffusers.models.unet_1d_blocks import 
get_down_block - -from ..configuration_utils import ConfigMixin, register_to_config -from ..modeling_utils import ModelMixin -from ..utils import BaseOutput -from .embeddings import TimestepEmbedding, Timesteps - - -@dataclass -class ValueFunctionOutput(BaseOutput): - """ - Args: - sample (`torch.FloatTensor` of shape `(batch, horizon, 1)`): - Hidden states output. Output of last layer of model. - """ - - sample: torch.FloatTensor - - -class ValueFunction(ModelMixin, ConfigMixin): - @register_to_config - def __init__( - self, - in_channels=14, - down_block_types: Tuple[str] = ( - "DownResnetBlock1D", - "DownResnetBlock1D", - "DownResnetBlock1D", - "DownResnetBlock1D", - ), - block_out_channels: Tuple[int] = (32, 64, 128, 256), - act_fn: str = "mish", - norm_num_groups: int = 8, - layers_per_block: int = 1, - ): - super().__init__() - time_embed_dim = block_out_channels[0] * 4 - self.time_proj = Timesteps(num_channels=block_out_channels[0], flip_sin_to_cos=False, downscale_freq_shift=1) - self.time_mlp = TimestepEmbedding( - channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn="mish", out_dim=block_out_channels[0] - ) - - self.blocks = nn.ModuleList([]) - mid_dim = block_out_channels[-1] - - output_channel = in_channels - for i, down_block_type in enumerate(down_block_types): - input_channel = output_channel - output_channel = block_out_channels[i] - - down_block_type = down_block_types[i] - down_block = get_down_block( - down_block_type, - num_layers=layers_per_block, - in_channels=input_channel, - out_channels=output_channel, - temb_channels=block_out_channels[0], - add_downsample=True, - ) - self.blocks.append(down_block) - - ## - self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim // 2, embed_dim=block_out_channels[0]) - self.mid_down1 = Downsample1D(mid_dim // 2, use_conv=True) - ## - self.mid_block2 = ResidualTemporalBlock1D(mid_dim // 2, mid_dim // 4, embed_dim=block_out_channels[0]) - self.mid_down2 = Downsample1D(mid_dim // 4, use_conv=True) - ## - fc_dim = mid_dim // 4 - self.final_block = nn.ModuleList( - [ - nn.Linear(fc_dim + block_out_channels[0], fc_dim // 2), - nn.Mish(), - nn.Linear(fc_dim // 2, 1), - ] - ) - - def forward( - self, - sample: torch.FloatTensor, - timestep: Union[torch.Tensor, float, int], - return_dict: bool = True, - ) -> Union[ValueFunctionOutput, Tuple]: - """r - Args: - sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor - timestep (`torch.FloatTensor` or `float` or `int): batch (batch) timesteps - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~models.unet_rl.ValueFunctionOutput`] instead of a plain tuple. - - Returns: - [`~models.unet_rl.ValueFunctionOutput`] or `tuple`: [`~models.unet_rl.ValueFunctionOutput`] if - `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - """ - sample = sample.permute(0, 2, 1) - - # 1. time - timesteps = timestep - if not torch.is_tensor(timesteps): - timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) - elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: - timesteps = timesteps[None].to(sample.device) - - t = self.time_proj(timesteps) - t = self.time_mlp(t) - down_block_res_samples = [] - - # 2. down - for downsample_block in self.blocks: - sample, res_samples = downsample_block(hidden_states=sample, temb=t) - down_block_res_samples.append(res_samples[0]) - - # 3. 
mid - sample = self.mid_block1(sample, t) - sample = self.mid_down1(sample) - sample = self.mid_block2(sample, t) - sample = self.mid_down2(sample) - - sample = sample.view(sample.shape[0], -1) - sample = torch.cat((sample, t), dim=-1) - for layer in self.final_block: - sample = layer(sample) - - if not return_dict: - return (sample,) - - return ValueFunctionOutput(sample=sample) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 55f373af8a9b..e1dbdfaa4611 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -20,7 +20,7 @@ import torch -from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction +from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel from diffusers.utils import floats_tensor, slow, torch_device from .test_modeling_common import ModelTesterMixin @@ -524,86 +524,3 @@ def test_output_pretrained(self): def test_forward_with_norm_groups(self): # Not implemented yet for this UNet pass - - -class UNetRLModelTests(ModelTesterMixin, unittest.TestCase): - model_class = ValueFunction - - @property - def dummy_input(self): - batch_size = 4 - num_features = 14 - seq_len = 16 - - noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) - time_step = torch.tensor([10] * batch_size).to(torch_device) - - return {"sample": noise, "timestep": time_step} - - @property - def input_shape(self): - return (4, 14, 16) - - @property - def output_shape(self): - return (4, 14, 1) - - def test_ema_training(self): - pass - - def test_training(self): - pass - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "block_out_channels": (32, 64, 128, 256), - "in_channels": 14, - "out_channels": 14, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - unet, loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-unet-hor32", output_loading_info=True - ) - value_function, vf_loading_info = ValueFunction.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True - ) - self.assertIsNotNone(unet) - self.assertEqual(len(loading_info["missing_keys"]), 0) - self.assertIsNotNone(value_function) - self.assertEqual(len(vf_loading_info["missing_keys"]), 0) - - unet.to(torch_device) - value_function.to(torch_device) - image = value_function(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - value_function, vf_loading_info = ValueFunction.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True - ) - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - num_features = value_function.in_channels - seq_len = 14 - noise = torch.randn((1, seq_len, num_features)).permute( - 0, 2, 1 - ) # match original, we can update values and remove - time_step = torch.full((num_features,), 0) - - with torch.no_grad(): - output = value_function(noise, time_step).sample - - # fmt: off - expected_output_slice = torch.tensor([207.0272] * seq_len) - # fmt: on - self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) - - def test_forward_with_norm_groups(self): - # Not implemented yet for this UNet - pass From a9cee784d28f5aecff1431d1cd72cc4b492aa462 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 23:20:13 -0400 Subject: [PATCH 068/133] clean up comments --- examples/community/pipeline.py | 16 +++++++++++----- 
examples/community/value_guided_diffuser.py | 16 +++++++++++----- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/examples/community/pipeline.py b/examples/community/pipeline.py index 5159de402b3a..6b28e868eddd 100644 --- a/examples/community/pipeline.py +++ b/examples/community/pipeline.py @@ -59,7 +59,6 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) - # 3. call the sample function for _ in range(n_guide_steps): with torch.enable_grad(): x.requires_grad_() @@ -73,30 +72,37 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): x = x.detach() x = x + scale * grad x = self.reset_x0(x, conditions, self.action_dim) - # with torch.no_grad(): prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - # 4. apply conditions to the trajectory + # apply conditions to the trajectory x = self.reset_x0(x, conditions, self.action_dim) x = self.to_torch(x) - # y = network(x, timesteps).sample return x, y def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): + # normalize the observations and create batch dimension obs = self.normalize(obs, "observations") obs = obs[None].repeat(batch_size, axis=0) + conditions = {0: self.to_torch(obs)} shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) + + # generate initial noise and apply our conditions (to make the trajectories start at current state) x1 = torch.randn(shape, device=self.unet.device) x = self.reset_x0(x1, conditions, self.action_dim) x = self.to_torch(x) + + # run the diffusion process x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) + + # sort output trajectories by value sorted_idx = y.argsort(0, descending=True).squeeze() sorted_values = x[sorted_idx] actions = sorted_values[:, :, : self.action_dim] actions = actions.detach().cpu().numpy() denorm_actions = self.de_normalize(actions, key="actions") - # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] + + # select the action with the highest value denorm_actions = denorm_actions[0, 0] return denorm_actions diff --git a/examples/community/value_guided_diffuser.py b/examples/community/value_guided_diffuser.py index 5159de402b3a..6b28e868eddd 100644 --- a/examples/community/value_guided_diffuser.py +++ b/examples/community/value_guided_diffuser.py @@ -59,7 +59,6 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) - # 3. call the sample function for _ in range(n_guide_steps): with torch.enable_grad(): x.requires_grad_() @@ -73,30 +72,37 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): x = x.detach() x = x + scale * grad x = self.reset_x0(x, conditions, self.action_dim) - # with torch.no_grad(): prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - # 4. 
apply conditions to the trajectory + # apply conditions to the trajectory x = self.reset_x0(x, conditions, self.action_dim) x = self.to_torch(x) - # y = network(x, timesteps).sample return x, y def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): + # normalize the observations and create batch dimension obs = self.normalize(obs, "observations") obs = obs[None].repeat(batch_size, axis=0) + conditions = {0: self.to_torch(obs)} shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) + + # generate initial noise and apply our conditions (to make the trajectories start at current state) x1 = torch.randn(shape, device=self.unet.device) x = self.reset_x0(x1, conditions, self.action_dim) x = self.to_torch(x) + + # run the diffusion process x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) + + # sort output trajectories by value sorted_idx = y.argsort(0, descending=True).squeeze() sorted_values = x[sorted_idx] actions = sorted_values[:, :, : self.action_dim] actions = actions.detach().cpu().numpy() denorm_actions = self.de_normalize(actions, key="actions") - # denorm_actions = denorm_actions[np.random.randint(config['n_samples']), 0] + + # select the action with the highest value denorm_actions = denorm_actions[0, 0] return denorm_actions From b7fac182341ae459d9548b31712353d9714e304c Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 20 Oct 2022 23:34:25 -0400 Subject: [PATCH 069/133] convert older script to using pipeline and add readme --- examples/community/pipeline.py | 9 +- examples/diffuser/README.md | 16 ++ examples/diffuser/run_diffuser.py | 194 +++++++----------- .../diffuser/run_diffuser_value_guided.py | 12 +- 4 files changed, 100 insertions(+), 131 deletions(-) create mode 100644 examples/diffuser/README.md diff --git a/examples/community/pipeline.py b/examples/community/pipeline.py index 6b28e868eddd..3badedea3d27 100644 --- a/examples/community/pipeline.py +++ b/examples/community/pipeline.py @@ -1,5 +1,5 @@ import torch - +import numpy as np import tqdm from diffusers import DiffusionPipeline from diffusers.models.unet_1d import UNet1DModel @@ -104,5 +104,10 @@ def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, sca denorm_actions = self.de_normalize(actions, key="actions") # select the action with the highest value - denorm_actions = denorm_actions[0, 0] + if y is not None: + selected_index = 0 + else: + # if we didn't run value guiding, select a random action + selected_index = np.random.randint(0, batch_size) + denorm_actions = denorm_actions[selected_index, 0] return denorm_actions diff --git a/examples/diffuser/README.md b/examples/diffuser/README.md new file mode 100644 index 000000000000..464ccd57af85 --- /dev/null +++ b/examples/diffuser/README.md @@ -0,0 +1,16 @@ +# Overview + +These examples show how to run (Diffuser)[https://arxiv.org/pdf/2205.09991.pdf] in Diffusers. There are two scripts, `run_diffuser_value_guided.py` and `run_diffuser.py`. 
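+
+`run_diffuser_value_guided.py` uses a value function to steer the denoising process toward high-return trajectories and executes the best plan; `run_diffuser.py` runs the same pipeline with `n_guide_steps=0`, so it samples unguided trajectories and picks a random plan. Both scripts boil down to the rollout below (a minimal sketch using the model ids from these examples; `custom_pipeline` is an assumed path and should point at your local `examples/community` directory):
+
+```python
+import d4rl  # noqa
+import gym
+from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel
+
+env = gym.make("hopper-medium-v2")
+scheduler = DDPMScheduler(
+    num_train_timesteps=20,
+    beta_schedule="squaredcos_cap_v2",
+    clip_sample=False,
+    variance_type="fixed_small_log",
+)
+value_function = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").eval()
+unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").eval()
+pipeline = DiffusionPipeline.from_pretrained(
+    "bglick13/hopper-medium-v2-value-function-hor32",
+    value_function=value_function,
+    unet=unet,
+    scheduler=scheduler,
+    env=env,
+    custom_pipeline="examples/community",  # assumed path; adjust to your checkout
+)
+
+obs = env.reset()
+for t in range(1000):
+    action = pipeline(obs, planning_horizon=32)  # denormalized action for env.step
+    obs, reward, terminal, _ = env.step(action)
+```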
+ +You will need some RL specific requirements to run the examples: + +``` +pip install -f https://download.pytorch.org/whl/torch_stable.html \ + free-mujoco-py \ + einops \ + gym \ + protobuf==3.20.1 \ + git+https://github.com/rail-berkeley/d4rl.git \ + mediapy \ + Pillow==9.0.0 +``` diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser.py index b29d89992dfc..bb12e12aba94 100644 --- a/examples/diffuser/run_diffuser.py +++ b/examples/diffuser/run_diffuser.py @@ -1,122 +1,80 @@ -import numpy as np -import torch - import d4rl # noqa import gym import tqdm -import train_diffuser -from diffusers import DDPMScheduler, UNet1DModel - - -env_name = "hopper-medium-expert-v2" -env = gym.make(env_name) -data = env.get_dataset() # dataset is only used for normalization in this colab - -DEVICE = "cpu" -DTYPE = torch.float - -# diffusion model settings -n_samples = 4 # number of trajectories planned via diffusion -horizon = 128 # length of sampled trajectories -state_dim = env.observation_space.shape[0] -action_dim = env.action_space.shape[0] -num_inference_steps = 100 # number of difusion steps - - -# Two generators for different parts of the diffusion loop to work in colab -generator_cpu = torch.Generator(device="cpu") - -scheduler = DDPMScheduler(num_train_timesteps=100, beta_schedule="squaredcos_cap_v2") - -# 3 different pretrained models are available for this task. -# The horizion represents the length of trajectories used in training. -network = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) -# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) -# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) - - -# network specific constants for inference -clip_denoised = network.clip_denoised -predict_epsilon = network.predict_epsilon - -# [ observation_dim ] --> [ n_samples x observation_dim ] -obs = env.reset() -total_reward = 0 -done = False -T = 300 -rollout = [obs.copy()] - -try: - for t in tqdm.tqdm(range(T)): - obs_raw = obs - - # normalize observations for forward passes - obs = train_diffuser.normalize(obs, data, "observations") - obs = obs[None].repeat(n_samples, axis=0) - conditions = {0: train_diffuser.to_torch(obs, device=DEVICE)} - - # constants for inference - batch_size = len(conditions[0]) - shape = (batch_size, horizon, state_dim + action_dim) - - # sample random initial noise vector - x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu) - - # this model is conditioned from an initial state, so you will see this function - # multiple times to change the initial state of generated data to the state - # generated via env.reset() above or env.step() below - x = train_diffuser.reset_x0(x1, conditions, action_dim) - - # convert a np observation to torch for model forward pass - x = train_diffuser.to_torch(x) - - eta = 1.0 # noise factor for sampling reconstructed state - - # run the diffusion process - # for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps): - for i in tqdm.tqdm(scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long) - - # 1. generate prediction from model - with torch.no_grad(): - residual = network(x, timesteps).sample - - # 2. use the model prediction to reconstruct an observation (de-noise) - obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"] - - # 3. 
[optional] add posterior noise to the sample - if eta > 0: - noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device) - posterior_variance = scheduler._get_variance(i) # * noise - # no noise when t == 0 - # NOTE: original implementation missing sqrt on posterior_variance - obs_reconstruct = ( - obs_reconstruct + int(i > 0) * (0.5 * posterior_variance) * eta * noise - ) # MJ had as log var, exponentiated - - # 4. apply conditions to the trajectory - obs_reconstruct_postcond = train_diffuser.reset_x0(obs_reconstruct, conditions, action_dim) - x = train_diffuser.to_torch(obs_reconstruct_postcond) - plans = train_diffuser.helpers.to_np(x[:, :, :action_dim]) - # select random plan - idx = np.random.randint(plans.shape[0]) - # select action at correct time - action = plans[idx, 0, :] - actions = train_diffuser.de_normalize(action, data, "actions") - # execute action in environment - next_observation, reward, terminal, _ = env.step(action) - - # update return - total_reward += reward - print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") - - # save observations for rendering - rollout.append(next_observation.copy()) - obs = next_observation -except KeyboardInterrupt: - pass -print(f"Total reward: {total_reward}") -render = train_diffuser.MuJoCoRenderer(env) -train_diffuser.show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) +from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel + + +config = dict( + n_samples=64, + horizon=32, + num_inference_steps=20, + n_guide_steps=0, + scale_grad_by_std=True, + scale=0.1, + eta=0.0, + t_grad_cutoff=2, + device="cpu", +) + + +def _run(): + env_name = "hopper-medium-v2" + env = gym.make(env_name) + + DEVICE = config["device"] + + scheduler = DDPMScheduler( + num_train_timesteps=config["num_inference_steps"], + beta_schedule="squaredcos_cap_v2", + clip_sample=False, + variance_type="fixed_small_log", + ) + network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() + unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval() + pipeline = DiffusionPipeline.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", + value_function=network, + unet=unet, + scheduler=scheduler, + env=env, + custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", + ) + + env.seed(0) + obs = env.reset() + total_reward = 0 + total_score = 0 + T = 1000 + rollout = [obs.copy()] + try: + for t in tqdm.tqdm(range(T)): + # Call the policy + denorm_actions = pipeline(obs, planning_horizon=32) + + # execute action in environment + next_observation, reward, terminal, _ = env.step(denorm_actions) + score = env.get_normalized_score(total_reward) + # update return + total_reward += reward + total_score += score + print( + f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" + f" {total_score}" + ) + # save observations for rendering + rollout.append(next_observation.copy()) + + obs = next_observation + except KeyboardInterrupt: + pass + + print(f"Total reward: {total_reward}") + + +def run(): + _run() + + +if __name__ == "__main__": + run() diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 4272ec2c3106..2f7063c94ec4 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -2,7 +2,6 @@ import gym import tqdm -# import train_diffuser from 
diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel @@ -35,10 +34,6 @@ def _run(): variance_type="fixed_small_log", ) - # 3 different pretrained models are available for this task. - # The horizion represents the length of trajectories used in training. - # network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) - network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval() pipeline = DiffusionPipeline.from_pretrained( @@ -49,11 +44,7 @@ def _run(): env=env, custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", ) - # unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) - # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) - # add a batch dimension and repeat for multiple samples - # [ observation_dim ] --> [ n_samples x observation_dim ] env.seed(0) obs = env.reset() total_reward = 0 @@ -62,8 +53,7 @@ def _run(): rollout = [obs.copy()] try: for t in tqdm.tqdm(range(T)): - # 1. Call the policy - # normalize observations for forward passes + # call the policy denorm_actions = pipeline(obs, planning_horizon=32) # execute action in environment From b3edd7bb8621635bf8436b2ceed53e35e8015007 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Fri, 21 Oct 2022 10:53:44 -0400 Subject: [PATCH 070/133] rename scripts --- .../{run_diffuser.py => run_diffuser_gen_trajectories.py} | 0 .../{run_diffuser_value_guided.py => run_diffuser_locomotion.py} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename examples/diffuser/{run_diffuser.py => run_diffuser_gen_trajectories.py} (100%) rename examples/diffuser/{run_diffuser_value_guided.py => run_diffuser_locomotion.py} (100%) diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser_gen_trajectories.py similarity index 100% rename from examples/diffuser/run_diffuser.py rename to examples/diffuser/run_diffuser_gen_trajectories.py diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_locomotion.py similarity index 100% rename from examples/diffuser/run_diffuser_value_guided.py rename to examples/diffuser/run_diffuser_locomotion.py From 8b01b9348809aac585aadb9de9145bd80d750bd8 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Fri, 21 Oct 2022 10:55:46 -0400 Subject: [PATCH 071/133] style, update tests --- examples/community/pipeline.py | 3 ++- examples/diffuser/run_diffuser_gen_trajectories.py | 1 - examples/diffuser/run_diffuser_locomotion.py | 1 - tests/test_models_unet.py | 6 +++--- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/examples/community/pipeline.py b/examples/community/pipeline.py index 3badedea3d27..85e359c5c4c9 100644 --- a/examples/community/pipeline.py +++ b/examples/community/pipeline.py @@ -1,5 +1,6 @@ -import torch import numpy as np +import torch + import tqdm from diffusers import DiffusionPipeline from diffusers.models.unet_1d import UNet1DModel diff --git a/examples/diffuser/run_diffuser_gen_trajectories.py b/examples/diffuser/run_diffuser_gen_trajectories.py index bb12e12aba94..f4c86635c652 100644 --- a/examples/diffuser/run_diffuser_gen_trajectories.py +++ b/examples/diffuser/run_diffuser_gen_trajectories.py @@ -1,7 +1,6 @@ import d4rl # noqa import gym import tqdm - from diffusers import DDPMScheduler, DiffusionPipeline, 
UNet1DModel diff --git a/examples/diffuser/run_diffuser_locomotion.py b/examples/diffuser/run_diffuser_locomotion.py index 2f7063c94ec4..1b4351095d3b 100644 --- a/examples/diffuser/run_diffuser_locomotion.py +++ b/examples/diffuser/run_diffuser_locomotion.py @@ -1,7 +1,6 @@ import d4rl # noqa import gym import tqdm - from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 55f373af8a9b..1ff092b3ce78 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -527,7 +527,7 @@ def test_forward_with_norm_groups(self): class UNetRLModelTests(ModelTesterMixin, unittest.TestCase): - model_class = ValueFunction + model_class = UNet1DModel @property def dummy_input(self): @@ -567,7 +567,7 @@ def test_from_pretrained_hub(self): unet, loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-unet-hor32", output_loading_info=True ) - value_function, vf_loading_info = ValueFunction.from_pretrained( + value_function, vf_loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True ) self.assertIsNotNone(unet) @@ -582,7 +582,7 @@ def test_from_pretrained_hub(self): assert image is not None, "Make sure output is not None" def test_output_pretrained(self): - value_function, vf_loading_info = ValueFunction.from_pretrained( + value_function, vf_loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True ) torch.manual_seed(0) From 3c668a7b26d98103ce0d76894cbda89a41046059 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Sat, 22 Oct 2022 10:41:03 -0400 Subject: [PATCH 072/133] delete unet rl model file --- src/diffusers/models/unet_rl.py | 135 -------------------------------- 1 file changed, 135 deletions(-) delete mode 100644 src/diffusers/models/unet_rl.py diff --git a/src/diffusers/models/unet_rl.py b/src/diffusers/models/unet_rl.py deleted file mode 100644 index 66822f99b198..000000000000 --- a/src/diffusers/models/unet_rl.py +++ /dev/null @@ -1,135 +0,0 @@ -# model adapted from diffuser https://github.com/jannerm/diffuser/blob/main/diffuser/models/temporal.py -from dataclasses import dataclass -from typing import Tuple, Union - -import torch -import torch.nn as nn - -from diffusers.models.resnet import Downsample1D, ResidualTemporalBlock1D -from diffusers.models.unet_1d_blocks import get_down_block - -from ..configuration_utils import ConfigMixin, register_to_config -from ..modeling_utils import ModelMixin -from ..utils import BaseOutput -from .embeddings import TimestepEmbedding, Timesteps - - -@dataclass -class ValueFunctionOutput(BaseOutput): - """ - Args: - sample (`torch.FloatTensor` of shape `(batch, horizon, 1)`): - Hidden states output. Output of last layer of model. 
- """ - - sample: torch.FloatTensor - - -class ValueFunction(ModelMixin, ConfigMixin): - @register_to_config - def __init__( - self, - in_channels=14, - down_block_types: Tuple[str] = ( - "DownResnetBlock1D", - "DownResnetBlock1D", - "DownResnetBlock1D", - "DownResnetBlock1D", - ), - block_out_channels: Tuple[int] = (32, 64, 128, 256), - act_fn: str = "mish", - norm_num_groups: int = 8, - layers_per_block: int = 1, - ): - super().__init__() - time_embed_dim = block_out_channels[0] * 4 - self.time_proj = Timesteps(num_channels=block_out_channels[0], flip_sin_to_cos=False, downscale_freq_shift=1) - self.time_mlp = TimestepEmbedding( - channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn="mish", out_dim=block_out_channels[0] - ) - - self.blocks = nn.ModuleList([]) - mid_dim = block_out_channels[-1] - - output_channel = in_channels - for i, down_block_type in enumerate(down_block_types): - input_channel = output_channel - output_channel = block_out_channels[i] - - down_block_type = down_block_types[i] - down_block = get_down_block( - down_block_type, - num_layers=layers_per_block, - in_channels=input_channel, - out_channels=output_channel, - temb_channels=block_out_channels[0], - add_downsample=True, - ) - self.blocks.append(down_block) - - ## - self.mid_block1 = ResidualTemporalBlock1D(mid_dim, mid_dim // 2, embed_dim=block_out_channels[0]) - self.mid_down1 = Downsample1D(mid_dim // 2, use_conv=True) - ## - self.mid_block2 = ResidualTemporalBlock1D(mid_dim // 2, mid_dim // 4, embed_dim=block_out_channels[0]) - self.mid_down2 = Downsample1D(mid_dim // 4, use_conv=True) - ## - fc_dim = mid_dim // 4 - self.final_block = nn.ModuleList( - [ - nn.Linear(fc_dim + block_out_channels[0], fc_dim // 2), - nn.Mish(), - nn.Linear(fc_dim // 2, 1), - ] - ) - - def forward( - self, - sample: torch.FloatTensor, - timestep: Union[torch.Tensor, float, int], - return_dict: bool = True, - ) -> Union[ValueFunctionOutput, Tuple]: - """r - Args: - sample (`torch.FloatTensor`): (batch, horizon, obs_dimension + action_dimension) noisy inputs tensor - timestep (`torch.FloatTensor` or `float` or `int): batch (batch) timesteps - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~models.unet_rl.ValueFunctionOutput`] instead of a plain tuple. - - Returns: - [`~models.unet_rl.ValueFunctionOutput`] or `tuple`: [`~models.unet_rl.ValueFunctionOutput`] if - `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - """ - sample = sample.permute(0, 2, 1) - - # 1. time - timesteps = timestep - if not torch.is_tensor(timesteps): - timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) - elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: - timesteps = timesteps[None].to(sample.device) - - t = self.time_proj(timesteps) - t = self.time_mlp(t) - down_block_res_samples = [] - - # 2. down - for downsample_block in self.blocks: - sample, res_samples = downsample_block(hidden_states=sample, temb=t) - down_block_res_samples.append(res_samples[0]) - - # 3. 
mid
- sample = self.mid_block1(sample, t)
- sample = self.mid_down1(sample)
- sample = self.mid_block2(sample, t)
- sample = self.mid_down2(sample)
-
- sample = sample.view(sample.shape[0], -1)
- sample = torch.cat((sample, t), dim=-1)
- for layer in self.final_block:
- sample = layer(sample)
-
- if not return_dict:
- return (sample,)
-
- return ValueFunctionOutput(sample=sample)

From 713e8f27172fe708703342a4b3f67802172845dd Mon Sep 17 00:00:00 2001
From: Nathan Lambert
Date: Mon, 24 Oct 2022 09:47:55 -0700
Subject: [PATCH 073/133] add midblock / outblock architecture

---
 src/diffusers/models/unet_1d.py | 39 +++++------
 src/diffusers/models/unet_1d_blocks.py | 94 +++++++++++++++++++-------
 2 files changed, 85 insertions(+), 48 deletions(-)

diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py
index b720c78b8833..8f74926da505 100644
--- a/src/diffusers/models/unet_1d.py
+++ b/src/diffusers/models/unet_1d.py
@@ -60,7 +60,7 @@ def __init__(
 out_channels: int = 14,
 down_block_types: Tuple[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
 up_block_types: Tuple[str] = ("UpResnetBlock1D", "UpResnetBlock1D"),
- mid_block_types: Tuple[str] = ("MidResTemporalBlock1D", "MidResTemporalBlock1D"),
+ mid_block_type: Tuple[str] = "MidResTemporalBlock1D",
 out_block_type: str = "OutConv1DBlock",
 block_out_channels: Tuple[int] = (32, 128, 256),
 act_fn: str = "mish",
@@ -79,7 +79,9 @@
 )
 self.down_blocks = nn.ModuleList([])
+ self.mid_block = None
 self.up_blocks = nn.ModuleList([])
+ self.out_block = None
 mid_dim = block_out_channels[-1]
 # down
@@ -101,25 +103,15 @@
 self.down_blocks.append(down_block)
 # mid
- self.mid_blocks = nn.ModuleList([])
- for i, mid_block_type in enumerate(mid_block_types):
- if always_downsample:
- mid_block = get_mid_block(
- mid_block_type,
- in_channels=mid_dim // (i + 1),
- out_channels=mid_dim // ((i + 1) * 2),
- embed_dim=block_out_channels[0],
- add_downsample=True,
- )
- else:
- mid_block = get_mid_block(
- mid_block_type,
- in_channels=mid_dim,
- out_channels=mid_dim,
- embed_dim=block_out_channels[0],
- add_downsample=False,
- )
- self.mid_blocks.append(mid_block)
+ self.mid_block = get_mid_block(
+ mid_block_type,
+ in_channels=mid_dim,
+ out_channels=mid_dim,
+ embed_dim=block_out_channels[0],
+ num_layers=layers_per_block,
+ add_downsample=always_downsample,
+ )
+
 # up
 reversed_block_out_channels = list(reversed(block_out_channels))
 for i, up_block_type in enumerate(up_block_types):
@@ -184,15 +176,16 @@ def forward(
 down_block_res_samples.append(res_samples[0])
 # 3. mid
- for mid_block in self.mid_blocks:
- sample = mid_block(sample, temb)
+ if self.mid_block:
+ sample = self.mid_block(sample, temb)
 # 4. up
 for up_block in self.up_blocks:
 sample = up_block(hidden_states=sample, res_hidden_states=down_block_res_samples.pop(), temb=temb)
 # 5.
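The mid section of `UNet1DModel` is now a single configurable block rather than a list of blocks. A minimal API sketch, assuming the default planner config exercised by the 1D model tests in this series:

```python
import torch
from diffusers import UNet1DModel

model = UNet1DModel(in_channels=14, out_channels=14, block_out_channels=(32, 128, 256))
sample = torch.randn(4, 14, 16)       # (batch, channels, horizon)
timestep = torch.tensor([10] * 4)
out = model(sample, timestep).sample  # trajectory-shaped denoising output
```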
post-process - sample = self.out_block(sample, temb) + if self.out_block: + sample = self.out_block(sample, temb) if not return_dict: return (sample,) diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index a00372faf7d9..e1a1ac4a8f0c 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -47,7 +47,7 @@ def __init__( if groups_out is None: groups_out = groups - # there will always be at least one resenet + # there will always be at least one resnet resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=temb_channels)] for _ in range(num_layers): @@ -111,7 +111,7 @@ def __init__( if groups_out is None: groups_out = groups - # there will always be at least one resenet + # there will always be at least one resnet resnets = [ResidualTemporalBlock1D(2 * in_channels, out_channels, embed_dim=temb_channels)] for _ in range(num_layers): @@ -174,22 +174,60 @@ class UpBlock1DNoSkip(nn.Module): class MidResTemporalBlock1D(nn.Module): - def __init__(self, in_channels, out_channels, embed_dim, add_downsample): + def __init__( + self, + in_channels, + out_channels, + embed_dim, + num_layers: int = 1, + add_downsample: bool = False, + add_upsample: bool = False, + non_linearity=None, + ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.add_downsample = add_downsample - self.resnet = ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim) + # there will always be at least one resnet + resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim)] + + for _ in range(num_layers): + resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=embed_dim)) + + self.resnets = nn.ModuleList(resnets) + + if non_linearity == "swish": + self.nonlinearity = lambda x: F.silu(x) + elif non_linearity == "mish": + self.nonlinearity = nn.Mish() + elif non_linearity == "silu": + self.nonlinearity = nn.SiLU() + else: + self.nonlinearity = None + + self.upsample = None + if add_downsample: + self.upsample = Downsample1D(out_channels, use_conv=True) + + self.downsample = None if add_downsample: self.downsample = Downsample1D(out_channels, use_conv=True) - else: - self.downsample = nn.Identity() - def forward(self, sample, temb): - sample = self.resnet(sample, temb) - sample = self.downsample(sample) - return sample + if self.upsample and self.downsample: + raise ValueError("Block cannot downsample and upsample") + + def forward(self, hidden_states, temb): + hidden_states = self.resnets[0](hidden_states, temb) + for resnet in self.resnets[1:]: + hidden_states = resnet(hidden_states, temb) + + if self.upsample: + hidden_states = self.upsample(hidden_states) + if self.downsample: + self.downsample = self.downsample(hidden_states) + + return hidden_states class OutConv1DBlock(nn.Module): @@ -203,14 +241,14 @@ def __init__(self, num_groups_out, out_channels, embed_dim, act_fn): self.final_conv1d_act = nn.Mish() self.final_conv1d_2 = nn.Conv1d(embed_dim, out_channels, 1) - def forward(self, sample, t): - sample = self.final_conv1d_1(sample) - sample = rearrange_dims(sample) - sample = self.final_conv1d_gn(sample) - sample = rearrange_dims(sample) - sample = self.final_conv1d_act(sample) - sample = self.final_conv1d_2(sample) - return sample + def forward(self, hidden_states, temb=None): + hidden_states = self.final_conv1d_1(hidden_states) + hidden_states = rearrange_dims(hidden_states) + hidden_states = self.final_conv1d_gn(hidden_states) 
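A quick smoke test of the reworked mid block. Resampling is deliberately left off: the upsample path above is gated on `add_downsample` rather than `add_upsample` (corrected later in this series), and the downsampling branch of `forward` rebinds `self.downsample` to a tensor instead of the hidden states, so only the plain residual path is exercised here:

```python
import torch
from diffusers.models.unet_1d_blocks import MidResTemporalBlock1D

# num_layers=1 gives two stacked ResidualTemporalBlock1D layers; with no
# resampling the (batch, channels, horizon) shape is preserved.
block = MidResTemporalBlock1D(in_channels=256, out_channels=256, embed_dim=32, num_layers=1)
hidden_states = torch.randn(2, 256, 4)
temb = torch.randn(2, 32)  # time embedding, embed_dim wide
assert block(hidden_states, temb).shape == hidden_states.shape
```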
+ hidden_states = rearrange_dims(hidden_states) + hidden_states = self.final_conv1d_act(hidden_states) + hidden_states = self.final_conv1d_2(hidden_states) + return hidden_states class OutValueFunctionBlock(nn.Module): @@ -224,13 +262,13 @@ def __init__(self, fc_dim, embed_dim): ] ) - def forward(self, sample, t): - sample = sample.view(sample.shape[0], -1) - sample = torch.cat((sample, t), dim=-1) + def forward(self, hidden_states, temb): + hidden_states = hidden_states.view(hidden_states.shape[0], -1) + hidden_states = torch.cat((hidden_states, temb), dim=-1) for layer in self.final_block: - sample = layer(sample) + hidden_states = layer(hidden_states) - return sample + return hidden_states def get_down_block(down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample): @@ -260,9 +298,15 @@ def get_up_block(up_block_type, num_layers, in_channels, out_channels, temb_chan raise ValueError(f"{up_block_type} does not exist.") -def get_mid_block(mid_block_type, in_channels, out_channels, embed_dim, add_downsample): +def get_mid_block(mid_block_type, num_layers, in_channels, out_channels, embed_dim, add_downsample): if mid_block_type == "MidResTemporalBlock1D": - return MidResTemporalBlock1D(in_channels, out_channels, embed_dim, add_downsample) + return MidResTemporalBlock1D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + embed_dim=embed_dim, + add_downsample=add_downsample, + ) raise ValueError(f"{mid_block_type} does not exist.") From af26faaf7bde4146779312358668651793b466c7 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 24 Oct 2022 09:54:13 -0700 Subject: [PATCH 074/133] remove imports in src --- src/diffusers/__init__.py | 2 +- src/diffusers/models/__init__.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 7088e560dd66..fa97effaaf0a 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -18,7 +18,7 @@ if is_torch_available(): from .modeling_utils import ModelMixin - from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction, VQModel + from .models import AutoencoderKL, UNet1DModel, UNet2DConditionModel, UNet2DModel, VQModel from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index b771aaac8467..c5d53b2feb4b 100644 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -19,7 +19,6 @@ from .unet_1d import UNet1DModel from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel - from .unet_rl import ValueFunction from .vae import AutoencoderKL, VQModel if is_flax_available(): From 268ebdf4d84ec6b7432aa77e025827c25a628f7b Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 24 Oct 2022 13:17:41 -0400 Subject: [PATCH 075/133] Pipeline cleanup (#947) * valuefunction code * start example scripts * missing imports * bug fixes and placeholder example script * add value function scheduler * load value function from hub and get best actions in example * very close to working example * larger batch size for planning * more tests * merge unet1d changes * wandb for debugging, use newer models * success! 
* turns out we just need more diffusion steps * run on modal * merge and code cleanup * use same api for rl model * fix variance type * wrong normalization function * add tests * style * style and quality * edits based on comments * style and quality * remove unused var * hack unet1d into a value function * add pipeline * fix arg order * add pipeline to core library * community pipeline * fix couple shape bugs * style * Apply suggestions from code review * clean up comments * convert older script to using pipeline and add readme * rename scripts * style, update tests * delete unet rl model file * remove imports in src Co-authored-by: Nathan Lambert --- examples/community/pipeline.py | 21 ++++- examples/community/value_guided_diffuser.py | 13 ++- examples/diffuser/README.md | 16 ++++ .../diffuser/run_diffuser_gen_trajectories.py | 79 +++++++++++++++++ examples/diffuser/run_diffuser_locomotion.py | 83 ++++++++++++++++++ tests/test_models_unet.py | 85 ++++++++++++++++++- 6 files changed, 291 insertions(+), 6 deletions(-) create mode 100644 examples/diffuser/README.md create mode 100644 examples/diffuser/run_diffuser_gen_trajectories.py create mode 100644 examples/diffuser/run_diffuser_locomotion.py diff --git a/examples/community/pipeline.py b/examples/community/pipeline.py index 7e3f2b832b1f..85e359c5c4c9 100644 --- a/examples/community/pipeline.py +++ b/examples/community/pipeline.py @@ -1,3 +1,4 @@ +import numpy as np import torch import tqdm @@ -59,7 +60,6 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) - # 3. call the sample function for _ in range(n_guide_steps): with torch.enable_grad(): x.requires_grad_() @@ -76,24 +76,39 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - # 4. 
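The diff context elides the body of the guide loop. Conceptually it climbs the value function's gradient on the noisy trajectory before each denoising step; a rough sketch of that body (names such as `x`, `timesteps`, and `scale` come from the enclosing `run_diffusion`, and the per-step variance scaling and `t_grad_cutoff` check of the full pipeline are omitted, so this is an approximation rather than the verbatim code):

```python
for _ in range(n_guide_steps):
    with torch.enable_grad():
        x.requires_grad_()
        # value of each noisy trajectory; the model expects (batch, channels, horizon)
        y = self.value_function(x.permute(0, 2, 1), timesteps).sample
        grad = torch.autograd.grad([y.sum()], [x])[0]
    # one ascent step on the learned value
    x = x.detach() + scale * grad
```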
apply conditions to the trajectory + # apply conditions to the trajectory x = self.reset_x0(x, conditions, self.action_dim) x = self.to_torch(x) return x, y def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): + # normalize the observations and create batch dimension obs = self.normalize(obs, "observations") obs = obs[None].repeat(batch_size, axis=0) + conditions = {0: self.to_torch(obs)} shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) + + # generate initial noise and apply our conditions (to make the trajectories start at current state) x1 = torch.randn(shape, device=self.unet.device) x = self.reset_x0(x1, conditions, self.action_dim) x = self.to_torch(x) + + # run the diffusion process x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) + + # sort output trajectories by value sorted_idx = y.argsort(0, descending=True).squeeze() sorted_values = x[sorted_idx] actions = sorted_values[:, :, : self.action_dim] actions = actions.detach().cpu().numpy() denorm_actions = self.de_normalize(actions, key="actions") - denorm_actions = denorm_actions[0, 0] + + # select the action with the highest value + if y is not None: + selected_index = 0 + else: + # if we didn't run value guiding, select a random action + selected_index = np.random.randint(0, batch_size) + denorm_actions = denorm_actions[selected_index, 0] return denorm_actions diff --git a/examples/community/value_guided_diffuser.py b/examples/community/value_guided_diffuser.py index 7e3f2b832b1f..6b28e868eddd 100644 --- a/examples/community/value_guided_diffuser.py +++ b/examples/community/value_guided_diffuser.py @@ -59,7 +59,6 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) - # 3. call the sample function for _ in range(n_guide_steps): with torch.enable_grad(): x.requires_grad_() @@ -76,24 +75,34 @@ def run_diffusion(self, x, conditions, n_guide_steps, scale): prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - # 4. 
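The selection rule added to `pipeline.py` above, isolated: trajectories come back sorted by predicted value, so index 0 holds the best plan, and when no value guidance ran (`y` is `None`) a random batch member is taken instead:

```python
import numpy as np

def select_action(denorm_actions, y, batch_size):
    # denorm_actions: (batch, horizon, action_dim) de-normalized actions,
    # already sorted by value when guidance ran.
    if y is not None:
        selected_index = 0
    else:
        selected_index = np.random.randint(0, batch_size)
    return denorm_actions[selected_index, 0]  # first action of the chosen plan
```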
apply conditions to the trajectory
+ # apply conditions to the trajectory
 x = self.reset_x0(x, conditions, self.action_dim)
 x = self.to_torch(x)
 return x, y
 def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
+ # normalize the observations and create batch dimension
 obs = self.normalize(obs, "observations")
 obs = obs[None].repeat(batch_size, axis=0)
+
 conditions = {0: self.to_torch(obs)}
 shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
+
+ # generate initial noise and apply our conditions (to make the trajectories start at current state)
 x1 = torch.randn(shape, device=self.unet.device)
 x = self.reset_x0(x1, conditions, self.action_dim)
 x = self.to_torch(x)
+
+ # run the diffusion process
 x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
+
+ # sort output trajectories by value
 sorted_idx = y.argsort(0, descending=True).squeeze()
 sorted_values = x[sorted_idx]
 actions = sorted_values[:, :, : self.action_dim]
 actions = actions.detach().cpu().numpy()
 denorm_actions = self.de_normalize(actions, key="actions")
+
+ # select the action with the highest value
 denorm_actions = denorm_actions[0, 0]
 return denorm_actions
diff --git a/examples/diffuser/README.md b/examples/diffuser/README.md
new file mode 100644
index 000000000000..464ccd57af85
--- /dev/null
+++ b/examples/diffuser/README.md
@@ -0,0 +1,16 @@
+# Overview
+
+These examples show how to run [Diffuser](https://arxiv.org/pdf/2205.09991.pdf) in Diffusers. There are two scripts, `run_diffuser_gen_trajectories.py` and `run_diffuser_locomotion.py`.
+
+You will need some RL-specific requirements to run the examples:
+
+```
+pip install -f https://download.pytorch.org/whl/torch_stable.html \
+ free-mujoco-py \
+ einops \
+ gym \
+ protobuf==3.20.1 \
+ git+https://github.com/rail-berkeley/d4rl.git \
+ mediapy \
+ Pillow==9.0.0
+```
diff --git a/examples/diffuser/run_diffuser_gen_trajectories.py b/examples/diffuser/run_diffuser_gen_trajectories.py
new file mode 100644
index 000000000000..f4c86635c652
--- /dev/null
+++ b/examples/diffuser/run_diffuser_gen_trajectories.py
@@ -0,0 +1,79 @@
+import d4rl # noqa
+import gym
+import tqdm
+from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel
+
+
+config = dict(
+ n_samples=64,
+ horizon=32,
+ num_inference_steps=20,
+ n_guide_steps=0,
+ scale_grad_by_std=True,
+ scale=0.1,
+ eta=0.0,
+ t_grad_cutoff=2,
+ device="cpu",
+)
+
+
+def _run():
+ env_name = "hopper-medium-v2"
+ env = gym.make(env_name)
+
+ DEVICE = config["device"]
+
+ scheduler = DDPMScheduler(
+ num_train_timesteps=config["num_inference_steps"],
+ beta_schedule="squaredcos_cap_v2",
+ clip_sample=False,
+ variance_type="fixed_small_log",
+ )
+ network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval()
+ unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval()
+ pipeline = DiffusionPipeline.from_pretrained(
+ "bglick13/hopper-medium-v2-value-function-hor32",
+ value_function=network,
+ unet=unet,
+ scheduler=scheduler,
+ env=env,
+ custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community",
+ )
+
+ env.seed(0)
+ obs = env.reset()
+ total_reward = 0
+ total_score = 0
+ T = 1000
+ rollout = [obs.copy()]
+ try:
+ for t in tqdm.tqdm(range(T)):
+ # Call the policy
+ denorm_actions = pipeline(obs, planning_horizon=32)
+
+ # execute action in environment
+ next_observation, reward, terminal, _ = env.step(denorm_actions)
+ score = env.get_normalized_score(total_reward)
+
# update return + total_reward += reward + total_score += score + print( + f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" + f" {total_score}" + ) + # save observations for rendering + rollout.append(next_observation.copy()) + + obs = next_observation + except KeyboardInterrupt: + pass + + print(f"Total reward: {total_reward}") + + +def run(): + _run() + + +if __name__ == "__main__": + run() diff --git a/examples/diffuser/run_diffuser_locomotion.py b/examples/diffuser/run_diffuser_locomotion.py new file mode 100644 index 000000000000..1b4351095d3b --- /dev/null +++ b/examples/diffuser/run_diffuser_locomotion.py @@ -0,0 +1,83 @@ +import d4rl # noqa +import gym +import tqdm +from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel + + +config = dict( + n_samples=64, + horizon=32, + num_inference_steps=20, + n_guide_steps=2, + scale_grad_by_std=True, + scale=0.1, + eta=0.0, + t_grad_cutoff=2, + device="cpu", +) + + +def _run(): + env_name = "hopper-medium-v2" + env = gym.make(env_name) + + # Cuda settings for colab + # torch.cuda.get_device_name(0) + DEVICE = config["device"] + + # Two generators for different parts of the diffusion loop to work in colab + scheduler = DDPMScheduler( + num_train_timesteps=config["num_inference_steps"], + beta_schedule="squaredcos_cap_v2", + clip_sample=False, + variance_type="fixed_small_log", + ) + + network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() + unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval() + pipeline = DiffusionPipeline.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", + value_function=network, + unet=unet, + scheduler=scheduler, + env=env, + custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", + ) + + env.seed(0) + obs = env.reset() + total_reward = 0 + total_score = 0 + T = 1000 + rollout = [obs.copy()] + try: + for t in tqdm.tqdm(range(T)): + # call the policy + denorm_actions = pipeline(obs, planning_horizon=32) + + # execute action in environment + next_observation, reward, terminal, _ = env.step(denorm_actions) + score = env.get_normalized_score(total_reward) + # update return + total_reward += reward + total_score += score + print( + f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" + f" {total_score}" + ) + # save observations for rendering + rollout.append(next_observation.copy()) + + obs = next_observation + except KeyboardInterrupt: + pass + + print(f"Total reward: {total_reward}") + + +def run(): + _run() + + +if __name__ == "__main__": + run() diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index e1dbdfaa4611..1ff092b3ce78 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -20,7 +20,7 @@ import torch -from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel +from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction from diffusers.utils import floats_tensor, slow, torch_device from .test_modeling_common import ModelTesterMixin @@ -524,3 +524,86 @@ def test_output_pretrained(self): def test_forward_with_norm_groups(self): # Not implemented yet for this UNet pass + + +class UNetRLModelTests(ModelTesterMixin, unittest.TestCase): + model_class = UNet1DModel + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = floats_tensor((batch_size, num_features, 
seq_len)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 14, 16) + + @property + def output_shape(self): + return (4, 14, 1) + + def test_ema_training(self): + pass + + def test_training(self): + pass + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (32, 64, 128, 256), + "in_channels": 14, + "out_channels": 14, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + unet, loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-unet-hor32", output_loading_info=True + ) + value_function, vf_loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + ) + self.assertIsNotNone(unet) + self.assertEqual(len(loading_info["missing_keys"]), 0) + self.assertIsNotNone(value_function) + self.assertEqual(len(vf_loading_info["missing_keys"]), 0) + + unet.to(torch_device) + value_function.to(torch_device) + image = value_function(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + value_function, vf_loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + ) + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + num_features = value_function.in_channels + seq_len = 14 + noise = torch.randn((1, seq_len, num_features)).permute( + 0, 2, 1 + ) # match original, we can update values and remove + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = value_function(noise, time_step).sample + + # fmt: off + expected_output_slice = torch.tensor([207.0272] * seq_len) + # fmt: on + self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) + + def test_forward_with_norm_groups(self): + # Not implemented yet for this UNet + pass From daa05fb66f70f63f6e8b34fecb6f48f80a8f995c Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 24 Oct 2022 10:25:39 -0700 Subject: [PATCH 076/133] Update src/diffusers/models/unet_1d_blocks.py --- src/diffusers/models/unet_1d_blocks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index e1a1ac4a8f0c..1beea2b123ac 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -207,7 +207,7 @@ def __init__( self.nonlinearity = None self.upsample = None - if add_downsample: + if add_upsample: self.upsample = Downsample1D(out_channels, use_conv=True) self.downsample = None From ea5f2310c74a32a43d4ac564ccf1532de7baa970 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 24 Oct 2022 10:27:29 -0700 Subject: [PATCH 077/133] Update tests/test_models_unet.py --- tests/test_models_unet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 1ff092b3ce78..d6578955f295 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -20,7 +20,7 @@ import torch -from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction +from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel from diffusers.utils import floats_tensor, slow, torch_device from .test_modeling_common import ModelTesterMixin From 
84efdac61e81258ff05b4da4daa7c6e3383d2817 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 24 Oct 2022 13:46:26 -0400 Subject: [PATCH 078/133] add specific vf block and update tests --- .../convert_models_diffuser_to_diffusers.py | 1 + src/diffusers/models/unet_1d_blocks.py | 52 ++++++++++++++++++- tests/test_models_unet.py | 16 +++--- 3 files changed, 61 insertions(+), 8 deletions(-) diff --git a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py index b154295e9726..4b4608358c17 100644 --- a/scripts/convert_models_diffuser_to_diffusers.py +++ b/scripts/convert_models_diffuser_to_diffusers.py @@ -49,6 +49,7 @@ def value_function(): down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), up_block_types=(), out_block_type="ValueFunction", + mid_block_type="ValueFunctionMidBlock1D", block_out_channels=(32, 64, 128, 256), layers_per_block=1, always_downsample=True, diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index a00372faf7d9..6cb45c471e7f 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -173,6 +173,26 @@ class UpBlock1DNoSkip(nn.Module): pass +class ValueFunctionMidBlock1D(nn.Module): + def __init__(self, in_channels, out_channels, embed_dim): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.embed_dim = embed_dim + + self.res1 = ResidualTemporalBlock1D(in_channels, in_channels // 2, embed_dim=embed_dim) + self.down1 = Downsample1D(out_channels // 2, use_conv=True) + self.res2 = ResidualTemporalBlock1D(in_channels // 2, in_channels // 4, embed_dim=embed_dim) + self.down2 = Downsample1D(out_channels // 4, use_conv=True) + + def forward(self, x, temb=None): + x = self.res1(x, temb) + x = self.down1(x) + x = self.res2(x, temb) + x = self.down2(x) + return x + + class MidResTemporalBlock1D(nn.Module): def __init__(self, in_channels, out_channels, embed_dim, add_downsample): super().__init__() @@ -181,6 +201,28 @@ def __init__(self, in_channels, out_channels, embed_dim, add_downsample): self.add_downsample = add_downsample self.resnet = ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim) + # there will always be at least one resnet + resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim)] + + for _ in range(num_layers): + resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=embed_dim)) + + self.resnets = nn.ModuleList(resnets) + + if non_linearity == "swish": + self.nonlinearity = lambda x: F.silu(x) + elif non_linearity == "mish": + self.nonlinearity = nn.Mish() + elif non_linearity == "silu": + self.nonlinearity = nn.SiLU() + else: + self.nonlinearity = None + + self.upsample = None + if add_upsample: + self.upsample = Downsample1D(out_channels, use_conv=True) + + self.downsample = None if add_downsample: self.downsample = Downsample1D(out_channels, use_conv=True) else: @@ -262,7 +304,15 @@ def get_up_block(up_block_type, num_layers, in_channels, out_channels, temb_chan def get_mid_block(mid_block_type, in_channels, out_channels, embed_dim, add_downsample): if mid_block_type == "MidResTemporalBlock1D": - return MidResTemporalBlock1D(in_channels, out_channels, embed_dim, add_downsample) + return MidResTemporalBlock1D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + embed_dim=embed_dim, + add_downsample=add_downsample, + ) + elif mid_block_type == 
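A shape sketch for the new `ValueFunctionMidBlock1D`: two residual blocks, each halving the channel count and each followed by a strided `Downsample1D` (hopper-sized channel and horizon numbers assumed):

```python
import torch
from diffusers.models.unet_1d_blocks import ValueFunctionMidBlock1D

block = ValueFunctionMidBlock1D(in_channels=256, out_channels=256, embed_dim=32)
x = torch.randn(1, 256, 2)   # (batch, channels, horizon) leaving the down path
temb = torch.randn(1, 32)
# 256 channels at horizon 2 collapse to 256 // 4 channels at horizon 1
assert block(x, temb).shape == (1, 64, 1)
```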
"ValueFunctionMidBlock1D": + return ValueFunctionMidBlock1D(in_channels=in_channels, out_channels=out_channels, embed_dim=embed_dim) raise ValueError(f"{mid_block_type} does not exist.") diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 1ff092b3ce78..80c57b90f6f7 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -17,6 +17,7 @@ import math import tracemalloc import unittest +from regex import subf import torch @@ -489,7 +490,7 @@ def prepare_init_args_and_inputs_for_common(self): def test_from_pretrained_hub(self): model, loading_info = UNet1DModel.from_pretrained( - "fusing/ddpm-unet-rl-hopper-hor128", output_loading_info=True + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" ) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) @@ -500,7 +501,7 @@ def test_from_pretrained_hub(self): assert image is not None, "Make sure output is not None" def test_output_pretrained(self): - model = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") + model = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet") torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) @@ -517,7 +518,8 @@ def test_output_pretrained(self): output_slice = output[0, -3:, -3:].flatten() # fmt: off - expected_output_slice = torch.tensor([-0.2714, 0.1042, -0.0794, -0.2820, 0.0803, -0.0811, -0.2345, 0.0580, -0.0584]) + expected_output_slice = torch.tensor([-2.137172 , 1.1426016 , 0.3688687 , -0.766922 , 0.7303146 , + 0.11038864, -0.4760633 , 0.13270172, 0.02591348]) # fmt: on self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) @@ -565,10 +567,10 @@ def prepare_init_args_and_inputs_for_common(self): def test_from_pretrained_hub(self): unet, loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-unet-hor32", output_loading_info=True + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" ) value_function, vf_loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" ) self.assertIsNotNone(unet) self.assertEqual(len(loading_info["missing_keys"]), 0) @@ -583,7 +585,7 @@ def test_from_pretrained_hub(self): def test_output_pretrained(self): value_function, vf_loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" ) torch.manual_seed(0) if torch.cuda.is_available(): @@ -600,7 +602,7 @@ def test_output_pretrained(self): output = value_function(noise, time_step).sample # fmt: off - expected_output_slice = torch.tensor([207.0272] * seq_len) + expected_output_slice = torch.tensor([165.25] * seq_len) # fmt: on self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) From 9faf55a5a5aae30e265a542f002daa5048a53b99 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 24 Oct 2022 13:50:21 -0400 Subject: [PATCH 079/133] style --- tests/test_models_unet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 80c57b90f6f7..02fa7b744ba9 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -17,12 +17,12 @@ import math import 
tracemalloc import unittest -from regex import subf import torch from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction from diffusers.utils import floats_tensor, slow, torch_device +from regex import subf from .test_modeling_common import ModelTesterMixin From 24bb52a0557f05f99749597f1fa1f9882aff996b Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 24 Oct 2022 10:52:14 -0700 Subject: [PATCH 080/133] Update tests/test_models_unet.py --- tests/test_models_unet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 02fa7b744ba9..03ed28f5d442 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -20,7 +20,7 @@ import torch -from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction +from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel from diffusers.utils import floats_tensor, slow, torch_device from regex import subf From 4f7a3a43e49ddeefa392ff7e84e9480f89f57891 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 24 Oct 2022 13:52:41 -0400 Subject: [PATCH 081/133] RL Cleanup v2 (#965) * valuefunction code * start example scripts * missing imports * bug fixes and placeholder example script * add value function scheduler * load value function from hub and get best actions in example * very close to working example * larger batch size for planning * more tests * merge unet1d changes * wandb for debugging, use newer models * success! * turns out we just need more diffusion steps * run on modal * merge and code cleanup * use same api for rl model * fix variance type * wrong normalization function * add tests * style * style and quality * edits based on comments * style and quality * remove unused var * hack unet1d into a value function * add pipeline * fix arg order * add pipeline to core library * community pipeline * fix couple shape bugs * style * Apply suggestions from code review * clean up comments * convert older script to using pipeline and add readme * rename scripts * style, update tests * delete unet rl model file * remove imports in src * add specific vf block and update tests * style * Update tests/test_models_unet.py Co-authored-by: Nathan Lambert --- .../convert_models_diffuser_to_diffusers.py | 1 + src/diffusers/models/unet_1d_blocks.py | 22 +++++++++++++++++++ tests/test_models_unet.py | 16 ++++++++------ 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py index b154295e9726..4b4608358c17 100644 --- a/scripts/convert_models_diffuser_to_diffusers.py +++ b/scripts/convert_models_diffuser_to_diffusers.py @@ -49,6 +49,7 @@ def value_function(): down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), up_block_types=(), out_block_type="ValueFunction", + mid_block_type="ValueFunctionMidBlock1D", block_out_channels=(32, 64, 128, 256), layers_per_block=1, always_downsample=True, diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 1beea2b123ac..0788cc1e76e5 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -173,6 +173,26 @@ class UpBlock1DNoSkip(nn.Module): pass +class ValueFunctionMidBlock1D(nn.Module): + def __init__(self, in_channels, out_channels, embed_dim): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.embed_dim 
= embed_dim + + self.res1 = ResidualTemporalBlock1D(in_channels, in_channels // 2, embed_dim=embed_dim) + self.down1 = Downsample1D(out_channels // 2, use_conv=True) + self.res2 = ResidualTemporalBlock1D(in_channels // 2, in_channels // 4, embed_dim=embed_dim) + self.down2 = Downsample1D(out_channels // 4, use_conv=True) + + def forward(self, x, temb=None): + x = self.res1(x, temb) + x = self.down1(x) + x = self.res2(x, temb) + x = self.down2(x) + return x + + class MidResTemporalBlock1D(nn.Module): def __init__( self, @@ -307,6 +327,8 @@ def get_mid_block(mid_block_type, num_layers, in_channels, out_channels, embed_d embed_dim=embed_dim, add_downsample=add_downsample, ) + elif mid_block_type == "ValueFunctionMidBlock1D": + return ValueFunctionMidBlock1D(in_channels=in_channels, out_channels=out_channels, embed_dim=embed_dim) raise ValueError(f"{mid_block_type} does not exist.") diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index d6578955f295..03ed28f5d442 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -22,6 +22,7 @@ from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel from diffusers.utils import floats_tensor, slow, torch_device +from regex import subf from .test_modeling_common import ModelTesterMixin @@ -489,7 +490,7 @@ def prepare_init_args_and_inputs_for_common(self): def test_from_pretrained_hub(self): model, loading_info = UNet1DModel.from_pretrained( - "fusing/ddpm-unet-rl-hopper-hor128", output_loading_info=True + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" ) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) @@ -500,7 +501,7 @@ def test_from_pretrained_hub(self): assert image is not None, "Make sure output is not None" def test_output_pretrained(self): - model = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128") + model = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet") torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) @@ -517,7 +518,8 @@ def test_output_pretrained(self): output_slice = output[0, -3:, -3:].flatten() # fmt: off - expected_output_slice = torch.tensor([-0.2714, 0.1042, -0.0794, -0.2820, 0.0803, -0.0811, -0.2345, 0.0580, -0.0584]) + expected_output_slice = torch.tensor([-2.137172 , 1.1426016 , 0.3688687 , -0.766922 , 0.7303146 , + 0.11038864, -0.4760633 , 0.13270172, 0.02591348]) # fmt: on self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) @@ -565,10 +567,10 @@ def prepare_init_args_and_inputs_for_common(self): def test_from_pretrained_hub(self): unet, loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-unet-hor32", output_loading_info=True + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" ) value_function, vf_loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" ) self.assertIsNotNone(unet) self.assertEqual(len(loading_info["missing_keys"]), 0) @@ -583,7 +585,7 @@ def test_from_pretrained_hub(self): def test_output_pretrained(self): value_function, vf_loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, 
subfolder="value_function" ) torch.manual_seed(0) if torch.cuda.is_available(): @@ -600,7 +602,7 @@ def test_output_pretrained(self): output = value_function(noise, time_step).sample # fmt: off - expected_output_slice = torch.tensor([207.0272] * seq_len) + expected_output_slice = torch.tensor([165.25] * seq_len) # fmt: on self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) From d90b8b1bd60fbf5c858746849bde62e95f428ac2 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 24 Oct 2022 10:56:04 -0700 Subject: [PATCH 082/133] fix quality in tests --- tests/test_models_unet.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 03ed28f5d442..8608eec166a4 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -518,8 +518,8 @@ def test_output_pretrained(self): output_slice = output[0, -3:, -3:].flatten() # fmt: off - expected_output_slice = torch.tensor([-2.137172 , 1.1426016 , 0.3688687 , -0.766922 , 0.7303146 , - 0.11038864, -0.4760633 , 0.13270172, 0.02591348]) + expected_output_slice = torch.tensor([-2.137172, 1.1426016, 0.3688687, -0.766922, 0.7303146, + 0.11038864, -0.4760633, 0.13270172, 0.02591348]) # fmt: on self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) From 35f03befeba5a3affbba9dadf12b6de1fda42f30 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 24 Oct 2022 13:57:54 -0400 Subject: [PATCH 083/133] quality --- tests/test_models_unet.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/test_models_unet.py b/tests/test_models_unet.py index 02fa7b744ba9..65de68bbe547 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet.py @@ -20,9 +20,8 @@ import torch -from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel, ValueFunction +from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel from diffusers.utils import floats_tensor, slow, torch_device -from regex import subf from .test_modeling_common import ModelTesterMixin @@ -518,8 +517,7 @@ def test_output_pretrained(self): output_slice = output[0, -3:, -3:].flatten() # fmt: off - expected_output_slice = torch.tensor([-2.137172 , 1.1426016 , 0.3688687 , -0.766922 , 0.7303146 , - 0.11038864, -0.4760633 , 0.13270172, 0.02591348]) + expected_output_slice = torch.tensor([-2.137172, 1.1426016, 0.3688687, -0.766922, 0.7303146, 0.11038864, -0.4760633, 0.13270172, 0.02591348]) # fmt: on self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) From ad8b6cf1123ca6b5bebe7ab4af866ea54d00539a Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 24 Oct 2022 11:00:29 -0700 Subject: [PATCH 084/133] fix quality style, split test file --- tests/test_models_unet_1d.py | 185 ++++++++++++++++++ ..._models_unet.py => test_models_unet_2d.py} | 163 +-------------- 2 files changed, 186 insertions(+), 162 deletions(-) create mode 100644 tests/test_models_unet_1d.py rename tests/{test_models_unet.py => test_models_unet_2d.py} (74%) diff --git a/tests/test_models_unet_1d.py b/tests/test_models_unet_1d.py new file mode 100644 index 000000000000..f50bb8785eae --- /dev/null +++ b/tests/test_models_unet_1d.py @@ -0,0 +1,185 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import UNet1DModel +from diffusers.utils import floats_tensor, torch_device + +from .test_modeling_common import ModelTesterMixin + + +torch.backends.cuda.matmul.allow_tf32 = False + + +class UNet1DModelTests(ModelTesterMixin, unittest.TestCase): + model_class = UNet1DModel + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 14, 16) + + @property + def output_shape(self): + return (4, 14, 16) + + def test_ema_training(self): + pass + + def test_training(self): + pass + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (32, 128, 256), + "in_channels": 14, + "out_channels": 14, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet") + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + num_features = model.in_channels + seq_len = 16 + noise = torch.randn((1, seq_len, num_features)).permute( + 0, 2, 1 + ) # match original, we can update values and remove + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = model(noise, time_step).sample.permute(0, 2, 1) + + output_slice = output[0, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-2.137172, 1.1426016, 0.3688687, -0.766922, 0.7303146, 0.11038864, -0.4760633, 0.13270172, 0.02591348]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + + def test_forward_with_norm_groups(self): + # Not implemented yet for this UNet + pass + + +class UNetRLModelTests(ModelTesterMixin, unittest.TestCase): + model_class = UNet1DModel + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 14, 16) + + @property + def output_shape(self): + return (4, 14, 1) + + def test_ema_training(self): + pass + + def test_training(self): + pass + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (32, 64, 128, 256), + "in_channels": 14, + "out_channels": 14, + } + inputs_dict = self.dummy_input + 
return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + unet, loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" + ) + value_function, vf_loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" + ) + self.assertIsNotNone(unet) + self.assertEqual(len(loading_info["missing_keys"]), 0) + self.assertIsNotNone(value_function) + self.assertEqual(len(vf_loading_info["missing_keys"]), 0) + + unet.to(torch_device) + value_function.to(torch_device) + image = value_function(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + value_function, vf_loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" + ) + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + + num_features = value_function.in_channels + seq_len = 14 + noise = torch.randn((1, seq_len, num_features)).permute( + 0, 2, 1 + ) # match original, we can update values and remove + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = value_function(noise, time_step).sample + + # fmt: off + expected_output_slice = torch.tensor([165.25] * seq_len) + # fmt: on + self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) + + def test_forward_with_norm_groups(self): + # Not implemented yet for this UNet + pass diff --git a/tests/test_models_unet.py b/tests/test_models_unet_2d.py similarity index 74% rename from tests/test_models_unet.py rename to tests/test_models_unet_2d.py index 8608eec166a4..b2f16aef5825 100644 --- a/tests/test_models_unet.py +++ b/tests/test_models_unet_2d.py @@ -20,9 +20,8 @@ import torch -from diffusers import UNet1DModel, UNet2DConditionModel, UNet2DModel +from diffusers import UNet2DConditionModel, UNet2DModel from diffusers.utils import floats_tensor, slow, torch_device -from regex import subf from .test_modeling_common import ModelTesterMixin @@ -449,163 +448,3 @@ def test_output_pretrained_ve_large(self): def test_forward_with_norm_groups(self): # not required for this model pass - - -class UNet1DModelTests(ModelTesterMixin, unittest.TestCase): - model_class = UNet1DModel - - @property - def dummy_input(self): - batch_size = 4 - num_features = 14 - seq_len = 16 - - noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) - time_step = torch.tensor([10] * batch_size).to(torch_device) - - return {"sample": noise, "timestep": time_step} - - @property - def input_shape(self): - return (4, 14, 16) - - @property - def output_shape(self): - return (4, 14, 16) - - def test_ema_training(self): - pass - - def test_training(self): - pass - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "block_out_channels": (32, 128, 256), - "in_channels": 14, - "out_channels": 14, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" - ) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): 
- model = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet") - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - num_features = model.in_channels - seq_len = 16 - noise = torch.randn((1, seq_len, num_features)).permute( - 0, 2, 1 - ) # match original, we can update values and remove - time_step = torch.full((num_features,), 0) - - with torch.no_grad(): - output = model(noise, time_step).sample.permute(0, 2, 1) - - output_slice = output[0, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-2.137172, 1.1426016, 0.3688687, -0.766922, 0.7303146, - 0.11038864, -0.4760633, 0.13270172, 0.02591348]) - # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) - - def test_forward_with_norm_groups(self): - # Not implemented yet for this UNet - pass - - -class UNetRLModelTests(ModelTesterMixin, unittest.TestCase): - model_class = UNet1DModel - - @property - def dummy_input(self): - batch_size = 4 - num_features = 14 - seq_len = 16 - - noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) - time_step = torch.tensor([10] * batch_size).to(torch_device) - - return {"sample": noise, "timestep": time_step} - - @property - def input_shape(self): - return (4, 14, 16) - - @property - def output_shape(self): - return (4, 14, 1) - - def test_ema_training(self): - pass - - def test_training(self): - pass - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "block_out_channels": (32, 64, 128, 256), - "in_channels": 14, - "out_channels": 14, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - unet, loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" - ) - value_function, vf_loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" - ) - self.assertIsNotNone(unet) - self.assertEqual(len(loading_info["missing_keys"]), 0) - self.assertIsNotNone(value_function) - self.assertEqual(len(vf_loading_info["missing_keys"]), 0) - - unet.to(torch_device) - value_function.to(torch_device) - image = value_function(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - value_function, vf_loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" - ) - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - num_features = value_function.in_channels - seq_len = 14 - noise = torch.randn((1, seq_len, num_features)).permute( - 0, 2, 1 - ) # match original, we can update values and remove - time_step = torch.full((num_features,), 0) - - with torch.no_grad(): - output = value_function(noise, time_step).sample - - # fmt: off - expected_output_slice = torch.tensor([165.25] * seq_len) - # fmt: on - self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) - - def test_forward_with_norm_groups(self): - # Not implemented yet for this UNet - pass From 99b2c815216860ca0be8752caaad0a9f17eb75df Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 24 Oct 2022 11:37:00 -0700 Subject: [PATCH 085/133] fix checks / tests --- src/diffusers/utils/dummy_pt_objects.py | 15 +++++++++++ tests/test_models_unet_1d.py | 33 
+++++++++++++++++++------ 2 files changed, 40 insertions(+), 8 deletions(-) diff --git a/src/diffusers/utils/dummy_pt_objects.py b/src/diffusers/utils/dummy_pt_objects.py index ee748f5b1d6a..ed3750e29991 100644 --- a/src/diffusers/utils/dummy_pt_objects.py +++ b/src/diffusers/utils/dummy_pt_objects.py @@ -34,6 +34,21 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) +class UNet1DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + class UNet2DConditionModel(metaclass=DummyObject): _backends = ["torch"] diff --git a/tests/test_models_unet_1d.py b/tests/test_models_unet_1d.py index f50bb8785eae..2ff9315e0bf5 100644 --- a/tests/test_models_unet_1d.py +++ b/tests/test_models_unet_1d.py @@ -124,6 +124,23 @@ def input_shape(self): def output_shape(self): return (4, 14, 1) + def test_output(self): + # UNetRL is a value-function is different output shape + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = torch.Size((inputs_dict["sample"].shape[0], 1)) + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + def test_ema_training(self): pass @@ -132,26 +149,26 @@ def test_training(self): def prepare_init_args_and_inputs_for_common(self): init_dict = { - "block_out_channels": (32, 64, 128, 256), "in_channels": 14, - "out_channels": 14, + "out_channels": 1, + "down_block_types": ["DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"], + "up_block_types": [], + "out_block_type": "ValueFunction", + "mid_block_type": "ValueFunctionMidBlock1D", + "block_out_channels": [32, 64, 128, 256], + "layers_per_block": 1, + "always_downsample": True, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): - unet, loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" - ) value_function, vf_loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" ) - self.assertIsNotNone(unet) - self.assertEqual(len(loading_info["missing_keys"]), 0) self.assertIsNotNone(value_function) self.assertEqual(len(vf_loading_info["missing_keys"]), 0) - unet.to(torch_device) value_function.to(torch_device) image = value_function(**self.dummy_input) From eceafd5c907bda4f07f1969fa352e107d681d6bc Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 25 Oct 2022 18:18:42 -0400 Subject: [PATCH 086/133] placeholder script --- .../community/progressive_distillation.py | 0 examples/progressive_distillation/train.py | 538 ++++++++++++++++++ 2 files changed, 538 insertions(+) create mode 100644 examples/community/progressive_distillation.py create mode 100644 examples/progressive_distillation/train.py diff --git a/examples/community/progressive_distillation.py b/examples/community/progressive_distillation.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git 
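With the dedicated mid and out blocks wired in, the value function is purely a `UNet1DModel` configuration. Spelled out with the values from `prepare_init_args_and_inputs_for_common` above: a batch of noisy trajectories goes in, one scalar per trajectory comes out:

```python
import torch
from diffusers import UNet1DModel

value_function = UNet1DModel(
    in_channels=14,
    out_channels=1,
    down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
    up_block_types=(),
    out_block_type="ValueFunction",
    mid_block_type="ValueFunctionMidBlock1D",
    block_out_channels=(32, 64, 128, 256),
    layers_per_block=1,
    always_downsample=True,
)
sample = torch.randn(4, 14, 16)       # (batch, obs_dim + act_dim, horizon)
timestep = torch.tensor([10] * 4)
assert value_function(sample, timestep).sample.shape == (4, 1)
```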
a/examples/progressive_distillation/train.py b/examples/progressive_distillation/train.py new file mode 100644 index 000000000000..a49e45c21a8b --- /dev/null +++ b/examples/progressive_distillation/train.py @@ -0,0 +1,538 @@ +import argparse +import logging +import math +import os +import random +from pathlib import Path +from re import L +from typing import Iterable, Optional + +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint + +from accelerate import Accelerator +from accelerate.logging import get_logger +from accelerate.utils import set_seed +from datasets import load_dataset +from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker +from huggingface_hub import HfFolder, Repository, whoami +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer + +logger = get_logger(__name__) + + +def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): + if token is None: + token = HfFolder.get_token() + if organization is None: + username = whoami(token)["name"] + return f"{username}/{model_id}" + else: + return f"{organization}/{model_id}" + + +def parse_args(): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--pretrained_model_name_or_path", + type=str, + default=None, + required=True, + help="Path to pretrained model or model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help=( + "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," + " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," + " or to a folder containing files that 🤗 Datasets can understand." + ), + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The config of the Dataset, leave as None if there's only one config.", + ) + parser.add_argument( + "--train_data_dir", + type=str, + default=None, + help=( + "A folder containing the training data. Folder contents must follow the structure described in" + " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" + " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." + ), + ) + parser.add_argument( + "--image_column", type=str, default="image", help="The column of the dataset containing an image." + ) + parser.add_argument( + "--caption_column", + type=str, + default="text", + help="The column of the dataset containing a caption or a list of captions.", + ) + parser.add_argument( + "--max_train_samples", + type=int, + default=None, + help=( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." 
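A sketch of how these dataset arguments are typically resolved in the text-to-image training scripts this placeholder borrows from; the training loop itself is not implemented yet, so this is an assumption about where the script is headed rather than code it contains:

```python
from datasets import load_dataset

def resolve_dataset(args):
    # Hub dataset if a name is given, otherwise a local imagefolder layout.
    if args.dataset_name is not None:
        return load_dataset(args.dataset_name, args.dataset_config_name)
    return load_dataset("imagefolder", data_dir=args.train_data_dir)
```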
+ ), + ) + parser.add_argument( + "--output_dir", + type=str, + default="sd-model-distilled", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="The directory where the downloaded models and datasets will be stored.", + ) + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--resolution", + type=int, + default=512, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--center_crop", + action="store_true", + help="Whether to center crop images before resizing to resolution (if not set, random crop will be used)", + ) + parser.add_argument( + "--random_flip", + action="store_true", + help="whether to randomly flip images horizontally", + ) + parser.add_argument( + "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument("--num_train_epochs", type=int, default=100) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-4, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument( + "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
+ ) + parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." + ), + ) + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose" + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." + "and an Nvidia Ampere GPU." + ), + ) + parser.add_argument( + "--report_to", + type=str, + default="tensorboard", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' + ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' + "Only applicable when `--with_tracking` is passed." 
+ ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + + args = parser.parse_args() + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != args.local_rank: + args.local_rank = env_local_rank + + # Sanity checks + if args.dataset_name is None and args.train_data_dir is None: + raise ValueError("Need either a dataset name or a training folder.") + + return args + + +repo_name = "CompVis/stable-diffusion-v1-4" + +dataset_name_mapping = { + "lambdalabs/pokemon-blip-captions": ("image", "text"), +} + + +def main(): + args = parse_args() + logging_dir = os.path.join(args.output_dir, args.logging_dir) + dataset_name_mapping = { + "lambdalabs/pokemon-blip-captions": ("image", "text"), + } + + dataset = load_dataset(args.dataset_name) + tokenizer = CLIPTokenizer.from_pretrained(repo_name, subfolder="tokenizer") + text_encoder = CLIPTextModel.from_pretrained(repo_name, subfolder="text_encoder") + vae = AutoencoderKL.from_pretrained(repo_name, subfolder="vae") + unet = UNet2DConditionModel.from_pretrained(repo_name, subfolder="unet") + student = UNet2DConditionModel.from_pretrained(repo_name, subfolder="unet") + scheduler = DDPMScheduler.from_pretrained(repo_name, subfolder="scheduler") + student.load_state_dict(unet.state_dict()) + + # Freeze vae and text_encoder + vae.requires_grad_(False) + text_encoder.requires_grad_(False) + + if args.gradient_checkpointing: + unet.enable_gradient_checkpointing() + + if args.scale_lr: + args.learning_rate = ( + args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes + ) + + # Initialize the optimizer + if args.use_8bit_adam: + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError( + "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" + ) + + optimizer_cls = bnb.optim.AdamW8bit + else: + optimizer_cls = torch.optim.AdamW + + optimizer = optimizer_cls( + student.parameters(), + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, + ) + + accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=args.report_to, + logging_dir=logging_dir, + ) + + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + + # Handle the repository creation + if accelerator.is_main_process: + if args.push_to_hub: + if args.hub_model_id is None: + repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) + else: + repo_name = args.hub_model_id + repo = Repository(args.output_dir, clone_from=repo_name) + + with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: + if "step_*" not in gitignore: + gitignore.write("step_*\n") + if "epoch_*" not in gitignore: + gitignore.write("epoch_*\n") + elif args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. 
+ dataset = load_dataset( + args.dataset_name, + args.dataset_config_name, + cache_dir=args.cache_dir, + ) + else: + data_files = {} + if args.train_data_dir is not None: + data_files["train"] = os.path.join(args.train_data_dir, "**") + dataset = load_dataset( + "imagefolder", + data_files=data_files, + cache_dir=args.cache_dir, + ) + # See more about loading custom images at + # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + column_names = dataset["train"].column_names + + # Get the column names for input/target. + dataset_columns = dataset_name_mapping.get(args.dataset_name, None) + if args.image_column is None: + image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] + else: + image_column = args.image_column + if image_column not in column_names: + raise ValueError( + f"`--image_column` value '{args.image_column}' needs to be one of: {', '.join(column_names)}" + ) + if args.caption_column is None: + caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] + else: + caption_column = args.caption_column + if caption_column not in column_names: + raise ValueError( + f"`--caption_column` value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" + ) + + # Preprocessing the datasets. + # We need to tokenize input captions and transform the images. + def tokenize_captions(examples, is_train=True): + captions = [] + for caption in examples[caption_column]: + if isinstance(caption, str): + captions.append(caption) + elif isinstance(caption, (list, np.ndarray)): + # take a random caption if there are multiple + captions.append(random.choice(caption) if is_train else caption[0]) + else: + raise ValueError( + f"Caption column `{caption_column}` should contain either strings or lists of strings." 
+ ) + inputs = tokenizer(captions, max_length=tokenizer.model_max_length, padding="do_not_pad", truncation=True) + input_ids = inputs.input_ids + return input_ids + + train_transforms = transforms.Compose( + [ + transforms.Resize((args.resolution, args.resolution), interpolation=transforms.InterpolationMode.BILINEAR), + transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), + transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ] + ) + + def preprocess_train(examples): + images = [image.convert("RGB") for image in examples[image_column]] + examples["pixel_values"] = [train_transforms(image) for image in images] + examples["input_ids"] = tokenize_captions(examples) + + return examples + + with accelerator.main_process_first(): + if args.max_train_samples is not None: + dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) + # Set the training transforms + train_dataset = dataset["train"].with_transform(preprocess_train) + + def collate_fn(examples): + pixel_values = torch.stack([example["pixel_values"] for example in examples]) + pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() + input_ids = [example["input_ids"] for example in examples] + padded_tokens = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt") + return { + "pixel_values": pixel_values, + "input_ids": padded_tokens.input_ids, + "attention_mask": padded_tokens.attention_mask, + } + + train_dataloader = torch.utils.data.DataLoader( + train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size + ) + + # Scheduler and math around the number of training steps. + overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + lr_scheduler = get_scheduler( + args.lr_scheduler, + optimizer=optimizer, + num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + ) + + unet, student, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( + unet, student, optimizer, train_dataloader, lr_scheduler + ) + weight_dtype = torch.float32 + if args.mixed_precision == "fp16": + weight_dtype = torch.float16 + elif args.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + + # Move text_encoder and vae to gpu. + # For mixed precision training we cast the text_encoder and vae weights to half-precision + # as these models are only used for inference, keeping weights in full precision is not required. + text_encoder.to(accelerator.device, dtype=weight_dtype) + vae.to(accelerator.device, dtype=weight_dtype) + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # We need to initialize the trackers we use, and also store our configuration. 
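+ # (`init_trackers` below also records `vars(args)`, so the full hyperparameter configuration is stored with the run's logs.)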
+ # The trackers initialize automatically on the main process. + if accelerator.is_main_process: + accelerator.init_trackers("text2image-fine-tune", config=vars(args)) + + # Train! + total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + + # Only show the progress bar once on each machine. + progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) + progress_bar.set_description("Steps") + global_step = 0 + N = 1000 + for epoch in range(args.num_train_epochs): + unet.load_state_dict(student.state_dict()) + unet.eval() + student.train() + train_loss = 0.0 + N = N // 2 + scheduler.set_timesteps(N) + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(student): + # Convert images to latent space + latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() + latents = latents * 0.18215 + + # Sample noise that we'll add to the latents + noise = torch.randn_like(latents) + bsz = latents.shape[0] + # Sample a random timestep for each image + timesteps = torch.randint(0, scheduler.num_train_timesteps, (bsz,), device=latents.device) + timesteps = timesteps.long() + # Get the text embedding for conditioning + encoder_hidden_states = text_encoder(batch["input_ids"])[0] + original_noisy_latents = scheduler.add_noise(latents, noise, timesteps) + noisy_latents = original_noisy_latents.clone() + # Calculate a target for the student by running 2 steps of the diffusion process with the teacher + for target_calc_step in range(2): + # Predict the noise residual with the teacher + noise_pred = unet(noisy_latents, timesteps - target_calc_step, encoder_hidden_states).sample + noisy_latents = scheduler.step(noise_pred, timesteps - target_calc_step, noisy_latents).prev_sample + + # Z_t-2 - ((\sigma_t-2 / \sigma_t-1) * Z_t) + student_target_numerator = noisy_latents - ( + (scheduler.get_variance(timesteps - 2) / scheduler.get_variance(timesteps - 1)) + * original_noisy_latents + ) + # \alpha_t-2 - ((\sigma_t-2 / \sigma_t-1) * \alpha_t) + student_target_denominator = scheduler.alphas_cumprod[timesteps - 2] - ( + (scheduler.get_variance(timesteps - 2) / scheduler.get_variance(timesteps - 1)) + * scheduler.alphas_cumprod[timesteps - 1] + ) + student_target = student_target_numerator / student_target_denominator + student_noise_pred = student(original_noisy_latents, timesteps, encoder_hidden_states).sample + loss = F.mse_loss(student_noise_pred.float(), student_target.float(), reduction="mean") + + # Gather the losses across all processes for logging (if we use distributed training). 
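+ # `accelerator.gather` collects the per-device copies of the loss, so the value logged below reflects every process rather than just the local batch.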
+ avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() + train_loss += avg_loss.item() / args.gradient_accumulation_steps + + # Backpropagate + accelerator.backward(loss) + if accelerator.sync_gradients: + accelerator.clip_grad_norm_(student.parameters(), args.max_grad_norm) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + global_step += 1 + accelerator.log({"train_loss": train_loss}, step=global_step) + train_loss = 0.0 + + logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} + progress_bar.set_postfix(**logs) + + if global_step >= args.max_train_steps: + break From de4b6e4672cbadd321351f191b6fa4219da361c1 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 25 Oct 2022 15:49:50 -0700 Subject: [PATCH 087/133] make timesteps closer to main --- src/diffusers/models/embeddings.py | 4 +-- src/diffusers/models/unet_1d.py | 56 +++++++++++++++++++++++------- 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index 7d2e1b677a9f..cbf7ce31bded 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -62,10 +62,10 @@ def get_timestep_embedding( class TimestepEmbedding(nn.Module): - def __init__(self, channel: int, time_embed_dim: int, act_fn: str = "silu", out_dim: int = None): + def __init__(self, in_channels: int, time_embed_dim: int, act_fn: str = "silu", out_dim: int = None): super().__init__() - self.linear_1 = nn.Linear(channel, time_embed_dim) + self.linear_1 = nn.Linear(in_channels, time_embed_dim) self.act = None if act_fn == "silu": self.act = nn.SiLU() diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index 8f74926da505..bcc0b4636e14 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass -from typing import Tuple, Union +from typing import Optional, Tuple, Union import torch import torch.nn as nn @@ -22,7 +22,7 @@ from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from ..utils import BaseOutput -from .embeddings import TimestepEmbedding, Timesteps +from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps @dataclass @@ -44,11 +44,21 @@ class UNet1DModel(ModelMixin, ConfigMixin): implements for all the model (such as downloading or saving, etc.) Parameters: - in_channels: - out_channels: - down_block_types: - up_block_types: - block_out_channels: + sample_size (`int`, *optionl*): Default length of sample. Should be adaptable at runtime. + in_channels (`int`, *optional*, defaults to 2): Number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 2): Number of channels in the output. + time_embedding_type (`str`, *optional*, defaults to `"fourier"`): Type of time embedding to use. + freq_shift (`int`, *optional*, defaults to 0): Frequency shift for fourier time embedding. + flip_sin_to_cos (`bool`, *optional*, defaults to : + obj:`False`): Whether to flip sin to cos for fourier time embedding. + down_block_types (`Tuple[str]`, *optional*, defaults to : + obj:`("DownBlock1D", "DownBlock1DNoSkip", "AttnDownBlock1D")`): Tuple of downsample block types. 
+ up_block_types (`Tuple[str]`, *optional*, defaults to : + obj:`("UpBlock1D", "UpBlock1DNoSkip", "AttnUpBlock1D")`): Tuple of upsample block types. + block_out_channels (`Tuple[int]`, *optional*, defaults to : + obj:`(32, 32, 64)`): Tuple of block output channels. + mid_block_type: + out_block_type: act_fn: norm_num_groups: """ @@ -56,8 +66,15 @@ class UNet1DModel(ModelMixin, ConfigMixin): @register_to_config def __init__( self, + sample_size: int = 65536, + sample_rate: Optional[int] = None, in_channels: int = 14, out_channels: int = 14, + extra_in_channels: int = 0, + time_embedding_type: str = "positional", + flip_sin_to_cos: bool = False, + use_timestep_embedding: bool = True, + downscale_freq_shift: float = 1.0, down_block_types: Tuple[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), up_block_types: Tuple[str] = ("UpResnetBlock1D", "UpResnetBlock1D"), mid_block_type: Tuple[str] = "MidResTemporalBlock1D", @@ -70,13 +87,28 @@ def __init__( ): super().__init__() - time_embed_dim = block_out_channels[0] * 4 + self.sample_size = sample_size # time - self.time_proj = Timesteps(num_channels=block_out_channels[0], flip_sin_to_cos=False, downscale_freq_shift=1) - self.time_mlp = TimestepEmbedding( - channel=block_out_channels[0], time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0] - ) + if time_embedding_type == "fourier": + self.time_proj = GaussianFourierProjection( + embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos + ) + timestep_input_dim = 2 * block_out_channels[0] + elif time_embedding_type == "positional": + self.time_proj = Timesteps( + block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=downscale_freq_shift + ) + timestep_input_dim = block_out_channels[0] + + if use_timestep_embedding: + time_embed_dim = block_out_channels[0] * 4 + self.time_mlp = TimestepEmbedding( + in_channels=timestep_input_dim, + time_embed_dim=time_embed_dim, + act_fn=act_fn, + out_dim=block_out_channels[0], + ) self.down_blocks = nn.ModuleList([]) self.mid_block = None From ef6ca1ff320c8870ab865b9539695692e7166df2 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 25 Oct 2022 16:06:37 -0700 Subject: [PATCH 088/133] unify block API --- src/diffusers/models/unet_1d.py | 29 +++++++++++++++++++------- src/diffusers/models/unet_1d_blocks.py | 4 +++- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index bcc0b4636e14..a54fab2afaf9 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ from dataclasses import dataclass from typing import Optional, Tuple, Union @@ -114,16 +115,18 @@ def __init__( self.mid_block = None self.up_blocks = nn.ModuleList([]) self.out_block = None - mid_dim = block_out_channels[-1] # down output_channel = in_channels for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] + + if i == 0: + input_channel += extra_in_channels + is_final_block = i == len(block_out_channels) - 1 - down_block_type = down_block_types[i] down_block = get_down_block( down_block_type, num_layers=layers_per_block, @@ -137,8 +140,9 @@ def __init__( # mid self.mid_block = get_mid_block( mid_block_type, - in_channels=mid_dim, - out_channels=mid_dim, + in_channels=block_out_channels[-1], + mid_channels=block_out_channels[-1], + out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=always_downsample, @@ -146,21 +150,30 @@ def __init__( # up reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + if out_block_type is None: + final_upsample_channels = out_channels + else: + final_upsample_channels = block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): - input_channel = reversed_block_out_channels[i] - output_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + prev_output_channel = output_channel + output_channel = ( + reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels + ) is_final_block = i == len(block_out_channels) - 1 up_block = get_up_block( up_block_type, num_layers=layers_per_block, - in_channels=input_channel, + in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, ) self.up_blocks.append(up_block) + prev_output_channel = output_channel # out num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) @@ -170,7 +183,7 @@ def __init__( embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, - fc_dim=mid_dim // 4, + fc_dim=block_out_channels[-1] // 4, ) def forward( diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 0788cc1e76e5..d2197ae5116f 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -318,7 +318,7 @@ def get_up_block(up_block_type, num_layers, in_channels, out_channels, temb_chan raise ValueError(f"{up_block_type} does not exist.") -def get_mid_block(mid_block_type, num_layers, in_channels, out_channels, embed_dim, add_downsample): +def get_mid_block(mid_block_type, num_layers, in_channels, mid_channels, out_channels, embed_dim, add_downsample): if mid_block_type == "MidResTemporalBlock1D": return MidResTemporalBlock1D( num_layers=num_layers, @@ -337,3 +337,5 @@ def get_out_block(*, out_block_type, num_groups_out, embed_dim, out_channels, ac return OutConv1DBlock(num_groups_out, out_channels, embed_dim, act_fn) elif out_block_type == "ValueFunction": return OutValueFunctionBlock(fc_dim, embed_dim) + else: + return None From e6f1a83a1d78aed2aaad78571e813097441c6673 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 25 Oct 2022 16:35:01 -0700 Subject: [PATCH 089/133] unify forward api --- src/diffusers/models/unet_1d.py | 48 +++++++++----------------- src/diffusers/models/unet_1d_blocks.py | 30 ++-------------- tests/test_models_unet_1d.py | 1 + 
3 files changed, 20 insertions(+), 59 deletions(-) diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index 42043064645a..2d0c1e2c6804 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -211,48 +211,32 @@ def forward( elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) - temb = self.time_proj(timesteps) - temb = self.time_mlp(temb) - down_block_res_samples = [] + timestep_embed = self.time_proj(timesteps) + if self.time_mlp: + timestep_embed = self.time_mlp(timestep_embed) + else: + timestep_embed = timestep_embed[..., None] + timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) # 2. down - for down_block in self.down_blocks: - sample, res_samples = down_block(hidden_states=sample, temb=temb) - down_block_res_samples.append(res_samples[0]) + down_block_res_samples = () + for downsample_block in self.down_blocks: + sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed) + down_block_res_samples += res_samples # 3. mid if self.mid_block: - sample = self.mid_block(sample, temb) + sample = self.mid_block(sample, timestep_embed) # 4. up - for up_block in self.up_blocks: - sample = up_block(hidden_states=sample, res_hidden_states=down_block_res_samples.pop(), temb=temb) + for i, upsample_block in enumerate(self.up_blocks): + res_samples = down_block_res_samples[-1:] + down_block_res_samples = down_block_res_samples[:-1] + sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed) # 5. post-process if self.out_block: - sample = self.out_block(sample, temb) - - # # 1. time - # if len(timestep.shape) == 0: - # timestep = timestep[None] - # - # timestep_embed = self.time_proj(timestep)[..., None] - # timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) - # - # # 2. down - # down_block_res_samples = () - # for downsample_block in self.down_blocks: - # sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed) - # down_block_res_samples += res_samples - # - # # 3. mid - # sample = self.mid_block(sample) - # - # # 4. 
up - # for i, upsample_block in enumerate(self.up_blocks): - # res_samples = down_block_res_samples[-1:] - # down_block_res_samples = down_block_res_samples[:-1] - # sample = upsample_block(sample, res_samples) + sample = self.out_block(sample, timestep_embed) if not return_dict: return (sample,) diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 367a6871a087..98ecc07d0873 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -132,8 +132,9 @@ def __init__( if add_upsample: self.upsample = Upsample1D(out_channels, use_conv_transpose=True) - def forward(self, hidden_states, res_hidden_states=None, temb=None): - if res_hidden_states is not None: + def forward(self, hidden_states, res_hidden_states_tuple=None, temb=None): + if res_hidden_states_tuple is not None: + res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat((hidden_states, res_hidden_states), dim=1) hidden_states = self.resnets[0](hidden_states, temb) @@ -148,31 +149,6 @@ def forward(self, hidden_states, res_hidden_states=None, temb=None): return hidden_states - -class DownBlock1D(nn.Module): - pass - - -class AttnDownBlock1D(nn.Module): - pass - - -class DownBlock1DNoSkip(nn.Module): - pass - - -class UpBlock1D(nn.Module): - pass - - -class AttnUpBlock1D(nn.Module): - pass - - -class UpBlock1DNoSkip(nn.Module): - pass - - class ValueFunctionMidBlock1D(nn.Module): def __init__(self, in_channels, out_channels, embed_dim): super().__init__() diff --git a/tests/test_models_unet_1d.py b/tests/test_models_unet_1d.py index 364e9193f53f..4e5e8d666413 100644 --- a/tests/test_models_unet_1d.py +++ b/tests/test_models_unet_1d.py @@ -59,6 +59,7 @@ def prepare_init_args_and_inputs_for_common(self): "block_out_channels": (32, 128, 256), "in_channels": 14, "out_channels": 14, + "time_embedding_type": "positional", } inputs_dict = self.dummy_input return init_dict, inputs_dict From c35a925747630d230c1a3048a64c381fb47aae87 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 25 Oct 2022 16:36:42 -0700 Subject: [PATCH 090/133] delete lines in examples --- .../diffuser/run_diffuser_gen_trajectories.py | 13 ----------- examples/diffuser/run_diffuser_locomotion.py | 17 -------------- .../diffuser/run_diffuser_value_guided.py | 23 ------------------- 3 files changed, 53 deletions(-) diff --git a/examples/diffuser/run_diffuser_gen_trajectories.py b/examples/diffuser/run_diffuser_gen_trajectories.py index f4c86635c652..097222462dc3 100644 --- a/examples/diffuser/run_diffuser_gen_trajectories.py +++ b/examples/diffuser/run_diffuser_gen_trajectories.py @@ -21,21 +21,8 @@ def _run(): env_name = "hopper-medium-v2" env = gym.make(env_name) - DEVICE = config["device"] - - scheduler = DDPMScheduler( - num_train_timesteps=config["num_inference_steps"], - beta_schedule="squaredcos_cap_v2", - clip_sample=False, - variance_type="fixed_small_log", - ) - network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() - unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval() pipeline = DiffusionPipeline.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", - value_function=network, - unet=unet, - scheduler=scheduler, env=env, custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", ) diff --git a/examples/diffuser/run_diffuser_locomotion.py b/examples/diffuser/run_diffuser_locomotion.py index 1b4351095d3b..0714769cdc2e 100644 
--- a/examples/diffuser/run_diffuser_locomotion.py +++ b/examples/diffuser/run_diffuser_locomotion.py @@ -21,25 +21,8 @@ def _run(): env_name = "hopper-medium-v2" env = gym.make(env_name) - # Cuda settings for colab - # torch.cuda.get_device_name(0) - DEVICE = config["device"] - - # Two generators for different parts of the diffusion loop to work in colab - scheduler = DDPMScheduler( - num_train_timesteps=config["num_inference_steps"], - beta_schedule="squaredcos_cap_v2", - clip_sample=False, - variance_type="fixed_small_log", - ) - - network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() - unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval() pipeline = DiffusionPipeline.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", - value_function=network, - unet=unet, - scheduler=scheduler, env=env, custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", ) diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 4272ec2c3106..8c0ec54fcf97 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -23,34 +23,11 @@ def _run(): env_name = "hopper-medium-v2" env = gym.make(env_name) - # Cuda settings for colab - # torch.cuda.get_device_name(0) - DEVICE = config["device"] - - # Two generators for different parts of the diffusion loop to work in colab - scheduler = DDPMScheduler( - num_train_timesteps=config["num_inference_steps"], - beta_schedule="squaredcos_cap_v2", - clip_sample=False, - variance_type="fixed_small_log", - ) - - # 3 different pretrained models are available for this task. - # The horizion represents the length of trajectories used in training. 
- # network = ValueFunction(training_horizon=horizon, dim=32, dim_mults=(1, 2, 4, 8), transition_dim=14, cond_dim=11) - - network = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32").to(device=DEVICE).eval() - unet = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-unet-hor32").to(device=DEVICE).eval() pipeline = DiffusionPipeline.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", - value_function=network, - unet=unet, - scheduler=scheduler, env=env, custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", ) - # unet = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) - # network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) # add a batch dimension and repeat for multiple samples # [ observation_dim ] --> [ n_samples x observation_dim ] From 949b93a17d14288d547e98016888b55eb22e7c8c Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 25 Oct 2022 16:37:55 -0700 Subject: [PATCH 091/133] style --- src/diffusers/models/unet_1d.py | 3 +-- src/diffusers/models/unet_1d_blocks.py | 5 ++--- tests/test_models_unet_1d.py | 3 ++- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index 2d0c1e2c6804..da61fd1d145b 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -18,12 +18,11 @@ import torch import torch.nn as nn - from ..configuration_utils import ConfigMixin, register_to_config from ..modeling_utils import ModelMixin from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps -from .unet_1d_blocks import get_down_block, get_mid_block, get_up_block, get_out_block +from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 98ecc07d0873..697de5184cfb 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -149,6 +149,7 @@ def forward(self, hidden_states, res_hidden_states_tuple=None, temb=None): return hidden_states + class ValueFunctionMidBlock1D(nn.Module): def __init__(self, in_channels, out_channels, embed_dim): super().__init__() @@ -267,8 +268,6 @@ def forward(self, hidden_states, temb): return hidden_states - - _kernels = { "linear": [1 / 8, 3 / 8, 3 / 8, 1 / 8], "cubic": [-0.01171875, -0.03515625, 0.11328125, 0.43359375, 0.43359375, 0.11328125, -0.03515625, -0.01171875], @@ -666,4 +665,4 @@ def get_out_block(*, out_block_type, num_groups_out, embed_dim, out_channels, ac return OutConv1DBlock(num_groups_out, out_channels, embed_dim, act_fn) elif out_block_type == "ValueFunction": return OutValueFunctionBlock(fc_dim, embed_dim) - return None \ No newline at end of file + return None diff --git a/tests/test_models_unet_1d.py b/tests/test_models_unet_1d.py index 4e5e8d666413..edda27a7ad6a 100644 --- a/tests/test_models_unet_1d.py +++ b/tests/test_models_unet_1d.py @@ -18,9 +18,9 @@ import torch from diffusers import UNet1DModel +from diffusers.utils import floats_tensor, slow, torch_device from .test_modeling_common import ModelTesterMixin -from diffusers.utils import floats_tensor, slow, torch_device torch.backends.cuda.matmul.allow_tf32 = False @@ -202,6 +202,7 @@ def test_forward_with_norm_groups(self): # Not implemented yet for this UNet pass + class UnetModel1DTests(unittest.TestCase): @slow def 
test_unet_1d_maestro(self): From 2f6462b2f685c9f67b6d4c9d090ea78e3c15a9dd Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 25 Oct 2022 16:43:30 -0700 Subject: [PATCH 092/133] examples style --- examples/diffuser/run_diffuser_gen_trajectories.py | 2 +- examples/diffuser/run_diffuser_locomotion.py | 2 +- examples/diffuser/run_diffuser_value_guided.py | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/diffuser/run_diffuser_gen_trajectories.py b/examples/diffuser/run_diffuser_gen_trajectories.py index 097222462dc3..3de8521343e3 100644 --- a/examples/diffuser/run_diffuser_gen_trajectories.py +++ b/examples/diffuser/run_diffuser_gen_trajectories.py @@ -1,7 +1,7 @@ import d4rl # noqa import gym import tqdm -from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel +from diffusers import DiffusionPipeline config = dict( diff --git a/examples/diffuser/run_diffuser_locomotion.py b/examples/diffuser/run_diffuser_locomotion.py index 0714769cdc2e..9ac9df28db81 100644 --- a/examples/diffuser/run_diffuser_locomotion.py +++ b/examples/diffuser/run_diffuser_locomotion.py @@ -1,7 +1,7 @@ import d4rl # noqa import gym import tqdm -from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel +from diffusers import DiffusionPipeline config = dict( diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py index 8c0ec54fcf97..707663abb3bf 100644 --- a/examples/diffuser/run_diffuser_value_guided.py +++ b/examples/diffuser/run_diffuser_value_guided.py @@ -1,9 +1,7 @@ import d4rl # noqa import gym import tqdm - -# import train_diffuser -from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel +from diffusers import DiffusionPipeline config = dict( From a2dd559e12ddf9255dfea39d8fa77f92a5cc4f91 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 25 Oct 2022 17:10:00 -0700 Subject: [PATCH 093/133] all tests pass --- src/diffusers/models/unet_1d.py | 6 +++--- src/diffusers/models/unet_1d_blocks.py | 10 +++++----- tests/test_models_unet_1d.py | 3 +++ 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index da61fd1d145b..7fdee9ea84ba 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -44,7 +44,7 @@ class UNet1DModel(ModelMixin, ConfigMixin): implements for all the model (such as downloading or saving, etc.) Parameters: - sample_size (`int`, *optionl*): Default length of sample. Should be adaptable at runtime. + sample_size (`int`, *optional*): Default length of sample. Should be adaptable at runtime. in_channels (`int`, *optional*, defaults to 2): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 2): Number of channels in the output. time_embedding_type (`str`, *optional*, defaults to `"fourier"`): Type of time embedding to use. 
@@ -78,7 +78,7 @@ def __init__( down_block_types: Tuple[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), up_block_types: Tuple[str] = ("UpResnetBlock1D", "UpResnetBlock1D"), mid_block_type: Tuple[str] = "MidResTemporalBlock1D", - out_block_type: str = "OutConv1DBlock", + out_block_type: str = None, block_out_channels: Tuple[int] = (32, 128, 256), act_fn: str = "mish", norm_num_groups: int = 8, @@ -211,7 +211,7 @@ def forward( timesteps = timesteps[None].to(sample.device) timestep_embed = self.time_proj(timesteps) - if self.time_mlp: + if self.config.use_timestep_embedding: timestep_embed = self.time_mlp(timestep_embed) else: timestep_embed = timestep_embed[..., None] diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index 697de5184cfb..fc758ebbb044 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -312,7 +312,7 @@ def __init__(self, kernel="linear", pad_mode="reflect"): self.pad = kernel_1d.shape[0] // 2 - 1 self.register_buffer("kernel", kernel_1d) - def forward(self, hidden_states): + def forward(self, hidden_states, temb=None): hidden_states = F.pad(hidden_states, ((self.pad + 1) // 2,) * 2, self.pad_mode) weight = hidden_states.new_zeros([hidden_states.shape[1], hidden_states.shape[1], self.kernel.shape[0]]) indices = torch.arange(hidden_states.shape[1], device=hidden_states.device) @@ -441,7 +441,7 @@ def __init__(self, mid_channels, in_channels, out_channels=None): self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) - def forward(self, hidden_states): + def forward(self, hidden_states, temb=None): hidden_states = self.down(hidden_states) for attn, resnet in zip(self.attentions, self.resnets): hidden_states = resnet(hidden_states) @@ -546,7 +546,7 @@ def __init__(self, in_channels, out_channels, mid_channels=None): self.resnets = nn.ModuleList(resnets) self.up = Upsample1d(kernel="cubic") - def forward(self, hidden_states, res_hidden_states_tuple): + def forward(self, hidden_states, res_hidden_states_tuple, temb=None): res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) @@ -573,7 +573,7 @@ def __init__(self, in_channels, out_channels, mid_channels=None): self.resnets = nn.ModuleList(resnets) self.up = Upsample1d(kernel="cubic") - def forward(self, hidden_states, res_hidden_states_tuple): + def forward(self, hidden_states, res_hidden_states_tuple, temb=None): res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) @@ -598,7 +598,7 @@ def __init__(self, in_channels, out_channels, mid_channels=None): self.resnets = nn.ModuleList(resnets) - def forward(self, hidden_states, res_hidden_states_tuple): + def forward(self, hidden_states, res_hidden_states_tuple, temb=None): res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) diff --git a/tests/test_models_unet_1d.py b/tests/test_models_unet_1d.py index edda27a7ad6a..26ef5b419345 100644 --- a/tests/test_models_unet_1d.py +++ b/tests/test_models_unet_1d.py @@ -60,6 +60,8 @@ def prepare_init_args_and_inputs_for_common(self): "in_channels": 14, "out_channels": 14, "time_embedding_type": "positional", + "use_timestep_embedding": True, + "out_block_type": "OutConv1DBlock", } inputs_dict = self.dummy_input return init_dict, inputs_dict @@ -159,6 +161,7 @@ def prepare_init_args_and_inputs_for_common(self): 
"block_out_channels": [32, 64, 128, 256], "layers_per_block": 1, "always_downsample": True, + "use_timestep_embedding": True } inputs_dict = self.dummy_input return init_dict, inputs_dict From 39dff7331bc50bdceffd616dd6b866eedc1b3c7a Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Tue, 25 Oct 2022 17:11:31 -0700 Subject: [PATCH 094/133] make style --- tests/test_models_unet_1d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_models_unet_1d.py b/tests/test_models_unet_1d.py index 26ef5b419345..ab86b5b6f202 100644 --- a/tests/test_models_unet_1d.py +++ b/tests/test_models_unet_1d.py @@ -161,7 +161,7 @@ def prepare_init_args_and_inputs_for_common(self): "block_out_channels": [32, 64, 128, 256], "layers_per_block": 1, "always_downsample": True, - "use_timestep_embedding": True + "use_timestep_embedding": True, } inputs_dict = self.dummy_input return init_dict, inputs_dict From 7653c4f2cf8605fabe718c8c07489aad886ea6a5 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 26 Oct 2022 09:47:12 -0400 Subject: [PATCH 095/133] update conversion script --- scripts/convert_models_diffuser_to_diffusers.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py index 4b4608358c17..dda41d4c5d35 100644 --- a/scripts/convert_models_diffuser_to_diffusers.py +++ b/scripts/convert_models_diffuser_to_diffusers.py @@ -29,6 +29,10 @@ def unet(hor): block_out_channels=block_out_channels, up_block_types=up_block_types, layers_per_block=1, + in_channels=14, + out_channels=14, + use_timestep_embedding=True, + out_block_type="OutConv1DBlock", ) hf_value_function = UNet1DModel(**config) print(f"length of state dict: {len(state_dict.keys())}") From 2f97adfcb84c602f9c0bb90b1f6b3ff7fd301b62 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 26 Oct 2022 17:52:52 -0400 Subject: [PATCH 096/133] first go --- .../image_diffusion.ipynb | 532 +++++++++++ .../train_teacher_model.ipynb | 835 ++++++++++++++++++ examples/progressive_distillation/utils.py | 68 ++ src/diffusers/schedulers/scheduling_ddpm.py | 14 + 4 files changed, 1449 insertions(+) create mode 100644 examples/progressive_distillation/image_diffusion.ipynb create mode 100644 examples/progressive_distillation/train_teacher_model.ipynb create mode 100644 examples/progressive_distillation/utils.py diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb new file mode 100644 index 000000000000..7fc08f7e3a5c --- /dev/null +++ b/examples/progressive_distillation/image_diffusion.ipynb @@ -0,0 +1,532 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/lib/python3/dist-packages/requests/__init__.py:89: RequestsDependencyWarning: urllib3 (1.26.12) or chardet (3.0.4) doesn't match a supported version!\n", + " warnings.warn(\"urllib3 ({}) or chardet ({}) doesn't match a supported \"\n", + "WARNING:absl:No GPU/TPU found, falling back to CPU. 
(Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n" + ] + } + ], + "source": [ + "import torch\n", + "from PIL import Image\n", + "from diffusers import AutoencoderKL, UNet2DModel, DDPMScheduler, DDPMPipeline\n", + "from diffusers.optimization import get_scheduler\n", + "from diffusers.training_utils import EMAModel\n", + "import math\n", + "import requests\n", + "from torchvision.transforms import (\n", + " CenterCrop,\n", + " Compose,\n", + " InterpolationMode,\n", + " Normalize,\n", + " RandomHorizontalFlip,\n", + " Resize,\n", + " ToTensor,\n", + ")\n", + "from accelerate import Accelerator\n", + "import utils\n", + "from tqdm import tqdm\n", + "import torch.nn.functional as F\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "training_config = utils.DiffusionTrainingArgs()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# Load an image of my dog for this example\n", + "\n", + "image_url = \"https://i.imgur.com/IJcs4Aa.jpeg\"\n", + "image = Image.open(requests.get(image_url, stream=True).raw)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# Define the transforms to apply to the image for training\n", + "augmentations = utils.get_train_transforms(training_config)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "train_image = augmentations(image.convert(\"RGB\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "model = UNet2DModel.from_pretrained(\"bglick13/minnie-diffusion\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A/tmp/ipykernel_602221/3684360613.py:49: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. 
To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').\n", + " alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma(batch, timesteps // 2, accelerator.device)\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.36it/s, ema_decay=0, loss=0.461, lr=0.000297, step=1]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 1: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0, loss=0.418, lr=0.000294, step=2]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 2: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.405, loss=0.308, lr=0.000291, step=3]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 3: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.561, loss=0.368, lr=0.000288, step=4]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 4: 100%|██████████| 1/1 [00:00<00:00, 1.32it/s, ema_decay=0.646, loss=0.278, lr=0.000285, step=5]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 5: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.701, loss=0.255, lr=0.000282, step=6]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 6: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.739, loss=0.199, lr=0.000279, step=7]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 7: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.768, loss=0.227, lr=0.000276, step=8]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 8: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.79, loss=0.298, lr=0.000273, step=9]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 9: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.808, loss=0.212, lr=0.00027, step=10]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 10: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.822, loss=0.254, lr=0.000267, step=11]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 11: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.834, loss=0.23, lr=0.000264, step=12]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 12: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.845, loss=0.129, lr=0.000261, step=13]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 13: 100%|██████████| 1/1 [00:00<00:00, 1.35it/s, ema_decay=0.854, loss=0.158, lr=0.000258, step=14]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 14: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.862, loss=0.137, lr=0.000255, step=15]\n", + 
"\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "Epoch 15: 100%|██████████| 1/1 [00:00<00:00, 1.36it/s, ema_decay=0.869, loss=0.171, lr=0.000252, step=16]\n", + "\n", + "\n", + "\u001b[A\u001b[A\n", + "\n", + "\u001b[A\u001b[A" + ] + }, + { + "data": { + "text/html": [ + "
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮\n",
+       " /tmp/ipykernel_602221/3684360613.py:90 in <module>                                               \n",
+       "                                                                                                  \n",
+       " [Errno 2] No such file or directory: '/tmp/ipykernel_602221/3684360613.py'                       \n",
+       "                                                                                                  \n",
+       " /tmp/ipykernel_602221/3684360613.py:66 in distill                                                \n",
+       "                                                                                                  \n",
+       " [Errno 2] No such file or directory: '/tmp/ipykernel_602221/3684360613.py'                       \n",
+       "                                                                                                  \n",
+       " /home/ben/.local/lib/python3.8/site-packages/accelerate/accelerator.py:1005 in backward          \n",
+       "                                                                                                  \n",
+       "   1002 │   │   if self.distributed_type == DistributedType.DEEPSPEED:                            \n",
+       "   1003 │   │   │   self.deepspeed_engine_wrapped.backward(loss, **kwargs)                        \n",
+       "   1004 │   │   elif self.scaler is not None:                                                     \n",
+       " 1005 │   │   │   self.scaler.scale(loss).backward(**kwargs)                                    \n",
+       "   1006 │   │   else:                                                                             \n",
+       "   1007 │   │   │   loss.backward(**kwargs)                                                       \n",
+       "   1008                                                                                           \n",
+       "                                                                                                  \n",
+       " /home/ben/.local/lib/python3.8/site-packages/torch/_tensor.py:396 in backward                    \n",
+       "                                                                                                  \n",
+       "    393 │   │   │   │   retain_graph=retain_graph,                                                \n",
+       "    394 │   │   │   │   create_graph=create_graph,                                                \n",
+       "    395 │   │   │   │   inputs=inputs)                                                            \n",
+       "  396 │   │   torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=input  \n",
+       "    397 │                                                                                         \n",
+       "    398 │   def register_hook(self, hook):                                                        \n",
+       "    399 │   │   r\"\"\"Registers a backward hook.                                                    \n",
+       "                                                                                                  \n",
+       " /home/ben/.local/lib/python3.8/site-packages/torch/autograd/__init__.py:173 in backward          \n",
+       "                                                                                                  \n",
+       "   170 │   # The reason we repeat same the comment below is that                                  \n",
+       "   171 │   # some Python versions print out the first line of a multi-line function               \n",
+       "   172 │   # calls in the traceback and some print out the last line                              \n",
+       " 173 Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the bac   \n",
+       "   174 │   │   tensors, grad_tensors_, retain_graph, create_graph, inputs,                        \n",
+       "   175 │   │   allow_unreachable=True, accumulate_grad=True)  # Calls into the C++ engine to ru   \n",
+       "   176                                                                                            \n",
+       "╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\n",
+       "KeyboardInterrupt\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[31m╭─\u001b[0m\u001b[31m──────────────────────────────\u001b[0m\u001b[31m \u001b[0m\u001b[1;31mTraceback \u001b[0m\u001b[1;2;31m(most recent call last)\u001b[0m\u001b[31m \u001b[0m\u001b[31m───────────────────────────────\u001b[0m\u001b[31m─╮\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2;33m/tmp/ipykernel_602221/\u001b[0m\u001b[1;33m3684360613.py\u001b[0m:\u001b[94m90\u001b[0m in \u001b[92m\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[3;31m[Errno 2] No such file or directory: '/tmp/ipykernel_602221/3684360613.py'\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2;33m/tmp/ipykernel_602221/\u001b[0m\u001b[1;33m3684360613.py\u001b[0m:\u001b[94m66\u001b[0m in \u001b[92mdistill\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[3;31m[Errno 2] No such file or directory: '/tmp/ipykernel_602221/3684360613.py'\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2;33m/home/ben/.local/lib/python3.8/site-packages/accelerate/\u001b[0m\u001b[1;33maccelerator.py\u001b[0m:\u001b[94m1005\u001b[0m in \u001b[92mbackward\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m1002 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mif\u001b[0m \u001b[96mself\u001b[0m.distributed_type == DistributedType.DEEPSPEED: \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m1003 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[96mself\u001b[0m.deepspeed_engine_wrapped.backward(loss, **kwargs) \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m1004 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94melif\u001b[0m \u001b[96mself\u001b[0m.scaler \u001b[95mis\u001b[0m \u001b[95mnot\u001b[0m \u001b[94mNone\u001b[0m: \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m1005 \u001b[2m│ │ │ \u001b[0m\u001b[96mself\u001b[0m.scaler.scale(loss).backward(**kwargs) \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m1006 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94melse\u001b[0m: \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m1007 \u001b[0m\u001b[2m│ │ │ \u001b[0mloss.backward(**kwargs) \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m1008 \u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2;33m/home/ben/.local/lib/python3.8/site-packages/torch/\u001b[0m\u001b[1;33m_tensor.py\u001b[0m:\u001b[94m396\u001b[0m in \u001b[92mbackward\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m 393 \u001b[0m\u001b[2m│ │ │ │ \u001b[0mretain_graph=retain_graph, \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m 394 \u001b[0m\u001b[2m│ │ │ │ \u001b[0mcreate_graph=create_graph, \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m 395 \u001b[0m\u001b[2m│ │ │ │ \u001b[0minputs=inputs) \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m 396 \u001b[2m│ │ \u001b[0mtorch.autograd.backward(\u001b[96mself\u001b[0m, gradient, retain_graph, create_graph, inputs=input \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m 397 \u001b[0m\u001b[2m│ \u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m 398 \u001b[0m\u001b[2m│ \u001b[0m\u001b[94mdef\u001b[0m \u001b[92mregister_hook\u001b[0m(\u001b[96mself\u001b[0m, 
hook): \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m 399 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[33mr\u001b[0m\u001b[33m\"\"\"Registers a backward hook.\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2;33m/home/ben/.local/lib/python3.8/site-packages/torch/autograd/\u001b[0m\u001b[1;33m__init__.py\u001b[0m:\u001b[94m173\u001b[0m in \u001b[92mbackward\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m170 \u001b[0m\u001b[2m│ \u001b[0m\u001b[2m# The reason we repeat same the comment below is that\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m171 \u001b[0m\u001b[2m│ \u001b[0m\u001b[2m# some Python versions print out the first line of a multi-line function\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m172 \u001b[0m\u001b[2m│ \u001b[0m\u001b[2m# calls in the traceback and some print out the last line\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m173 \u001b[2m│ \u001b[0mVariable._execution_engine.run_backward( \u001b[2m# Calls into the C++ engine to run the bac\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m174 \u001b[0m\u001b[2m│ │ \u001b[0mtensors, grad_tensors_, retain_graph, create_graph, inputs, \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m175 \u001b[0m\u001b[2m│ │ \u001b[0mallow_unreachable=\u001b[94mTrue\u001b[0m, accumulate_grad=\u001b[94mTrue\u001b[0m) \u001b[2m# Calls into the C++ engine to ru\u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m│\u001b[0m \u001b[2m176 \u001b[0m \u001b[31m│\u001b[0m\n", + "\u001b[31m╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\u001b[0m\n", + "\u001b[1;91mKeyboardInterrupt\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "def distill(teacher, n, train_image, epochs=100, lr=3e-4, batch_size=16):\n", + " accelerator = Accelerator(\n", + " gradient_accumulation_steps=training_config.gradient_accumulation_steps,\n", + " mixed_precision=training_config.mixed_precision,\n", + ")\n", + " if accelerator.is_main_process:\n", + " run = \"distill\"\n", + " accelerator.init_trackers(run)\n", + " teacher_scheduler = DDPMScheduler(num_train_timesteps=n)\n", + " student_scheduler = DDPMScheduler(num_train_timesteps=n // 2)\n", + " student = utils.get_unet(training_config)\n", + " student.load_state_dict(teacher.state_dict())\n", + " student = accelerator.prepare(student)\n", + " student.train()\n", + " optimizer = torch.optim.AdamW(\n", + " student.parameters(),\n", + " lr=lr,\n", + " betas=(training_config.adam_beta1, training_config.adam_beta2),\n", + " weight_decay=0.001,\n", + " eps=training_config.adam_epsilon,\n", + " )\n", + " lr_scheduler = get_scheduler(\n", + " \"linear\",\n", + " optimizer=optimizer,\n", + " num_warmup_steps=0,\n", + " num_training_steps=(epochs) // training_config.gradient_accumulation_steps,\n", + ")\n", + " teacher, student, optimizer, lr_scheduler, train_image, teacher_scheduler, student_scheduler = accelerator.prepare(\n", + " teacher, student, optimizer, lr_scheduler, train_image,teacher_scheduler, student_scheduler\n", + ")\n", + " ema_model = EMAModel(student, inv_gamma=training_config.ema_inv_gamma, power=training_config.ema_power, max_value=training_config.ema_max_decay)\n", + " global_step = 0\n", + " for epoch in range(epochs):\n", + " progress_bar = tqdm(total=1, disable=not 
+    "        progress_bar.set_description(f\"Epoch {epoch}\")\n",
+    "        batch = train_image.unsqueeze(0).repeat(\n",
+    "            batch_size, 1, 1, 1\n",
+    "        ).to(accelerator.device)\n",
+    "        with accelerator.accumulate(student):\n",
+    "            noise = torch.randn(batch.shape).to(accelerator.device)\n",
+    "            bsz = batch.shape[0]\n",
+    "            # Sample a random (even) teacher timestep for each image\n",
+    "            timesteps = torch.randint(\n",
+    "                0, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device\n",
+    "            ).long() * 2\n",
+    "            with torch.no_grad():\n",
+    "                # Noise the image to the teacher timestep, then take two teacher\n",
+    "                # denoising steps to build the one-step target for the student\n",
+    "                alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps + 1, accelerator.device)\n",
+    "                z_t = alpha_t * batch + sigma_t * noise\n",
+    "                alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma(batch, timesteps // 2, accelerator.device)\n",
+    "                alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma(batch, timesteps, accelerator.device)\n",
+    "                v = teacher(z_t.float(), timesteps + 1).sample\n",
+    "                rec_t = (alpha_t * z_t - sigma_t * v).clip(-1, 1)\n",
+    "\n",
+    "                z_t_prime = alpha_t_prime * rec_t + (sigma_t_prime / sigma_t) * (z_t - alpha_t * rec_t)\n",
+    "                v_1 = teacher(z_t_prime.float(), timesteps).sample\n",
+    "                rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * v_1).clip(-1, 1)\n",
+    "                z_t_prime_2 = alpha_t_prime2 * rec_t_prime + (sigma_t_prime2 / sigma_t_prime) * (z_t_prime - alpha_t_prime * rec_t_prime)\n",
+    "                x_hat = z_t_prime_2 - ((sigma_t_prime2 / sigma_t_prime) * z_t) / (alpha_t_prime2 - (sigma_t_prime2 / sigma_t_prime) * alpha_t)\n",
+    "\n",
+    "            # The student must match the teacher's two steps with a single step\n",
+    "            noise_pred = student(z_t, timesteps).sample\n",
+    "            student_rec = (alpha_t * z_t - sigma_t * noise_pred).clip(-1, 1)\n",
+    "            loss = F.mse_loss(student_rec, x_hat.clip(-1, 1))\n",
+    "\n",
+    "            accelerator.backward(loss)\n",
+    "\n",
+    "            if accelerator.sync_gradients:\n",
+    "                accelerator.clip_grad_norm_(student.parameters(), 1.0)\n",
+    "            optimizer.step()\n",
+    "            lr_scheduler.step()\n",
+    "            if training_config.use_ema:\n",
+    "                ema_model.step(student)\n",
+    "            optimizer.zero_grad()\n",
+    "\n",
+    "            # Checks if the accelerator has performed an optimization step behind the scenes\n",
+    "            if accelerator.sync_gradients:\n",
+    "                progress_bar.update(1)\n",
+    "                global_step += 1\n",
+    "\n",
+    "            logs = {\"loss\": loss.detach().item(), \"lr\": lr_scheduler.get_last_lr()[0], \"step\": global_step}\n",
+    "            if training_config.use_ema:\n",
+    "                logs[\"ema_decay\"] = ema_model.decay\n",
+    "            progress_bar.set_postfix(**logs)\n",
+    "            accelerator.log(logs, step=global_step)\n",
+    "        progress_bar.close()\n",
+    "\n",
+    "    accelerator.wait_for_everyone()\n",
+    "    return student, ema_model, accelerator\n",
+    "\n",
+    "teacher, distilled_ema, distill_accelerator = distill(model, 1000, train_image, epochs=100, batch_size=64)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "d8921e5a95394b7e888ba3af9fe4f0b8",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/500 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "display(Image.fromarray(images_processed[0]))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.8.10 64-bit",
+   "language": "python",
+   "name": "python3"
+  },
"python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/progressive_distillation/train_teacher_model.ipynb b/examples/progressive_distillation/train_teacher_model.ipynb new file mode 100644 index 000000000000..ec1a47d25945 --- /dev/null +++ b/examples/progressive_distillation/train_teacher_model.ipynb @@ -0,0 +1,835 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n" + ] + } + ], + "source": [ + "import torch\n", + "from PIL import Image\n", + "from diffusers import AutoencoderKL, UNet2DModel, DDPMScheduler, DDPMPipeline\n", + "from diffusers.optimization import get_scheduler\n", + "from diffusers.training_utils import EMAModel\n", + "import math\n", + "import requests\n", + "from torchvision.transforms import (\n", + " CenterCrop,\n", + " Compose,\n", + " InterpolationMode,\n", + " Normalize,\n", + " RandomHorizontalFlip,\n", + " Resize,\n", + " ToTensor,\n", + ")\n", + "from accelerate import Accelerator\n", + "import utils\n", + "from tqdm import tqdm\n", + "import torch.nn.functional as F" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "training_config = utils.DiffusionTrainingArgs()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# Load an image of my dog for this example\n", + "\n", + "image_url = \"https://i.imgur.com/IJcs4Aa.jpeg\"\n", + "image = Image.open(requests.get(image_url, stream=True).raw)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "# Define the transforms to apply to the image for training\n", + "augmentations = utils.get_train_transforms(training_config)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "train_image = augmentations(image.convert(\"RGB\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "accelerator = Accelerator(\n", + " gradient_accumulation_steps=training_config.gradient_accumulation_steps,\n", + " mixed_precision=training_config.mixed_precision,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "model = utils.get_unet(training_config)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "noise_scheduler = DDPMScheduler(num_train_timesteps=1000)\n", + "optimizer = torch.optim.AdamW(\n", + " model.parameters(),\n", + " lr=training_config.learning_rate,\n", + " betas=(training_config.adam_beta1, training_config.adam_beta2),\n", + " weight_decay=training_config.adam_weight_decay,\n", + " eps=training_config.adam_epsilon,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "lr_scheduler = 
+    "    training_config.lr_scheduler,\n",
+    "    optimizer=optimizer,\n",
+    "    num_warmup_steps=training_config.lr_warmup_steps,\n",
+    "    num_training_steps=training_config.num_epochs // training_config.gradient_accumulation_steps,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model, optimizer, train_image, lr_scheduler = accelerator.prepare(\n",
+    "    model, optimizer, train_image, lr_scheduler\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "num_update_steps_per_epoch = math.ceil(training_config.batch_size / training_config.gradient_accumulation_steps)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ema_model = EMAModel(model, inv_gamma=training_config.ema_inv_gamma, power=training_config.ema_power, max_value=training_config.ema_max_decay)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if accelerator.is_main_process:\n",
+    "    run = \"train.py\"\n",
+    "    accelerator.init_trackers(run)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Epoch 0:   6%|▋         | 1/16 [00:00<00:06,  2.16it/s, ema_decay=0, loss=1.09, lr=2e-7, step=1]\n",
+      "Epoch 1:   6%|▋         | 1/16 [00:00<00:03,  4.52it/s, ema_decay=0, loss=1.09, lr=4e-7, step=2]\n",
+      "Epoch 2:   6%|▋         | 1/16 [00:00<00:02,  5.01it/s, ema_decay=0.405, loss=1.09, lr=6e-7, step=3]\n",
+      "[... per-epoch tqdm lines elided: over epochs 3-459 the loss falls from ~1.09 to ~0.01 while the lr ramps linearly from 2e-7 and ema_decay climbs toward 0.99 ...]\n",
+      "Epoch 460:   6%|▋         | 1/16 [00:00<00:02,  5.13it/s, ema_decay=0.99, loss=0.00849, lr=9.22e-5, step=461]\n",
+      "Epoch 461:   6%|▋         | 1/16 [00:00<00:03,  4.48it/s, ema_decay=0.99, loss=0.0245, lr=9.24e-5, step=462]\n",
+      "Epoch 
462: 6%|▋ | 1/16 [00:00<00:03, 4.33it/s, ema_decay=0.99, loss=0.014, lr=9.26e-5, step=463]\n", + "Epoch 463: 6%|▋ | 1/16 [00:00<00:03, 4.72it/s, ema_decay=0.99, loss=0.0233, lr=9.28e-5, step=464]\n", + "Epoch 464: 6%|▋ | 1/16 [00:00<00:03, 4.86it/s, ema_decay=0.99, loss=0.0309, lr=9.3e-5, step=465]\n", + "Epoch 465: 6%|▋ | 1/16 [00:00<00:02, 5.13it/s, ema_decay=0.99, loss=0.0153, lr=9.32e-5, step=466]\n", + "Epoch 466: 6%|▋ | 1/16 [00:00<00:03, 4.64it/s, ema_decay=0.99, loss=0.061, lr=9.34e-5, step=467]\n", + "Epoch 467: 6%|▋ | 1/16 [00:00<00:03, 4.58it/s, ema_decay=0.99, loss=0.0134, lr=9.36e-5, step=468]\n", + "Epoch 468: 6%|▋ | 1/16 [00:00<00:02, 5.25it/s, ema_decay=0.99, loss=0.0156, lr=9.38e-5, step=469]\n", + "Epoch 469: 6%|▋ | 1/16 [00:00<00:02, 5.42it/s, ema_decay=0.99, loss=0.0126, lr=9.4e-5, step=470]\n", + "Epoch 470: 6%|▋ | 1/16 [00:00<00:02, 5.38it/s, ema_decay=0.99, loss=0.0913, lr=9.42e-5, step=471]\n", + "Epoch 471: 6%|▋ | 1/16 [00:00<00:02, 5.01it/s, ema_decay=0.99, loss=0.0156, lr=9.44e-5, step=472]\n", + "Epoch 472: 6%|▋ | 1/16 [00:00<00:02, 5.22it/s, ema_decay=0.99, loss=0.0178, lr=9.46e-5, step=473]\n", + "Epoch 473: 6%|▋ | 1/16 [00:00<00:02, 5.39it/s, ema_decay=0.99, loss=0.0114, lr=9.48e-5, step=474]\n", + "Epoch 474: 6%|▋ | 1/16 [00:00<00:03, 4.51it/s, ema_decay=0.99, loss=0.00989, lr=9.5e-5, step=475]\n", + "Epoch 475: 6%|▋ | 1/16 [00:00<00:03, 5.00it/s, ema_decay=0.99, loss=0.0096, lr=9.52e-5, step=476]\n", + "Epoch 476: 6%|▋ | 1/16 [00:00<00:03, 4.48it/s, ema_decay=0.99, loss=0.00882, lr=9.54e-5, step=477]\n", + "Epoch 477: 6%|▋ | 1/16 [00:00<00:03, 4.65it/s, ema_decay=0.99, loss=0.0103, lr=9.56e-5, step=478]\n", + "Epoch 478: 6%|▋ | 1/16 [00:00<00:02, 5.18it/s, ema_decay=0.99, loss=0.0221, lr=9.58e-5, step=479]\n", + "Epoch 479: 6%|▋ | 1/16 [00:00<00:02, 5.38it/s, ema_decay=0.99, loss=0.0151, lr=9.6e-5, step=480]\n", + "Epoch 480: 6%|▋ | 1/16 [00:00<00:02, 5.35it/s, ema_decay=0.99, loss=0.0154, lr=9.62e-5, step=481]\n", + "Epoch 481: 6%|▋ | 1/16 [00:00<00:02, 5.29it/s, ema_decay=0.99, loss=0.0162, lr=9.64e-5, step=482]\n", + "Epoch 482: 6%|▋ | 1/16 [00:00<00:02, 5.46it/s, ema_decay=0.99, loss=0.019, lr=9.66e-5, step=483]\n", + "Epoch 483: 6%|▋ | 1/16 [00:00<00:03, 4.70it/s, ema_decay=0.99, loss=0.0108, lr=9.68e-5, step=484]\n", + "Epoch 484: 6%|▋ | 1/16 [00:00<00:02, 5.16it/s, ema_decay=0.99, loss=0.0206, lr=9.7e-5, step=485]\n", + "Epoch 485: 6%|▋ | 1/16 [00:00<00:02, 5.30it/s, ema_decay=0.99, loss=0.0155, lr=9.72e-5, step=486]\n", + "Epoch 486: 6%|▋ | 1/16 [00:00<00:02, 5.28it/s, ema_decay=0.99, loss=0.0104, lr=9.74e-5, step=487]\n", + "Epoch 487: 6%|▋ | 1/16 [00:00<00:02, 5.27it/s, ema_decay=0.99, loss=0.00854, lr=9.76e-5, step=488]\n", + "Epoch 488: 6%|▋ | 1/16 [00:00<00:03, 4.60it/s, ema_decay=0.99, loss=0.0157, lr=9.78e-5, step=489]\n", + "Epoch 489: 6%|▋ | 1/16 [00:00<00:02, 5.19it/s, ema_decay=0.99, loss=0.00803, lr=9.8e-5, step=490]\n", + "Epoch 490: 6%|▋ | 1/16 [00:00<00:02, 5.01it/s, ema_decay=0.99, loss=0.0204, lr=9.82e-5, step=491]\n", + "Epoch 491: 6%|▋ | 1/16 [00:00<00:02, 5.19it/s, ema_decay=0.99, loss=0.0265, lr=9.84e-5, step=492]\n", + "Epoch 492: 6%|▋ | 1/16 [00:00<00:02, 5.28it/s, ema_decay=0.99, loss=0.0207, lr=9.86e-5, step=493]\n", + "Epoch 493: 6%|▋ | 1/16 [00:00<00:02, 5.34it/s, ema_decay=0.99, loss=0.00809, lr=9.88e-5, step=494]\n", + "Epoch 494: 6%|▋ | 1/16 [00:00<00:02, 5.42it/s, ema_decay=0.99, loss=0.0113, lr=9.9e-5, step=495]\n", + "Epoch 495: 6%|▋ | 1/16 [00:00<00:02, 5.45it/s, ema_decay=0.99, loss=0.034, lr=9.92e-5, step=496]\n", 
+ "Epoch 496: 6%|▋ | 1/16 [00:00<00:03, 4.62it/s, ema_decay=0.99, loss=0.00972, lr=9.94e-5, step=497]\n", + "Epoch 497: 6%|▋ | 1/16 [00:00<00:03, 4.83it/s, ema_decay=0.99, loss=0.0225, lr=9.96e-5, step=498]\n", + "Epoch 498: 6%|▋ | 1/16 [00:00<00:03, 4.87it/s, ema_decay=0.991, loss=0.0155, lr=9.98e-5, step=499]\n", + "Epoch 499: 6%|▋ | 1/16 [00:00<00:03, 4.74it/s, ema_decay=0.991, loss=0.0567, lr=0.0001, step=500]\n" + ] + } + ], + "source": [ + "global_step = 0\n", + "for epoch in range(training_config.num_epochs):\n", + " model.train()\n", + " progress_bar = tqdm(total=num_update_steps_per_epoch, disable=not accelerator.is_local_main_process)\n", + " progress_bar.set_description(f\"Epoch {epoch}\")\n", + " batch = train_image.unsqueeze(0).repeat(\n", + " 16, 1, 1, 1\n", + " ).to(accelerator.device)\n", + " noise = torch.randn(batch.shape).to(accelerator.device)\n", + " bsz = batch.shape[0]\n", + " # Sample a random timestep for each image\n", + " timesteps = torch.randint(\n", + " 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=batch.device\n", + " ).long()\n", + " noisy_images = noise_scheduler.add_noise(batch, noise, timesteps)\n", + " with accelerator.accumulate(model):\n", + " # Predict the noise residual\n", + " noise_pred = model(noisy_images, timesteps).sample\n", + " loss = F.mse_loss(noise_pred, noise)\n", + " accelerator.backward(loss)\n", + "\n", + " if accelerator.sync_gradients:\n", + " accelerator.clip_grad_norm_(model.parameters(), 1.0)\n", + " optimizer.step()\n", + " lr_scheduler.step()\n", + " if training_config.use_ema:\n", + " ema_model.step(model)\n", + " optimizer.zero_grad()\n", + "\n", + " # Checks if the accelerator has performed an optimization step behind the scenes\n", + " if accelerator.sync_gradients:\n", + " progress_bar.update(1)\n", + " global_step += 1\n", + "\n", + " logs = {\"loss\": loss.detach().item(), \"lr\": lr_scheduler.get_last_lr()[0], \"step\": global_step}\n", + " if training_config.use_ema:\n", + " logs[\"ema_decay\"] = ema_model.decay\n", + " progress_bar.set_postfix(**logs)\n", + " accelerator.log(logs, step=global_step)\n", + " progress_bar.close()\n", + "\n", + " accelerator.wait_for_everyone()" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "0be92e11858f43a3984716ac1e9de667", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/1000 [00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display(Image.fromarray(images_processed[0]))" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "json.dump(model.config, open(\"teacher_config.json\", \"w\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "torch.save(model.state_dict(), \"minnie-diffusion/diffusion_pytorch_model.bin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.8.10 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "orig_nbformat": 4, + 
"vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/progressive_distillation/utils.py b/examples/progressive_distillation/utils.py new file mode 100644 index 000000000000..76b75b7e0bf4 --- /dev/null +++ b/examples/progressive_distillation/utils.py @@ -0,0 +1,68 @@ + + +from dataclasses import dataclass +from torchvision.transforms import ( + CenterCrop, + Compose, + InterpolationMode, + Normalize, + RandomHorizontalFlip, + Resize, + ToTensor, +) +from diffusers import UNet2DModel + +@dataclass +class DiffusionTrainingArgs: + resolution: int = 64 + mixed_precision: str = "fp16" + gradient_accumulation_steps: int = 1 + learning_rate: float = 1e-4 + lr_scheduler: str = "cosine" + lr_warmup_steps: int = 500 + adam_beta1: float = 0.95 + adam_beta2: float = 0.999 + adam_weight_decay: float = 1e-6 + adam_epsilon: float = 1e-08 + use_ema: bool = True + ema_inv_gamma: float = 1.0 + ema_power: float = 3 / 4 + ema_max_decay: float = 0.9999 + batch_size: int = 16 + num_epochs: int = 500 + +def get_train_transforms(training_config): + return Compose( + [ + Resize(training_config.resolution, interpolation=InterpolationMode.BILINEAR), + CenterCrop(training_config.resolution), + RandomHorizontalFlip(), + ToTensor(), + Normalize([0.5], [0.5]), + ] +) + +def get_unet(training_config): + return UNet2DModel( + sample_size=training_config.resolution, + in_channels=3, + out_channels=3, + layers_per_block=2, + block_out_channels=(128, 128, 256, 256, 512, 512), + down_block_types=( + "DownBlock2D", + "DownBlock2D", + "DownBlock2D", + "DownBlock2D", + "AttnDownBlock2D", + "DownBlock2D", + ), + up_block_types=( + "UpBlock2D", + "AttnUpBlock2D", + "UpBlock2D", + "UpBlock2D", + "UpBlock2D", + "UpBlock2D", + ), + ) \ No newline at end of file diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index d51d58ac8f45..89124672d68d 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -26,6 +26,13 @@ from .scheduling_utils import SchedulerMixin +def E_(input, t, shape, device): + out = torch.gather(input.to(device), 0, t.to(device)) + reshape = [shape[0]] + [1] * (len(shape) - 1) + out = out.reshape(*reshape) + return out + + @dataclass class DDPMSchedulerOutput(BaseOutput): """ @@ -134,6 +141,8 @@ def __init__( self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod) + self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1 - self.alphas_cumprod) self.one = torch.tensor(1.0) # standard deviation of the initial noise distribution @@ -309,3 +318,8 @@ def add_noise( def __len__(self): return self.config.num_train_timesteps + + def get_alpha_sigma(self, x, t, device): + alpha = E_(self.sqrt_alphas_cumprod, t, x.shape, device) + sigma = E_(self.sqrt_one_minus_alphas_cumprod, t, x.shape, device) + return alpha, sigma \ No newline at end of file From d5eedff900f9823b2f0ad0a22ad1adc676441288 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Wed, 26 Oct 2022 15:08:43 -0700 Subject: [PATCH 097/133] make dance_diff test pass --- tests/pipelines/dance_diffusion/test_dance_diffusion.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py index 72e67e4479d2..a63ef84c63f5 100644 --- 
a/tests/pipelines/dance_diffusion/test_dance_diffusion.py +++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py
@@ -44,6 +44,10 @@ def dummy_unet(self):
 sample_rate=16_000,
 in_channels=2,
 out_channels=2,
+ flip_sin_to_cos=True,
+ use_timestep_embedding=False,
+ time_embedding_type="fourier",
+ mid_block_type="UNetMidBlock1D",
 down_block_types=["DownBlock1DNoSkip"] + ["DownBlock1D"] + ["AttnDownBlock1D"],
 up_block_types=["AttnUpBlock1D"] + ["UpBlock1D"] + ["UpBlock1DNoSkip"],
 )

From 1f93de2668d4c59e4bb33b6263fd2424cb583e33 Mon Sep 17 00:00:00 2001
From: Ben Glickenhaus
Date: Wed, 26 Oct 2022 20:42:11 -0400
Subject: [PATCH 098/133] use ddim instead of ddpm

---
 .../image_diffusion.ipynb | 593 ++++++++++--------
 src/diffusers/schedulers/scheduling_ddim.py | 14 +
 2 files changed, 339 insertions(+), 268 deletions(-)

diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb
index 7fc08f7e3a5c..f01e393097a5 100644
--- a/examples/progressive_distillation/image_diffusion.ipynb
+++ b/examples/progressive_distillation/image_diffusion.ipynb
@@ -18,7 +18,7 @@
 "source": [
 "import torch\n",
 "from PIL import Image\n",
- "from diffusers import AutoencoderKL, UNet2DModel, DDPMScheduler, DDPMPipeline\n",
+ "from diffusers import AutoencoderKL, UNet2DModel, DDIMPipeline, DDIMScheduler\n",
 "from diffusers.optimization import get_scheduler\n",
 "from diffusers.training_utils import EMAModel\n",
 "import math\n",
@@ -89,256 +89,314 @@
 },
 {
 "cell_type": "code",
- "execution_count": 23,
+ "execution_count": 10,
 "metadata": {},
 "outputs": [
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
- "\n",
- "\n",
- "\u001b[A\u001b[A\n",
- "\n",
- "\u001b[A\u001b[A/tmp/ipykernel_602221/3684360613.py:49: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values.
To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').\n",
- " alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma(batch, timesteps // 2, accelerator.device)\n",
- "[... remainder of the old stderr output elided: \u001b[A cursor-control sequences interleaved with progress lines for Epochs 0-15 of the previous run (about 1.4 it/s, loss falling from 0.461 to 0.171, lr decaying from 0.000297 to 0.000252) before it aborted with the traceback removed below ...]\n",
+ "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0, loss=0.0551, lr=0.000299, step=1]\n",
+ "[... per-epoch progress lines for Epochs 1-276 elided: throughput holds near 1.5 it/s, loss falls from 0.271 to about 0.015, lr decays linearly from 0.000298 to 2.3e-5, and ema_decay climbs from 0 to 0.985 ...]\n",
+
"Epoch 277: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.0161, lr=2.2e-5, step=278]\n", + "Epoch 278: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0153, lr=2.1e-5, step=279]\n", + "Epoch 279: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0177, lr=2e-5, step=280]\n", + "Epoch 280: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.0173, lr=1.9e-5, step=281]\n", + "Epoch 281: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.985, loss=0.0172, lr=1.8e-5, step=282]\n", + "Epoch 282: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0161, lr=1.7e-5, step=283]\n", + "Epoch 283: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0155, lr=1.6e-5, step=284]\n", + "Epoch 284: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.0152, lr=1.5e-5, step=285]\n", + "Epoch 285: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.0159, lr=1.4e-5, step=286]\n", + "Epoch 286: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.986, loss=0.0132, lr=1.3e-5, step=287]\n", + "Epoch 287: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0194, lr=1.2e-5, step=288]\n", + "Epoch 288: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.018, lr=1.1e-5, step=289]\n", + "Epoch 289: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0186, lr=1e-5, step=290]\n", + "Epoch 290: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.0151, lr=9e-6, step=291]\n", + "Epoch 291: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0169, lr=8e-6, step=292]\n", + "Epoch 292: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0163, lr=7e-6, step=293]\n", + "Epoch 293: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0181, lr=6e-6, step=294]\n", + "Epoch 294: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.0145, lr=5e-6, step=295]\n", + "Epoch 295: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0184, lr=4e-6, step=296]\n", + "Epoch 296: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.0168, lr=3e-6, step=297]\n", + "Epoch 297: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.986, loss=0.0177, lr=2e-6, step=298]\n", + "Epoch 298: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.986, loss=0.018, lr=1e-6, step=299]\n", + "Epoch 299: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.0179, lr=0, step=300]\n" ] - }, - { - "data": { - "text/html": [ - "
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮\n",
-       " /tmp/ipykernel_602221/3684360613.py:90 in <module>                                               \n",
-       "                                                                                                  \n",
-       " [Errno 2] No such file or directory: '/tmp/ipykernel_602221/3684360613.py'                       \n",
-       "                                                                                                  \n",
-       " /tmp/ipykernel_602221/3684360613.py:66 in distill                                                \n",
-       "                                                                                                  \n",
-       " [Errno 2] No such file or directory: '/tmp/ipykernel_602221/3684360613.py'                       \n",
-       "                                                                                                  \n",
-       " /home/ben/.local/lib/python3.8/site-packages/accelerate/accelerator.py:1005 in backward          \n",
-       "                                                                                                  \n",
-       "   1002 │   │   if self.distributed_type == DistributedType.DEEPSPEED:                            \n",
-       "   1003 │   │   │   self.deepspeed_engine_wrapped.backward(loss, **kwargs)                        \n",
-       "   1004 │   │   elif self.scaler is not None:                                                     \n",
-       " 1005 │   │   │   self.scaler.scale(loss).backward(**kwargs)                                    \n",
-       "   1006 │   │   else:                                                                             \n",
-       "   1007 │   │   │   loss.backward(**kwargs)                                                       \n",
-       "   1008                                                                                           \n",
-       "                                                                                                  \n",
-       " /home/ben/.local/lib/python3.8/site-packages/torch/_tensor.py:396 in backward                    \n",
-       "                                                                                                  \n",
-       "    393 │   │   │   │   retain_graph=retain_graph,                                                \n",
-       "    394 │   │   │   │   create_graph=create_graph,                                                \n",
-       "    395 │   │   │   │   inputs=inputs)                                                            \n",
-       "  396 │   │   torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=input  \n",
-       "    397 │                                                                                         \n",
-       "    398 │   def register_hook(self, hook):                                                        \n",
-       "    399 │   │   r\"\"\"Registers a backward hook.                                                    \n",
-       "                                                                                                  \n",
-       " /home/ben/.local/lib/python3.8/site-packages/torch/autograd/__init__.py:173 in backward          \n",
-       "                                                                                                  \n",
-       "   170 │   # The reason we repeat same the comment below is that                                  \n",
-       "   171 │   # some Python versions print out the first line of a multi-line function               \n",
-       "   172 │   # calls in the traceback and some print out the last line                              \n",
-       " 173 Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the bac   \n",
-       "   174 │   │   tensors, grad_tensors_, retain_graph, create_graph, inputs,                        \n",
-       "   175 │   │   allow_unreachable=True, accumulate_grad=True)  # Calls into the C++ engine to ru   \n",
-       "   176                                                                                            \n",
-       "╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\n",
-       "KeyboardInterrupt\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[31m╭─\u001b[0m\u001b[31m──────────────────────────────\u001b[0m\u001b[31m \u001b[0m\u001b[1;31mTraceback \u001b[0m\u001b[1;2;31m(most recent call last)\u001b[0m\u001b[31m \u001b[0m\u001b[31m───────────────────────────────\u001b[0m\u001b[31m─╮\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2;33m/tmp/ipykernel_602221/\u001b[0m\u001b[1;33m3684360613.py\u001b[0m:\u001b[94m90\u001b[0m in \u001b[92m\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[3;31m[Errno 2] No such file or directory: '/tmp/ipykernel_602221/3684360613.py'\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2;33m/tmp/ipykernel_602221/\u001b[0m\u001b[1;33m3684360613.py\u001b[0m:\u001b[94m66\u001b[0m in \u001b[92mdistill\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[3;31m[Errno 2] No such file or directory: '/tmp/ipykernel_602221/3684360613.py'\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2;33m/home/ben/.local/lib/python3.8/site-packages/accelerate/\u001b[0m\u001b[1;33maccelerator.py\u001b[0m:\u001b[94m1005\u001b[0m in \u001b[92mbackward\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m1002 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mif\u001b[0m \u001b[96mself\u001b[0m.distributed_type == DistributedType.DEEPSPEED: \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m1003 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[96mself\u001b[0m.deepspeed_engine_wrapped.backward(loss, **kwargs) \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m1004 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94melif\u001b[0m \u001b[96mself\u001b[0m.scaler \u001b[95mis\u001b[0m \u001b[95mnot\u001b[0m \u001b[94mNone\u001b[0m: \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m1005 \u001b[2m│ │ │ \u001b[0m\u001b[96mself\u001b[0m.scaler.scale(loss).backward(**kwargs) \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m1006 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94melse\u001b[0m: \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m1007 \u001b[0m\u001b[2m│ │ │ \u001b[0mloss.backward(**kwargs) \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m1008 \u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2;33m/home/ben/.local/lib/python3.8/site-packages/torch/\u001b[0m\u001b[1;33m_tensor.py\u001b[0m:\u001b[94m396\u001b[0m in \u001b[92mbackward\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m 393 \u001b[0m\u001b[2m│ │ │ │ \u001b[0mretain_graph=retain_graph, \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m 394 \u001b[0m\u001b[2m│ │ │ │ \u001b[0mcreate_graph=create_graph, \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m 395 \u001b[0m\u001b[2m│ │ │ │ \u001b[0minputs=inputs) \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m 396 \u001b[2m│ │ \u001b[0mtorch.autograd.backward(\u001b[96mself\u001b[0m, gradient, retain_graph, create_graph, inputs=input \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m 397 \u001b[0m\u001b[2m│ \u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m 398 \u001b[0m\u001b[2m│ \u001b[0m\u001b[94mdef\u001b[0m \u001b[92mregister_hook\u001b[0m(\u001b[96mself\u001b[0m, 
hook): \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m 399 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[33mr\u001b[0m\u001b[33m\"\"\"Registers a backward hook.\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2;33m/home/ben/.local/lib/python3.8/site-packages/torch/autograd/\u001b[0m\u001b[1;33m__init__.py\u001b[0m:\u001b[94m173\u001b[0m in \u001b[92mbackward\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m170 \u001b[0m\u001b[2m│ \u001b[0m\u001b[2m# The reason we repeat same the comment below is that\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m171 \u001b[0m\u001b[2m│ \u001b[0m\u001b[2m# some Python versions print out the first line of a multi-line function\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m172 \u001b[0m\u001b[2m│ \u001b[0m\u001b[2m# calls in the traceback and some print out the last line\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m173 \u001b[2m│ \u001b[0mVariable._execution_engine.run_backward( \u001b[2m# Calls into the C++ engine to run the bac\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m174 \u001b[0m\u001b[2m│ │ \u001b[0mtensors, grad_tensors_, retain_graph, create_graph, inputs, \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m175 \u001b[0m\u001b[2m│ │ \u001b[0mallow_unreachable=\u001b[94mTrue\u001b[0m, accumulate_grad=\u001b[94mTrue\u001b[0m) \u001b[2m# Calls into the C++ engine to ru\u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m│\u001b[0m \u001b[2m176 \u001b[0m \u001b[31m│\u001b[0m\n", - "\u001b[31m╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\u001b[0m\n", - "\u001b[1;91mKeyboardInterrupt\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" } ], "source": [ @@ -350,8 +408,8 @@ " if accelerator.is_main_process:\n", " run = \"distill\"\n", " accelerator.init_trackers(run)\n", - " teacher_scheduler = DDPMScheduler(num_train_timesteps=n)\n", - " student_scheduler = DDPMScheduler(num_train_timesteps=n // 2)\n", + " teacher_scheduler = DDIMScheduler(num_train_timesteps=n)\n", + " student_scheduler = DDIMScheduler(num_train_timesteps=n // 2)\n", " student = utils.get_unet(training_config)\n", " student.load_state_dict(teacher.state_dict())\n", " student = accelerator.prepare(student)\n", @@ -385,26 +443,25 @@ " bsz = batch.shape[0]\n", " # Sample a random timestep for each image\n", " timesteps = torch.randint(\n", - " 0, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device\n", + " 2, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device\n", " ).long() * 2\n", " with torch.no_grad():\n", - " alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps + 1, accelerator.device)\n", + " alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps, accelerator.device)\n", " z_t = alpha_t * batch + sigma_t * noise\n", - " alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma(batch, timesteps // 2, accelerator.device)\n", - " alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma(batch, timesteps, accelerator.device)\n", - " v = teacher(z_t.float(), timesteps + 1).sample\n", - " rec_t = (alpha_t * z_t - sigma_t * v).clip(-1, 1)\n", + " alpha_t_prime2, sigma_t_prime2 = teacher_scheduler.get_alpha_sigma(batch, timesteps-2, accelerator.device)\n", + " alpha_t_prime, sigma_t_prime = 
+ " noise_pred_t = teacher(z_t, timesteps).sample\n",
+ " x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1)\n",
+ "\n",
+ " z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * (z_t - alpha_t * x_teacher_z_t)\n",
+ " noise_pred_t_prime = teacher(z_t_prime.float(), timesteps - 1).sample\n",
+ " rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1)\n",
 "\n",
- " z_t_prime = alpha_t_prime * rec_t + (sigma_t_prime / sigma_t) * (z_t - alpha_t * rec_t)\n",
- " v_1 = teacher(z_t_prime.float(), timesteps).sample\n",
- " rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * v_1).clip(-1, 1)\n",
- " z_t_prime_2 = alpha_t_prime2 * rec_t_prime + (sigma_t_prime2 / sigma_t_prime) * (z_t_prime - alpha_t_prime * rec_t_prime)\n",
- " x_hat = z_t_prime_2 - ((sigma_t_prime2 / sigma_t_prime) * z_t) / (alpha_t_prime2 - (sigma_t_prime2 / sigma_t_prime) * alpha_t)\n",
+ " x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2\n",
+ " z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime\n",
 "\n",
 " noise_pred = student(z_t, timesteps).sample\n",
- " student_rec = (alpha_t * z_t - sigma_t * noise_pred).clip(-1, 1)\n",
- " loss = F.mse_loss(student_rec, x_hat.clip(-1, 1))\n",
- " \n",
+ " loss = F.mse_loss(noise_pred, z_t_prime_2)\n",
 " accelerator.backward(loss)\n", "\n", " if accelerator.sync_gradients:\n",
@@ -429,23 +486,23 @@ "\n", " accelerator.wait_for_everyone()\n", " return student, ema_model, accelerator\n",
- "teacher, distilled_ema, distill_accelrator = distill(model, 1000, train_image, epochs=100, batch_size=64)"
+ "teacher, distilled_ema, distill_accelrator = distill(model, 1000, train_image, epochs=300, batch_size=64)"
 ] }, { "cell_type": "code",
- "execution_count": null,
+ "execution_count": 11,
 "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": {
- "model_id": "d8921e5a95394b7e888ba3af9fe4f0b8",
+ "model_id": "a1fd615f0a184cf8811bbe59f728e4c2",
 "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/500 [00:00" ]
diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py
index 58e40b57e923..150d724eb334 100644
--- a/src/diffusers/schedulers/scheduling_ddim.py
+++ b/src/diffusers/schedulers/scheduling_ddim.py
@@ -26,6 +26,12 @@ from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin
+def E_(input, t, shape, device):
+    out = torch.gather(input.to(device), 0, t.to(device))
+    reshape = [shape[0]] + [1] * (len(shape) - 1)
+    out = out.reshape(*reshape)
+    return out
+
 @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
@@ -138,6 +144,9 @@ def __init__( self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
+    self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
+    self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1 - self.alphas_cumprod)
+
 self.one = torch.tensor(1.0) # At every step in ddim, we are looking into the previous alphas_cumprod # For the final step, there is no previous alphas_cumprod because we are already at 0
@@ -308,3 +317,8 @@ def add_noise( def __len__(self): return self.config.num_train_timesteps
+
+    def get_alpha_sigma(self, x, t, device):
+        alpha = E_(self.sqrt_alphas_cumprod, t, x.shape, device)
+        sigma = E_(self.sqrt_one_minus_alphas_cumprod, t, x.shape, device)
+        return alpha, sigma
\ No newline at end of file
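Two pieces of this patch are worth unpacking before the next commit. The scheduler additions are small but load-bearing for the distillation loop: E_ gathers one per-timestep coefficient for each element of a batch and reshapes it so it broadcasts over the image dimensions, and get_alpha_sigma uses it to return sqrt(alphas_cumprod[t]) and sqrt(1 - alphas_cumprod[t]). A minimal standalone sketch of what they compute (the linear beta schedule and the tensor names below are illustrative stand-ins, not the scheduler's configured values):

    import torch

    def E_(input, t, shape, device):
        # Select input[t] for each batch element, then reshape (B,) -> (B, 1, ..., 1)
        # so the result broadcasts against a tensor of shape `shape`.
        out = torch.gather(input.to(device), 0, t.to(device))
        reshape = [shape[0]] + [1] * (len(shape) - 1)
        return out.reshape(*reshape)

    betas = torch.linspace(1e-4, 2e-2, 1000)           # illustrative schedule only
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    sqrt_ac = alphas_cumprod.sqrt()                    # alpha_t per timestep
    sqrt_1m_ac = (1.0 - alphas_cumprod).sqrt()         # sigma_t per timestep

    x = torch.randn(4, 3, 64, 64)                      # a batch of images
    t = torch.randint(0, 1000, (4,))                   # one timestep per image
    alpha = E_(sqrt_ac, t, x.shape, "cpu")             # shape (4, 1, 1, 1)
    sigma = E_(sqrt_1m_ac, t, x.shape, "cpu")
    z_t = alpha * x + sigma * torch.randn_like(x)      # noised batch, as in distill

With those helpers in place, the reworked inner loop of distill reads more easily outside the notebook JSON. The sketch below is a restatement of the patch's own computation for readability, not a reference implementation (the next commit message still reports quality loss, and the helper name is hypothetical): timesteps are sampled even, the teacher takes two consecutive steps t -> t-1 -> t-2, and the student's single prediction at t is regressed onto the two-step result.

    def teacher_two_step_target(teacher, scheduler, batch, noise, timesteps, device):
        # Mirrors the torch.no_grad() block of the notebook's distill function.
        alpha_t, sigma_t = scheduler.get_alpha_sigma(batch, timesteps, device)
        z_t = alpha_t * batch + sigma_t * noise
        alpha_t_prime, sigma_t_prime = scheduler.get_alpha_sigma(batch, timesteps - 1, device)
        alpha_t_prime2, sigma_t_prime2 = scheduler.get_alpha_sigma(batch, timesteps - 2, device)

        # Teacher step 1: reconstruct the clean image from z_t, then move to t-1.
        noise_pred_t = teacher(z_t, timesteps).sample
        x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1)
        z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * (z_t - alpha_t * x_teacher_z_t)

        # Teacher step 2: reconstruct at t-1 and combine into the t-2 target.
        noise_pred_t_prime = teacher(z_t_prime.float(), timesteps - 1).sample
        rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1)
        x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2
        z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime

        # The student is then trained with F.mse_loss(student(z_t, timesteps).sample, z_t_prime_2).
        return z_t, z_t_prime_2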
From 826b459fdb5b8f4412696c92afadbff4a00e9a1f Mon Sep 17 00:00:00 2001
From: Ben Glickenhaus
Date: Thu, 27 Oct 2022 14:41:07 -0400
Subject: [PATCH 099/133] close on multi step, but still some quality loss
---
 .../image_diffusion.ipynb | 5723 +++++++++++++++--
 examples/progressive_distillation/utils.py | 105 +-
 2 files changed, 5403 insertions(+), 425 deletions(-)

diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb
index f01e393097a5..acd3369df528 100644
--- a/examples/progressive_distillation/image_diffusion.ipynb
+++ b/examples/progressive_distillation/image_diffusion.ipynb
@@ -2,19 +2,9 @@ "cells": [ { "cell_type": "code",
- "execution_count": 1,
+ "execution_count": 13,
 "metadata": {},
- "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/usr/lib/python3/dist-packages/requests/__init__.py:89: RequestsDependencyWarning: urllib3 (1.26.12) or chardet (3.0.4) doesn't match a supported version!\n", - " warnings.warn(\"urllib3 ({}) or chardet ({}) doesn't match a supported \"\n", - "WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n" - ] - } - ],
+ "outputs": [],
 "source": [ "import torch\n", "from PIL import Image\n",
@@ -31,6 +21,7 @@ " RandomHorizontalFlip,\n", " Resize,\n", " ToTensor,\n",
+ " ToPILImage\n",
 ")\n", "from accelerate import Accelerator\n", "import utils\n",
@@ -40,7 +31,27 @@ }, { "cell_type": "code",
- "execution_count": 2,
+ "execution_count": 3,
 "metadata": {}, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "torch.manual_seed(0)" ] }, { "cell_type": "code",
- "execution_count": 2,
+ "execution_count": 4,
 "metadata": {}, "outputs": [], "source": [ "config = utils.training_config\n" ] }, { "cell_type": "code",
- "execution_count": 3,
+ "execution_count": 5,
 "metadata": {}, "outputs": [], "source": [ }, { "cell_type": "code",
- "execution_count": 4,
+ "execution_count": 6,
 "metadata": {}, "outputs": [], "source": [ }, { "cell_type": "code",
- "execution_count": 5,
+ "execution_count": 7,
 "metadata": {}, "outputs": [], "source": [ }, { "cell_type": "code",
- "execution_count": 6,
+ "execution_count": 8,
 "metadata": {}, "outputs": [], "source": [
- "model = UNet2DModel.from_pretrained(\"bglick13/minnie-diffusion\")\n"
+ "teacher = UNet2DModel.from_pretrained(\"bglick13/minnie-diffusion\")\n"
 ] }, { "cell_type": "code",
- "execution_count": 10,
+ "execution_count": 17,
 "metadata": {}, "outputs": [], "source": [ "N = 1000\n", "generator = torch.manual_seed(0)\n" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Distill step 0 from 1000 -> 500\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.36it/s, ema_decay=0, loss=0.000283, lr=0.0003, step=1]\n", "Epoch 1: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0, loss=0.0966, lr=0.000299, step=2]\n", "Epoch 2: 100%|██████████| 1/1 [00:00<00:00, 1.52it/s, ema_decay=0.405, loss=0.0704, lr=0.000299, step=3]\n", "Epoch 10: 0%| | 0/1 [03:45 250\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0, loss=0.000798, lr=0.0003, 
step=1]\n", + "Epoch 1: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0, loss=0.115, lr=0.000299, step=2]\n", + "Epoch 2: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.405, loss=0.033, lr=0.000299, step=3]\n", + "Epoch 3: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.561, loss=0.0429, lr=0.000299, step=4]\n", + "Epoch 4: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.646, loss=0.0262, lr=0.000298, step=5]\n", + "Epoch 5: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.701, loss=0.0268, lr=0.000298, step=6]\n", + "Epoch 6: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.739, loss=0.0195, lr=0.000298, step=7]\n", + "Epoch 7: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.768, loss=0.0336, lr=0.000298, step=8]\n", + "Epoch 8: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.79, loss=0.0425, lr=0.000297, step=9]\n", + "Epoch 9: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.808, loss=0.0295, lr=0.000297, step=10]\n", + "Epoch 10: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.822, loss=0.0134, lr=0.000297, step=11]\n", + "Epoch 11: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.834, loss=0.0291, lr=0.000296, step=12]\n", + "Epoch 12: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.845, loss=0.0232, lr=0.000296, step=13]\n", + "Epoch 13: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.854, loss=0.0216, lr=0.000296, step=14]\n", + "Epoch 14: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.862, loss=0.0248, lr=0.000295, step=15]\n", + "Epoch 15: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.869, loss=0.0117, lr=0.000295, step=16]\n", + "Epoch 16: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.875, loss=0.0209, lr=0.000295, step=17]\n", + "Epoch 17: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.881, loss=0.00961, lr=0.000295, step=18]\n", + "Epoch 18: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.886, loss=0.011, lr=0.000294, step=19]\n", + "Epoch 19: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.89, loss=0.0124, lr=0.000294, step=20]\n", + "Epoch 20: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.894, loss=0.00877, lr=0.000294, step=21]\n", + "Epoch 21: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.898, loss=0.00901, lr=0.000293, step=22]\n", + "Epoch 22: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.902, loss=0.00904, lr=0.000293, step=23]\n", + "Epoch 23: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.905, loss=0.0174, lr=0.000293, step=24]\n", + "Epoch 24: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.908, loss=0.00802, lr=0.000292, step=25]\n", + "Epoch 25: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.911, loss=0.0164, lr=0.000292, step=26]\n", + "Epoch 26: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.913, loss=0.0143, lr=0.000292, step=27]\n", + "Epoch 27: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.916, loss=0.0061, lr=0.000292, step=28]\n", + "Epoch 28: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.918, loss=0.00538, lr=0.000291, step=29]\n", + "Epoch 29: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.92, loss=0.00737, lr=0.000291, step=30]\n", + "Epoch 30: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.922, loss=0.0143, lr=0.000291, step=31]\n", + "Epoch 31: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.924, loss=0.00581, lr=0.00029, step=32]\n", + "Epoch 32: 
100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.926, loss=0.0125, lr=0.00029, step=33]\n", + "Epoch 33: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.927, loss=0.00567, lr=0.00029, step=34]\n", + "Epoch 34: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.929, loss=0.00576, lr=0.000289, step=35]\n", + "Epoch 35: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.931, loss=0.00681, lr=0.000289, step=36]\n", + "Epoch 36: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.932, loss=0.0113, lr=0.000289, step=37]\n", + "Epoch 37: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.933, loss=0.00709, lr=0.000289, step=38]\n", + "Epoch 38: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.935, loss=0.00823, lr=0.000288, step=39]\n", + "Epoch 39: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.936, loss=0.00722, lr=0.000288, step=40]\n", + "Epoch 40: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.937, loss=0.00954, lr=0.000288, step=41]\n", + "Epoch 41: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.938, loss=0.00462, lr=0.000287, step=42]\n", + "Epoch 42: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.939, loss=0.00425, lr=0.000287, step=43]\n", + "Epoch 43: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.94, loss=0.00753, lr=0.000287, step=44]\n", + "Epoch 44: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.941, loss=0.00645, lr=0.000286, step=45]\n", + "Epoch 45: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.942, loss=0.00323, lr=0.000286, step=46]\n", + "Epoch 46: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.943, loss=0.00659, lr=0.000286, step=47]\n", + "Epoch 47: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.944, loss=0.00712, lr=0.000286, step=48]\n", + "Epoch 48: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.945, loss=0.00283, lr=0.000285, step=49]\n", + "Epoch 49: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.946, loss=0.00361, lr=0.000285, step=50]\n", + "Epoch 50: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.947, loss=0.00534, lr=0.000285, step=51]\n", + "Epoch 51: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.948, loss=0.00697, lr=0.000284, step=52]\n", + "Epoch 52: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.948, loss=0.0117, lr=0.000284, step=53]\n", + "Epoch 53: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.949, loss=0.00533, lr=0.000284, step=54]\n", + "Epoch 54: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.95, loss=0.00369, lr=0.000283, step=55]\n", + "Epoch 55: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.95, loss=0.00604, lr=0.000283, step=56]\n", + "Epoch 56: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.951, loss=0.00237, lr=0.000283, step=57]\n", + "Epoch 57: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.952, loss=0.00308, lr=0.000283, step=58]\n", + "Epoch 58: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.952, loss=0.00518, lr=0.000282, step=59]\n", + "Epoch 59: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.953, loss=0.00296, lr=0.000282, step=60]\n", + "Epoch 60: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.954, loss=0.0031, lr=0.000282, step=61]\n", + "Epoch 61: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.954, loss=0.00459, lr=0.000281, step=62]\n", + "Epoch 62: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.955, loss=0.00542, lr=0.000281, step=63]\n", + "Epoch 
63: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.955, loss=0.00202, lr=0.000281, step=64]\n", + "Epoch 64: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.956, loss=0.00534, lr=0.00028, step=65]\n", + "Epoch 65: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.956, loss=0.00719, lr=0.00028, step=66]\n", + "Epoch 66: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.957, loss=0.00342, lr=0.00028, step=67]\n", + "Epoch 67: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.957, loss=0.00165, lr=0.00028, step=68]\n", + "Epoch 68: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.958, loss=0.00338, lr=0.000279, step=69]\n", + "Epoch 69: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.958, loss=0.00195, lr=0.000279, step=70]\n", + "Epoch 70: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.959, loss=0.00168, lr=0.000279, step=71]\n", + "Epoch 71: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.959, loss=0.00254, lr=0.000278, step=72]\n", + "Epoch 72: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.96, loss=0.00225, lr=0.000278, step=73]\n", + "Epoch 73: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.96, loss=0.00185, lr=0.000278, step=74]\n", + "Epoch 74: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.96, loss=0.00489, lr=0.000277, step=75]\n", + "Epoch 75: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.961, loss=0.00144, lr=0.000277, step=76]\n", + "Epoch 76: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.961, loss=0.00135, lr=0.000277, step=77]\n", + "Epoch 77: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.962, loss=0.0043, lr=0.000277, step=78]\n", + "Epoch 78: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.962, loss=0.0046, lr=0.000276, step=79]\n", + "Epoch 79: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.962, loss=0.00146, lr=0.000276, step=80]\n", + "Epoch 80: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.963, loss=0.00744, lr=0.000276, step=81]\n", + "Epoch 81: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.963, loss=0.00421, lr=0.000275, step=82]\n", + "Epoch 82: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.963, loss=0.00417, lr=0.000275, step=83]\n", + "Epoch 83: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.964, loss=0.00163, lr=0.000275, step=84]\n", + "Epoch 84: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.964, loss=0.00483, lr=0.000275, step=85]\n", + "Epoch 85: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.964, loss=0.00251, lr=0.000274, step=86]\n", + "Epoch 86: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.965, loss=0.00357, lr=0.000274, step=87]\n", + "Epoch 87: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.965, loss=0.00462, lr=0.000274, step=88]\n", + "Epoch 88: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.965, loss=0.00133, lr=0.000273, step=89]\n", + "Epoch 89: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.965, loss=0.00185, lr=0.000273, step=90]\n", + "Epoch 90: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.966, loss=0.0024, lr=0.000273, step=91]\n", + "Epoch 91: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.966, loss=0.00139, lr=0.000272, step=92]\n", + "Epoch 92: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.966, loss=0.00124, lr=0.000272, step=93]\n", + "Epoch 93: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.967, loss=0.00153, lr=0.000272, step=94]\n", + "Epoch 
94: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.967, loss=0.0035, lr=0.000271, step=95]\n", + "Epoch 95: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.967, loss=0.00244, lr=0.000271, step=96]\n", + "Epoch 96: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.967, loss=0.00405, lr=0.000271, step=97]\n", + "Epoch 97: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.968, loss=0.00134, lr=0.000271, step=98]\n", + "Epoch 98: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.968, loss=0.002, lr=0.00027, step=99]\n", + "Epoch 99: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.968, loss=0.00211, lr=0.00027, step=100]\n", + "Epoch 100: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.968, loss=0.0014, lr=0.00027, step=101]\n", + "Epoch 101: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.969, loss=0.00131, lr=0.000269, step=102]\n", + "Epoch 102: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.969, loss=0.0031, lr=0.000269, step=103]\n", + "Epoch 103: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.969, loss=0.00189, lr=0.000269, step=104]\n", + "Epoch 104: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.969, loss=0.00433, lr=0.000268, step=105]\n", + "Epoch 105: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.97, loss=0.000932, lr=0.000268, step=106]\n", + "Epoch 106: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.97, loss=0.00149, lr=0.000268, step=107]\n", + "Epoch 107: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.97, loss=0.00336, lr=0.000268, step=108]\n", + "Epoch 108: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.97, loss=0.00123, lr=0.000267, step=109]\n", + "Epoch 109: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.97, loss=0.00302, lr=0.000267, step=110]\n", + "Epoch 110: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.971, loss=0.00194, lr=0.000267, step=111]\n", + "Epoch 111: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.971, loss=0.0014, lr=0.000266, step=112]\n", + "Epoch 112: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.971, loss=0.00121, lr=0.000266, step=113]\n", + "Epoch 113: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.971, loss=0.00172, lr=0.000266, step=114]\n", + "Epoch 114: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.971, loss=0.00105, lr=0.000266, step=115]\n", + "Epoch 115: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.972, loss=0.000979, lr=0.000265, step=116]\n", + "Epoch 116: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.972, loss=0.0014, lr=0.000265, step=117]\n", + "Epoch 117: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.972, loss=0.00157, lr=0.000265, step=118]\n", + "Epoch 118: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.972, loss=0.00141, lr=0.000264, step=119]\n", + "Epoch 119: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.972, loss=0.00108, lr=0.000264, step=120]\n", + "Epoch 120: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.972, loss=0.00319, lr=0.000264, step=121]\n", + "Epoch 121: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.973, loss=0.00106, lr=0.000263, step=122]\n", + "Epoch 122: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.973, loss=0.00149, lr=0.000263, step=123]\n", + "Epoch 123: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.973, loss=0.00155, lr=0.000263, step=124]\n", + "Epoch 124: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.973, 
loss=0.00218, lr=0.000262, step=125]\n", + "Epoch 125: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.973, loss=0.00092, lr=0.000262, step=126]\n", + "Epoch 126: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.973, loss=0.000977, lr=0.000262, step=127]\n", + "Epoch 127: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.974, loss=0.000972, lr=0.000262, step=128]\n", + "Epoch 128: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.974, loss=0.000963, lr=0.000261, step=129]\n", + "Epoch 129: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.974, loss=0.00194, lr=0.000261, step=130]\n", + "Epoch 130: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.974, loss=0.00311, lr=0.000261, step=131]\n", + "Epoch 131: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.974, loss=0.000992, lr=0.00026, step=132]\n", + "Epoch 132: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.974, loss=0.00109, lr=0.00026, step=133]\n", + "Epoch 133: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.974, loss=0.00108, lr=0.00026, step=134]\n", + "Epoch 134: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.975, loss=0.00158, lr=0.000259, step=135]\n", + "Epoch 135: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.975, loss=0.00141, lr=0.000259, step=136]\n", + "Epoch 136: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.975, loss=0.00286, lr=0.000259, step=137]\n", + "Epoch 137: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.975, loss=0.00262, lr=0.000259, step=138]\n", + "Epoch 138: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.975, loss=0.0014, lr=0.000258, step=139]\n", + "Epoch 139: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.975, loss=0.00288, lr=0.000258, step=140]\n", + "Epoch 140: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.975, loss=0.00258, lr=0.000258, step=141]\n", + "Epoch 141: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.976, loss=0.00273, lr=0.000257, step=142]\n", + "Epoch 142: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.976, loss=0.00238, lr=0.000257, step=143]\n", + "Epoch 143: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.976, loss=0.00241, lr=0.000257, step=144]\n", + "Epoch 144: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.976, loss=0.0029, lr=0.000256, step=145]\n", + "Epoch 145: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.976, loss=0.00138, lr=0.000256, step=146]\n", + "Epoch 146: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.976, loss=0.00171, lr=0.000256, step=147]\n", + "Epoch 147: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.976, loss=0.00112, lr=0.000256, step=148]\n", + "Epoch 148: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.976, loss=0.00111, lr=0.000255, step=149]\n", + "Epoch 149: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.977, loss=0.0013, lr=0.000255, step=150]\n", + "Epoch 150: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.977, loss=0.00229, lr=0.000255, step=151]\n", + "Epoch 151: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.977, loss=0.00112, lr=0.000254, step=152]\n", + "Epoch 152: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.977, loss=0.000816, lr=0.000254, step=153]\n", + "Epoch 153: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.977, loss=0.00091, lr=0.000254, step=154]\n", + "Epoch 154: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.977, loss=0.00116, lr=0.000253, step=155]\n", + 
"Epoch 155: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.977, loss=0.00185, lr=0.000253, step=156]\n", + "Epoch 156: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.977, loss=0.00142, lr=0.000253, step=157]\n", + "Epoch 157: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.977, loss=0.00096, lr=0.000253, step=158]\n", + "Epoch 158: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.978, loss=0.00191, lr=0.000252, step=159]\n", + "Epoch 159: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.978, loss=0.00258, lr=0.000252, step=160]\n", + "Epoch 160: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.978, loss=0.00106, lr=0.000252, step=161]\n", + "Epoch 161: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.978, loss=0.000761, lr=0.000251, step=162]\n", + "Epoch 162: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.978, loss=0.000854, lr=0.000251, step=163]\n", + "Epoch 163: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.978, loss=0.00172, lr=0.000251, step=164]\n", + "Epoch 164: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.978, loss=0.00223, lr=0.00025, step=165]\n", + "Epoch 165: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.978, loss=0.00096, lr=0.00025, step=166]\n", + "Epoch 166: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.978, loss=0.00137, lr=0.00025, step=167]\n", + "Epoch 167: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.978, loss=0.00232, lr=0.00025, step=168]\n", + "Epoch 168: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.979, loss=0.00153, lr=0.000249, step=169]\n", + "Epoch 169: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.979, loss=0.0022, lr=0.000249, step=170]\n", + "Epoch 170: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.979, loss=0.000861, lr=0.000249, step=171]\n", + "Epoch 171: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.979, loss=0.00117, lr=0.000248, step=172]\n", + "Epoch 172: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.979, loss=0.00232, lr=0.000248, step=173]\n", + "Epoch 173: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.979, loss=0.00104, lr=0.000248, step=174]\n", + "Epoch 174: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.979, loss=0.00162, lr=0.000247, step=175]\n", + "Epoch 175: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.979, loss=0.000934, lr=0.000247, step=176]\n", + "Epoch 176: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.979, loss=0.00127, lr=0.000247, step=177]\n", + "Epoch 177: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.979, loss=0.00263, lr=0.000247, step=178]\n", + "Epoch 178: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.979, loss=0.00195, lr=0.000246, step=179]\n", + "Epoch 179: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.00128, lr=0.000246, step=180]\n", + "Epoch 180: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.98, loss=0.00118, lr=0.000246, step=181]\n", + "Epoch 181: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.98, loss=0.00118, lr=0.000245, step=182]\n", + "Epoch 182: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.98, loss=0.00171, lr=0.000245, step=183]\n", + "Epoch 183: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.00109, lr=0.000245, step=184]\n", + "Epoch 184: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.00126, lr=0.000244, step=185]\n", + "Epoch 185: 100%|██████████| 1/1 [00:00<00:00, 
1.42it/s, ema_decay=0.98, loss=0.00118, lr=0.000244, step=186]\n", + "Epoch 186: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.98, loss=0.00122, lr=0.000244, step=187]\n", + "Epoch 187: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.98, loss=0.00104, lr=0.000244, step=188]\n", + "Epoch 188: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.98, loss=0.00118, lr=0.000243, step=189]\n", + "Epoch 189: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.98, loss=0.0011, lr=0.000243, step=190]\n", + "Epoch 190: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.00237, lr=0.000243, step=191]\n", + "Epoch 191: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.981, loss=0.0018, lr=0.000242, step=192]\n", + "Epoch 192: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.981, loss=0.00202, lr=0.000242, step=193]\n", + "Epoch 193: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.981, loss=0.000783, lr=0.000242, step=194]\n", + "Epoch 194: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.981, loss=0.00261, lr=0.000241, step=195]\n", + "Epoch 195: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00125, lr=0.000241, step=196]\n", + "Epoch 196: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00144, lr=0.000241, step=197]\n", + "Epoch 197: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00127, lr=0.000241, step=198]\n", + "Epoch 198: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00177, lr=0.00024, step=199]\n", + "Epoch 199: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00119, lr=0.00024, step=200]\n", + "Epoch 200: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00147, lr=0.00024, step=201]\n", + "Epoch 201: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.981, loss=0.000906, lr=0.000239, step=202]\n", + "Epoch 202: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00148, lr=0.000239, step=203]\n", + "Epoch 203: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00088, lr=0.000239, step=204]\n", + "Epoch 204: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.000957, lr=0.000238, step=205]\n", + "Epoch 205: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.00164, lr=0.000238, step=206]\n", + "Epoch 206: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.000754, lr=0.000238, step=207]\n", + "Epoch 207: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00195, lr=0.000238, step=208]\n", + "Epoch 208: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00155, lr=0.000237, step=209]\n", + "Epoch 209: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00174, lr=0.000237, step=210]\n", + "Epoch 210: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00101, lr=0.000237, step=211]\n", + "Epoch 211: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.00089, lr=0.000236, step=212]\n", + "Epoch 212: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.000685, lr=0.000236, step=213]\n", + "Epoch 213: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.000836, lr=0.000236, step=214]\n", + "Epoch 214: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.00201, lr=0.000235, step=215]\n", + "Epoch 215: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.000965, 
lr=0.000235, step=216]\n", + "Epoch 216: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.00114, lr=0.000235, step=217]\n", + "Epoch 217: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.00076, lr=0.000235, step=218]\n", + "Epoch 218: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.000663, lr=0.000234, step=219]\n", + "Epoch 219: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.000809, lr=0.000234, step=220]\n", + "Epoch 220: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.000923, lr=0.000234, step=221]\n", + "Epoch 221: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.983, loss=0.00154, lr=0.000233, step=222]\n", + "Epoch 222: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00226, lr=0.000233, step=223]\n", + "Epoch 223: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.983, loss=0.00191, lr=0.000233, step=224]\n", + "Epoch 224: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.000719, lr=0.000232, step=225]\n", + "Epoch 225: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.00113, lr=0.000232, step=226]\n", + "Epoch 226: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00143, lr=0.000232, step=227]\n", + "Epoch 227: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.000821, lr=0.000232, step=228]\n", + "Epoch 228: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00158, lr=0.000231, step=229]\n", + "Epoch 229: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.000645, lr=0.000231, step=230]\n", + "Epoch 230: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.00222, lr=0.000231, step=231]\n", + "Epoch 231: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.983, loss=0.000644, lr=0.00023, step=232]\n", + "Epoch 232: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00153, lr=0.00023, step=233]\n", + "Epoch 233: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.983, loss=0.000882, lr=0.00023, step=234]\n", + "Epoch 234: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00102, lr=0.000229, step=235]\n", + "Epoch 235: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.00112, lr=0.000229, step=236]\n", + "Epoch 236: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.983, loss=0.00142, lr=0.000229, step=237]\n", + "Epoch 237: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.983, loss=0.00205, lr=0.000229, step=238]\n", + "Epoch 238: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.983, loss=0.000611, lr=0.000228, step=239]\n", + "Epoch 239: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.000678, lr=0.000228, step=240]\n", + "Epoch 240: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.984, loss=0.00244, lr=0.000228, step=241]\n", + "Epoch 241: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.000845, lr=0.000227, step=242]\n", + "Epoch 242: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.000861, lr=0.000227, step=243]\n", + "Epoch 243: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00157, lr=0.000227, step=244]\n", + "Epoch 244: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.000702, lr=0.000226, step=245]\n", + "Epoch 245: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.984, loss=0.000952, lr=0.000226, step=246]\n", + 
"Epoch 246: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00129, lr=0.000226, step=247]\n", + "Epoch 247: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.000802, lr=0.000226, step=248]\n", + "Epoch 248: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.984, loss=0.000768, lr=0.000225, step=249]\n", + "Epoch 249: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.000761, lr=0.000225, step=250]\n", + "Epoch 250: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00182, lr=0.000225, step=251]\n", + "Epoch 251: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.984, loss=0.00136, lr=0.000224, step=252]\n", + "Epoch 252: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.000723, lr=0.000224, step=253]\n", + "Epoch 253: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00181, lr=0.000224, step=254]\n", + "Epoch 254: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.984, loss=0.000783, lr=0.000223, step=255]\n", + "Epoch 255: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00115, lr=0.000223, step=256]\n", + "Epoch 256: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.984, loss=0.000885, lr=0.000223, step=257]\n", + "Epoch 257: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.984, loss=0.0015, lr=0.000223, step=258]\n", + "Epoch 258: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00146, lr=0.000222, step=259]\n", + "Epoch 259: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00137, lr=0.000222, step=260]\n", + "Epoch 260: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.000762, lr=0.000222, step=261]\n", + "Epoch 261: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.000698, lr=0.000221, step=262]\n", + "Epoch 262: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.000899, lr=0.000221, step=263]\n", + "Epoch 263: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.985, loss=0.0014, lr=0.000221, step=264]\n", + "Epoch 264: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.000786, lr=0.00022, step=265]\n", + "Epoch 265: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.000722, lr=0.00022, step=266]\n", + "Epoch 266: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00072, lr=0.00022, step=267]\n", + "Epoch 267: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00096, lr=0.00022, step=268]\n", + "Epoch 268: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.985, loss=0.00149, lr=0.000219, step=269]\n", + "Epoch 269: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.000771, lr=0.000219, step=270]\n", + "Epoch 270: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.000783, lr=0.000219, step=271]\n", + "Epoch 271: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.000699, lr=0.000218, step=272]\n", + "Epoch 272: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00135, lr=0.000218, step=273]\n", + "Epoch 273: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00133, lr=0.000218, step=274]\n", + "Epoch 274: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.000808, lr=0.000217, step=275]\n", + "Epoch 275: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00139, lr=0.000217, step=276]\n", + "Epoch 276: 100%|██████████| 1/1 
[00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.000641, lr=0.000217, step=277]\n", + "Epoch 277: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.000911, lr=0.000217, step=278]\n", + "Epoch 278: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.985, loss=0.000942, lr=0.000216, step=279]\n", + "Epoch 279: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00072, lr=0.000216, step=280]\n", + "Epoch 280: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.000859, lr=0.000216, step=281]\n", + "Epoch 281: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.000665, lr=0.000215, step=282]\n", + "Epoch 282: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00268, lr=0.000215, step=283]\n", + "Epoch 283: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.000933, lr=0.000215, step=284]\n", + "Epoch 284: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.000801, lr=0.000214, step=285]\n", + "Epoch 285: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0049, lr=0.000214, step=286]\n", + "Epoch 286: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00116, lr=0.000214, step=287]\n", + "Epoch 287: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00466, lr=0.000214, step=288]\n", + "Epoch 288: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00146, lr=0.000213, step=289]\n", + "Epoch 289: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00128, lr=0.000213, step=290]\n", + "Epoch 290: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00228, lr=0.000213, step=291]\n", + "Epoch 291: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0019, lr=0.000212, step=292]\n", + "Epoch 292: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00123, lr=0.000212, step=293]\n", + "Epoch 293: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00334, lr=0.000212, step=294]\n", + "Epoch 294: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00179, lr=0.000211, step=295]\n", + "Epoch 295: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00208, lr=0.000211, step=296]\n", + "Epoch 296: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00257, lr=0.000211, step=297]\n", + "Epoch 297: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00161, lr=0.000211, step=298]\n", + "Epoch 298: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00131, lr=0.00021, step=299]\n", + "Epoch 299: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0018, lr=0.00021, step=300]\n", + "Epoch 300: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00145, lr=0.00021, step=301]\n", + "Epoch 301: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00166, lr=0.000209, step=302]\n", + "Epoch 302: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00147, lr=0.000209, step=303]\n", + "Epoch 303: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00108, lr=0.000209, step=304]\n", + "Epoch 304: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00228, lr=0.000208, step=305]\n", + "Epoch 305: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00126, lr=0.000208, step=306]\n", + "Epoch 306: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, 
loss=0.00176, lr=0.000208, step=307]\n", + "Epoch 307: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00152, lr=0.000208, step=308]\n", + "Epoch 308: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00213, lr=0.000207, step=309]\n", + "Epoch 309: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00104, lr=0.000207, step=310]\n", + "Epoch 310: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.000749, lr=0.000207, step=311]\n", + "Epoch 311: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0013, lr=0.000206, step=312]\n", + "Epoch 312: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.987, loss=0.00128, lr=0.000206, step=313]\n", + "Epoch 313: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.987, loss=0.00211, lr=0.000206, step=314]\n", + "Epoch 314: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.987, loss=0.000704, lr=0.000206, step=315]\n", + "Epoch 315: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00142, lr=0.000205, step=316]\n", + "Epoch 316: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.987, loss=0.00147, lr=0.000205, step=317]\n", + "Epoch 317: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.000768, lr=0.000205, step=318]\n", + "Epoch 318: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.000668, lr=0.000204, step=319]\n", + "Epoch 319: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.987, loss=0.000741, lr=0.000204, step=320]\n", + "Epoch 320: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.987, loss=0.00078, lr=0.000204, step=321]\n", + "Epoch 321: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00113, lr=0.000203, step=322]\n", + "Epoch 322: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00102, lr=0.000203, step=323]\n", + "Epoch 323: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00142, lr=0.000203, step=324]\n", + "Epoch 324: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.00117, lr=0.000202, step=325]\n", + "Epoch 325: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.000915, lr=0.000202, step=326]\n", + "Epoch 326: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.000716, lr=0.000202, step=327]\n", + "Epoch 327: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.000807, lr=0.000202, step=328]\n", + "Epoch 328: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.000862, lr=0.000201, step=329]\n", + "Epoch 329: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00181, lr=0.000201, step=330]\n", + "Epoch 330: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.000856, lr=0.000201, step=331]\n", + "Epoch 331: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00085, lr=0.0002, step=332]\n", + "Epoch 332: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00247, lr=0.0002, step=333]\n", + "Epoch 333: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00105, lr=0.0002, step=334]\n", + "Epoch 334: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00112, lr=0.000199, step=335]\n", + "Epoch 335: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.000745, lr=0.000199, step=336]\n", + "Epoch 336: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00176, lr=0.000199, step=337]\n", 
+ "Epoch 337: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.000712, lr=0.000199, step=338]\n", + "Epoch 338: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.000769, lr=0.000198, step=339]\n", + "Epoch 339: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.000619, lr=0.000198, step=340]\n", + "Epoch 340: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.987, loss=0.000977, lr=0.000198, step=341]\n", + "Epoch 341: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00143, lr=0.000197, step=342]\n", + "Epoch 342: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.000543, lr=0.000197, step=343]\n", + "Epoch 343: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.000907, lr=0.000197, step=344]\n", + "Epoch 344: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.000893, lr=0.000196, step=345]\n", + "Epoch 345: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000586, lr=0.000196, step=346]\n", + "Epoch 346: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00102, lr=0.000196, step=347]\n", + "Epoch 347: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.988, loss=0.000565, lr=0.000196, step=348]\n", + "Epoch 348: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000792, lr=0.000195, step=349]\n", + "Epoch 349: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000902, lr=0.000195, step=350]\n", + "Epoch 350: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000489, lr=0.000195, step=351]\n", + "Epoch 351: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.000812, lr=0.000194, step=352]\n", + "Epoch 352: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000592, lr=0.000194, step=353]\n", + "Epoch 353: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.988, loss=0.000823, lr=0.000194, step=354]\n", + "Epoch 354: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000871, lr=0.000193, step=355]\n", + "Epoch 355: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000972, lr=0.000193, step=356]\n", + "Epoch 356: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000972, lr=0.000193, step=357]\n", + "Epoch 357: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.00066, lr=0.000193, step=358]\n", + "Epoch 358: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000663, lr=0.000192, step=359]\n", + "Epoch 359: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000581, lr=0.000192, step=360]\n", + "Epoch 360: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.000877, lr=0.000192, step=361]\n", + "Epoch 361: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000524, lr=0.000191, step=362]\n", + "Epoch 362: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.000475, lr=0.000191, step=363]\n", + "Epoch 363: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00126, lr=0.000191, step=364]\n", + "Epoch 364: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000532, lr=0.00019, step=365]\n", + "Epoch 365: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.000736, lr=0.00019, step=366]\n", + "Epoch 366: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.000498, lr=0.00019, step=367]\n", + "Epoch 367: 
100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00073, lr=0.00019, step=368]\n", + "Epoch 368: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.988, loss=0.000545, lr=0.000189, step=369]\n", + "Epoch 369: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00127, lr=0.000189, step=370]\n", + "Epoch 370: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.000657, lr=0.000189, step=371]\n", + "Epoch 371: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000807, lr=0.000188, step=372]\n", + "Epoch 372: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000704, lr=0.000188, step=373]\n", + "Epoch 373: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000694, lr=0.000188, step=374]\n", + "Epoch 374: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000467, lr=0.000187, step=375]\n", + "Epoch 375: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000625, lr=0.000187, step=376]\n", + "Epoch 376: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000503, lr=0.000187, step=377]\n", + "Epoch 377: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000541, lr=0.000187, step=378]\n", + "Epoch 378: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000737, lr=0.000186, step=379]\n", + "Epoch 379: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000508, lr=0.000186, step=380]\n", + "Epoch 380: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.000425, lr=0.000186, step=381]\n", + "Epoch 381: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.000554, lr=0.000185, step=382]\n", + "Epoch 382: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000656, lr=0.000185, step=383]\n", + "Epoch 383: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.000614, lr=0.000185, step=384]\n", + "Epoch 384: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.000579, lr=0.000184, step=385]\n", + "Epoch 385: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000514, lr=0.000184, step=386]\n", + "Epoch 386: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000582, lr=0.000184, step=387]\n", + "Epoch 387: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.989, loss=0.000577, lr=0.000184, step=388]\n", + "Epoch 388: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000682, lr=0.000183, step=389]\n", + "Epoch 389: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.989, loss=0.000392, lr=0.000183, step=390]\n", + "Epoch 390: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000442, lr=0.000183, step=391]\n", + "Epoch 391: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.989, loss=0.000452, lr=0.000182, step=392]\n", + "Epoch 392: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000932, lr=0.000182, step=393]\n", + "Epoch 393: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000396, lr=0.000182, step=394]\n", + "Epoch 394: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000389, lr=0.000181, step=395]\n", + "Epoch 395: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.989, loss=0.000639, lr=0.000181, step=396]\n", + "Epoch 396: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.989, loss=0.00117, lr=0.000181, step=397]\n", + "Epoch 397: 100%|██████████| 
1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000477, lr=0.000181, step=398]\n", + "Epoch 398: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000846, lr=0.00018, step=399]\n", + "Epoch 399: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000534, lr=0.00018, step=400]\n", + "Epoch 400: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000476, lr=0.00018, step=401]\n", + "Epoch 401: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000486, lr=0.000179, step=402]\n", + "Epoch 402: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000451, lr=0.000179, step=403]\n", + "Epoch 403: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000491, lr=0.000179, step=404]\n", + "Epoch 404: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000517, lr=0.000178, step=405]\n", + "Epoch 405: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000468, lr=0.000178, step=406]\n", + "Epoch 406: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000399, lr=0.000178, step=407]\n", + "Epoch 407: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000483, lr=0.000178, step=408]\n", + "Epoch 408: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000528, lr=0.000177, step=409]\n", + "Epoch 409: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00044, lr=0.000177, step=410]\n", + "Epoch 410: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000381, lr=0.000177, step=411]\n", + "Epoch 411: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000482, lr=0.000176, step=412]\n", + "Epoch 412: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.989, loss=0.000398, lr=0.000176, step=413]\n", + "Epoch 413: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000394, lr=0.000176, step=414]\n", + "Epoch 414: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.0005, lr=0.000175, step=415]\n", + "Epoch 415: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.989, loss=0.0008, lr=0.000175, step=416]\n", + "Epoch 416: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000304, lr=0.000175, step=417]\n", + "Epoch 417: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.989, loss=0.000429, lr=0.000175, step=418]\n", + "Epoch 418: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000781, lr=0.000174, step=419]\n", + "Epoch 419: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00041, lr=0.000174, step=420]\n", + "Epoch 420: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000562, lr=0.000174, step=421]\n", + "Epoch 421: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000402, lr=0.000173, step=422]\n", + "Epoch 422: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000437, lr=0.000173, step=423]\n", + "Epoch 423: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00132, lr=0.000173, step=424]\n", + "Epoch 424: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000367, lr=0.000172, step=425]\n", + "Epoch 425: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000496, lr=0.000172, step=426]\n", + "Epoch 426: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000716, lr=0.000172, step=427]\n", + "Epoch 427: 100%|██████████| 1/1 [00:00<00:00, 
1.48it/s, ema_decay=0.989, loss=0.000346, lr=0.000172, step=428]\n", + "Epoch 428: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.989, loss=0.000518, lr=0.000171, step=429]\n", + "Epoch 429: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.989, loss=0.000631, lr=0.000171, step=430]\n", + "Epoch 430: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000458, lr=0.000171, step=431]\n", + "Epoch 431: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000422, lr=0.00017, step=432]\n", + "Epoch 432: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000413, lr=0.00017, step=433]\n", + "Epoch 433: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000378, lr=0.00017, step=434]\n", + "Epoch 434: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.000544, lr=0.000169, step=435]\n", + "Epoch 435: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000354, lr=0.000169, step=436]\n", + "Epoch 436: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00038, lr=0.000169, step=437]\n", + "Epoch 437: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000676, lr=0.000169, step=438]\n", + "Epoch 438: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000415, lr=0.000168, step=439]\n", + "Epoch 439: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000513, lr=0.000168, step=440]\n", + "Epoch 440: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000555, lr=0.000168, step=441]\n", + "Epoch 441: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.000386, lr=0.000167, step=442]\n", + "Epoch 442: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000421, lr=0.000167, step=443]\n", + "Epoch 443: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.000452, lr=0.000167, step=444]\n", + "Epoch 444: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000611, lr=0.000167, step=445]\n", + "Epoch 445: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000376, lr=0.000166, step=446]\n", + "Epoch 446: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000719, lr=0.000166, step=447]\n", + "Epoch 447: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.99, loss=0.000403, lr=0.000166, step=448]\n", + "Epoch 448: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000317, lr=0.000165, step=449]\n", + "Epoch 449: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000352, lr=0.000165, step=450]\n", + "Epoch 450: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00043, lr=0.000165, step=451]\n", + "Epoch 451: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000483, lr=0.000164, step=452]\n", + "Epoch 452: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.99, loss=0.000416, lr=0.000164, step=453]\n", + "Epoch 453: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.000472, lr=0.000164, step=454]\n", + "Epoch 454: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000789, lr=0.000163, step=455]\n", + "Epoch 455: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000656, lr=0.000163, step=456]\n", + "Epoch 456: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000362, lr=0.000163, step=457]\n", + "Epoch 457: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000391, 
lr=0.000163, step=458]\n", + "Epoch 458: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000468, lr=0.000162, step=459]\n", + "Epoch 459: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00039, lr=0.000162, step=460]\n", + "Epoch 460: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000677, lr=0.000162, step=461]\n", + "Epoch 461: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00029, lr=0.000161, step=462]\n", + "Epoch 462: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000712, lr=0.000161, step=463]\n", + "Epoch 463: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.000411, lr=0.000161, step=464]\n", + "Epoch 464: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000595, lr=0.00016, step=465]\n", + "Epoch 465: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000536, lr=0.00016, step=466]\n", + "Epoch 466: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000353, lr=0.00016, step=467]\n", + "Epoch 467: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000365, lr=0.00016, step=468]\n", + "Epoch 468: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000328, lr=0.000159, step=469]\n", + "Epoch 469: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000404, lr=0.000159, step=470]\n", + "Epoch 470: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000527, lr=0.000159, step=471]\n", + "Epoch 471: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000412, lr=0.000158, step=472]\n", + "Epoch 472: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.000408, lr=0.000158, step=473]\n", + "Epoch 473: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00104, lr=0.000158, step=474]\n", + "Epoch 474: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000339, lr=0.000157, step=475]\n", + "Epoch 475: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00045, lr=0.000157, step=476]\n", + "Epoch 476: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000364, lr=0.000157, step=477]\n", + "Epoch 477: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000356, lr=0.000157, step=478]\n", + "Epoch 478: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00083, lr=0.000156, step=479]\n", + "Epoch 479: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000325, lr=0.000156, step=480]\n", + "Epoch 480: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000762, lr=0.000156, step=481]\n", + "Epoch 481: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000463, lr=0.000155, step=482]\n", + "Epoch 482: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000397, lr=0.000155, step=483]\n", + "Epoch 483: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00043, lr=0.000155, step=484]\n", + "Epoch 484: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000671, lr=0.000154, step=485]\n", + "Epoch 485: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000328, lr=0.000154, step=486]\n", + "Epoch 486: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000427, lr=0.000154, step=487]\n", + "Epoch 487: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000306, lr=0.000154, step=488]\n", + "Epoch 488: 
100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000397, lr=0.000153, step=489]\n", + "Epoch 489: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000637, lr=0.000153, step=490]\n", + "Epoch 490: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000333, lr=0.000153, step=491]\n", + "Epoch 491: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00028, lr=0.000152, step=492]\n", + "Epoch 492: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000639, lr=0.000152, step=493]\n", + "Epoch 493: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000528, lr=0.000152, step=494]\n", + "Epoch 494: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000401, lr=0.000151, step=495]\n", + "Epoch 495: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00051, lr=0.000151, step=496]\n", + "Epoch 496: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.000334, lr=0.000151, step=497]\n", + "Epoch 497: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.99, loss=0.000326, lr=0.000151, step=498]\n", + "Epoch 498: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.991, loss=0.000576, lr=0.00015, step=499]\n", + "Epoch 499: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00062, lr=0.00015, step=500]\n", + "Epoch 500: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000424, lr=0.00015, step=501]\n", + "Epoch 501: 100%|██████████| 1/1 [00:00<00:00, 1.10it/s, ema_decay=0.991, loss=0.000429, lr=0.000149, step=502]\n", + "Epoch 502: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.000499, lr=0.000149, step=503]\n", + "Epoch 503: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.0005, lr=0.000149, step=504]\n", + "Epoch 504: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000405, lr=0.000148, step=505]\n", + "Epoch 505: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000589, lr=0.000148, step=506]\n", + "Epoch 506: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00036, lr=0.000148, step=507]\n", + "Epoch 507: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000514, lr=0.000148, step=508]\n", + "Epoch 508: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000572, lr=0.000147, step=509]\n", + "Epoch 509: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.000597, lr=0.000147, step=510]\n", + "Epoch 510: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000386, lr=0.000147, step=511]\n", + "Epoch 511: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000354, lr=0.000146, step=512]\n", + "Epoch 512: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000446, lr=0.000146, step=513]\n", + "Epoch 513: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00051, lr=0.000146, step=514]\n", + "Epoch 514: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.000376, lr=0.000145, step=515]\n", + "Epoch 515: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000464, lr=0.000145, step=516]\n", + "Epoch 516: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000394, lr=0.000145, step=517]\n", + "Epoch 517: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000614, lr=0.000145, step=518]\n", + "Epoch 518: 100%|██████████| 1/1 [00:00<00:00, 
1.48it/s, ema_decay=0.991, loss=0.000432, lr=0.000144, step=519]\n", + "Epoch 519: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000846, lr=0.000144, step=520]\n", + "Epoch 520: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.000402, lr=0.000144, step=521]\n", + "Epoch 521: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00055, lr=0.000143, step=522]\n", + "Epoch 522: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000355, lr=0.000143, step=523]\n", + "Epoch 523: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000406, lr=0.000143, step=524]\n", + "Epoch 524: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0004, lr=0.000142, step=525]\n", + "Epoch 525: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000392, lr=0.000142, step=526]\n", + "Epoch 526: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000338, lr=0.000142, step=527]\n", + "Epoch 527: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000367, lr=0.000142, step=528]\n", + "Epoch 528: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000321, lr=0.000141, step=529]\n", + "Epoch 529: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000536, lr=0.000141, step=530]\n", + "Epoch 530: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000346, lr=0.000141, step=531]\n", + "Epoch 531: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000286, lr=0.00014, step=532]\n", + "Epoch 532: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.000337, lr=0.00014, step=533]\n", + "Epoch 533: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000405, lr=0.00014, step=534]\n", + "Epoch 534: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000586, lr=0.00014, step=535]\n", + "Epoch 535: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000514, lr=0.000139, step=536]\n", + "Epoch 536: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000381, lr=0.000139, step=537]\n", + "Epoch 537: 100%|██████████| 1/1 [00:00<00:00, 1.28it/s, ema_decay=0.991, loss=0.000343, lr=0.000139, step=538]\n", + "Epoch 538: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000334, lr=0.000138, step=539]\n", + "Epoch 539: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.000556, lr=0.000138, step=540]\n", + "Epoch 540: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000359, lr=0.000138, step=541]\n", + "Epoch 541: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.991, loss=0.000744, lr=0.000137, step=542]\n", + "Epoch 542: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000315, lr=0.000137, step=543]\n", + "Epoch 543: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00041, lr=0.000137, step=544]\n", + "Epoch 544: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00051, lr=0.000136, step=545]\n", + "Epoch 545: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000369, lr=0.000136, step=546]\n", + "Epoch 546: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.000367, lr=0.000136, step=547]\n", + "Epoch 547: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.000331, lr=0.000136, step=548]\n", + "Epoch 548: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, 
ema_decay=0.991, loss=0.000378, lr=0.000135, step=549]\n", + "Epoch 549: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000335, lr=0.000135, step=550]\n", + "Epoch 550: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.991, loss=0.000549, lr=0.000135, step=551]\n", + "Epoch 551: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.000552, lr=0.000134, step=552]\n", + "Epoch 552: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000321, lr=0.000134, step=553]\n", + "Epoch 553: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000442, lr=0.000134, step=554]\n", + "Epoch 554: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00047, lr=0.000133, step=555]\n", + "Epoch 555: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000482, lr=0.000133, step=556]\n", + "Epoch 556: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000343, lr=0.000133, step=557]\n", + "Epoch 557: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000381, lr=0.000133, step=558]\n", + "Epoch 558: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00044, lr=0.000132, step=559]\n", + "Epoch 559: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000345, lr=0.000132, step=560]\n", + "Epoch 560: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000365, lr=0.000132, step=561]\n", + "Epoch 561: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.991, loss=0.00032, lr=0.000131, step=562]\n", + "Epoch 562: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.991, loss=0.000263, lr=0.000131, step=563]\n", + "Epoch 563: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000293, lr=0.000131, step=564]\n", + "Epoch 564: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00043, lr=0.000131, step=565]\n", + "Epoch 565: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000535, lr=0.00013, step=566]\n", + "Epoch 566: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.000473, lr=0.00013, step=567]\n", + "Epoch 567: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000457, lr=0.00013, step=568]\n", + "Epoch 568: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000269, lr=0.000129, step=569]\n", + "Epoch 569: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.991, loss=0.000436, lr=0.000129, step=570]\n", + "Epoch 570: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000338, lr=0.000129, step=571]\n", + "Epoch 571: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000374, lr=0.000128, step=572]\n", + "Epoch 572: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0006, lr=0.000128, step=573]\n", + "Epoch 573: 100%|██████████| 1/1 [00:00<00:00, 1.05it/s, ema_decay=0.991, loss=0.000294, lr=0.000128, step=574]\n", + "Epoch 574: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000509, lr=0.000127, step=575]\n", + "Epoch 575: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.000322, lr=0.000127, step=576]\n", + "Epoch 576: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000291, lr=0.000127, step=577]\n", + "Epoch 577: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000357, lr=0.000127, step=578]\n", + "Epoch 578: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, 
loss=0.000318, lr=0.000126, step=579]\n", + "Epoch 579: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000516, lr=0.000126, step=580]\n", + "Epoch 580: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000346, lr=0.000126, step=581]\n", + "Epoch 581: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000508, lr=0.000125, step=582]\n", + "Epoch 582: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000395, lr=0.000125, step=583]\n", + "Epoch 583: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.992, loss=0.000379, lr=0.000125, step=584]\n", + "Epoch 584: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000437, lr=0.000124, step=585]\n", + "Epoch 585: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000381, lr=0.000124, step=586]\n", + "Epoch 586: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000346, lr=0.000124, step=587]\n", + "Epoch 587: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000323, lr=0.000124, step=588]\n", + "Epoch 588: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000324, lr=0.000123, step=589]\n", + "Epoch 589: 100%|██████████| 1/1 [00:00<00:00, 1.10it/s, ema_decay=0.992, loss=0.000292, lr=0.000123, step=590]\n", + "Epoch 590: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.000734, lr=0.000123, step=591]\n", + "Epoch 591: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000289, lr=0.000122, step=592]\n", + "Epoch 592: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000454, lr=0.000122, step=593]\n", + "Epoch 593: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000346, lr=0.000122, step=594]\n", + "Epoch 594: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000576, lr=0.000121, step=595]\n", + "Epoch 595: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000367, lr=0.000121, step=596]\n", + "Epoch 596: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000327, lr=0.000121, step=597]\n", + "Epoch 597: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000482, lr=0.000121, step=598]\n", + "Epoch 598: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000279, lr=0.00012, step=599]\n", + "Epoch 599: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000316, lr=0.00012, step=600]\n", + "Epoch 600: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000369, lr=0.00012, step=601]\n", + "Epoch 601: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000541, lr=0.000119, step=602]\n", + "Epoch 602: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00034, lr=0.000119, step=603]\n", + "Epoch 603: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000411, lr=0.000119, step=604]\n", + "Epoch 604: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000326, lr=0.000118, step=605]\n", + "Epoch 605: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000409, lr=0.000118, step=606]\n", + "Epoch 606: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000371, lr=0.000118, step=607]\n", + "Epoch 607: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000328, lr=0.000118, step=608]\n", + "Epoch 608: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000402, 
lr=0.000117, step=609]\n", + "Epoch 609: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000535, lr=0.000117, step=610]\n", + "Epoch 610: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.000354, lr=0.000117, step=611]\n", + "Epoch 611: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000326, lr=0.000116, step=612]\n", + "Epoch 612: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000292, lr=0.000116, step=613]\n", + "Epoch 613: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000414, lr=0.000116, step=614]\n", + "Epoch 614: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000322, lr=0.000115, step=615]\n", + "Epoch 615: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.000468, lr=0.000115, step=616]\n", + "Epoch 616: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000489, lr=0.000115, step=617]\n", + "Epoch 617: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00028, lr=0.000115, step=618]\n", + "Epoch 618: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000377, lr=0.000114, step=619]\n", + "Epoch 619: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000314, lr=0.000114, step=620]\n", + "Epoch 620: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00051, lr=0.000114, step=621]\n", + "Epoch 621: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00069, lr=0.000113, step=622]\n", + "Epoch 622: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000307, lr=0.000113, step=623]\n", + "Epoch 623: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000289, lr=0.000113, step=624]\n", + "Epoch 624: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.000312, lr=0.000112, step=625]\n", + "Epoch 625: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000631, lr=0.000112, step=626]\n", + "Epoch 626: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000898, lr=0.000112, step=627]\n", + "Epoch 627: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000357, lr=0.000112, step=628]\n", + "Epoch 628: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000371, lr=0.000111, step=629]\n", + "Epoch 629: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000645, lr=0.000111, step=630]\n", + "Epoch 630: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000421, lr=0.000111, step=631]\n", + "Epoch 631: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00038, lr=0.00011, step=632]\n", + "Epoch 632: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000371, lr=0.00011, step=633]\n", + "Epoch 633: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0003, lr=0.00011, step=634]\n", + "Epoch 634: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000327, lr=0.000109, step=635]\n", + "Epoch 635: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000445, lr=0.000109, step=636]\n", + "Epoch 636: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000521, lr=0.000109, step=637]\n", + "Epoch 637: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000319, lr=0.000109, step=638]\n", + "Epoch 638: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00033, lr=0.000108, 
step=639]\n", + "Epoch 639: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000538, lr=0.000108, step=640]\n", + "Epoch 640: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000426, lr=0.000108, step=641]\n", + "Epoch 641: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000379, lr=0.000107, step=642]\n", + "Epoch 642: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000502, lr=0.000107, step=643]\n", + "Epoch 643: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000283, lr=0.000107, step=644]\n", + "Epoch 644: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000403, lr=0.000106, step=645]\n", + "Epoch 645: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.000341, lr=0.000106, step=646]\n", + "Epoch 646: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000314, lr=0.000106, step=647]\n", + "Epoch 647: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000513, lr=0.000106, step=648]\n", + "Epoch 648: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.000266, lr=0.000105, step=649]\n", + "Epoch 649: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000309, lr=0.000105, step=650]\n", + "Epoch 650: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000608, lr=0.000105, step=651]\n", + "Epoch 651: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000571, lr=0.000104, step=652]\n", + "Epoch 652: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000461, lr=0.000104, step=653]\n", + "Epoch 653: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000347, lr=0.000104, step=654]\n", + "Epoch 654: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000328, lr=0.000103, step=655]\n", + "Epoch 655: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.000442, lr=0.000103, step=656]\n", + "Epoch 656: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000296, lr=0.000103, step=657]\n", + "Epoch 657: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000376, lr=0.000103, step=658]\n", + "Epoch 658: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00032, lr=0.000102, step=659]\n", + "Epoch 659: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.000456, lr=0.000102, step=660]\n", + "Epoch 660: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000363, lr=0.000102, step=661]\n", + "Epoch 661: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000392, lr=0.000101, step=662]\n", + "Epoch 662: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00033, lr=0.000101, step=663]\n", + "Epoch 663: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000345, lr=0.000101, step=664]\n", + "Epoch 664: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000543, lr=0.000101, step=665]\n", + "Epoch 665: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000464, lr=0.0001, step=666]\n", + "Epoch 666: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000322, lr=9.99e-5, step=667]\n", + "Epoch 667: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000304, lr=9.96e-5, step=668]\n", + "Epoch 668: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000519, lr=9.93e-5, step=669]\n", + 
"Epoch 669: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00029, lr=9.9e-5, step=670]\n", + "Epoch 670: 100%|██████████| 1/1 [00:00<00:00, 1.15it/s, ema_decay=0.992, loss=0.000308, lr=9.87e-5, step=671]\n", + "Epoch 671: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000439, lr=9.84e-5, step=672]\n", + "Epoch 672: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000354, lr=9.81e-5, step=673]\n", + "Epoch 673: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000263, lr=9.78e-5, step=674]\n", + "Epoch 674: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000289, lr=9.75e-5, step=675]\n", + "Epoch 675: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000299, lr=9.72e-5, step=676]\n", + "Epoch 676: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000385, lr=9.69e-5, step=677]\n", + "Epoch 677: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000374, lr=9.66e-5, step=678]\n", + "Epoch 678: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000288, lr=9.63e-5, step=679]\n", + "Epoch 679: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000303, lr=9.6e-5, step=680]\n", + "Epoch 680: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000315, lr=9.57e-5, step=681]\n", + "Epoch 681: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000304, lr=9.54e-5, step=682]\n", + "Epoch 682: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000338, lr=9.51e-5, step=683]\n", + "Epoch 683: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000267, lr=9.48e-5, step=684]\n", + "Epoch 684: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000302, lr=9.45e-5, step=685]\n", + "Epoch 685: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000266, lr=9.42e-5, step=686]\n", + "Epoch 686: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000388, lr=9.39e-5, step=687]\n", + "Epoch 687: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000278, lr=9.36e-5, step=688]\n", + "Epoch 688: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000638, lr=9.33e-5, step=689]\n", + "Epoch 689: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000359, lr=9.3e-5, step=690]\n", + "Epoch 690: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000341, lr=9.27e-5, step=691]\n", + "Epoch 691: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000282, lr=9.24e-5, step=692]\n", + "Epoch 692: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00033, lr=9.21e-5, step=693]\n", + "Epoch 693: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000287, lr=9.18e-5, step=694]\n", + "Epoch 694: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00028, lr=9.15e-5, step=695]\n", + "Epoch 695: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000399, lr=9.12e-5, step=696]\n", + "Epoch 696: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000317, lr=9.09e-5, step=697]\n", + "Epoch 697: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00055, lr=9.06e-5, step=698]\n", + "Epoch 698: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000388, lr=9.03e-5, step=699]\n", + "Epoch 699: 100%|██████████| 1/1 [00:00<00:00, 
1.50it/s, ema_decay=0.993, loss=0.000498, lr=9e-5, step=700]\n", + "Epoch 700: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000496, lr=8.97e-5, step=701]\n", + "Epoch 701: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000296, lr=8.94e-5, step=702]\n", + "Epoch 702: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000319, lr=8.91e-5, step=703]\n", + "Epoch 703: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000253, lr=8.88e-5, step=704]\n", + "Epoch 704: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000325, lr=8.85e-5, step=705]\n", + "Epoch 705: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000329, lr=8.82e-5, step=706]\n", + "Epoch 706: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000319, lr=8.79e-5, step=707]\n", + "Epoch 707: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000452, lr=8.76e-5, step=708]\n", + "Epoch 708: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000488, lr=8.73e-5, step=709]\n", + "Epoch 709: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00039, lr=8.7e-5, step=710]\n", + "Epoch 710: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000296, lr=8.67e-5, step=711]\n", + "Epoch 711: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000333, lr=8.64e-5, step=712]\n", + "Epoch 712: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000315, lr=8.61e-5, step=713]\n", + "Epoch 713: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00036, lr=8.58e-5, step=714]\n", + "Epoch 714: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000487, lr=8.55e-5, step=715]\n", + "Epoch 715: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000296, lr=8.52e-5, step=716]\n", + "Epoch 716: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000267, lr=8.49e-5, step=717]\n", + "Epoch 717: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000291, lr=8.46e-5, step=718]\n", + "Epoch 718: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000296, lr=8.43e-5, step=719]\n", + "Epoch 719: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000306, lr=8.4e-5, step=720]\n", + "Epoch 720: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000388, lr=8.37e-5, step=721]\n", + "Epoch 721: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000378, lr=8.34e-5, step=722]\n", + "Epoch 722: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000448, lr=8.31e-5, step=723]\n", + "Epoch 723: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000325, lr=8.28e-5, step=724]\n", + "Epoch 724: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00031, lr=8.25e-5, step=725]\n", + "Epoch 725: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000649, lr=8.22e-5, step=726]\n", + "Epoch 726: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000334, lr=8.19e-5, step=727]\n", + "Epoch 727: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000273, lr=8.16e-5, step=728]\n", + "Epoch 728: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000292, lr=8.13e-5, step=729]\n", + "Epoch 729: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000315, lr=8.1e-5, 
step=730]\n", + "Epoch 730: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000321, lr=8.07e-5, step=731]\n", + "Epoch 731: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000281, lr=8.04e-5, step=732]\n", + "Epoch 732: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000306, lr=8.01e-5, step=733]\n", + "Epoch 733: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00079, lr=7.98e-5, step=734]\n", + "Epoch 734: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000344, lr=7.95e-5, step=735]\n", + "Epoch 735: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000312, lr=7.92e-5, step=736]\n", + "Epoch 736: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000265, lr=7.89e-5, step=737]\n", + "Epoch 737: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000467, lr=7.86e-5, step=738]\n", + "Epoch 738: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000395, lr=7.83e-5, step=739]\n", + "Epoch 739: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000327, lr=7.8e-5, step=740]\n", + "Epoch 740: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000315, lr=7.77e-5, step=741]\n", + "Epoch 741: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000498, lr=7.74e-5, step=742]\n", + "Epoch 742: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00031, lr=7.71e-5, step=743]\n", + "Epoch 743: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000363, lr=7.68e-5, step=744]\n", + "Epoch 744: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000351, lr=7.65e-5, step=745]\n", + "Epoch 745: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000574, lr=7.62e-5, step=746]\n", + "Epoch 746: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000305, lr=7.59e-5, step=747]\n", + "Epoch 747: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000326, lr=7.56e-5, step=748]\n", + "Epoch 748: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000337, lr=7.53e-5, step=749]\n", + "Epoch 749: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000517, lr=7.5e-5, step=750]\n", + "Epoch 750: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000303, lr=7.47e-5, step=751]\n", + "Epoch 751: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000267, lr=7.44e-5, step=752]\n", + "Epoch 752: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000502, lr=7.41e-5, step=753]\n", + "Epoch 753: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000591, lr=7.38e-5, step=754]\n", + "Epoch 754: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000351, lr=7.35e-5, step=755]\n", + "Epoch 755: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.000505, lr=7.32e-5, step=756]\n", + "Epoch 756: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000357, lr=7.29e-5, step=757]\n", + "Epoch 757: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000329, lr=7.26e-5, step=758]\n", + "Epoch 758: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000264, lr=7.23e-5, step=759]\n", + "Epoch 759: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000455, lr=7.2e-5, step=760]\n", + "Epoch 760: 100%|██████████| 1/1 
[00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000328, lr=7.17e-5, step=761]\n", + "Epoch 761: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.000304, lr=7.14e-5, step=762]\n", + "Epoch 762: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000378, lr=7.11e-5, step=763]\n", + "Epoch 763: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000362, lr=7.08e-5, step=764]\n", + "Epoch 764: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00046, lr=7.05e-5, step=765]\n", + "Epoch 765: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000406, lr=7.02e-5, step=766]\n", + "Epoch 766: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000442, lr=6.99e-5, step=767]\n", + "Epoch 767: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000276, lr=6.96e-5, step=768]\n", + "Epoch 768: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000278, lr=6.93e-5, step=769]\n", + "Epoch 769: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000365, lr=6.9e-5, step=770]\n", + "Epoch 770: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000372, lr=6.87e-5, step=771]\n", + "Epoch 771: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000486, lr=6.84e-5, step=772]\n", + "Epoch 772: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000582, lr=6.81e-5, step=773]\n", + "Epoch 773: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000289, lr=6.78e-5, step=774]\n", + "Epoch 774: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000412, lr=6.75e-5, step=775]\n", + "Epoch 775: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000332, lr=6.72e-5, step=776]\n", + "Epoch 776: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000366, lr=6.69e-5, step=777]\n", + "Epoch 777: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000282, lr=6.66e-5, step=778]\n", + "Epoch 778: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000312, lr=6.63e-5, step=779]\n", + "Epoch 779: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000295, lr=6.6e-5, step=780]\n", + "Epoch 780: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000426, lr=6.57e-5, step=781]\n", + "Epoch 781: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00027, lr=6.54e-5, step=782]\n", + "Epoch 782: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000321, lr=6.51e-5, step=783]\n", + "Epoch 783: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000505, lr=6.48e-5, step=784]\n", + "Epoch 784: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000309, lr=6.45e-5, step=785]\n", + "Epoch 785: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000516, lr=6.42e-5, step=786]\n", + "Epoch 786: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000394, lr=6.39e-5, step=787]\n", + "Epoch 787: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000301, lr=6.36e-5, step=788]\n", + "Epoch 788: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.000276, lr=6.33e-5, step=789]\n", + "Epoch 789: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000269, lr=6.3e-5, step=790]\n", + "Epoch 790: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, 
loss=0.000333, lr=6.27e-5, step=791]\n",
+ [... progress-bar output elided: epochs 791–972 continue at ~1.5 it/s; loss plateaus between roughly 2e-4 and 7e-4, ema_decay ticks up from 0.993 to 0.994 at step 826, and the learning rate anneals linearly (3e-7 per step) from 6.24e-5 down to 8.1e-6 ...]
+ "Epoch 973: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000283, lr=7.8e-6, step=974]\n", + "Epoch 974: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000248, lr=7.5e-6, step=975]\n", + "Epoch 975: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.000383, lr=7.2e-6, step=976]\n", + "Epoch 976: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.000294, lr=6.9e-6, step=977]\n", + "Epoch 977: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0004, lr=6.6e-6, step=978]\n", + "Epoch 978: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000332, lr=6.3e-6, step=979]\n", + "Epoch 979: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000308, lr=6e-6, step=980]\n", + "Epoch 980: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000357, lr=5.7e-6, step=981]\n", + "Epoch 981: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00031, lr=5.4e-6, step=982]\n", + "Epoch 982: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000253, lr=5.1e-6, step=983]\n", + "Epoch 983: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000406, lr=4.8e-6, step=984]\n", + "Epoch 984: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00029, lr=4.5e-6, step=985]\n", + "Epoch 985: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000456, lr=4.2e-6, step=986]\n", + "Epoch 986: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000284, lr=3.9e-6, step=987]\n", + "Epoch 987: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00027, lr=3.6e-6, step=988]\n", + "Epoch 988: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000278, lr=3.3e-6, step=989]\n", + "Epoch 989: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.994, loss=0.000355, lr=3e-6, step=990]\n", + "Epoch 990: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000278, lr=2.7e-6, step=991]\n", + "Epoch 991: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000307, lr=2.4e-6, step=992]\n", + "Epoch 992: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000338, lr=2.1e-6, step=993]\n", + "Epoch 993: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000209, lr=1.8e-6, step=994]\n", + "Epoch 994: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000346, lr=1.5e-6, step=995]\n", + "Epoch 995: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000309, lr=1.2e-6, step=996]\n", + "Epoch 996: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000287, lr=9e-7, step=997]\n", + "Epoch 997: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.000272, lr=6e-7, step=998]\n", + "Epoch 998: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000237, lr=3e-7, step=999]\n", + "Epoch 999: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000256, lr=0, step=1000]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4fe0bc2e545b46088b53f66672061d2e", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/50 [00:00 125\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0, loss=0.0114, lr=0.0003, step=1]\n", + "Epoch 1: 100%|██████████| 1/1 [00:00<00:00, 
+ [... progress-bar output elided: a second run trains at ~1.5 it/s; over epochs 1–428 the ema_decay warms up from 0 to 0.989, the loss falls from an early spike of 0.312 into the 1e-3–4e-3 range, and the learning rate decays linearly (3e-7 per step) from 3e-4 down to 1.71e-4 ...]
+ "Epoch 429: 100%|██████████| 
1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00261, lr=0.000171, step=430]\n", + "Epoch 430: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0018, lr=0.000171, step=431]\n", + "Epoch 431: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00175, lr=0.00017, step=432]\n", + "Epoch 432: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00166, lr=0.00017, step=433]\n", + "Epoch 433: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000882, lr=0.00017, step=434]\n", + "Epoch 434: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00332, lr=0.000169, step=435]\n", + "Epoch 435: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00113, lr=0.000169, step=436]\n", + "Epoch 436: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00149, lr=0.000169, step=437]\n", + "Epoch 437: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00218, lr=0.000169, step=438]\n", + "Epoch 438: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00235, lr=0.000168, step=439]\n", + "Epoch 439: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000792, lr=0.000168, step=440]\n", + "Epoch 440: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.0015, lr=0.000168, step=441]\n", + "Epoch 441: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.99, loss=0.00163, lr=0.000167, step=442]\n", + "Epoch 442: 100%|██████████| 1/1 [00:00<00:00, 1.24it/s, ema_decay=0.99, loss=0.000747, lr=0.000167, step=443]\n", + "Epoch 443: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.00131, lr=0.000167, step=444]\n", + "Epoch 444: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00147, lr=0.000167, step=445]\n", + "Epoch 445: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000897, lr=0.000166, step=446]\n", + "Epoch 446: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00127, lr=0.000166, step=447]\n", + "Epoch 447: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00107, lr=0.000166, step=448]\n", + "Epoch 448: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00327, lr=0.000165, step=449]\n", + "Epoch 449: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.99, loss=0.000759, lr=0.000165, step=450]\n", + "Epoch 450: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.00118, lr=0.000165, step=451]\n", + "Epoch 451: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00208, lr=0.000164, step=452]\n", + "Epoch 452: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0024, lr=0.000164, step=453]\n", + "Epoch 453: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.00144, lr=0.000164, step=454]\n", + "Epoch 454: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.00148, lr=0.000163, step=455]\n", + "Epoch 455: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.00219, lr=0.000163, step=456]\n", + "Epoch 456: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00148, lr=0.000163, step=457]\n", + "Epoch 457: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00125, lr=0.000163, step=458]\n", + "Epoch 458: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00297, lr=0.000162, step=459]\n", + "Epoch 459: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00278, 
lr=0.000162, step=460]\n", + "Epoch 460: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00136, lr=0.000162, step=461]\n", + "Epoch 461: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00119, lr=0.000161, step=462]\n", + "Epoch 462: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00175, lr=0.000161, step=463]\n", + "Epoch 463: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00221, lr=0.000161, step=464]\n", + "Epoch 464: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000969, lr=0.00016, step=465]\n", + "Epoch 465: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00253, lr=0.00016, step=466]\n", + "Epoch 466: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00154, lr=0.00016, step=467]\n", + "Epoch 467: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00118, lr=0.00016, step=468]\n", + "Epoch 468: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00235, lr=0.000159, step=469]\n", + "Epoch 469: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00148, lr=0.000159, step=470]\n", + "Epoch 470: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00148, lr=0.000159, step=471]\n", + "Epoch 471: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000965, lr=0.000158, step=472]\n", + "Epoch 472: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00146, lr=0.000158, step=473]\n", + "Epoch 473: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00353, lr=0.000158, step=474]\n", + "Epoch 474: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00221, lr=0.000157, step=475]\n", + "Epoch 475: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.0014, lr=0.000157, step=476]\n", + "Epoch 476: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00186, lr=0.000157, step=477]\n", + "Epoch 477: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00184, lr=0.000157, step=478]\n", + "Epoch 478: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00148, lr=0.000156, step=479]\n", + "Epoch 479: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00109, lr=0.000156, step=480]\n", + "Epoch 480: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00261, lr=0.000156, step=481]\n", + "Epoch 481: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00158, lr=0.000155, step=482]\n", + "Epoch 482: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00105, lr=0.000155, step=483]\n", + "Epoch 483: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.0011, lr=0.000155, step=484]\n", + "Epoch 484: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.99, loss=0.00199, lr=0.000154, step=485]\n", + "Epoch 485: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.000702, lr=0.000154, step=486]\n", + "Epoch 486: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00129, lr=0.000154, step=487]\n", + "Epoch 487: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00196, lr=0.000154, step=488]\n", + "Epoch 488: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00197, lr=0.000153, step=489]\n", + "Epoch 489: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.99, loss=0.00176, lr=0.000153, step=490]\n", + "Epoch 490: 100%|██████████| 1/1 [00:00<00:00, 
1.50it/s, ema_decay=0.99, loss=0.00125, lr=0.000153, step=491]\n", + "Epoch 491: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00136, lr=0.000152, step=492]\n", + "Epoch 492: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000661, lr=0.000152, step=493]\n", + "Epoch 493: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.00107, lr=0.000152, step=494]\n", + "Epoch 494: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0022, lr=0.000151, step=495]\n", + "Epoch 495: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000522, lr=0.000151, step=496]\n", + "Epoch 496: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00108, lr=0.000151, step=497]\n", + "Epoch 497: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00174, lr=0.000151, step=498]\n", + "Epoch 498: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00166, lr=0.00015, step=499]\n", + "Epoch 499: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00126, lr=0.00015, step=500]\n", + "Epoch 500: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000982, lr=0.00015, step=501]\n", + "Epoch 501: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000943, lr=0.000149, step=502]\n", + "Epoch 502: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00167, lr=0.000149, step=503]\n", + "Epoch 503: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00102, lr=0.000149, step=504]\n", + "Epoch 504: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00274, lr=0.000148, step=505]\n", + "Epoch 505: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00099, lr=0.000148, step=506]\n", + "Epoch 506: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00171, lr=0.000148, step=507]\n", + "Epoch 507: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00147, lr=0.000148, step=508]\n", + "Epoch 508: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00138, lr=0.000147, step=509]\n", + "Epoch 509: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00116, lr=0.000147, step=510]\n", + "Epoch 510: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000945, lr=0.000147, step=511]\n", + "Epoch 511: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00147, lr=0.000146, step=512]\n", + "Epoch 512: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000757, lr=0.000146, step=513]\n", + "Epoch 513: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00122, lr=0.000146, step=514]\n", + "Epoch 514: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00116, lr=0.000145, step=515]\n", + "Epoch 515: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00133, lr=0.000145, step=516]\n", + "Epoch 516: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00105, lr=0.000145, step=517]\n", + "Epoch 517: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00146, lr=0.000145, step=518]\n", + "Epoch 518: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000884, lr=0.000144, step=519]\n", + "Epoch 519: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000715, lr=0.000144, step=520]\n", + "Epoch 520: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00114, 
lr=0.000144, step=521]\n", + "Epoch 521: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00118, lr=0.000143, step=522]\n", + "Epoch 522: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00113, lr=0.000143, step=523]\n", + "Epoch 523: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00137, lr=0.000143, step=524]\n", + "Epoch 524: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00136, lr=0.000142, step=525]\n", + "Epoch 525: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000681, lr=0.000142, step=526]\n", + "Epoch 526: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.000633, lr=0.000142, step=527]\n", + "Epoch 527: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000716, lr=0.000142, step=528]\n", + "Epoch 528: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000834, lr=0.000141, step=529]\n", + "Epoch 529: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00118, lr=0.000141, step=530]\n", + "Epoch 530: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000867, lr=0.000141, step=531]\n", + "Epoch 531: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000871, lr=0.00014, step=532]\n", + "Epoch 532: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000786, lr=0.00014, step=533]\n", + "Epoch 533: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000995, lr=0.00014, step=534]\n", + "Epoch 534: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000961, lr=0.00014, step=535]\n", + "Epoch 535: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000577, lr=0.000139, step=536]\n", + "Epoch 536: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.000829, lr=0.000139, step=537]\n", + "Epoch 537: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00071, lr=0.000139, step=538]\n", + "Epoch 538: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0011, lr=0.000138, step=539]\n", + "Epoch 539: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.991, loss=0.000875, lr=0.000138, step=540]\n", + "Epoch 540: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.991, loss=0.000768, lr=0.000138, step=541]\n", + "Epoch 541: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000979, lr=0.000137, step=542]\n", + "Epoch 542: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000753, lr=0.000137, step=543]\n", + "Epoch 543: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.000996, lr=0.000137, step=544]\n", + "Epoch 544: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000855, lr=0.000136, step=545]\n", + "Epoch 545: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000908, lr=0.000136, step=546]\n", + "Epoch 546: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000853, lr=0.000136, step=547]\n", + "Epoch 547: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000864, lr=0.000136, step=548]\n", + "Epoch 548: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000691, lr=0.000135, step=549]\n", + "Epoch 549: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000817, lr=0.000135, step=550]\n", + "Epoch 550: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000752, lr=0.000135, step=551]\n", 
+ "Epoch 551: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000665, lr=0.000134, step=552]\n", + "Epoch 552: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000688, lr=0.000134, step=553]\n", + "Epoch 553: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.00117, lr=0.000134, step=554]\n", + "Epoch 554: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.000824, lr=0.000133, step=555]\n", + "Epoch 555: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00133, lr=0.000133, step=556]\n", + "Epoch 556: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000693, lr=0.000133, step=557]\n", + "Epoch 557: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00138, lr=0.000133, step=558]\n", + "Epoch 558: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000873, lr=0.000132, step=559]\n", + "Epoch 559: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000552, lr=0.000132, step=560]\n", + "Epoch 560: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00106, lr=0.000132, step=561]\n", + "Epoch 561: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000643, lr=0.000131, step=562]\n", + "Epoch 562: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000554, lr=0.000131, step=563]\n", + "Epoch 563: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000537, lr=0.000131, step=564]\n", + "Epoch 564: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000894, lr=0.000131, step=565]\n", + "Epoch 565: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000594, lr=0.00013, step=566]\n", + "Epoch 566: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00055, lr=0.00013, step=567]\n", + "Epoch 567: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00055, lr=0.00013, step=568]\n", + "Epoch 568: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00052, lr=0.000129, step=569]\n", + "Epoch 569: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00083, lr=0.000129, step=570]\n", + "Epoch 570: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000891, lr=0.000129, step=571]\n", + "Epoch 571: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000543, lr=0.000128, step=572]\n", + "Epoch 572: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000482, lr=0.000128, step=573]\n", + "Epoch 573: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000573, lr=0.000128, step=574]\n", + "Epoch 574: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00112, lr=0.000127, step=575]\n", + "Epoch 575: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00054, lr=0.000127, step=576]\n", + "Epoch 576: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.000415, lr=0.000127, step=577]\n", + "Epoch 577: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000805, lr=0.000127, step=578]\n", + "Epoch 578: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000666, lr=0.000126, step=579]\n", + "Epoch 579: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000628, lr=0.000126, step=580]\n", + "Epoch 580: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000482, lr=0.000126, step=581]\n", + "Epoch 581: 
100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000538, lr=0.000125, step=582]\n", + "Epoch 582: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000516, lr=0.000125, step=583]\n", + "Epoch 583: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000563, lr=0.000125, step=584]\n", + "Epoch 584: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000631, lr=0.000124, step=585]\n", + "Epoch 585: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000715, lr=0.000124, step=586]\n", + "Epoch 586: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000908, lr=0.000124, step=587]\n", + "Epoch 587: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00047, lr=0.000124, step=588]\n", + "Epoch 588: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000568, lr=0.000123, step=589]\n", + "Epoch 589: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.000851, lr=0.000123, step=590]\n", + "Epoch 590: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000802, lr=0.000123, step=591]\n", + "Epoch 591: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00075, lr=0.000122, step=592]\n", + "Epoch 592: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.000662, lr=0.000122, step=593]\n", + "Epoch 593: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00119, lr=0.000122, step=594]\n", + "Epoch 594: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000687, lr=0.000121, step=595]\n", + "Epoch 595: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00107, lr=0.000121, step=596]\n", + "Epoch 596: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000414, lr=0.000121, step=597]\n", + "Epoch 597: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00124, lr=0.000121, step=598]\n", + "Epoch 598: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000663, lr=0.00012, step=599]\n", + "Epoch 599: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.000533, lr=0.00012, step=600]\n", + "Epoch 600: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000639, lr=0.00012, step=601]\n", + "Epoch 601: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000883, lr=0.000119, step=602]\n", + "Epoch 602: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000731, lr=0.000119, step=603]\n", + "Epoch 603: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000668, lr=0.000119, step=604]\n", + "Epoch 604: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000899, lr=0.000118, step=605]\n", + "Epoch 605: 100%|██████████| 1/1 [00:00<00:00, 1.08it/s, ema_decay=0.992, loss=0.000871, lr=0.000118, step=606]\n", + "Epoch 606: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.992, loss=0.000599, lr=0.000118, step=607]\n", + "Epoch 607: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000679, lr=0.000118, step=608]\n", + "Epoch 608: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00101, lr=0.000117, step=609]\n", + "Epoch 609: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000769, lr=0.000117, step=610]\n", + "Epoch 610: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000644, lr=0.000117, step=611]\n", + "Epoch 611: 100%|██████████| 1/1 
[00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000556, lr=0.000116, step=612]\n", + "Epoch 612: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000501, lr=0.000116, step=613]\n", + "Epoch 613: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000525, lr=0.000116, step=614]\n", + "Epoch 614: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000692, lr=0.000115, step=615]\n", + "Epoch 615: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000547, lr=0.000115, step=616]\n", + "Epoch 616: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000425, lr=0.000115, step=617]\n", + "Epoch 617: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000701, lr=0.000115, step=618]\n", + "Epoch 618: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000811, lr=0.000114, step=619]\n", + "Epoch 619: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000714, lr=0.000114, step=620]\n", + "Epoch 620: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000847, lr=0.000114, step=621]\n", + "Epoch 621: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000679, lr=0.000113, step=622]\n", + "Epoch 622: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000642, lr=0.000113, step=623]\n", + "Epoch 623: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000525, lr=0.000113, step=624]\n", + "Epoch 624: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000685, lr=0.000112, step=625]\n", + "Epoch 625: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000628, lr=0.000112, step=626]\n", + "Epoch 626: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000626, lr=0.000112, step=627]\n", + "Epoch 627: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000598, lr=0.000112, step=628]\n", + "Epoch 628: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000612, lr=0.000111, step=629]\n", + "Epoch 629: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000546, lr=0.000111, step=630]\n", + "Epoch 630: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000651, lr=0.000111, step=631]\n", + "Epoch 631: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000539, lr=0.00011, step=632]\n", + "Epoch 632: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000692, lr=0.00011, step=633]\n", + "Epoch 633: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000703, lr=0.00011, step=634]\n", + "Epoch 634: 100%|██████████| 1/1 [00:00<00:00, 1.36it/s, ema_decay=0.992, loss=0.000586, lr=0.000109, step=635]\n", + "Epoch 635: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00072, lr=0.000109, step=636]\n", + "Epoch 636: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000383, lr=0.000109, step=637]\n", + "Epoch 637: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000749, lr=0.000109, step=638]\n", + "Epoch 638: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000686, lr=0.000108, step=639]\n", + "Epoch 639: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00112, lr=0.000108, step=640]\n", + "Epoch 640: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000573, lr=0.000108, step=641]\n", + "Epoch 641: 100%|██████████| 1/1 [00:00<00:00, 
1.50it/s, ema_decay=0.992, loss=0.000527, lr=0.000107, step=642]\n", + "Epoch 642: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000468, lr=0.000107, step=643]\n", + "Epoch 643: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000636, lr=0.000107, step=644]\n", + "Epoch 644: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000444, lr=0.000106, step=645]\n", + "Epoch 645: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000764, lr=0.000106, step=646]\n", + "Epoch 646: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.000418, lr=0.000106, step=647]\n", + "Epoch 647: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000658, lr=0.000106, step=648]\n", + "Epoch 648: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.992, loss=0.000562, lr=0.000105, step=649]\n", + "Epoch 649: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000902, lr=0.000105, step=650]\n", + "Epoch 650: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.000826, lr=0.000105, step=651]\n", + "Epoch 651: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.000503, lr=0.000104, step=652]\n", + "Epoch 652: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000645, lr=0.000104, step=653]\n", + "Epoch 653: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.992, loss=0.000551, lr=0.000104, step=654]\n", + "Epoch 654: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000545, lr=0.000103, step=655]\n", + "Epoch 655: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000848, lr=0.000103, step=656]\n", + "Epoch 656: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000561, lr=0.000103, step=657]\n", + "Epoch 657: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000609, lr=0.000103, step=658]\n", + "Epoch 658: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000722, lr=0.000102, step=659]\n", + "Epoch 659: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000832, lr=0.000102, step=660]\n", + "Epoch 660: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.000536, lr=0.000102, step=661]\n", + "Epoch 661: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000948, lr=0.000101, step=662]\n", + "Epoch 662: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00048, lr=0.000101, step=663]\n", + "Epoch 663: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000418, lr=0.000101, step=664]\n", + "Epoch 664: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00082, lr=0.000101, step=665]\n", + "Epoch 665: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000478, lr=0.0001, step=666]\n", + "Epoch 666: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000681, lr=9.99e-5, step=667]\n", + "Epoch 667: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000503, lr=9.96e-5, step=668]\n", + "Epoch 668: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000507, lr=9.93e-5, step=669]\n", + "Epoch 669: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.000528, lr=9.9e-5, step=670]\n", + "Epoch 670: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000361, lr=9.87e-5, step=671]\n", + "Epoch 671: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, 
ema_decay=0.992, loss=0.000562, lr=9.84e-5, step=672]\n", + "Epoch 672: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000445, lr=9.81e-5, step=673]\n", + "Epoch 673: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000528, lr=9.78e-5, step=674]\n", + "Epoch 674: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000567, lr=9.75e-5, step=675]\n", + "Epoch 675: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000612, lr=9.72e-5, step=676]\n", + "Epoch 676: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000957, lr=9.69e-5, step=677]\n", + "Epoch 677: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000549, lr=9.66e-5, step=678]\n", + "Epoch 678: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000412, lr=9.63e-5, step=679]\n", + "Epoch 679: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000554, lr=9.6e-5, step=680]\n", + "Epoch 680: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000474, lr=9.57e-5, step=681]\n", + "Epoch 681: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000442, lr=9.54e-5, step=682]\n", + "Epoch 682: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000918, lr=9.51e-5, step=683]\n", + "Epoch 683: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000711, lr=9.48e-5, step=684]\n", + "Epoch 684: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.000657, lr=9.45e-5, step=685]\n", + "Epoch 685: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000632, lr=9.42e-5, step=686]\n", + "Epoch 686: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000603, lr=9.39e-5, step=687]\n", + "Epoch 687: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000579, lr=9.36e-5, step=688]\n", + "Epoch 688: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000947, lr=9.33e-5, step=689]\n", + "Epoch 689: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00052, lr=9.3e-5, step=690]\n", + "Epoch 690: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00101, lr=9.27e-5, step=691]\n", + "Epoch 691: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000407, lr=9.24e-5, step=692]\n", + "Epoch 692: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000573, lr=9.21e-5, step=693]\n", + "Epoch 693: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000412, lr=9.18e-5, step=694]\n", + "Epoch 694: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000825, lr=9.15e-5, step=695]\n", + "Epoch 695: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000394, lr=9.12e-5, step=696]\n", + "Epoch 696: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000523, lr=9.09e-5, step=697]\n", + "Epoch 697: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00047, lr=9.06e-5, step=698]\n", + "Epoch 698: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000785, lr=9.03e-5, step=699]\n", + "Epoch 699: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000757, lr=9e-5, step=700]\n", + "Epoch 700: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00103, lr=8.97e-5, step=701]\n", + "Epoch 701: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000739, lr=8.94e-5, 
step=702]\n", + "Epoch 702: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00104, lr=8.91e-5, step=703]\n", + "Epoch 703: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.993, loss=0.000518, lr=8.88e-5, step=704]\n", + "Epoch 704: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000489, lr=8.85e-5, step=705]\n", + "Epoch 705: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000668, lr=8.82e-5, step=706]\n", + "Epoch 706: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00036, lr=8.79e-5, step=707]\n", + "Epoch 707: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000509, lr=8.76e-5, step=708]\n", + "Epoch 708: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000586, lr=8.73e-5, step=709]\n", + "Epoch 709: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000538, lr=8.7e-5, step=710]\n", + "Epoch 710: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000499, lr=8.67e-5, step=711]\n", + "Epoch 711: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000624, lr=8.64e-5, step=712]\n", + "Epoch 712: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000434, lr=8.61e-5, step=713]\n", + "Epoch 713: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000385, lr=8.58e-5, step=714]\n", + "Epoch 714: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000549, lr=8.55e-5, step=715]\n", + "Epoch 715: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000562, lr=8.52e-5, step=716]\n", + "Epoch 716: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000676, lr=8.49e-5, step=717]\n", + "Epoch 717: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000687, lr=8.46e-5, step=718]\n", + "Epoch 718: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000596, lr=8.43e-5, step=719]\n", + "Epoch 719: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000724, lr=8.4e-5, step=720]\n", + "Epoch 720: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000302, lr=8.37e-5, step=721]\n", + "Epoch 721: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.000407, lr=8.34e-5, step=722]\n", + "Epoch 722: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000545, lr=8.31e-5, step=723]\n", + "Epoch 723: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000629, lr=8.28e-5, step=724]\n", + "Epoch 724: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00062, lr=8.25e-5, step=725]\n", + "Epoch 725: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000615, lr=8.22e-5, step=726]\n", + "Epoch 726: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000504, lr=8.19e-5, step=727]\n", + "Epoch 727: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000501, lr=8.16e-5, step=728]\n", + "Epoch 728: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000615, lr=8.13e-5, step=729]\n", + "Epoch 729: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000698, lr=8.1e-5, step=730]\n", + "Epoch 730: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00056, lr=8.07e-5, step=731]\n", + "Epoch 731: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000537, lr=8.04e-5, step=732]\n", + "Epoch 732: 100%|██████████| 1/1 
[00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000828, lr=8.01e-5, step=733]\n", + "Epoch 733: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000592, lr=7.98e-5, step=734]\n", + "Epoch 734: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00058, lr=7.95e-5, step=735]\n", + "Epoch 735: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00114, lr=7.92e-5, step=736]\n", + "Epoch 736: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000936, lr=7.89e-5, step=737]\n", + "Epoch 737: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.000506, lr=7.86e-5, step=738]\n", + "Epoch 738: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000606, lr=7.83e-5, step=739]\n", + "Epoch 739: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000587, lr=7.8e-5, step=740]\n", + "Epoch 740: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.000703, lr=7.77e-5, step=741]\n", + "Epoch 741: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000478, lr=7.74e-5, step=742]\n", + "Epoch 742: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00107, lr=7.71e-5, step=743]\n", + "Epoch 743: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000574, lr=7.68e-5, step=744]\n", + "Epoch 744: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00083, lr=7.65e-5, step=745]\n", + "Epoch 745: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000726, lr=7.62e-5, step=746]\n", + "Epoch 746: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00109, lr=7.59e-5, step=747]\n", + "Epoch 747: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000596, lr=7.56e-5, step=748]\n", + "Epoch 748: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000546, lr=7.53e-5, step=749]\n", + "Epoch 749: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000592, lr=7.5e-5, step=750]\n", + "Epoch 750: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000427, lr=7.47e-5, step=751]\n", + "Epoch 751: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000401, lr=7.44e-5, step=752]\n", + "Epoch 752: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000474, lr=7.41e-5, step=753]\n", + "Epoch 753: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.000654, lr=7.38e-5, step=754]\n", + "Epoch 754: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000606, lr=7.35e-5, step=755]\n", + "Epoch 755: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000591, lr=7.32e-5, step=756]\n", + "Epoch 756: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000699, lr=7.29e-5, step=757]\n", + "Epoch 757: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00042, lr=7.26e-5, step=758]\n", + "Epoch 758: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000745, lr=7.23e-5, step=759]\n", + "Epoch 759: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000471, lr=7.2e-5, step=760]\n", + "Epoch 760: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000859, lr=7.17e-5, step=761]\n", + "Epoch 761: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000403, lr=7.14e-5, step=762]\n", + "Epoch 762: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, 
loss=0.000609, lr=7.11e-5, step=763]\n", + "Epoch 763: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000616, lr=7.08e-5, step=764]\n", + "Epoch 764: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000679, lr=7.05e-5, step=765]\n", + "Epoch 765: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000714, lr=7.02e-5, step=766]\n", + "Epoch 766: 100%|██████████| 1/1 [00:00<00:00, 1.36it/s, ema_decay=0.993, loss=0.000509, lr=6.99e-5, step=767]\n", + "Epoch 767: 100%|██████████| 1/1 [00:00<00:00, 1.09it/s, ema_decay=0.993, loss=0.000676, lr=6.96e-5, step=768]\n", + "Epoch 768: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.993, loss=0.00087, lr=6.93e-5, step=769]\n", + "Epoch 769: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000407, lr=6.9e-5, step=770]\n", + "Epoch 770: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00114, lr=6.87e-5, step=771]\n", + "Epoch 771: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000727, lr=6.84e-5, step=772]\n", + "Epoch 772: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000631, lr=6.81e-5, step=773]\n", + "Epoch 773: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000955, lr=6.78e-5, step=774]\n", + "Epoch 774: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000378, lr=6.75e-5, step=775]\n", + "Epoch 775: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000477, lr=6.72e-5, step=776]\n", + "Epoch 776: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000454, lr=6.69e-5, step=777]\n", + "Epoch 777: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00102, lr=6.66e-5, step=778]\n", + "Epoch 778: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000726, lr=6.63e-5, step=779]\n", + "Epoch 779: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000676, lr=6.6e-5, step=780]\n", + "Epoch 780: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000697, lr=6.57e-5, step=781]\n", + "Epoch 781: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000577, lr=6.54e-5, step=782]\n", + "Epoch 782: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000761, lr=6.51e-5, step=783]\n", + "Epoch 783: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000664, lr=6.48e-5, step=784]\n", + "Epoch 784: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000908, lr=6.45e-5, step=785]\n", + "Epoch 785: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000575, lr=6.42e-5, step=786]\n", + "Epoch 786: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00103, lr=6.39e-5, step=787]\n", + "Epoch 787: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000537, lr=6.36e-5, step=788]\n", + "Epoch 788: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000975, lr=6.33e-5, step=789]\n", + "Epoch 789: 100%|██████████| 1/1 [00:00<00:00, 1.32it/s, ema_decay=0.993, loss=0.000614, lr=6.3e-5, step=790]\n", + "Epoch 790: 100%|██████████| 1/1 [00:00<00:00, 1.33it/s, ema_decay=0.993, loss=0.000519, lr=6.27e-5, step=791]\n", + "Epoch 791: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.000922, lr=6.24e-5, step=792]\n", + "Epoch 792: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000452, lr=6.21e-5, step=793]\n", + "Epoch 
793: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00103, lr=6.18e-5, step=794]\n", + "Epoch 794: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.00055, lr=6.15e-5, step=795]\n", + "Epoch 795: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.000492, lr=6.12e-5, step=796]\n", + "Epoch 796: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0007, lr=6.09e-5, step=797]\n", + "Epoch 797: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000515, lr=6.06e-5, step=798]\n", + "Epoch 798: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000492, lr=6.03e-5, step=799]\n", + "Epoch 799: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000683, lr=6e-5, step=800]\n", + "Epoch 800: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000911, lr=5.97e-5, step=801]\n", + "Epoch 801: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000765, lr=5.94e-5, step=802]\n", + "Epoch 802: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.000443, lr=5.91e-5, step=803]\n", + "Epoch 803: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000538, lr=5.88e-5, step=804]\n", + "Epoch 804: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000558, lr=5.85e-5, step=805]\n", + "Epoch 805: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000633, lr=5.82e-5, step=806]\n", + "Epoch 806: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000896, lr=5.79e-5, step=807]\n", + "Epoch 807: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000592, lr=5.76e-5, step=808]\n", + "Epoch 808: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00044, lr=5.73e-5, step=809]\n", + "Epoch 809: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000566, lr=5.7e-5, step=810]\n", + "Epoch 810: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.000402, lr=5.67e-5, step=811]\n", + "Epoch 811: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00035, lr=5.64e-5, step=812]\n", + "Epoch 812: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000434, lr=5.61e-5, step=813]\n", + "Epoch 813: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000437, lr=5.58e-5, step=814]\n", + "Epoch 814: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000471, lr=5.55e-5, step=815]\n", + "Epoch 815: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000953, lr=5.52e-5, step=816]\n", + "Epoch 816: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.001, lr=5.49e-5, step=817]\n", + "Epoch 817: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000994, lr=5.46e-5, step=818]\n", + "Epoch 818: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000664, lr=5.43e-5, step=819]\n", + "Epoch 819: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000368, lr=5.4e-5, step=820]\n", + "Epoch 820: 100%|██████████| 1/1 [00:00<00:00, 1.15it/s, ema_decay=0.993, loss=0.000557, lr=5.37e-5, step=821]\n", + "Epoch 821: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.000528, lr=5.34e-5, step=822]\n", + "Epoch 822: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000471, lr=5.31e-5, step=823]\n", + "Epoch 823: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, 
ema_decay=0.993, loss=0.000539, lr=5.28e-5, step=824]\n", + "Epoch 824: 100%|██████████| 1/1 [00:00<00:00, 1.20it/s, ema_decay=0.993, loss=0.000422, lr=5.25e-5, step=825]\n", + "Epoch 825: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.994, loss=0.000801, lr=5.22e-5, step=826]\n", + "Epoch 826: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000615, lr=5.19e-5, step=827]\n", + "Epoch 827: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000855, lr=5.16e-5, step=828]\n", + "Epoch 828: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00066, lr=5.13e-5, step=829]\n", + "Epoch 829: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000446, lr=5.1e-5, step=830]\n", + "Epoch 830: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.000908, lr=5.07e-5, step=831]\n", + "Epoch 831: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0007, lr=5.04e-5, step=832]\n", + "Epoch 832: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000803, lr=5.01e-5, step=833]\n", + "Epoch 833: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000689, lr=4.98e-5, step=834]\n", + "Epoch 834: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00088, lr=4.95e-5, step=835]\n", + "Epoch 835: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000636, lr=4.92e-5, step=836]\n", + "Epoch 836: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000488, lr=4.89e-5, step=837]\n", + "Epoch 837: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000692, lr=4.86e-5, step=838]\n", + "Epoch 838: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000603, lr=4.83e-5, step=839]\n", + "Epoch 839: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000641, lr=4.8e-5, step=840]\n", + "Epoch 840: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000846, lr=4.77e-5, step=841]\n", + "Epoch 841: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000613, lr=4.74e-5, step=842]\n", + "Epoch 842: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000663, lr=4.71e-5, step=843]\n", + "Epoch 843: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000699, lr=4.68e-5, step=844]\n", + "Epoch 844: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000488, lr=4.65e-5, step=845]\n", + "Epoch 845: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.000481, lr=4.62e-5, step=846]\n", + "Epoch 846: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000711, lr=4.59e-5, step=847]\n", + "Epoch 847: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000442, lr=4.56e-5, step=848]\n", + "Epoch 848: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00045, lr=4.53e-5, step=849]\n", + "Epoch 849: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000655, lr=4.5e-5, step=850]\n", + "Epoch 850: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000447, lr=4.47e-5, step=851]\n", + "Epoch 851: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000564, lr=4.44e-5, step=852]\n", + "Epoch 852: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000695, lr=4.41e-5, step=853]\n", + "Epoch 853: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000633, lr=4.38e-5, 
step=854]\n",
+ "[... log truncated: Epochs 854-999 continue at ~1.5 it/s with ema_decay=0.994, loss fluctuating between roughly 3e-4 and 1.2e-3, and lr decaying linearly from 4.35e-5 to 0 at step=1000 ...]\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "93ccd71935ce42d494cc2bb845c5593d",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%|          | 0/50 [00:00<?, ?it/s]\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.21it/s, ema_decay=0, loss=0.111, lr=0.0003, step=1]\n",
+ "Epoch 1: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0, loss=0.577, lr=0.000299, step=2]\n",
+ "Epoch 2: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.405, loss=0.297, lr=0.000299, step=3]\n",
+ "Epoch 3: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.561, loss=0.144, lr=0.000299, step=4]\n",
+ "Epoch 4: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.646, loss=0.136, lr=0.000298, step=5]\n",
+ "[... log truncated: Epochs 5-462 continue at ~1.5 it/s; ema_decay warms up from 0.646 toward 0.99, loss falls from ~0.1 into the 2e-3 to 2e-2 range, and lr decays linearly from 0.000298 to 0.000161 ...]\n",
+ "Epoch 463: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, 
ema_decay=0.99, loss=0.00185, lr=0.000161, step=464]\n", + "Epoch 464: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00356, lr=0.00016, step=465]\n", + "Epoch 465: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00309, lr=0.00016, step=466]\n", + "Epoch 466: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00323, lr=0.00016, step=467]\n", + "Epoch 467: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0102, lr=0.00016, step=468]\n", + "Epoch 468: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00431, lr=0.000159, step=469]\n", + "Epoch 469: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00248, lr=0.000159, step=470]\n", + "Epoch 470: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00225, lr=0.000159, step=471]\n", + "Epoch 471: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00606, lr=0.000158, step=472]\n", + "Epoch 472: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00636, lr=0.000158, step=473]\n", + "Epoch 473: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00424, lr=0.000158, step=474]\n", + "Epoch 474: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00226, lr=0.000157, step=475]\n", + "Epoch 475: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00555, lr=0.000157, step=476]\n", + "Epoch 476: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0033, lr=0.000157, step=477]\n", + "Epoch 477: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00316, lr=0.000157, step=478]\n", + "Epoch 478: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00474, lr=0.000156, step=479]\n", + "Epoch 479: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00363, lr=0.000156, step=480]\n", + "Epoch 480: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00152, lr=0.000156, step=481]\n", + "Epoch 481: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00757, lr=0.000155, step=482]\n", + "Epoch 482: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00367, lr=0.000155, step=483]\n", + "Epoch 483: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0033, lr=0.000155, step=484]\n", + "Epoch 484: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00321, lr=0.000154, step=485]\n", + "Epoch 485: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00391, lr=0.000154, step=486]\n", + "Epoch 486: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00138, lr=0.000154, step=487]\n", + "Epoch 487: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00544, lr=0.000154, step=488]\n", + "Epoch 488: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0036, lr=0.000153, step=489]\n", + "Epoch 489: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00644, lr=0.000153, step=490]\n", + "Epoch 490: 100%|██████████| 1/1 [00:00<00:00, 1.21it/s, ema_decay=0.99, loss=0.000968, lr=0.000153, step=491]\n", + "Epoch 491: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.99, loss=0.00479, lr=0.000152, step=492]\n", + "Epoch 492: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00224, lr=0.000152, step=493]\n", + "Epoch 493: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00382, lr=0.000152, step=494]\n", + "Epoch 494: 
100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00516, lr=0.000151, step=495]\n", + "Epoch 495: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00323, lr=0.000151, step=496]\n", + "Epoch 496: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00351, lr=0.000151, step=497]\n", + "Epoch 497: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0029, lr=0.000151, step=498]\n", + "Epoch 498: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.00206, lr=0.00015, step=499]\n", + "Epoch 499: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00642, lr=0.00015, step=500]\n", + "Epoch 500: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00151, lr=0.00015, step=501]\n", + "Epoch 501: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00319, lr=0.000149, step=502]\n", + "Epoch 502: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0037, lr=0.000149, step=503]\n", + "Epoch 503: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00491, lr=0.000149, step=504]\n", + "Epoch 504: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00521, lr=0.000148, step=505]\n", + "Epoch 505: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00403, lr=0.000148, step=506]\n", + "Epoch 506: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00188, lr=0.000148, step=507]\n", + "Epoch 507: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00201, lr=0.000148, step=508]\n", + "Epoch 508: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00556, lr=0.000147, step=509]\n", + "Epoch 509: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00771, lr=0.000147, step=510]\n", + "Epoch 510: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00302, lr=0.000147, step=511]\n", + "Epoch 511: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.00327, lr=0.000146, step=512]\n", + "Epoch 512: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.0035, lr=0.000146, step=513]\n", + "Epoch 513: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00709, lr=0.000146, step=514]\n", + "Epoch 514: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00408, lr=0.000145, step=515]\n", + "Epoch 515: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0019, lr=0.000145, step=516]\n", + "Epoch 516: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00127, lr=0.000145, step=517]\n", + "Epoch 517: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00388, lr=0.000145, step=518]\n", + "Epoch 518: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00402, lr=0.000144, step=519]\n", + "Epoch 519: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00408, lr=0.000144, step=520]\n", + "Epoch 520: 100%|██████████| 1/1 [00:00<00:00, 1.15it/s, ema_decay=0.991, loss=0.00565, lr=0.000144, step=521]\n", + "Epoch 521: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00231, lr=0.000143, step=522]\n", + "Epoch 522: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00273, lr=0.000143, step=523]\n", + "Epoch 523: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00394, lr=0.000143, step=524]\n", + "Epoch 524: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, 
ema_decay=0.991, loss=0.00464, lr=0.000142, step=525]\n", + "Epoch 525: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00462, lr=0.000142, step=526]\n", + "Epoch 526: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00337, lr=0.000142, step=527]\n", + "Epoch 527: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00372, lr=0.000142, step=528]\n", + "Epoch 528: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00595, lr=0.000141, step=529]\n", + "Epoch 529: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00215, lr=0.000141, step=530]\n", + "Epoch 530: 100%|██████████| 1/1 [00:00<00:00, 1.29it/s, ema_decay=0.991, loss=0.0031, lr=0.000141, step=531]\n", + "Epoch 531: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00179, lr=0.00014, step=532]\n", + "Epoch 532: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00391, lr=0.00014, step=533]\n", + "Epoch 533: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00409, lr=0.00014, step=534]\n", + "Epoch 534: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00255, lr=0.00014, step=535]\n", + "Epoch 535: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00276, lr=0.000139, step=536]\n", + "Epoch 536: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0048, lr=0.000139, step=537]\n", + "Epoch 537: 100%|██████████| 1/1 [00:00<00:00, 1.35it/s, ema_decay=0.991, loss=0.00446, lr=0.000139, step=538]\n", + "Epoch 538: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.991, loss=0.0012, lr=0.000138, step=539]\n", + "Epoch 539: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00217, lr=0.000138, step=540]\n", + "Epoch 540: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00475, lr=0.000138, step=541]\n", + "Epoch 541: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000902, lr=0.000137, step=542]\n", + "Epoch 542: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.00401, lr=0.000137, step=543]\n", + "Epoch 543: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.991, loss=0.00133, lr=0.000137, step=544]\n", + "Epoch 544: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00435, lr=0.000136, step=545]\n", + "Epoch 545: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00234, lr=0.000136, step=546]\n", + "Epoch 546: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00278, lr=0.000136, step=547]\n", + "Epoch 547: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00387, lr=0.000136, step=548]\n", + "Epoch 548: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00119, lr=0.000135, step=549]\n", + "Epoch 549: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00283, lr=0.000135, step=550]\n", + "Epoch 550: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00294, lr=0.000135, step=551]\n", + "Epoch 551: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00434, lr=0.000134, step=552]\n", + "Epoch 552: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00442, lr=0.000134, step=553]\n", + "Epoch 553: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0013, lr=0.000134, step=554]\n", + "Epoch 554: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0022, lr=0.000133, 
step=555]\n", + "Epoch 555: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00249, lr=0.000133, step=556]\n", + "Epoch 556: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0034, lr=0.000133, step=557]\n", + "Epoch 557: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00303, lr=0.000133, step=558]\n", + "Epoch 558: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00643, lr=0.000132, step=559]\n", + "Epoch 559: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00341, lr=0.000132, step=560]\n", + "Epoch 560: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00398, lr=0.000132, step=561]\n", + "Epoch 561: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00244, lr=0.000131, step=562]\n", + "Epoch 562: 100%|██████████| 1/1 [00:00<00:00, 1.10it/s, ema_decay=0.991, loss=0.0025, lr=0.000131, step=563]\n", + "Epoch 563: 100%|██████████| 1/1 [00:00<00:00, 1.19it/s, ema_decay=0.991, loss=0.00607, lr=0.000131, step=564]\n", + "Epoch 564: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00451, lr=0.000131, step=565]\n", + "Epoch 565: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00161, lr=0.00013, step=566]\n", + "Epoch 566: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00391, lr=0.00013, step=567]\n", + "Epoch 567: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00367, lr=0.00013, step=568]\n", + "Epoch 568: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00234, lr=0.000129, step=569]\n", + "Epoch 569: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00275, lr=0.000129, step=570]\n", + "Epoch 570: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00327, lr=0.000129, step=571]\n", + "Epoch 571: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00381, lr=0.000128, step=572]\n", + "Epoch 572: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00394, lr=0.000128, step=573]\n", + "Epoch 573: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00437, lr=0.000128, step=574]\n", + "Epoch 574: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00327, lr=0.000127, step=575]\n", + "Epoch 575: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00256, lr=0.000127, step=576]\n", + "Epoch 576: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0013, lr=0.000127, step=577]\n", + "Epoch 577: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00124, lr=0.000127, step=578]\n", + "Epoch 578: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00556, lr=0.000126, step=579]\n", + "Epoch 579: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00212, lr=0.000126, step=580]\n", + "Epoch 580: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00302, lr=0.000126, step=581]\n", + "Epoch 581: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00306, lr=0.000125, step=582]\n", + "Epoch 582: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00225, lr=0.000125, step=583]\n", + "Epoch 583: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000967, lr=0.000125, step=584]\n", + "Epoch 584: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00486, lr=0.000124, step=585]\n", + "Epoch 585: 100%|██████████| 1/1 
[00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00346, lr=0.000124, step=586]\n", + "Epoch 586: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00345, lr=0.000124, step=587]\n", + "Epoch 587: 100%|██████████| 1/1 [00:00<00:00, 1.26it/s, ema_decay=0.992, loss=0.00248, lr=0.000124, step=588]\n", + "Epoch 588: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00347, lr=0.000123, step=589]\n", + "Epoch 589: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0029, lr=0.000123, step=590]\n", + "Epoch 590: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000899, lr=0.000123, step=591]\n", + "Epoch 591: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00326, lr=0.000122, step=592]\n", + "Epoch 592: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00456, lr=0.000122, step=593]\n", + "Epoch 593: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.00389, lr=0.000122, step=594]\n", + "Epoch 594: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00249, lr=0.000121, step=595]\n", + "Epoch 595: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00357, lr=0.000121, step=596]\n", + "Epoch 596: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.00601, lr=0.000121, step=597]\n", + "Epoch 597: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00186, lr=0.000121, step=598]\n", + "Epoch 598: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00386, lr=0.00012, step=599]\n", + "Epoch 599: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00225, lr=0.00012, step=600]\n", + "Epoch 600: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.00129, lr=0.00012, step=601]\n", + "Epoch 601: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00464, lr=0.000119, step=602]\n", + "Epoch 602: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00301, lr=0.000119, step=603]\n", + "Epoch 603: 100%|██████████| 1/1 [00:00<00:00, 1.23it/s, ema_decay=0.992, loss=0.00361, lr=0.000119, step=604]\n", + "Epoch 604: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.992, loss=0.00247, lr=0.000118, step=605]\n", + "Epoch 605: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.00571, lr=0.000118, step=606]\n", + "Epoch 606: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00184, lr=0.000118, step=607]\n", + "Epoch 607: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0021, lr=0.000118, step=608]\n", + "Epoch 608: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.005, lr=0.000117, step=609]\n", + "Epoch 609: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00494, lr=0.000117, step=610]\n", + "Epoch 610: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00341, lr=0.000117, step=611]\n", + "Epoch 611: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00164, lr=0.000116, step=612]\n", + "Epoch 612: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00209, lr=0.000116, step=613]\n", + "Epoch 613: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00446, lr=0.000116, step=614]\n", + "Epoch 614: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00447, lr=0.000115, step=615]\n", + "Epoch 615: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, 
loss=0.00366, lr=0.000115, step=616]\n", + "Epoch 616: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00465, lr=0.000115, step=617]\n", + "Epoch 617: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00352, lr=0.000115, step=618]\n", + "Epoch 618: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00192, lr=0.000114, step=619]\n", + "Epoch 619: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00304, lr=0.000114, step=620]\n", + "Epoch 620: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00408, lr=0.000114, step=621]\n", + "Epoch 621: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00313, lr=0.000113, step=622]\n", + "Epoch 622: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00291, lr=0.000113, step=623]\n", + "Epoch 623: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00125, lr=0.000113, step=624]\n", + "Epoch 624: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00247, lr=0.000112, step=625]\n", + "Epoch 625: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00324, lr=0.000112, step=626]\n", + "Epoch 626: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00239, lr=0.000112, step=627]\n", + "Epoch 627: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0035, lr=0.000112, step=628]\n", + "Epoch 628: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00185, lr=0.000111, step=629]\n", + "Epoch 629: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0025, lr=0.000111, step=630]\n", + "Epoch 630: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00285, lr=0.000111, step=631]\n", + "Epoch 631: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00193, lr=0.00011, step=632]\n", + "Epoch 632: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00654, lr=0.00011, step=633]\n", + "Epoch 633: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00366, lr=0.00011, step=634]\n", + "Epoch 634: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00312, lr=0.000109, step=635]\n", + "Epoch 635: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00167, lr=0.000109, step=636]\n", + "Epoch 636: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0068, lr=0.000109, step=637]\n", + "Epoch 637: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00198, lr=0.000109, step=638]\n", + "Epoch 638: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00148, lr=0.000108, step=639]\n", + "Epoch 639: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00289, lr=0.000108, step=640]\n", + "Epoch 640: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00306, lr=0.000108, step=641]\n", + "Epoch 641: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000712, lr=0.000107, step=642]\n", + "Epoch 642: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00406, lr=0.000107, step=643]\n", + "Epoch 643: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00393, lr=0.000107, step=644]\n", + "Epoch 644: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00288, lr=0.000106, step=645]\n", + "Epoch 645: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00358, lr=0.000106, step=646]\n", + "Epoch 
646: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00236, lr=0.000106, step=647]\n", + "Epoch 647: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00543, lr=0.000106, step=648]\n", + "Epoch 648: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00315, lr=0.000105, step=649]\n", + "Epoch 649: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00466, lr=0.000105, step=650]\n", + "Epoch 650: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00267, lr=0.000105, step=651]\n", + "Epoch 651: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00368, lr=0.000104, step=652]\n", + "Epoch 652: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00258, lr=0.000104, step=653]\n", + "Epoch 653: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.00416, lr=0.000104, step=654]\n", + "Epoch 654: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.992, loss=0.00143, lr=0.000103, step=655]\n", + "Epoch 655: 100%|██████████| 1/1 [00:00<00:00, 1.21it/s, ema_decay=0.992, loss=0.00256, lr=0.000103, step=656]\n", + "Epoch 656: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00208, lr=0.000103, step=657]\n", + "Epoch 657: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00134, lr=0.000103, step=658]\n", + "Epoch 658: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.00351, lr=0.000102, step=659]\n", + "Epoch 659: 100%|██████████| 1/1 [00:00<00:00, 1.34it/s, ema_decay=0.992, loss=0.00262, lr=0.000102, step=660]\n", + "Epoch 660: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00307, lr=0.000102, step=661]\n", + "Epoch 661: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00214, lr=0.000101, step=662]\n", + "Epoch 662: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00219, lr=0.000101, step=663]\n", + "Epoch 663: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00398, lr=0.000101, step=664]\n", + "Epoch 664: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00243, lr=0.000101, step=665]\n", + "Epoch 665: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00214, lr=0.0001, step=666]\n", + "Epoch 666: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00469, lr=9.99e-5, step=667]\n", + "Epoch 667: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00359, lr=9.96e-5, step=668]\n", + "Epoch 668: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00297, lr=9.93e-5, step=669]\n", + "Epoch 669: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00198, lr=9.9e-5, step=670]\n", + "Epoch 670: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00228, lr=9.87e-5, step=671]\n", + "Epoch 671: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00247, lr=9.84e-5, step=672]\n", + "Epoch 672: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00217, lr=9.81e-5, step=673]\n", + "Epoch 673: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00342, lr=9.78e-5, step=674]\n", + "Epoch 674: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0013, lr=9.75e-5, step=675]\n", + "Epoch 675: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00247, lr=9.72e-5, step=676]\n", + "Epoch 676: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, 
ema_decay=0.992, loss=0.00273, lr=9.69e-5, step=677]\n", + "Epoch 677: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00256, lr=9.66e-5, step=678]\n", + "Epoch 678: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00156, lr=9.63e-5, step=679]\n", + "Epoch 679: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00114, lr=9.6e-5, step=680]\n", + "Epoch 680: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00209, lr=9.57e-5, step=681]\n", + "Epoch 681: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00175, lr=9.54e-5, step=682]\n", + "Epoch 682: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00276, lr=9.51e-5, step=683]\n", + "Epoch 683: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000822, lr=9.48e-5, step=684]\n", + "Epoch 684: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00232, lr=9.45e-5, step=685]\n", + "Epoch 685: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0025, lr=9.42e-5, step=686]\n", + "Epoch 686: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00111, lr=9.39e-5, step=687]\n", + "Epoch 687: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00244, lr=9.36e-5, step=688]\n", + "Epoch 688: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00252, lr=9.33e-5, step=689]\n", + "Epoch 689: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00197, lr=9.3e-5, step=690]\n", + "Epoch 690: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00326, lr=9.27e-5, step=691]\n", + "Epoch 691: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00288, lr=9.24e-5, step=692]\n", + "Epoch 692: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0015, lr=9.21e-5, step=693]\n", + "Epoch 693: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00107, lr=9.18e-5, step=694]\n", + "Epoch 694: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00249, lr=9.15e-5, step=695]\n", + "Epoch 695: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00293, lr=9.12e-5, step=696]\n", + "Epoch 696: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00174, lr=9.09e-5, step=697]\n", + "Epoch 697: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00228, lr=9.06e-5, step=698]\n", + "Epoch 698: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0023, lr=9.03e-5, step=699]\n", + "Epoch 699: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00635, lr=9e-5, step=700]\n", + "Epoch 700: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00185, lr=8.97e-5, step=701]\n", + "Epoch 701: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00337, lr=8.94e-5, step=702]\n", + "Epoch 702: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00159, lr=8.91e-5, step=703]\n", + "Epoch 703: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00279, lr=8.88e-5, step=704]\n", + "Epoch 704: 100%|██████████| 1/1 [00:00<00:00, 1.22it/s, ema_decay=0.993, loss=0.00329, lr=8.85e-5, step=705]\n", + "Epoch 705: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00245, lr=8.82e-5, step=706]\n", + "Epoch 706: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00314, lr=8.79e-5, step=707]\n", + "Epoch 707: 
100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00266, lr=8.76e-5, step=708]\n", + "Epoch 708: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00163, lr=8.73e-5, step=709]\n", + "Epoch 709: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00343, lr=8.7e-5, step=710]\n", + "Epoch 710: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00437, lr=8.67e-5, step=711]\n", + "Epoch 711: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00393, lr=8.64e-5, step=712]\n", + "Epoch 712: 100%|██████████| 1/1 [00:00<00:00, 1.18it/s, ema_decay=0.993, loss=0.00185, lr=8.61e-5, step=713]\n", + "Epoch 713: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00374, lr=8.58e-5, step=714]\n", + "Epoch 714: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00175, lr=8.55e-5, step=715]\n", + "Epoch 715: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00127, lr=8.52e-5, step=716]\n", + "Epoch 716: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00394, lr=8.49e-5, step=717]\n", + "Epoch 717: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00123, lr=8.46e-5, step=718]\n", + "Epoch 718: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00236, lr=8.43e-5, step=719]\n", + "Epoch 719: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00149, lr=8.4e-5, step=720]\n", + "Epoch 720: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00141, lr=8.37e-5, step=721]\n", + "Epoch 721: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00432, lr=8.34e-5, step=722]\n", + "Epoch 722: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00413, lr=8.31e-5, step=723]\n", + "Epoch 723: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00123, lr=8.28e-5, step=724]\n", + "Epoch 724: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00372, lr=8.25e-5, step=725]\n", + "Epoch 725: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00228, lr=8.22e-5, step=726]\n", + "Epoch 726: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00736, lr=8.19e-5, step=727]\n", + "Epoch 727: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00298, lr=8.16e-5, step=728]\n", + "Epoch 728: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00264, lr=8.13e-5, step=729]\n", + "Epoch 729: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00577, lr=8.1e-5, step=730]\n", + "Epoch 730: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00166, lr=8.07e-5, step=731]\n", + "Epoch 731: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.00146, lr=8.04e-5, step=732]\n", + "Epoch 732: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00174, lr=8.01e-5, step=733]\n", + "Epoch 733: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00192, lr=7.98e-5, step=734]\n", + "Epoch 734: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00078, lr=7.95e-5, step=735]\n", + "Epoch 735: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00481, lr=7.92e-5, step=736]\n", + "Epoch 736: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.004, lr=7.89e-5, step=737]\n", + "Epoch 737: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00087, 
lr=7.86e-5, step=738]\n", + "Epoch 738: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00256, lr=7.83e-5, step=739]\n", + "Epoch 739: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00408, lr=7.8e-5, step=740]\n", + "Epoch 740: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00317, lr=7.77e-5, step=741]\n", + "Epoch 741: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00206, lr=7.74e-5, step=742]\n", + "Epoch 742: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00482, lr=7.71e-5, step=743]\n", + "Epoch 743: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00468, lr=7.68e-5, step=744]\n", + "Epoch 744: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00385, lr=7.65e-5, step=745]\n", + "Epoch 745: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00103, lr=7.62e-5, step=746]\n", + "Epoch 746: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00424, lr=7.59e-5, step=747]\n", + "Epoch 747: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00192, lr=7.56e-5, step=748]\n", + "Epoch 748: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00288, lr=7.53e-5, step=749]\n", + "Epoch 749: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00235, lr=7.5e-5, step=750]\n", + "Epoch 750: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00538, lr=7.47e-5, step=751]\n", + "Epoch 751: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00273, lr=7.44e-5, step=752]\n", + "Epoch 752: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00123, lr=7.41e-5, step=753]\n", + "Epoch 753: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00317, lr=7.38e-5, step=754]\n", + "Epoch 754: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00172, lr=7.35e-5, step=755]\n", + "Epoch 755: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00503, lr=7.32e-5, step=756]\n", + "Epoch 756: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00191, lr=7.29e-5, step=757]\n", + "Epoch 757: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00377, lr=7.26e-5, step=758]\n", + "Epoch 758: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00386, lr=7.23e-5, step=759]\n", + "Epoch 759: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00335, lr=7.2e-5, step=760]\n", + "Epoch 760: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00185, lr=7.17e-5, step=761]\n", + "Epoch 761: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00284, lr=7.14e-5, step=762]\n", + "Epoch 762: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00615, lr=7.11e-5, step=763]\n", + "Epoch 763: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00233, lr=7.08e-5, step=764]\n", + "Epoch 764: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00238, lr=7.05e-5, step=765]\n", + "Epoch 765: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00259, lr=7.02e-5, step=766]\n", + "Epoch 766: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00299, lr=6.99e-5, step=767]\n", + "Epoch 767: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00273, lr=6.96e-5, step=768]\n", + "Epoch 768: 100%|██████████| 1/1 [00:00<00:00, 
1.50it/s, ema_decay=0.993, loss=0.00263, lr=6.93e-5, step=769]\n", + "Epoch 769: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00245, lr=6.9e-5, step=770]\n", + "Epoch 770: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00205, lr=6.87e-5, step=771]\n", + "Epoch 771: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00157, lr=6.84e-5, step=772]\n", + "Epoch 772: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00476, lr=6.81e-5, step=773]\n", + "Epoch 773: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00149, lr=6.78e-5, step=774]\n", + "Epoch 774: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00195, lr=6.75e-5, step=775]\n", + "Epoch 775: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00183, lr=6.72e-5, step=776]\n", + "Epoch 776: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.0024, lr=6.69e-5, step=777]\n", + "Epoch 777: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00201, lr=6.66e-5, step=778]\n", + "Epoch 778: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00349, lr=6.63e-5, step=779]\n", + "Epoch 779: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0013, lr=6.6e-5, step=780]\n", + "Epoch 780: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00169, lr=6.57e-5, step=781]\n", + "Epoch 781: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00256, lr=6.54e-5, step=782]\n", + "Epoch 782: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00377, lr=6.51e-5, step=783]\n", + "Epoch 783: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00158, lr=6.48e-5, step=784]\n", + "Epoch 784: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.0026, lr=6.45e-5, step=785]\n", + "Epoch 785: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00215, lr=6.42e-5, step=786]\n", + "Epoch 786: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0024, lr=6.39e-5, step=787]\n", + "Epoch 787: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00155, lr=6.36e-5, step=788]\n", + "Epoch 788: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00353, lr=6.33e-5, step=789]\n", + "Epoch 789: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000675, lr=6.3e-5, step=790]\n", + "Epoch 790: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0023, lr=6.27e-5, step=791]\n", + "Epoch 791: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0014, lr=6.24e-5, step=792]\n", + "Epoch 792: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00502, lr=6.21e-5, step=793]\n", + "Epoch 793: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00321, lr=6.18e-5, step=794]\n", + "Epoch 794: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0035, lr=6.15e-5, step=795]\n", + "Epoch 795: 100%|██████████| 1/1 [00:00<00:00, 1.32it/s, ema_decay=0.993, loss=0.00198, lr=6.12e-5, step=796]\n", + "Epoch 796: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00156, lr=6.09e-5, step=797]\n", + "Epoch 797: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00196, lr=6.06e-5, step=798]\n", + "Epoch 798: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00456, lr=6.03e-5, step=799]\n", + "Epoch 799: 
100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00082, lr=6e-5, step=800]\n", + "Epoch 800: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0016, lr=5.97e-5, step=801]\n", + "Epoch 801: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00114, lr=5.94e-5, step=802]\n", + "Epoch 802: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00273, lr=5.91e-5, step=803]\n", + "Epoch 803: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00238, lr=5.88e-5, step=804]\n", + "Epoch 804: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00312, lr=5.85e-5, step=805]\n", + "Epoch 805: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00316, lr=5.82e-5, step=806]\n", + "Epoch 806: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00212, lr=5.79e-5, step=807]\n", + "Epoch 807: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00101, lr=5.76e-5, step=808]\n", + "Epoch 808: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00144, lr=5.73e-5, step=809]\n", + "Epoch 809: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00145, lr=5.7e-5, step=810]\n", + "Epoch 810: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00427, lr=5.67e-5, step=811]\n", + "Epoch 811: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00176, lr=5.64e-5, step=812]\n", + "Epoch 812: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00329, lr=5.61e-5, step=813]\n", + "Epoch 813: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00329, lr=5.58e-5, step=814]\n", + "Epoch 814: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000988, lr=5.55e-5, step=815]\n", + "Epoch 815: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00542, lr=5.52e-5, step=816]\n", + "Epoch 816: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00457, lr=5.49e-5, step=817]\n", + "Epoch 817: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00286, lr=5.46e-5, step=818]\n", + "Epoch 818: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00321, lr=5.43e-5, step=819]\n", + "Epoch 819: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.003, lr=5.4e-5, step=820]\n", + "Epoch 820: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0046, lr=5.37e-5, step=821]\n", + "Epoch 821: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.0043, lr=5.34e-5, step=822]\n", + "Epoch 822: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00307, lr=5.31e-5, step=823]\n", + "Epoch 823: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00188, lr=5.28e-5, step=824]\n", + "Epoch 824: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00468, lr=5.25e-5, step=825]\n", + "Epoch 825: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00339, lr=5.22e-5, step=826]\n", + "Epoch 826: 100%|██████████| 1/1 [00:00<00:00, 1.23it/s, ema_decay=0.994, loss=0.00307, lr=5.19e-5, step=827]\n", + "Epoch 827: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.00189, lr=5.16e-5, step=828]\n", + "Epoch 828: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00286, lr=5.13e-5, step=829]\n", + "Epoch 829: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00298, 
lr=5.1e-5, step=830]\n", + "Epoch 830: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00444, lr=5.07e-5, step=831]\n", + "Epoch 831: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0017, lr=5.04e-5, step=832]\n", + "Epoch 832: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.000803, lr=5.01e-5, step=833]\n", + "Epoch 833: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000553, lr=4.98e-5, step=834]\n", + "Epoch 834: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.002, lr=4.95e-5, step=835]\n", + "Epoch 835: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00145, lr=4.92e-5, step=836]\n", + "Epoch 836: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00274, lr=4.89e-5, step=837]\n", + "Epoch 837: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00124, lr=4.86e-5, step=838]\n", + "Epoch 838: 100%|██████████| 1/1 [00:00<00:00, 1.33it/s, ema_decay=0.994, loss=0.00147, lr=4.83e-5, step=839]\n", + "Epoch 839: 100%|██████████| 1/1 [00:00<00:00, 1.31it/s, ema_decay=0.994, loss=0.0025, lr=4.8e-5, step=840]\n", + "Epoch 840: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00393, lr=4.77e-5, step=841]\n", + "Epoch 841: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00208, lr=4.74e-5, step=842]\n", + "Epoch 842: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.003, lr=4.71e-5, step=843]\n", + "Epoch 843: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00126, lr=4.68e-5, step=844]\n", + "Epoch 844: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00344, lr=4.65e-5, step=845]\n", + "Epoch 845: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00307, lr=4.62e-5, step=846]\n", + "Epoch 846: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00214, lr=4.59e-5, step=847]\n", + "Epoch 847: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00283, lr=4.56e-5, step=848]\n", + "Epoch 848: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.00341, lr=4.53e-5, step=849]\n", + "Epoch 849: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00226, lr=4.5e-5, step=850]\n", + "Epoch 850: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00473, lr=4.47e-5, step=851]\n", + "Epoch 851: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00469, lr=4.44e-5, step=852]\n", + "Epoch 852: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00164, lr=4.41e-5, step=853]\n", + "Epoch 853: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00392, lr=4.38e-5, step=854]\n", + "Epoch 854: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.00174, lr=4.35e-5, step=855]\n", + "Epoch 855: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00247, lr=4.32e-5, step=856]\n", + "Epoch 856: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00303, lr=4.29e-5, step=857]\n", + "Epoch 857: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0018, lr=4.26e-5, step=858]\n", + "Epoch 858: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00336, lr=4.23e-5, step=859]\n", + "Epoch 859: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00387, lr=4.2e-5, step=860]\n", + "Epoch 860: 100%|██████████| 1/1 [00:00<00:00, 
1.50it/s, ema_decay=0.994, loss=0.00192, lr=4.17e-5, step=861]\n",
+    [per-epoch tqdm log elided: "Epoch 861" through "Epoch 999" continue at ~1.5 it/s with ema_decay=0.994; loss fluctuates between roughly 0.0005 and 0.005 while the lr anneals linearly from 4.17e-5 down to 0, ending at step=1000]
+   ]
+  },
+  {
+   "data": {
+    "application/vnd.jupyter.widget-view+json": {
+     "model_id": "0e5c2fa8fcec4feb8e30865591686739",
+     "version_major": 2,
+     "version_minor": 0
+    },
+    "text/plain": [
+     "  0%|          | 0/50 [00:00<?, ?it/s]\n"
+    ]
+   },
   {
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    [previous run's tqdm log removed: "Epoch 0" through "Epoch 299" at ~1.5 it/s; loss falls from an initial 0.055-0.32 down to about 0.018 as the lr anneals from 0.000299 to 0 and ema_decay warms up from 0 to 0.986, ending at step=300]
+    [replacement run's tqdm log added: "Epoch 0" through "Epoch 207" at ~1.5 it/s; loss starts near 0.19-0.73 and settles around 0.01-0.15 as the lr anneals from 0.0003 and ema_decay warms up from 0 to 0.982]
+    "Epoch 208: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0154, lr=0.000237, 
step=209]\n", + "Epoch 209: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.0949, lr=0.000237, step=210]\n", + "Epoch 210: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0431, lr=0.000237, step=211]\n", + "Epoch 211: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0602, lr=0.000236, step=212]\n", + "Epoch 212: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0487, lr=0.000236, step=213]\n", + "Epoch 213: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0593, lr=0.000236, step=214]\n", + "Epoch 214: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0618, lr=0.000235, step=215]\n", + "Epoch 215: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0747, lr=0.000235, step=216]\n", + "Epoch 216: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.0601, lr=0.000235, step=217]\n", + "Epoch 217: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.0255, lr=0.000235, step=218]\n", + "Epoch 218: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.982, loss=0.0117, lr=0.000234, step=219]\n", + "Epoch 219: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.0809, lr=0.000234, step=220]\n", + "Epoch 220: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0715, lr=0.000234, step=221]\n", + "Epoch 221: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.031, lr=0.000233, step=222]\n", + "Epoch 222: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.0789, lr=0.000233, step=223]\n", + "Epoch 223: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.0409, lr=0.000233, step=224]\n", + "Epoch 224: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.0838, lr=0.000232, step=225]\n", + "Epoch 225: 100%|██████████| 1/1 [00:00<00:00, 1.18it/s, ema_decay=0.983, loss=0.0293, lr=0.000232, step=226]\n", + "Epoch 226: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.983, loss=0.083, lr=0.000232, step=227]\n", + "Epoch 227: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.983, loss=0.0684, lr=0.000232, step=228]\n", + "Epoch 228: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.0396, lr=0.000231, step=229]\n", + "Epoch 229: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.983, loss=0.1, lr=0.000231, step=230]\n", + "Epoch 230: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.983, loss=0.0363, lr=0.000231, step=231]\n", + "Epoch 231: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.0175, lr=0.00023, step=232]\n", + "Epoch 232: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.983, loss=0.0139, lr=0.00023, step=233]\n", + "Epoch 233: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.0395, lr=0.00023, step=234]\n", + "Epoch 234: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.0387, lr=0.000229, step=235]\n", + "Epoch 235: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.983, loss=0.0644, lr=0.000229, step=236]\n", + "Epoch 236: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.983, loss=0.0278, lr=0.000229, step=237]\n", + "Epoch 237: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.101, lr=0.000229, step=238]\n", + "Epoch 238: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.0307, lr=0.000228, step=239]\n", + "Epoch 239: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, 
ema_decay=0.984, loss=0.0602, lr=0.000228, step=240]\n", + "Epoch 240: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.0341, lr=0.000228, step=241]\n", + "Epoch 241: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0618, lr=0.000227, step=242]\n", + "Epoch 242: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0595, lr=0.000227, step=243]\n", + "Epoch 243: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00958, lr=0.000227, step=244]\n", + "Epoch 244: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0873, lr=0.000226, step=245]\n", + "Epoch 245: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0635, lr=0.000226, step=246]\n", + "Epoch 246: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0591, lr=0.000226, step=247]\n", + "Epoch 247: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0743, lr=0.000226, step=248]\n", + "Epoch 248: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0798, lr=0.000225, step=249]\n", + "Epoch 249: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.0273, lr=0.000225, step=250]\n", + "Epoch 250: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.984, loss=0.0108, lr=0.000225, step=251]\n", + "Epoch 251: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0498, lr=0.000224, step=252]\n", + "Epoch 252: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.0818, lr=0.000224, step=253]\n", + "Epoch 253: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0363, lr=0.000224, step=254]\n", + "Epoch 254: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.0444, lr=0.000223, step=255]\n", + "Epoch 255: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.984, loss=0.0734, lr=0.000223, step=256]\n", + "Epoch 256: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.0418, lr=0.000223, step=257]\n", + "Epoch 257: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0853, lr=0.000223, step=258]\n", + "Epoch 258: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.984, loss=0.0556, lr=0.000222, step=259]\n", + "Epoch 259: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.0483, lr=0.000222, step=260]\n", + "Epoch 260: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.985, loss=0.0692, lr=0.000222, step=261]\n", + "Epoch 261: 100%|██████████| 1/1 [00:00<00:00, 1.20it/s, ema_decay=0.985, loss=0.0479, lr=0.000221, step=262]\n", + "Epoch 262: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.0264, lr=0.000221, step=263]\n", + "Epoch 263: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.0554, lr=0.000221, step=264]\n", + "Epoch 264: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.066, lr=0.00022, step=265]\n", + "Epoch 265: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.0483, lr=0.00022, step=266]\n", + "Epoch 266: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.0526, lr=0.00022, step=267]\n", + "Epoch 267: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.0613, lr=0.00022, step=268]\n", + "Epoch 268: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0223, lr=0.000219, step=269]\n", + "Epoch 269: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0406, lr=0.000219, step=270]\n", + "Epoch 270: 
100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0246, lr=0.000219, step=271]\n", + "Epoch 271: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.985, loss=0.0728, lr=0.000218, step=272]\n", + "Epoch 272: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.0745, lr=0.000218, step=273]\n", + "Epoch 273: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0422, lr=0.000218, step=274]\n", + "Epoch 274: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.985, loss=0.0452, lr=0.000217, step=275]\n", + "Epoch 275: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0441, lr=0.000217, step=276]\n", + "Epoch 276: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.0659, lr=0.000217, step=277]\n", + "Epoch 277: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.985, loss=0.136, lr=0.000217, step=278]\n", + "Epoch 278: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0157, lr=0.000216, step=279]\n", + "Epoch 279: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.128, lr=0.000216, step=280]\n", + "Epoch 280: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0729, lr=0.000216, step=281]\n", + "Epoch 281: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.0577, lr=0.000215, step=282]\n", + "Epoch 282: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.0266, lr=0.000215, step=283]\n", + "Epoch 283: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0839, lr=0.000215, step=284]\n", + "Epoch 284: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.137, lr=0.000214, step=285]\n", + "Epoch 285: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.0399, lr=0.000214, step=286]\n", + "Epoch 286: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0362, lr=0.000214, step=287]\n", + "Epoch 287: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0652, lr=0.000214, step=288]\n", + "Epoch 288: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0577, lr=0.000213, step=289]\n", + "Epoch 289: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0412, lr=0.000213, step=290]\n", + "Epoch 290: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.0259, lr=0.000213, step=291]\n", + "Epoch 291: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0375, lr=0.000212, step=292]\n", + "Epoch 292: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0476, lr=0.000212, step=293]\n", + "Epoch 293: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0684, lr=0.000212, step=294]\n", + "Epoch 294: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0435, lr=0.000211, step=295]\n", + "Epoch 295: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0565, lr=0.000211, step=296]\n", + "Epoch 296: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0556, lr=0.000211, step=297]\n", + "Epoch 297: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0906, lr=0.000211, step=298]\n", + "Epoch 298: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.128, lr=0.00021, step=299]\n", + "Epoch 299: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0318, lr=0.00021, step=300]\n", + "Epoch 300: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0805, 
lr=0.00021, step=301]\n", + "Epoch 301: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.0705, lr=0.000209, step=302]\n", + "Epoch 302: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0434, lr=0.000209, step=303]\n", + "Epoch 303: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0749, lr=0.000209, step=304]\n", + "Epoch 304: 100%|██████████| 1/1 [00:00<00:00, 1.20it/s, ema_decay=0.986, loss=0.108, lr=0.000208, step=305]\n", + "Epoch 305: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.986, loss=0.0427, lr=0.000208, step=306]\n", + "Epoch 306: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0482, lr=0.000208, step=307]\n", + "Epoch 307: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0292, lr=0.000208, step=308]\n", + "Epoch 308: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0347, lr=0.000207, step=309]\n", + "Epoch 309: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.986, loss=0.0293, lr=0.000207, step=310]\n", + "Epoch 310: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.986, loss=0.0437, lr=0.000207, step=311]\n", + "Epoch 311: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00857, lr=0.000206, step=312]\n", + "Epoch 312: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.987, loss=0.0832, lr=0.000206, step=313]\n", + "Epoch 313: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0412, lr=0.000206, step=314]\n", + "Epoch 314: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0833, lr=0.000206, step=315]\n", + "Epoch 315: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0528, lr=0.000205, step=316]\n", + "Epoch 316: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.0743, lr=0.000205, step=317]\n", + "Epoch 317: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.0464, lr=0.000205, step=318]\n", + "Epoch 318: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.987, loss=0.0588, lr=0.000204, step=319]\n", + "Epoch 319: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.0508, lr=0.000204, step=320]\n", + "Epoch 320: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00674, lr=0.000204, step=321]\n", + "Epoch 321: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0276, lr=0.000203, step=322]\n", + "Epoch 322: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.0965, lr=0.000203, step=323]\n", + "Epoch 323: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00922, lr=0.000203, step=324]\n", + "Epoch 324: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0866, lr=0.000202, step=325]\n", + "Epoch 325: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.0161, lr=0.000202, step=326]\n", + "Epoch 326: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.0245, lr=0.000202, step=327]\n", + "Epoch 327: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0533, lr=0.000202, step=328]\n", + "Epoch 328: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0269, lr=0.000201, step=329]\n", + "Epoch 329: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0541, lr=0.000201, step=330]\n", + "Epoch 330: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.0529, lr=0.000201, step=331]\n", + "Epoch 331: 100%|██████████| 1/1 
[00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0343, lr=0.0002, step=332]\n", + "Epoch 332: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.0188, lr=0.0002, step=333]\n", + "Epoch 333: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.987, loss=0.0515, lr=0.0002, step=334]\n", + "Epoch 334: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.103, lr=0.000199, step=335]\n", + "Epoch 335: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0516, lr=0.000199, step=336]\n", + "Epoch 336: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.987, loss=0.0122, lr=0.000199, step=337]\n", + "Epoch 337: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.0443, lr=0.000199, step=338]\n", + "Epoch 338: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.987, loss=0.0347, lr=0.000198, step=339]\n", + "Epoch 339: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.0415, lr=0.000198, step=340]\n", + "Epoch 340: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0577, lr=0.000198, step=341]\n", + "Epoch 341: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0238, lr=0.000197, step=342]\n", + "Epoch 342: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.0633, lr=0.000197, step=343]\n", + "Epoch 343: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.0126, lr=0.000197, step=344]\n", + "Epoch 344: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.046, lr=0.000196, step=345]\n", + "Epoch 345: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0676, lr=0.000196, step=346]\n", + "Epoch 346: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0165, lr=0.000196, step=347]\n", + "Epoch 347: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.00481, lr=0.000196, step=348]\n", + "Epoch 348: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.01, lr=0.000195, step=349]\n", + "Epoch 349: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.0172, lr=0.000195, step=350]\n", + "Epoch 350: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.135, lr=0.000195, step=351]\n", + "Epoch 351: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.0179, lr=0.000194, step=352]\n", + "Epoch 352: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0661, lr=0.000194, step=353]\n", + "Epoch 353: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0348, lr=0.000194, step=354]\n", + "Epoch 354: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.0539, lr=0.000193, step=355]\n", + "Epoch 355: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0509, lr=0.000193, step=356]\n", + "Epoch 356: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0318, lr=0.000193, step=357]\n", + "Epoch 357: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.136, lr=0.000193, step=358]\n", + "Epoch 358: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0767, lr=0.000192, step=359]\n", + "Epoch 359: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0278, lr=0.000192, step=360]\n", + "Epoch 360: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0305, lr=0.000192, step=361]\n", + "Epoch 361: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0708, lr=0.000191, step=362]\n", + 
"Epoch 362: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0461, lr=0.000191, step=363]\n", + "Epoch 363: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0753, lr=0.000191, step=364]\n", + "Epoch 364: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0445, lr=0.00019, step=365]\n", + "Epoch 365: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.988, loss=0.0818, lr=0.00019, step=366]\n", + "Epoch 366: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.031, lr=0.00019, step=367]\n", + "Epoch 367: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0722, lr=0.00019, step=368]\n", + "Epoch 368: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.027, lr=0.000189, step=369]\n", + "Epoch 369: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.0425, lr=0.000189, step=370]\n", + "Epoch 370: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0546, lr=0.000189, step=371]\n", + "Epoch 371: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.055, lr=0.000188, step=372]\n", + "Epoch 372: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0403, lr=0.000188, step=373]\n", + "Epoch 373: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.0199, lr=0.000188, step=374]\n", + "Epoch 374: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.058, lr=0.000187, step=375]\n", + "Epoch 375: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0308, lr=0.000187, step=376]\n", + "Epoch 376: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0461, lr=0.000187, step=377]\n", + "Epoch 377: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0331, lr=0.000187, step=378]\n", + "Epoch 378: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0468, lr=0.000186, step=379]\n", + "Epoch 379: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0977, lr=0.000186, step=380]\n", + "Epoch 380: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.012, lr=0.000186, step=381]\n", + "Epoch 381: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0749, lr=0.000185, step=382]\n", + "Epoch 382: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.988, loss=0.0496, lr=0.000185, step=383]\n", + "Epoch 383: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0405, lr=0.000185, step=384]\n", + "Epoch 384: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0384, lr=0.000184, step=385]\n", + "Epoch 385: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.988, loss=0.0168, lr=0.000184, step=386]\n", + "Epoch 386: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0603, lr=0.000184, step=387]\n", + "Epoch 387: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0629, lr=0.000184, step=388]\n", + "Epoch 388: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.989, loss=0.0217, lr=0.000183, step=389]\n", + "Epoch 389: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0328, lr=0.000183, step=390]\n", + "Epoch 390: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0481, lr=0.000183, step=391]\n", + "Epoch 391: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.024, lr=0.000182, step=392]\n", + "Epoch 392: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, 
loss=0.122, lr=0.000182, step=393]\n", + "Epoch 393: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.0238, lr=0.000182, step=394]\n", + "Epoch 394: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0607, lr=0.000181, step=395]\n", + "Epoch 395: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0442, lr=0.000181, step=396]\n", + "Epoch 396: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0408, lr=0.000181, step=397]\n", + "Epoch 397: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0342, lr=0.000181, step=398]\n", + "Epoch 398: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0492, lr=0.00018, step=399]\n", + "Epoch 399: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.0292, lr=0.00018, step=400]\n", + "Epoch 400: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.054, lr=0.00018, step=401]\n", + "Epoch 401: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0258, lr=0.000179, step=402]\n", + "Epoch 402: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0368, lr=0.000179, step=403]\n", + "Epoch 403: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00628, lr=0.000179, step=404]\n", + "Epoch 404: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0357, lr=0.000178, step=405]\n", + "Epoch 405: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.0365, lr=0.000178, step=406]\n", + "Epoch 406: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.067, lr=0.000178, step=407]\n", + "Epoch 407: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0468, lr=0.000178, step=408]\n", + "Epoch 408: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.039, lr=0.000177, step=409]\n", + "Epoch 409: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0215, lr=0.000177, step=410]\n", + "Epoch 410: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0678, lr=0.000177, step=411]\n", + "Epoch 411: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0377, lr=0.000176, step=412]\n", + "Epoch 412: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0472, lr=0.000176, step=413]\n", + "Epoch 413: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0474, lr=0.000176, step=414]\n", + "Epoch 414: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0181, lr=0.000175, step=415]\n", + "Epoch 415: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0865, lr=0.000175, step=416]\n", + "Epoch 416: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0507, lr=0.000175, step=417]\n", + "Epoch 417: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0262, lr=0.000175, step=418]\n", + "Epoch 418: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.039, lr=0.000174, step=419]\n", + "Epoch 419: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.076, lr=0.000174, step=420]\n", + "Epoch 420: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0576, lr=0.000174, step=421]\n", + "Epoch 421: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00964, lr=0.000173, step=422]\n", + "Epoch 422: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.102, lr=0.000173, step=423]\n", + "Epoch 423: 100%|██████████| 1/1 
[00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.0609, lr=0.000173, step=424]\n", + "Epoch 424: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.989, loss=0.0599, lr=0.000172, step=425]\n", + "Epoch 425: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0382, lr=0.000172, step=426]\n", + "Epoch 426: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0552, lr=0.000172, step=427]\n", + "Epoch 427: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0699, lr=0.000172, step=428]\n", + "Epoch 428: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0114, lr=0.000171, step=429]\n", + "Epoch 429: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.0557, lr=0.000171, step=430]\n", + "Epoch 430: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.036, lr=0.000171, step=431]\n", + "Epoch 431: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0678, lr=0.00017, step=432]\n", + "Epoch 432: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.0249, lr=0.00017, step=433]\n", + "Epoch 433: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.989, loss=0.0275, lr=0.00017, step=434]\n", + "Epoch 434: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.0141, lr=0.000169, step=435]\n", + "Epoch 435: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0448, lr=0.000169, step=436]\n", + "Epoch 436: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0584, lr=0.000169, step=437]\n", + "Epoch 437: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0527, lr=0.000169, step=438]\n", + "Epoch 438: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.0373, lr=0.000168, step=439]\n", + "Epoch 439: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0494, lr=0.000168, step=440]\n", + "Epoch 440: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.0309, lr=0.000168, step=441]\n", + "Epoch 441: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0338, lr=0.000167, step=442]\n", + "Epoch 442: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0352, lr=0.000167, step=443]\n", + "Epoch 443: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.029, lr=0.000167, step=444]\n", + "Epoch 444: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0316, lr=0.000167, step=445]\n", + "Epoch 445: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0293, lr=0.000166, step=446]\n", + "Epoch 446: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.0593, lr=0.000166, step=447]\n", + "Epoch 447: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0399, lr=0.000166, step=448]\n", + "Epoch 448: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0249, lr=0.000165, step=449]\n", + "Epoch 449: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0377, lr=0.000165, step=450]\n", + "Epoch 450: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0238, lr=0.000165, step=451]\n", + "Epoch 451: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.054, lr=0.000164, step=452]\n", + "Epoch 452: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0376, lr=0.000164, step=453]\n", + "Epoch 453: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.0549, lr=0.000164, step=454]\n", + "Epoch 454: 
100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0596, lr=0.000163, step=455]\n", + "Epoch 455: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0254, lr=0.000163, step=456]\n", + "Epoch 456: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.0505, lr=0.000163, step=457]\n", + "Epoch 457: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.0508, lr=0.000163, step=458]\n", + "Epoch 458: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.0588, lr=0.000162, step=459]\n", + "Epoch 459: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0239, lr=0.000162, step=460]\n", + "Epoch 460: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0506, lr=0.000162, step=461]\n", + "Epoch 461: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0262, lr=0.000161, step=462]\n", + "Epoch 462: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.0236, lr=0.000161, step=463]\n", + "Epoch 463: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.0384, lr=0.000161, step=464]\n", + "Epoch 464: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.99, loss=0.0391, lr=0.00016, step=465]\n", + "Epoch 465: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.056, lr=0.00016, step=466]\n", + "Epoch 466: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.0197, lr=0.00016, step=467]\n", + "Epoch 467: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0489, lr=0.00016, step=468]\n", + "Epoch 468: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0249, lr=0.000159, step=469]\n", + "Epoch 469: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0738, lr=0.000159, step=470]\n", + "Epoch 470: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0594, lr=0.000159, step=471]\n", + "Epoch 471: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0486, lr=0.000158, step=472]\n", + "Epoch 472: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00806, lr=0.000158, step=473]\n", + "Epoch 473: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0455, lr=0.000158, step=474]\n", + "Epoch 474: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0411, lr=0.000157, step=475]\n", + "Epoch 475: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0441, lr=0.000157, step=476]\n", + "Epoch 476: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00711, lr=0.000157, step=477]\n", + "Epoch 477: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0585, lr=0.000157, step=478]\n", + "Epoch 478: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0484, lr=0.000156, step=479]\n", + "Epoch 479: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0312, lr=0.000156, step=480]\n", + "Epoch 480: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0393, lr=0.000156, step=481]\n", + "Epoch 481: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.0468, lr=0.000155, step=482]\n", + "Epoch 482: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0434, lr=0.000155, step=483]\n", + "Epoch 483: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.0286, lr=0.000155, step=484]\n", + "Epoch 484: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0265, lr=0.000154, step=485]\n", + 
"Epoch 485: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.034, lr=0.000154, step=486]\n", + "Epoch 486: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.0451, lr=0.000154, step=487]\n", + "Epoch 487: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0379, lr=0.000154, step=488]\n", + "Epoch 488: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0436, lr=0.000153, step=489]\n", + "Epoch 489: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.039, lr=0.000153, step=490]\n", + "Epoch 490: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.0275, lr=0.000153, step=491]\n", + "Epoch 491: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00934, lr=0.000152, step=492]\n", + "Epoch 492: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0426, lr=0.000152, step=493]\n", + "Epoch 493: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0247, lr=0.000152, step=494]\n", + "Epoch 494: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.0308, lr=0.000151, step=495]\n", + "Epoch 495: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.056, lr=0.000151, step=496]\n", + "Epoch 496: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.0303, lr=0.000151, step=497]\n", + "Epoch 497: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.0357, lr=0.000151, step=498]\n", + "Epoch 498: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0317, lr=0.00015, step=499]\n", + "Epoch 499: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0529, lr=0.00015, step=500]\n", + "Epoch 500: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00975, lr=0.00015, step=501]\n", + "Epoch 501: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0787, lr=0.000149, step=502]\n", + "Epoch 502: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0086, lr=0.000149, step=503]\n", + "Epoch 503: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0225, lr=0.000149, step=504]\n", + "Epoch 504: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0232, lr=0.000148, step=505]\n", + "Epoch 505: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0326, lr=0.000148, step=506]\n", + "Epoch 506: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0402, lr=0.000148, step=507]\n", + "Epoch 507: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0342, lr=0.000148, step=508]\n", + "Epoch 508: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0261, lr=0.000147, step=509]\n", + "Epoch 509: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0422, lr=0.000147, step=510]\n", + "Epoch 510: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0242, lr=0.000147, step=511]\n", + "Epoch 511: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0454, lr=0.000146, step=512]\n", + "Epoch 512: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0443, lr=0.000146, step=513]\n", + "Epoch 513: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0305, lr=0.000146, step=514]\n", + "Epoch 514: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.0578, lr=0.000145, step=515]\n", + "Epoch 515: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00477, 
lr=0.000145, step=516]\n", + "Epoch 516: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0437, lr=0.000145, step=517]\n", + "Epoch 517: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0353, lr=0.000145, step=518]\n", + "Epoch 518: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0156, lr=0.000144, step=519]\n", + "Epoch 519: 100%|██████████| 1/1 [00:00<00:00, 1.28it/s, ema_decay=0.991, loss=0.0389, lr=0.000144, step=520]\n", + "Epoch 520: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0292, lr=0.000144, step=521]\n", + "Epoch 521: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0194, lr=0.000143, step=522]\n", + "Epoch 522: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0254, lr=0.000143, step=523]\n", + "Epoch 523: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0352, lr=0.000143, step=524]\n", + "Epoch 524: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0675, lr=0.000142, step=525]\n", + "Epoch 525: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0449, lr=0.000142, step=526]\n", + "Epoch 526: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0225, lr=0.000142, step=527]\n", + "Epoch 527: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0279, lr=0.000142, step=528]\n", + "Epoch 528: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.016, lr=0.000141, step=529]\n", + "Epoch 529: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0792, lr=0.000141, step=530]\n", + "Epoch 530: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0558, lr=0.000141, step=531]\n", + "Epoch 531: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0282, lr=0.00014, step=532]\n", + "Epoch 532: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.0677, lr=0.00014, step=533]\n", + "Epoch 533: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0302, lr=0.00014, step=534]\n", + "Epoch 534: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0295, lr=0.00014, step=535]\n", + "Epoch 535: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0104, lr=0.000139, step=536]\n", + "Epoch 536: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0709, lr=0.000139, step=537]\n", + "Epoch 537: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0374, lr=0.000139, step=538]\n", + "Epoch 538: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.0441, lr=0.000138, step=539]\n", + "Epoch 539: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0346, lr=0.000138, step=540]\n", + "Epoch 540: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0799, lr=0.000138, step=541]\n", + "Epoch 541: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0266, lr=0.000137, step=542]\n", + "Epoch 542: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0307, lr=0.000137, step=543]\n", + "Epoch 543: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0547, lr=0.000137, step=544]\n", + "Epoch 544: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00683, lr=0.000136, step=545]\n", + "Epoch 545: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.991, loss=0.0602, lr=0.000136, step=546]\n", + "Epoch 546: 100%|██████████| 1/1 [00:00<00:00, 
1.49it/s, ema_decay=0.991, loss=0.0254, lr=0.000136, step=547]\n", + "Epoch 547: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0465, lr=0.000136, step=548]\n", + "Epoch 548: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0517, lr=0.000135, step=549]\n", + "Epoch 549: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.035, lr=0.000135, step=550]\n", + "Epoch 550: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0241, lr=0.000135, step=551]\n", + "Epoch 551: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.023, lr=0.000134, step=552]\n", + "Epoch 552: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.04, lr=0.000134, step=553]\n", + "Epoch 553: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0363, lr=0.000134, step=554]\n", + "Epoch 554: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0673, lr=0.000133, step=555]\n", + "Epoch 555: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0308, lr=0.000133, step=556]\n", + "Epoch 556: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0555, lr=0.000133, step=557]\n", + "Epoch 557: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00592, lr=0.000133, step=558]\n", + "Epoch 558: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0361, lr=0.000132, step=559]\n", + "Epoch 559: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0714, lr=0.000132, step=560]\n", + "Epoch 560: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0816, lr=0.000132, step=561]\n", + "Epoch 561: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0203, lr=0.000131, step=562]\n", + "Epoch 562: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.035, lr=0.000131, step=563]\n", + "Epoch 563: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0912, lr=0.000131, step=564]\n", + "Epoch 564: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0218, lr=0.000131, step=565]\n", + "Epoch 565: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0316, lr=0.00013, step=566]\n", + "Epoch 566: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0165, lr=0.00013, step=567]\n", + "Epoch 567: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0507, lr=0.00013, step=568]\n", + "Epoch 568: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0183, lr=0.000129, step=569]\n", + "Epoch 569: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0215, lr=0.000129, step=570]\n", + "Epoch 570: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0182, lr=0.000129, step=571]\n", + "Epoch 571: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0363, lr=0.000128, step=572]\n", + "Epoch 572: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0369, lr=0.000128, step=573]\n", + "Epoch 573: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0299, lr=0.000128, step=574]\n", + "Epoch 574: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0371, lr=0.000127, step=575]\n", + "Epoch 575: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0305, lr=0.000127, step=576]\n", + "Epoch 576: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.047, lr=0.000127, step=577]\n", + "Epoch 577: 
100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0308, lr=0.000127, step=578]\n", + "Epoch 578: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0455, lr=0.000126, step=579]\n", + "Epoch 579: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0362, lr=0.000126, step=580]\n", + "Epoch 580: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0341, lr=0.000126, step=581]\n", + "Epoch 581: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0745, lr=0.000125, step=582]\n", + "Epoch 582: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0186, lr=0.000125, step=583]\n", + "Epoch 583: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0169, lr=0.000125, step=584]\n", + "Epoch 584: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.03, lr=0.000124, step=585]\n", + "Epoch 585: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0226, lr=0.000124, step=586]\n", + "Epoch 586: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0323, lr=0.000124, step=587]\n", + "Epoch 587: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00292, lr=0.000124, step=588]\n", + "Epoch 588: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0697, lr=0.000123, step=589]\n", + "Epoch 589: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0417, lr=0.000123, step=590]\n", + "Epoch 590: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0922, lr=0.000123, step=591]\n", + "Epoch 591: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0118, lr=0.000122, step=592]\n", + "Epoch 592: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0394, lr=0.000122, step=593]\n", + "Epoch 593: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0317, lr=0.000122, step=594]\n", + "Epoch 594: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0126, lr=0.000121, step=595]\n", + "Epoch 595: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00654, lr=0.000121, step=596]\n", + "Epoch 596: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0195, lr=0.000121, step=597]\n", + "Epoch 597: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0584, lr=0.000121, step=598]\n", + "Epoch 598: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0259, lr=0.00012, step=599]\n", + "Epoch 599: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0283, lr=0.00012, step=600]\n", + "Epoch 600: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0433, lr=0.00012, step=601]\n", + "Epoch 601: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0426, lr=0.000119, step=602]\n", + "Epoch 602: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0288, lr=0.000119, step=603]\n", + "Epoch 603: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0521, lr=0.000119, step=604]\n", + "Epoch 604: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0193, lr=0.000118, step=605]\n", + "Epoch 605: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0431, lr=0.000118, step=606]\n", + "Epoch 606: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0313, lr=0.000118, step=607]\n", + "Epoch 607: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.052, 
+    "[... per-epoch tqdm progress logs for epochs 608-998 elided: one optimizer step per epoch at ~1.5 it/s, loss fluctuating between ~0.004 and ~0.08, lr decaying linearly from 1.18e-4, ema_decay rising from 0.992 to 0.994 ...]\n",
+    "Epoch 999: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0259, lr=0, step=1000]\n"
+   ]
+  },
+  {
+   "data": {
+    "application/vnd.jupyter.widget-view+json": {
+     "model_id": "0a5a9fc78e7e40ee89ea570462bcf557",
+     "version_major": 2,
+     "version_minor": 0
+    },
+    "text/plain": [
+     "  0%|          | 0/50 [00:00<?, ?it/s]"
[... remainder of this output and the header of the next code cell garbled in extraction; the cell's first source line ends with: ... -> {N // 2}\")\n", ...]
+    "    teacher, distilled_ema, distill_accelrator = utils.distill(teacher, N, train_image, training_config, epochs=1000, batch_size=64)\n",
+    "    N = N // 2\n",
+    "    new_scheduler = DDIMScheduler(num_train_timesteps=N)\n",
+    "    pipeline = DDIMPipeline(\n",
+    "        unet=distill_accelrator.unwrap_model(distilled_ema.averaged_model if training_config.use_ema else teacher),\n",
+    "        scheduler=new_scheduler,\n",
     "    )\n",
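For orientation while reading the hunk: the added source drives one distillation round and then samples from the intermediate student. Reassembled as a plain loop, and assuming the (partly elided) cell iterates while N > 1 with `teacher`, `train_image`, `training_config`, `generator`, and `utils.distill` defined earlier in the notebook, it reads roughly as:

    from diffusers import DDIMPipeline, DDIMScheduler

    N = 1000  # the teacher's original number of timesteps
    distilled_images = []
    while N > 1:
        # Distill the current teacher into a student that needs half as many steps.
        teacher, distilled_ema, distill_accelrator = utils.distill(
            teacher, N, train_image, training_config, epochs=1000, batch_size=64
        )
        N = N // 2

        # Sample from the (EMA) student with an N-step DDIM schedule.
        new_scheduler = DDIMScheduler(num_train_timesteps=N)
        pipeline = DDIMPipeline(
            unet=distill_accelrator.unwrap_model(
                distilled_ema.averaged_model if training_config.use_ema else teacher
            ),
            scheduler=new_scheduler,
        )
        images = pipeline(
            generator=generator, batch_size=training_config.batch_size, output_type="numpy"
        ).images

        # Denormalize and keep one sample per round for inspection.
        images_processed = (images * 255).round().astype("uint8")
        distilled_images.append(images_processed[0])

This is a condensed restatement of the diffed source, not a new API; the `while N > 1` framing is an assumption about the elided loop header.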
" ema_model = EMAModel(student, inv_gamma=training_config.ema_inv_gamma, power=training_config.ema_power, max_value=training_config.ema_max_decay)\n", - " global_step = 0\n", - " for epoch in range(epochs):\n", - " progress_bar = tqdm(total=1, disable=not accelerator.is_local_main_process)\n", - " progress_bar.set_description(f\"Epoch {epoch}\")\n", - " batch = train_image.unsqueeze(0).repeat(\n", - " batch_size, 1, 1, 1\n", - " ).to(accelerator.device)\n", - " with accelerator.accumulate(student):\n", - " noise = torch.randn(batch.shape).to(accelerator.device)\n", - " bsz = batch.shape[0]\n", - " # Sample a random timestep for each image\n", - " timesteps = torch.randint(\n", - " 2, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device\n", - " ).long() * 2\n", - " with torch.no_grad():\n", - " alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps, accelerator.device)\n", - " z_t = alpha_t * batch + sigma_t * noise\n", - " alpha_t_prime2, sigma_t_prime2 = teacher_scheduler.get_alpha_sigma(batch, timesteps-2, accelerator.device)\n", - " alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma(batch, timesteps-1, accelerator.device)\n", - " noise_pred_t = teacher(z_t, timesteps).sample\n", - " x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1)\n", - "\n", - " z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * (z_t - alpha_t * x_teacher_z_t)\n", - " noise_pred_t_prime = teacher(z_t_prime.float(), timesteps - 1).sample\n", - " rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1)\n", - "\n", - " x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2\n", - " z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime\n", "\n", - " noise_pred = student(z_t, timesteps).sample\n", - " loss = F.mse_loss(noise_pred, z_t_prime_2)\n", - " accelerator.backward(loss)\n", + " # run pipeline in inference (sample random noise and denoise)\n", + " images = pipeline(generator=generator, batch_size=training_config.batch_size, output_type=\"numpy\").images\n", "\n", - " if accelerator.sync_gradients:\n", - " accelerator.clip_grad_norm_(student.parameters(), 1.0)\n", - " optimizer.step()\n", - " lr_scheduler.step()\n", - " if training_config.use_ema:\n", - " ema_model.step(student)\n", - " optimizer.zero_grad()\n", - "\n", - " # Checks if the accelerator has performed an optimization step behind the scenes\n", - " if accelerator.sync_gradients:\n", - " progress_bar.update(1)\n", - " global_step += 1\n", - "\n", - " logs = {\"loss\": loss.detach().item(), \"lr\": lr_scheduler.get_last_lr()[0], \"step\": global_step}\n", - " if training_config.use_ema:\n", - " logs[\"ema_decay\"] = ema_model.decay\n", - " progress_bar.set_postfix(**logs)\n", - " accelerator.log(logs, step=global_step)\n", - " progress_bar.close()\n", - "\n", - " accelerator.wait_for_everyone()\n", - " return student, ema_model, accelerator\n", - "teacher, distilled_ema, distill_accelrator = distill(model, 1000, train_image, epochs=300, batch_size=64)" + " # denormalize the images and save to tensorboard\n", + " images_processed = (images * 255).round().astype(\"uint8\")\n", + " distilled_images.append(images_processed[0])\n" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 19, "metadata": {}, "outputs": [ { "data": { - 
"application/vnd.jupyter.widget-view+json": { - "model_id": "a1fd615f0a184cf8811bbe59f728e4c2", - "version_major": 2, - "version_minor": 0 - }, + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAZwElEQVR4nHV6V7Mk15FeZp5zyra9bubOwA5ADgHCkFySIBeMDZERFHdDetCD/oekn6RHhaQIcUlQjH0Ul9KCFksDEoYzAzP2unZljsvUw+nu2zMAK2LmVlfXqUrz5ZfmNH7v26/lpbl65ajvPRHZrmUJSqvZxXI8GQHp4aAmQonQNF1mzOHRwcnp+b/99//x6vXnyrI4Oz/d3z8KPrR9O7s4+/Pvf/POL/5vAPX8c8/e+NKrg9FkMByfnpzcfu/33/93/2G+mI/G0+efe34xm83n86ZZHV+7rog+/vhOVdanp6fL5Wy1WnzjW9/5yte+qsicPHr0wXvvrdo2z4u2axFgUNe274uiqOq6yIv3339XC0BdV1VZnp8vhsNhiFzXA5YYIkeORV7keRY5zuaLGDl417Z1nudI5L01Rud5EUK8e/fj4XhiTNZ1XYjRBZnN5l3XFFXdtqvfvfMbxfbo8BABY4zW2hjZOR+Zl4vFcDjM8xJIOe+bpvHen52ePHp4kmdZkrV3vu/7+cVZkRdGKwAgotVySYh5lhMpnIzHIUYE0EoprRExM7lWejgYDOrKh2B7q7V2ziGpzBgiVdeDPC+01oN6WOS5yUzXNdb29+7dD56tc4vlcjm70Fqfnpz84fd/0EqVZTmd7lnbzy8uQvCAorVCBJNlWZaNxqM8z4wxHGPTNCJsskwbU1SV1gYE2qaZjMdVWWdZXtU1ADRNa60jRVDk2Wq1AhTvXQgBCX0MiFgWVW7M+dk5IopEEciLDAhZBFE554zJtDZNs8rz0lnXtf38/EwElsvVxcX8/ORktVy+/S9vz2azLDfe+7KqMpNZ21trQ3AHBwdFWSERKdWsVtb2VVUPhqO6rrU21rqmbQQAQJx3RDSeTibTqTbGWhtCQCKlFB0fX80LE6LPsmzVNISUZ6bvW0TM8kwbo3WWZxlztH2vjYoxMkcWJsSqqgBltpgNhqNr156+f//B4uJsUOYa4OJi/tFHH/3p3T/98Y9/jNFrk81mcxYuq6q3fYwBEfu+X62Wfd8J8OnJo9n5eQh+PNkDwEePHt27e3exWCwWc++9946ZBSArciTwwedlySBlVWqlFDAGz0WROeuyLBuOhqu2XUnnrMvyvCoLRcpaz8wiuGoakxVGG6U0IJosH48nJstF+L333uPIwYYiyxrnH56ce36/bTujYLnqTk9PtDZFURChItX1XdOssiy/OD9TpJyzMYYYeTgcE+L5yUnkeHT12DkvIIhIhMwiIpGZiOrB0PZdy0Ln5xeL1dIHDwCROUTnvXfOZ7lBxFWzKsucRUREay0CMcTJdD/LC2VMb511riqrrmvu3/v09u1b1vuu6733McbO+Yv5oizKzoa/3Pnk/OJ8uVrU9SAzmTFZCqr5fL5arUIIMUYkAoDz89PVauW8z/JCBETEGFOUZVFWi8XcOUuk8iIviiLPcwDR1566Pp9fiEgC7o3nnxERRbosqKxKtwhEyjobOYYYCQkUHR4eKW0QiZmD9867rusePHjwyad3S4q5zpfLpu37AHj/4TkRTqfTe/cfvv/+B0eHR48ePbB9n2VFDNFaC4Axhr7vmLmqBlmW+eAiR0Q0xiBiYsPgPSG2TbtcrhCxqgbC7J0TFgrOeufrwQAR66oEhBjZexuZhYFZYoyIKAJFWSilOEZC9M4igPcuBN+2DSLev/9gPpsrpRABUaoq9961bdP3/XKxzPPyj3949+zsIoZIpLxzzBEAYwgxhLZtAMEH13Vt2zZ5ltdVjUjeexEAQGddCCHP8gS/uq4Rse/6oiyIEIlMnhUxCgAysyJSSsUYAbEqK+c8iihSdVVpbSLHoqwSLq21bducn532Xffuu+/2fTco1CCHrMxCZOs8IsYYZ/PZ7du3z87O33rrrYcPH1VVrbQCwCLPrbOkVJblHKNSJMKEChFFZH5xsVqumDmE0DYr5khEtus5Ru9c27TMrHVGAqiU0kqPx2NtNDNroxGJSGVZxizMzCyIlGWZgABhPRgNh6NVM2cO3tkYws9/9s+//fVvmOOtTx7eeXC6ajtAZAYiDYCIZPv+1q1bt/7yl5/97P+E4JeL+XK1CDEUeZHnBQL0XXd68pBFJtM9IvLeL5cLa3siUkqJCAgAQPABRFaLZdc2AGCt1b31hNS1XVKjbTthqaqqbftV08QYtdaRAyKUZWmtjTEm8ztni6KKkX/9m9/+6B/fmp2fg8jFslu0riyyLM+RFAEnczofwmKxHOQPP7n121/+v6Nr11mkLCZGZ7PZzBhT1fXs4gRERGQ+uzAmQ4TgQ54DAIhEk+VE5JzlREYCANi1DTnrQowhBlKotHIueB8Q0HvfrFZEZK1rlp0xeVWWwkwIIYSu64qiEsRbt+/89H//0/nZOXMsjCICRABUIfBwOCRUpDQqMkbneTabLftm+dtfva2UUUhIWBRFCJ6UQsLBaFKWNZFijsKSZTkgIGIIXphNlnVd65ztbb9YzPu+M8aMxhO9XK2m07FzERFsb5VSzAwIIYSiyEUYWPq+39vba5oGIZZFwQDW9QB456P3/sd//58nj06stZmmQV1wY6Og814pLV2vtGZhZDAKh1UxX7bv3bnvsMjyPHgcDkYiHIJbLuZZnh8cXNE6izFqYwChLKsEbmG2thMZhRCU0sYYEOHIWmtjNHnvrbVaae+8d94YzSx1XUURFszzDFC0puGgXq3aoigGg4HWZrlcPLh/70c//NEH73+4pinEpg8+AjMAkAh47xWhQsw0TQalMGR50fb+/sNHb//L223ThBCstYQUgi/yIssKazsiquuBUkoQWQRElFJaa2OMNoYU9V0bQojMs9ns/PyclCIELIrMxyAIzaq9mF1Y67RWbdMCiIiUVWWMXi6XITAIENF8Pv/pT//pnX/9nYiwCAB45wcZaQJEREQAUEoZowdVfuPK8Ob1cV2bEIJz7vz84q0fv/WTn/z0ww8+7Lo+cPTeAyAiLBezGEOel4vF/NOP79z95M5qtVRKlWVNRIpIERERIiBCjNE5p9umL/JiPB51vS2KzPZOhJumvXJw2Pe9rCkI+67zPjAgII3He/NVe/fuveWqYWYABCTAuDcqQedn81YpVebmcDq8sj84GJhpgXfuneeEg9KEqF3k+WL5i1/++tatO9/+2289//yzzrZ2slfXg8XsPDMFx8As52cnV65e77v24PDKZO+AEJRSIQQAAMAsywEACHRk6HsbfAg+5Flme8csIqAUBmZjtHVOESLhYFgJc5YXs9nik3v3l6smhpgMzswKUGsdQqe1ZuZRlX/95pWjvVGpODoXbN+7ZZXXj+a9a12MHEK4e+/+j//xx69
8+aVXXn15NLzI8/zK8dNlWTlv87x8/oUvXrlybbG4QKLpdMrM3lmttTZGKV0UpYjEGHSZZ6PxxJiMWYajQdN2XefKotZaE2EIkZn3p3s+RBbp+/7T+6cffPKTd975/cXFBSKSohgDoESW3gYCUEoRqVVnm85pBBCIQFVdPXOEjxbuk7MWkWKMMcZKm+8898Xr5cR9/PChMkVZHV97BhDOTh8cX3+qrGqldNPMXd+PxuPRaNR1rTZGK+OcFUkVntIhhizXbdfFGK31ZVmuFk1VVYDIMYpwys29j52Njy5m8+WDZdPOLmbWWqNyYWBmBBSAi1UvqJ13mUEWtWz9w/N5XeTz1p7Puqax92atj6K18kEgwtXJ5NUbNyql+hCWy/74qWezLHPOGmMGwyEg9V1n+76o6siRhQWAmc9nJ8vFHJVKca+VUs7aGBgQAFFrs3dwOJmMZ7NZikUf+HzRPThbPjxbhBDbruu6LgorrWMMhaoRsZEli5wuemMMC1jntYJV210scTgaR4t//vRu2zsWHI8nk+nk9u07iBg4BmanFWj1xptvHl+7LiLL+axZzrI8d9YhYlmWhJgZo5SeTKaKKDPZZLJHSgECIemyrmOMiKSVJqTp4dHzL9w8uXerc/Fi2a/s4tEi3rp/14cYmbu26/veWosIMTKKdGE1zvdQaOVmLGydK/Iiy/Pe+aXlcYBF5+89nM0b50Ooqso6e//+g+l0cv/eg/3BmIwOSl177tnnX38tr0oipRW1zbIqq+FgBABFnvXWTqZTo40xmQgPR8PgIxExMwDo73z37//0u18s5wsk0/ROlv07v3/33qefLts+RCblVm3vfQgh2N72vfXec2REABEB8BIat5rmB8PB4Gx10tvOB6+NMcasOg+k758sTuZtlmWDehCZm6bJsqwsyi8899zrN14QpUZXDr/83b8r60obo7UOvqrqgYggYYwhL0vnHSIqpUU4lRCpOkrkoZG0C3A675vOd72z7gEzF0Xhve9760JIZVQIMfgQYwQRk2XBexFOLUHLTaby4/E1FHrg70aObdsprX1d1IOBznPM8snEPzqds/NVWebG1Ep/9cUXrhwcXn/p5ktvfOPw+nUffIxRKSUAWpvEDQlC3nmtdPK5AKc8AOt8g/p//fCt2WzmnA8hpHIaANq2tdZZ53yIiKCUJkWAkBpTZtbGAJgYAofAILP+DM7hoDx8ZnLjYffQhl4EeheWq+54MLj5/PWbr3zV0/Cff/7zX779S+eDZf7g9NHXvv+97/z9D9ZzEMoAIMbYNCsiZYwBgHQFCVPDCIgiwpEB1hoAgL51+yNFlAIiFUJE5Hxou45IIWGqY4VFkVJKee/TPQAgAEgEzFFkFZfKqmvDpw+nV1o1++Th3dGgrofjvYMr3/z2m69+/W+rwegH//APv/7Vr/71nXf29vZvvPjCzS99SWB9iIiIxBj7ro0xbhIWiMigHqRkj4gIJHK5BADw6vHTiCkHBQBAQhGwzllrkyWST5RSABBj8M6HGEUk5cX0bWJSpfTR4MrTh08fXB2dtKdffuWlb37zb75486Xj608NhsMEWWdt13daayK1XoiYZVmqulN+SK9LCEmVM8K6PElCCwgAJqV15KiU7vo+CcHCgOi9X/uLOb0jVWyIZLIMQ0jTtWQCAElfCcBZd+rP3VkYrdrVS6+8/ObffXc63QsxeO9Tj6uNrqhGROYYQrTWKqWIKM/zNHJL4JFNyS8iiICA8NiBAuvrGgAlSRqZ1xoCpmDABLxkBkkASyYn0iKcXkCkNjogCyyapu37Is9u3bp9cXExGo+zLFsXXlorSgRCSimtBRFTd6G1JiLZ4GOrAKyRCrLtygDTSbKfjjEG75OUAEJEMXIiqRRD6/4YgIiIKClsDGqt+94qRSJirdNap9sAAJCUUl//+t8cXbmSGqiECu89gAizyXJEJKKyLJ1zhASC2ztli/gEG4BNyCKACAABRuZ0kXATzkSEiD6Ez5ohPSvLDAAgglJkjEYkYzQAeh/WdIGISIjELCbPXvvK61VVMnOMMeFQKcUsyUApfkREKy0gkSPH5OBLNZ6QZPtZNtIiol4rhyiRmYWQdsCHSqnMmC5GZrbW0eYoinK5XCESQMTNQaTWCESsqnpvutd3fQghGTspyXwpaQghSZWSozYGERAIEVLbvRZYIE241uS5gVDiAL0NliicblNaxRBT4CpFLI/pn4LBe4cIROsRCG8dSkSkAPDw8HC6NwUApfV27Zp8BWKMCfExRu+9iBARCMCaRYBow/Qbi6xDABMFrVGDiFpALtt8QEQBESJK5okx9n2/Df7klsRRiGv+SZQHgABYlcVgMLyYzV999eXJZGKy7AnlE2UnCKW1iaAJ1fYVLMyRE3eKCAIlcdPXkBy0cYIGAZati9IAlRMfp3kqQNxSxDYkjNbOpbEZpByiFRHi3jB75towHo++92/ezPM8xphWpdyX5NiiPM18tgQv/NnYRUIUWS9Zf7V2RqJw1AKACMyy1kRka9ekFyAmhySEJIGcczvMAMO6uPn0/t64mNbZ8V69f/2FF198MT1h1/xJE2utiBRFsUGUgAAns2+kxA1dpnu2zoGtZoggAiA6fSIi4XV9t419ZmYQIpXgxMzOuZR3Nj4VRMiNvnF975UbB0eTam9cRFEvffO7g/HeLr63yWR7JeFzA4rkhJRjBQFhg5kUXbt0tPbSRivNIABAiKKQo2wdmm5KE94QQjLnJiGoVI4nWx0fjL7yhSvX94q9Sd30oXzqtcNnbm7BkzTf1gW4GTtvhQMAEdyadu2RnSM9ZEN0BPAYvxOwMLNwKrZFQChNRpOl15bZhNdG6PREESHE44PB03vF4bQCxHtdffMrb2Z5vvuaXSG01lvzXcJ9/e4Ue7zltK0O2/PH2UxERHvnlFJCO9ADTMSLRKR1mqcbrYmo3ZRG20dnRl3bq6bD3OTF+580L73x/YOjo21mXONwXX3QXxNo8xFTdkr/J4cnSCeyemJVOqH0AkUqMxkBkADIWh0iyowxxiTjVEVGOyUhACDAsMrHgxzz+tOLePDC115+9bXdamyXPbertlptxrSyce8WGLRNjrBJAtub48ZN6xgAgHSFiDfIJoD18FEplfQ53BtdORxZa1vrmVkpBSAsohDagEupD164+fJX35hMJlsNtxJvg35Xq91g29HhkmG3tt8uTNXKFnvpXKekGGNIVd+WgolIK5UK0v3p6KUbRwjxXpl11qc3cYwiMh7VV4+vvvzGDw6On62qaicuLwWVTaqWTY24C8L1bXBJO1vpP0s+j0eOAIAWYa0zY8y2PNqoTkqrLNPHR3vXjibtcnG6WK1amzh3rb1SL1zf3x+Pr15/tqgGW7g/4YRd8OyeP3asM8paB3wcq4/d+HgkaK11VVVXj6+++OKLw+Gw69oH9x92baO1qqri6tH+eDioy/zj23/58O4ZEmkNictFZDodPnNYD4YjbfLPVpFPSP8EgXxGmjSXws/F2BNy7zpQv/b6699641vfeOONa8fXmKO1drVYzGcX2u
jhcDgYDIqyRMQf/rf/OvnLx2Hedn2LADEEUvTC9f2jcT4YTZBwC5JdNXZR9FlvPG7Uz6p0mRf+GvAAQP+n//xfrl65qo2JMZZFMd3bx2vXCREV6URehDGEwXAUQyAQrchoFbUqiuyLz+wTQjWYgOB2aLONy8/Kug2Gz0qDiH8FXE9iadd7iKiffe65tKEEm85da52qNwAQEI7cLGbSzQaFbjpbGKyrTAQO98fX9isA0nnFchlzu8XINgHvfvu5eBBZZ0xE2rXC51r90qsgOvggIllmtrljd+4VYxRgb7tSy7WDUZblzjaDqvCCz1yZlAaZhZROQj9BHdse94nY+HwwbOpkWP8TFAQEkCeD4XFlNh2ZSOoh1iVKEj3GGGMQEVRmOhm9+Mzx4Hy+uOCizLKifGp/AMwCilM7m1btdDZPksnGfLjTnayv4zqIQXZ8AoKQet7HQvlxOIHezRRbFbcWTX+JVFXXx4fjKpNzY3WWV4NRpcR7j1o4CrBIZBGWGEmp7eT1s8a+lOAybgEEHxN9W1TvrN0Fz1pgEBbRAEKkkAARlVJESDtKE5EAKpMPDp6endzdP5iOR1UEQsDV6QNhAVgyx65ZOWeds7bv6uFoOJ7iZpS0K9YTCN68hXCjnsiTDcAu/1xyGqQxIwCwJqVwMzJRijbVYho/p4eyKD3YO1ZZpQ0BkSHFrJgKgYikTx98Mls2zqUfSxEA5kWltJbHGWm3CN3iB4F2+XKr8OfG7lp0EUDagBIJMUmvtuSDG+FZmCWGEEJwPoQYozCjiFbk2gZQZ/UeAJ7eebddzrq2iTEOxtPBaCyb2cnWrtuTDR9tA0JgDZ9LptqBy2YYgZC6HRZmkE31DwCiU3eiFG37pi2ZMHOMHINPZY/JB9EtQRhJ53tPny0+RKHMFISoJIyOnqoGo6KqiUiECSgR4hOGvDTtmkXWJ0+IvvaYgKxnKCibB4hsFwuAaK0UKdol7C0LrSteZu9sOz/rVgv0yyzXeTHNx09fK/fLshyOhqPJvs7yvCi38EUik2XbWccToiMirusGWLfnn5fC5FLFtcSPR1H6mDyAlHJ5cq7wZc0tkYXZtovVyUcCRKiide1ycfjUaLR/lOeFMSbPCyLK8jxFpIjgJgB2U8FufY9IKQzWfAmPO2dNMoCbYF/rurF68kN6mN5otk6HMXKIIcaQbC8sHKMIkDIcAsYY+pUeCK5DRiuliSjtDjELKoVIsplrPIbnnWOD8LS7eclUlxl6LeVazMcAJmtAARIA6F2GSjsLa9szC3P6GCPHyNVo6ue9j2G9Z0MqTRxFxPuQts4JURm9rRe2J0+i6LG0tu1CJCWvtZE3C3frDoINEa2T32UmlhhjCD4hfyeIIzMH71bLGbIwiHOWtMnyPMty56wwiBallDJGrb2OabMe1tUEgAjDpgRaS5fkBMT0K1TcoEB2wbYN5eSSBPNUNySlRERva541bICTbiHGGMKmdYWsHDWPPgq2R4lueeK7lVIqhiAixmRFWQIg6UsySOlJEITXk44EYtkYP2267M5TNr7Z5IlNjD/hs0s4CWBqKWMMkeMOPUuMITFuYEYAMnlkAcC8GoX2vJs/vP/nX1SHN0xRl/WQiFICUUqtbbPhQALgzYgzkTpu6WW78YIbrO+STGJ+2KZUhsePLSzJB5+kh00RtsGPcGQW1pkxWT65dqOaHkVvlSlBpH34/umHvyKQ8XRvPN0zWU6KUgxsNmuQkNbj6vVvZBA37SiuqRTT71xS7iRAgk1PLsnAm1HoLjvJY3G/3inawi7lLFkXl2y0KYuiYykH01m1D4tzXU0wLIXl4Nrz1298oR6MTWaACBFpneovLbRbQm4T7e5W0vq2TV0B26YghchnpN8wwaUO/x8XjiuJ4A0S4QAAAABJRU5ErkJggg==", "text/plain": [ - " 0%| | 0/50 [00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distilled image 0\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAVfUlEQVR4nH16XZfjRo7lvUAEKSmzsspltz3tnt152bP/aZ/2cf/2TG+3Pe3PclWmJDICuPsQpJRl9yxPnkwlKVEI4OLiAkH+7//1f96+m06nOddmXlpblAJ0XRY3mw8HkjR3t97DwMPDI4Gvvv328ektwYyY5qm1S18jhX98/7ef//aDCt5/+f7p3fvpcDo8Pv7y88/PP/z03//n/1jXq5NPX75vy/X88bxcL9NcSqnXl3OC13VZLmd3f3g4ff3nvxjtejm/PD8v1yvI1lpxp7uZzacDIm2ez+dfispS5yMMQdIQEs0AJeHFZUzJkBm5tLWWarkolNSyNlFOQ8+X5wvNajn2NcJSxOX64m3CNPF8+cffvxPX6Ti13i59OayxtrzG2nI1GYBGeS3o1qJf21Wmp1id0yp1Mt1S2dRBGi0hy1TmnFQvNs/TcT44zYwESLibkUarpVR3SMrxk25evJBuVgAU91oqoED06Ou6XpfVzCleljW7qtfn8/OvH38BSdg8TdHay8undb1GdpnB3Nx8qsWLmZkXApFKURAILw7zFBKwUuo0mzsAQb23iLTjodLZomdG661HSNkzUmlmgiJDylSk0twIAAIS6tWdRFsu7jb51Nu1ryuAde2X8yWWRdH+8f3fl8t5nkusCylDrtfnaIsZ5mlyNyidjN76enVDqV5rUW9tOfe2SIlMRRCYaj3MByeVSZIQyXI8nMxMEmkRAdCttNYBujsJkqRF75mAEJECJUKklcjecp0Oh8rjp4/P67JO1ZRYW/v42yfxxx9//CElt+l6uVp1mvcWVphKEQCSDOlyOb+cX0oppc7Gcnk592ggBLbee0ZmEualCFBkKQWkiFJrTUkSCSndaMaUAIGSYKSRmQlJUvQGqxzLIkUvZZp8Isqnj596rIfp4AaQ5/N1aT9dz6tXW5br+fp8tIdaJ1o387Zm7y2iOY1WYm3Ru3sxOpCX86eU5sNJkZAICMrMzJQSpHkxkFKB0NbWWxjYW7ibJGUCVEJKgAIiE4KUmZwmNzOALRoEuvdAxOXXX39tvfVWIzqB69q0NsCQ/PTb88PDp1onr5NBBgJURCgTdM8NsZltXeUmZCmTm2cECaMh1da11Gtm1Gl2c6VgLJkZEZIkZKo4SAAwM5IjAgSU4khy82ma3YuZZ+YIVkqXl/Ovv/5WPdbWemstYl2jR5DF6Mt1fXl5eXg41VTvDUBkjO9NqEePCC+VQO+NLKUWL5VmG3soM6JHW9sqsJQKIDMAGAkCRhuGDysBkNgSwEiSoNEAQKIxowOC0NVTYWbL+WVZlhEaIyC1tS1L67333tfeL9flcrlmdADKVApghnrvra2SzCwie2+AvFTSokcKEqKHJJIQaqm1TlJm7yCLu0eYuVGwYStBElv6EgABM5ZSBnJqrRAykkCPrgQmfPzwW6p7KaWyhFsEiBG73nR9uX4sv9FAs8eHxxRCQVISQKMHIiPG8kg3WGtrZJCEFL1LA9rpJqWitx5BsNCNZjY8Z0ajmbm7IDODkMqBqFIrzKCc5oO7ZzbzSnr2/uuHn7777vve2vkZOWdkxoivGUFIvffr8yUjCstUSgqtL8WdZhAoRF/XZSml1GkimJGtrRZ2ODwkldFBQIreCLa2rOuiBMgiQRIIYpQymZmbDbtHBCC521SrwAA4ssPkXpD54ddf/v3f/+Pjpw9UvpzXZe0DdW4Dn5QUqbWFeV8uL58+fqjzKbLXUorbsvbqxcDem7sTXNcVpDK1oQAAxqWMSIu2LhHdrGJw/7B/q8PaivFAKmkjcKWUUosZR/ak5GWi+eX5/Ne//vWXX35CYC6F4KijoLm7u42qXou7W0qX8+XDh9+MKO7upZQSvafCitVaa60EI5qU7k66pMjIDJKDbyJ6a2umzJxgiYhhNQEDxlLcPSIBFLfWAKCUQpJmhYWjNMiuz+f/+x9//eXXnyOjGEvxRBeSknJIAQkAMdViZIs8Xxun1YsXn+b5CKY+qUUrXo6nk1uJDBrd3b2CNBtOBWnmhtzTkma0UN+q73DtYCRAxYsXJ1lqNTMvpU6TICt+OB7cS4TWy/KP7//zhx9/kERhKzGQBAGZQ30oJTdOpbgbwZ79er1++PBhaauZKaFEX7uRXqceXcoyVZjlbpCZuRdzM9tyJgev9tZ7K6msXswoxWD8yOSW1TD3keGl1nVdzNxLsVLV8duHD99///26LgYKTGV1phgth1oizMzceDqUudp5iYSy5/KyfPe3757evRE4l4mJiJ6Zgi6Xc53q7HNb1t6bFZ/no5FeyrDe96QEOUpDycwRssj0UoTMzFpqrSMZnO5u5maSQPk8PZ4eIfvur+eXT89KEBDlzsNx0tJaJA3V/DBPtXohp2prjx7diGLeQ5eXdV0/XJ7Xx6eHOhdIhy5QvXWzEqYeLXqvpDLLPI8aJWm4BoCbKUlj6RESRrbXUnu0jPTZvJTMGCqbZqOqWfFSJoG9tU/PH6FuQ0Jl0rHrI5Ksk799e5yrQ2mAznGqzGKXFgBBi56//Pzh1w+fnt4+PJyOznI8Hk+nk7mn0r3M81zrPOigmqdydFbDnuoFYs9WRgIbTTSfPJfMbO5GLz0Awop58fHbYet1+eE/v/vw84eff/4pIp2WgEgBQ5GMPBrab7tC1MmM03mNXDIlIiPTrH5xenj0qS3xopdSeDwcZRZrO51O8+Eg4XI9SzHNh+EaL4WgIKMRtGQxbIm7ka4ZWeCVboY091Kru5MU2Hter8+Xy/XDhw9tXZ02YmpGKJfWekiAxJTWtQ+tn8B1ieh5XQOy6tZ6i+gPc33/9FBoy/Uiyc1LPURGsE3Ho/u0Lku06BbTTJpJisjMiN4Jjh6oFLdNzw2+O8x0n+Y5lYkECVqE1rWtSyfiel1ezi+tNYADjzYYLeJ8aREJEI4OtbW512meY41Pz0vrcvf5OLuV5xdhbQ6QjOhe+eZP7x6//KrW4/nyEtnm08npAmpfzc3rNAScm0uZGQSLT6DK6fEREoZorLXMh1qrm1+vF4AR6i17j4hY14bE5bqsS4uegkIy0syLXMzIiJTTAItQC02Rred1jXWBaLV4770pCJr5NM3VLNWnN49f/Pkvpzdvq0/TYXqp5enprdFPp5jn2no7PZyKV0iDG0f3YnClytv3X14vL31tEHpEXNdMAP16uRJy07q2iMjI3nprfbleo8doMlIpkbJa6lTqtV3jek3mkPsRQtqy9Ot1LZXmtZhFX2n+8HB8fDi8O5y8wObp3bd/fnh8W6d5ng4wrWuDTIS5T/NRgFtxL5kd20FtdMRSfMo8X5eVQms9pTrNZlzX1c2MtiwLpEz11tdl6b1rK68YPUQARpvrNGlerbfoK5KwjlqPB6OTfkq0rr520udpOs3zXPzheDq8OR3fvX368k80AxKQcrv5KOJDiowzpEEChlKG0QCWT59ePn58Xs6X4h49UwmR5Louo2q33o2U1KO36JkJgYA5TUyp9x6RAhx+nB6srW
[... remainder of base64-encoded PNG data (64x64 RGB) omitted ...]",
+    "text/plain": [
+     ""
+    ]
+   },
+   "metadata": {},
+   "output_type": "display_data"
+  },
+  {
+   "name": "stdout",
+   "output_type": "stream",
+   "text": [
+    "Distilled image 1\n"
+   ]
+  },
+  {
+   "data": {
+    "image/png": "[... base64 data for a 64x64 RGB PNG omitted ...]",
+    "text/plain": [
+     ""
+    ]
+   },
+   "metadata": {},
+   "output_type": "display_data"
+  },
+  {
+   "name": "stdout",
+   "output_type": "stream",
+   "text": [
+    "Distilled image 2\n"
+   ]
+  },
+  {
+   "data": {
+    "image/png": "[... base64 data for a 64x64 RGB PNG omitted ...]",
+    "text/plain": [
+     ""
+    ]
+   },
+   "metadata": {},
+   "output_type": "display_data"
+  },
+  {
+   "name": "stdout",
+   "output_type": "stream",
+   "text": [
+    "Distilled image 3\n"
+   ]
+  },
+  {
+   "data": {
+    "image/png": "[... base64 data for a 64x64 RGB PNG omitted ...]",
+    "text/plain": [
+     ""
+    ]
+   },
+   "metadata": {},
+   "output_type": "display_data"
+  },
+  {
+   "name": "stdout",
+   "output_type": "stream",
+   "text": [
+    "Distilled image 4\n"
+   ]
+  },
+  {
+   "data": {
+    "image/png": "[... base64 data for a 64x64 RGB PNG omitted ...]",
+    "text/plain": [
+     ""
+    ]
+   },
+   "metadata": {},
@@ -510,18 +5379,15 @@
    }
   ],
   "source": [
-    "new_scheduler = DDIMScheduler(num_train_timesteps=500)\n",
-    "pipeline = DDIMPipeline(\n",
-    "    unet=distill_accelrator.unwrap_model(distilled_ema.averaged_model if training_config.use_ema else teacher),\n",
-    "    scheduler=new_scheduler,\n",
-    ")\n",
-    "\n",
-    "generator = torch.manual_seed(0)\n",
-    "# run pipeline in inference (sample random noise and denoise)\n",
-    "images = pipeline(generator=generator, batch_size=training_config.batch_size, output_type=\"numpy\").images\n",
+    "# Display train image for reference\n",
+    "train_image_display = train_image * 0.5 + 0.5\n",
+    "train_image_display = ToPILImage()(train_image_display)\n",
+    "display(train_image_display)\n",
     "\n",
-    "# denormalize the images and save to tensorboard\n",
-    "images_processed = (images * 255).round().astype(\"uint8\")"
+    "for i, image in enumerate(distilled_images):\n",
+    "    print(f\"Distilled image {i}\")\n",
+    "    display(Image.fromarray(image))\n",
+    "    Image.fromarray(image).save(f\"distilled_{i}.png\")"
   ]
  },
  {
@@ -531,7 +5397,17 @@
   "outputs": [
    {
     "data": {
-     "image/png": "[... base64 data for the previous 64x64 RGB PNG output omitted ...]",
+     "image/png": "[... base64 data for the new 64x64 RGB PNG output omitted ...]",
iogjhXoIeVqHFWAsO/XfryJUAO95JRcHKm/ZJ4I7C/dEQZo/UeruXAq0kcueEk8dX7TDU/o7e1Ija4vHLe5IGAkVkZ8vuZRoaUSM7jvh4JmM5XVXDymb+HV5OQNYuq6jVkqM+MPYtNIxlcQx3xuf7UOVc7zNfvM0zOn7ItFI95dXXmrgza/UmEF67fnFsiSbNDOZh6LR6lnSTqKmhFgGWysoF+zY73YTqdvmMrBw1/2VkuKtEPfi/c94//7w+/+O1fj+w6OkCCWYhEfMZ+yemu3k+dGUJc9OKn41P1/jMft2thuLuqx7+xmSxByTI0s80mnnHRwIqtz0KA/hdDkUzwVFa2JCWG3Qi6EtlqCd+iuUvVjumKEC+RW2FaMXiblm3OjMIuFHJrJwXobh0x2PzS0GGOMCIHkfMTuj9bU/lpzCiXo54Q8+a1W/g7zCWI9/3ptT7K2kIXwH/+b9EqjryohdId6nQ8mD75mZt8ynbGIQ8pmbAmESU5WZCpl69D4HaVvVnJstpTdX4h87ClGm5dIp+5geIL5RvymSKHHlD/j4UZzPW5K2+6XlgaOPRLWZqqAceUJlZETFa9ccQRH6y00zeI61QiIak+y8Cw81l/j77vjuEBgyC6j/vAr5mXjLOjjZKadK7u36opDHmxnbWN+/q1ILwRMCnicutjLp7jsVfbKa41BPNXbXab5uVviRZz3LsXaSp4RbazZHROrHg3ofbqAodzvQzBNBiaeP+kRnFSHiu98F/zRLESidZEJvlrEnhS+DljzMjkXWhnIuksrh8C+pLyEw/8HkdHFQrm5aP49s8KsyfWER3PE31zGnwex0YInfpPizNJPxRzKQMJParhnnpXol/8MJpOoaMiEBTaQMkKpozllz4nwdi/MXVIbxbhyvv0OIYjkT3tiO2kVIMdapnhiqJcJRPIdu1gjVThC646C8bOUDf1oIGiTPKIgAaSW2S+vO8ERwM3cvCVxH0UgleC5cDVluKvBk8mnfiE31kpwvOjPImPD0l5l57k0/Bl7/ELb/Cs5+mtXyz7rNwGUW9qDFad1IxhCJ7EDmxTsKb7Qmz1HF3OyCep2MbPU/wOj5iLMdbFbSnM1IfwlEJW9LyKpmvXs6NLuL5dmRDR2YQc9gr5Cp4SiT/nV1ghCpxVKUq0eKYrJFSpniPjBe3dCqr8b3SQO36TBLJSJAtLEGOzDpjd+r64Oq9jnyDui2GR8xzNYqUtWgZLT206dcCVplwZO5M9N+4NJjvbBC+4OSjN7rRfsE7q+FzOVu0mFQWVSkzNBWshmYp1ufYmQav+zr76a+LAjA83y43UwzaRc1Mdeg8VxBKJgQ+XeLQ142z9DPefLGyXNt4hZIivFSS4SAvsslpt2vUHt4mKgn/ac/Fret6nxcp/z0t7bz2M5kW+JLC6hty+gcO/+N7//DnWMWWfIkXUVI01iB1UKIwI6RnEEfogwSLGGxrCTkdv0nytEsXmbKDJgSkWl3VH5X0+MJ4oyC1tdVqnmJ81hY1sfDpkbyg6NaQ3X6j+f/vaOrvdrT1mxvGQ1iK0UNPD6CqX8bO1F0rsezj6UJnP7tJbatcflIZSKEZNx5qvIaTF9g+G7Ao7f988e3HGo9spFToCIk1DK0nRYTspliZyDfz9O0e0TUJhbrxvtBIgO+LpBbj0/N6gmhhIMHQiB8Uxr/slPoPpjqlOz6MFtAJAIWe8/Fgz+tAbOHO9zSYrJPfN0ydQd2ZZOcEy+0I1XRWekMrG9rGHHM1PF5c25hsZXTxrKfl9enXan3NjV8H/PtIfwOb6J6akwFPTyAUP/BkO2eNzBa4FDF2UaffJDtbVZOeZFnyR5f2p+ldayFzDiDU2l2M9DjSaCz3J64d9oZ7N1GT8LFgqWlJBp8ZXYzDlMz8jRrHlEXlKQiLuEKVLmAPcoCAMiu8vT8qx2MRF2Z4tXFzKC+tvraFsg0bgPFaf0eqDhNQAO1nhtxzhhxTbGSqPUYZRBqes/gDdcrBSiUGg2tJ/BLJoH+pcXBE5xj8ueIjbNlLBnUyKT+fTkYtJmb2EYd+8VHBehDvxOPrdnO9pHpxY/U9KsQ5//71/nhdjQFb7cEdMPHSIQ0JTxF58rJxd79ygw+koMLJBTnwdJ7LI891+f4Z1G9EL9qpZ2P/xC8rfPQLxhW4uZqBPUFSZRmuFmp0Mal3m62VbSZhSVBSvcXj3aVTYjDcw+69ho3muFoT6xpgJ3s0kqV7owkZtGCdFFjb6F+O67fkmFt9ZKgdYl4hzakHTjrhMhnknPpCSbYQn9rt4KRgW/mbOeHkA/+H/8F+6Hxq8Ap8Jw+2SQrf4QaI0CpfnVxW0ivA5lL5I2u64XCkAg+oZF2vXOWYsjeAn9B+8aR5M2081KeIYeNrDQyjkcqRZSwoOo8Q0WZQ8tNZYAeIEhHGhsRnjFtuPVb87Mp8vgJ3QFqe7HpRwn9jpfy6WLrQOZNOslI9ZS56VNeyURdM9k6iXA74pNxn2cbn37TFXwIjZzBFELgkmgjj6CwHEaDcVPWYpWBqPWveonnbnnLwwz4n29f7v5YA8nUXBoMBMjMnAhy938zTw1gbrfRTM6//nyHvYja4gDDdHlymYQlU/PzsTdCReEon+Wf+by2ixVAkUKtfCa2nmXI0QY5PNIJ7xXxnViGiFqOmiMA2/Y1Q73ccUs1G9hkBIS9BTPI4qsKNPUkXCxugCNF9CAu2t/wsZ2ZG7iiO078ZN6IwZurd+BIPSBz8iSuTp5wTdbshzo5n52RrEjwTtHSv7qjuL6IyUizC+uZBD5wSMy6Wf+ifICcA3y2zddnCFwYiZmbplq4x+3689/Fev/sddwH3Ub0XnRzvfMaPREKQeMiegFnWBDWDvkZPYbOEma2IKDJJhHsEoYQceeUFe5Az7IAoe8AYK5b8ZBepmyC3N4xk26E36HYqgsB/hDVLO2d2mKY5KcnaXAwL2FAGHC38Szu6v055XrOL8ClG1QhcmRLHbn6XJ6+fH5amrukZsQEzEl3v+3EbTt7ggOfdj/8E4V1feNAQUr0gNr/QBYVd+0fx3TvY8byRvbbDB1TacZ5EA8XkaVy5LhVWyTgtQXUg8fPMbjvrXJDrg6Sx7LIXtWs2aoeafq7MCjkTmJ7vAKUwY2LdmWj8gVW7qc+QZn/ii9pfnNh9DudHSguMww+68D7x9FbFrlwhnigOvr1NmM04NIognPO7mflm5e6tOVvDMSrAD+8T4OpnQnAyJQgjZHPrS+vkje0AhKGF7h2jLDGLJzNLb1IqJob/pcY1ydI25QUGczHrDZxTKDsaPAea1zyw1+lOIOnk12pN9XUhol/quc0DpvfSf08gNvyzJK/AsQiWib8Kk+NIrcUPIEMfeSNh6794NSHxdJKNy62Eo2Mw+jYJr3/nD6O2VteI7EEfymObShagAhRNwiywY7ryy/4sgdDCStq03oCy6OQaMlIltzrhymh6jM8K6F5pTuD+qYPiiOovbRvtgPBtp43oOBb3KsfFrWN+LjQzz8NhACBlxxM3ceQ+8Xj0svO9sWzO7r/u14kVuaXwdK
tYR+WQ+cgecIwV86q3geTfQ7oVOZznH9+2W3q3ewcli51WrjOCJ6UbM7e4VoAOee+x+nP3btR0OxL1cUPWlvSIjq499bDhuFo3M6dcCXDp9itAslB/yUoQrDtowuZ5WZvwZlOeKxsbNQJtv8RAp5NIuZUDMhbDC2rVOkNaRxvU+EpE386HBEw9wN5lSd0o9ly1n/9kcr6FEp9w66Y2cy+QKTI8F7qo90wRXCpfN6QSFerHCliI80/DPRFRmF1x+GXXbn4BR0GfCzLKZmwwYP/nKJi5X15DpKJ5MGmGRWJz8kQnN6J0kljuEVh0z9bjoojcXfYVLus5M8aypGlxN40qSxXPetQtyeuHPtJ0TN4G1uA/ldU9g1hKvdWRQDLU0s5OclkecpgCn/Zf9XYD5PYjJTbFNuYsUyuY8t3QjaOgrXoERUu+Xq0jI4AdYfkAo9AiSiCYbMkHA62+KtR52BrptS5qbUxWHhRuktGC12yDYHAjqOtLDlDM4ylQWHkHuTcwqHaa45OBvw0tdvDN8oGqBmVzuw4vU6JSdxRlTLvOzzDOGyPFdPZLgpPwlxssYBclccUxgmZLL7/7+3RyjP5eZgVzqfAJrmI48KsaG1o4uUwnioLN7RCB2xcnz+alkl3Um7WvK6h3lyDPDDX02THXWN6cOPKk2ZGYcDEfTojYvkqZ87k/qqZAWrZD3p/lXADGs654jQK6atEs1QT+hZ9ZvmmRz5xA28Fzv4B+2yLes7xW9YcQqYkxzIhI7zrXQwh3oGOVHo8l9QlqmVpnCQoaKg+VVWKw6edKO8Xp8CB84tp3IWuC925x3G7lvOd7z1iUy59YxKbB3Cl8cm4OMe5DklBh1j9ep6uNyS3LOwUGNExyeKviRd3161PWhNIzIIp6ca2kVEBNL9ma0aXqt+yJrgZPuRbGWXSq5F25WKXCHLE9iyK0CydlGaSTI+JpH/gK0/fKcNFBTdc+xs/HKcsTdsFRk2YMoyUQ9aZrcW8/FiEwQ+T7JF8uvz+Gr2ykUQlgUGBfjUbhiFi2XHIX9L5GVdb/kjPOuTyDa73DGbGHnhtCdJvCg6BqPgqb3sH4zKSN6AHXdVBBeC0vcORKQ1yC9I7KA1swrFIG+NvjUqLDbS+KfaIDkS4M2RTyMf/l8hoirWJznywQZ206bon3sohLH93Zzk2ngVcbWEVo1ez1+dHqmNyUS/V43AOpSezvMcjc+oQqDDofYDNeY/0785rodpWbSbyZktc5CgcbLu/NW5JPc2O5Fp1JzTCKSX0rTF7bDJmhJDArdATon1grynb+fcEszXP3f69U2iC7OWLACyfVxqXKtT5bvl+h8RFdrn5weopHuIOpOHn739jYml4WyHLkMduHDeGecW9r1twZQ+HDlfEEzt9aWLzqi3L6ckqZro5B40ghvTE3VpxRawBTixAsbtImkxSaNLwKj2VhZJ+DbxCaCJHg4AbijBxzMj7DR1lQSlpKZiBZtostk1REQS/c/fLa1lIv3wXJdGdF7gx5fFF/x+anIWN60A0e+Jy3b016FEzpGmKcmkPBeGmgs0vOxFIVQUFgB0OatPCuQMQKykqloGSEhKWr8+DnPfb2z16bd+cePPvA7jAHbRpzF0jLbvP0Vk3XF5GolCOJh+wqZJ4OFhoP7K1DGIvS3BuA1nc5lf4HjlwMECAkMHTUAmaOoe/2o1XW32l+PTLziTtpwetJNkYLQFvFr8oD1ifOVdGWJL7dYmKyQfpHYBbX1rREmRuUurtYbFFy968aPmefnsb8TNC2XtbHgKcBbu8che/9oQtod59KIAcYExuCvWgdx3ZXRKDgAQe0ofBAzVBFLkdlf+YXxpjaSu/XSYYL5abVUMlhcy0llLjELALtTvl0Tl4uY89o/qAuLWMnxOfTVWSWpzZOvbReLG4/+u/G3odCJRSU+12f9zXpmm5KdnrD5mb3Nxn9mHMPdwBtcaRcaZxkRsQc9cYsmP7eZLFyF2mWB2YH+Ma4TD4q1AhwQcf8aGFz/66pxVgtJGI3V0qLOpJ6NdjS+4mg+WfcaIkdm20Ma3aAQRiDlOQ6wvlAKt9CAjlHCH/sa2Q0JmwiIvIbIu7qLvzBuZ1Pnm7tMES5e/OscBZKidRRNRA1yo+kfbKdjAZSGkU0VTXGdSlXxZR/KjpxebU627rz2lcDJ5EId1+YDRCXNf8i9Pqck0mdaHz2+fZhCWWAdH3y+5KaxItbw8nj1CCDXfv8A8MBGJqG67PqpTd+SoPoWA16bSJ5vmRx+PM8u+dcWzdI6wgmCJV11rlzsLCoGtJGR30Fg/s1yVIWRAvFPFyR3jjEfRfbD+dwPY/LYOiv8T7EyiJ6OuDBWKXEYpN0bL/knWJc/FYU6X3wK4DXRsgLtFtvChm38WLklXaiXT5/5UuZOHDfZTQ/SMRP/rof1Q+pWxmGaUfUjTeLNWSPthqBLg5VfQLmB72Q+gr1NvfaPFvKwB40bJza9Xy/F8PdIPLvpo1vK6W/0e79Cx8OClt7K52C+bCoEbntr7RzLg1HBEd6e/yHMymgP7khL+WkVGVevPs2F9CPoYjnkSPRQw3GCmYXVsc/CKOGvxoklxLcamNFG1RyBhrUmJGtgX4sL1UIkKEUmIuqNS06/iOS/XSSSNcCfLwPohHyUgrdFrUBCT9twnOF7AGmNQzKsba6sk7Hc+RwaiaF097D4WpoZxkX4006requUi3iPn9ta5mp8RrsVI4vzATvU2v9IBL3MOzlp5knpGB2Zl8yUYPihKrz+YLCEzf2R659JpBBYiqOIepD24AQhFc2c6wew5U+unBg/u+rGqW1uFZJtz/U4ermRje2MOR931E0qDhZEajudLk9mk96URPZLnTx1I4f7Ti7RxDRf1Y+zbfiPfvK92uLqC+6r9utWu0UtQIhNoq/68TfDrj6XVoRNxs7rV5q/cPn5Ez+xDqKJPh4Gs0JIkuJuqVOBqhA9//qt16owehimVylfzB84ukIhdCbsC7nXPrl6sCMU4gfoYonwUvEqGfB4HicjMsoYldeow4TL5TPJlWyfkYm/hNyX7pezaxjLqajan8nKCpm1IF/+PHU3lPUeN7wDHR9e0SIJcfkVz+pWQbHw8ir5wvKPQ6+nbuACWgnrBdSsO2ve2s3idknZNOnA2cLGU04M/30YTtsBxIKsYCI7A3wy2PTt2RZbPzCM+wg0jWzGjtA9QdY2RUUg7TX/C2316FfxAwyer9polBDndfWep115lGlGgKzAProUuFXDQxL9eXLeTnZ6iveaD6EmMrR+phaWMKbgzrPnzqTI2xw9XXegzilL1FlHKrSCGcSwTVD5Z/OFCvrrXpJ4dTyzlM6WHmGDWaH3di8EboTcT1CoAYLIaV0bvGIQudVejCE8WA+zBraxjta0TZvuCNAs8lo5OpJ5Z8SniNBLHxmzz58zfiIO/lNmCYXx0MLtNKhS5VcLUimMrIzR+hiMghmI+5SCoNA8ogdhaVUXmjNpEBOk/TYTaNbKN/1+T6LEDAXN/tSndCvVUB1zGeSCzJIO0MHrXzVOBhn801X5pmD9
2rfnTynz55jqwub62Mk5bllgBIcb+bPms80J/tZzcMtjCvk7mfxejr9IT/uL/RCqqSMuU2PElsHMZwqkFw6aSxHz9roqr+jf8WlPY1Q3RJB50kNSPM1hHG4TQCIQA1JETyoylRUPKbkFGE56fgFk6Wbod9nKR8h0ekKlEdYAFwSu1InU/RJS8lOHcOeDSuUqgVmjDP/Zb/9UPwL4UYAt00ZekZznVIBmG7WZgy6gsBWMgzFDpNRR92trNnyUs3fTqU28dHD2/OU5eiVimheT3JJKFBkZ/W/d+iw5tWOZUHy+K4xWcltermlOW0TcC8a/S5DtlHFxvaLaEMpzc4tJz/GYLuxmagzhJIkVAaniqI6f5dwdJheqk84IUWpnaf9Wsm7H4zYfU56NInA1YywUzThfWFk+uw8Y7FUMgtBv4R3EKLWXMmguSAvu8bW5iP2mM/KOisQqlaYqwa8tFevfVoqs+U/owbPfKZZYmT79bBDehQaD12Xpv3Elb+3SCRj1BQyHlnBixLjBs8lXTPVwJZxcgyUbGXDwUHEKATB9aPRs0HJpa73JdrcKAoMBLBBU0h5CRnGvEIHEm8aYxc6XaFdOV2d5146SHLG8ky/KXBm+tSoQaQRxnc7jVh8M409hrmx5bAXU2vJZhilpFYUSHIYMKUO0OZ/VNhxlky54Bp/6TZtj9pX1ldjMoma4xEnc82qVdQbB+bB9+TGiwiD3fDicnOPoipNdjPtOvnmdx3gBG4d2Q/Ry6QIGkddhLQdQLkaCz8rQ+hd2+nzClBtHmRYvxBv2VjOLVHCehkbXPBqQrmXfmKnzVcrpfL7/XP7AR8z9g7TFKg/QRaPyrTWQflhyWhcQp5JSVDXjXOYhGGpU9pwBxZQ8IkJHtUcg6we+VsqYmMqlacwOGdwTEPj8crVgWrN+sMGrY8Y4zpBUjyEDOkLTp8W9xX5uQaef4P9Gg6LIivroOiLZD89bUBZE83KJTkp4QPjRVAwerpf58xM8zfAu/QWINuJRxXrLz89/FyhZ9zuKfygo0RQmBvN5WdtRAjgow+Ws+VrYTWvjhyB6KXQYsuHvIE8otTTSc+kspoXzUJOSmfXtHpNiLZZTRInmjv1u4IJ8Y5kwrXQRs913owYBSfSYPFchXQDVwNKmk4tYmbd4jNzoZ07zLl3i/zCIER+fX4m/Pq/NbhsYIAIZ8yeBPTHi1aJMlcjdx+MLWCgBl887u75MxIGtTzjvi2OtvUQ/y9UjxTzFbHijksFu7hIG2ENkxYf2SUCZmz8CyTGuUL3fZJ3oDVdSFBKF/NNBvG1sc0AeXikqjEb2ODWmIKH3QEGQJm6ksgsNYRM0oqTrhiTyk+J7faFweStazBblp8e5/PVatjV0YwZU4oKrBVmaxMmCkRC1q6KVVRIIdvSemYK/hIqnEYkMWlHdrzPctdlqL7BTSLfZBrSAJsHtvysGZy7stsH/6HESv8x1oxoROahjXlfvu89ZccPQOxHo2kOCWX2n25g0YfLzdEZwZT6j3YVBCAfSBFbGuQRy3QSfxNNraAohruEoC6PY2VNiVtm+LuXyZ9sXlcjBXFJYCOb+5Q/FqQ3jTdeEYwtzD7aiFWYMctyxzETmDCdXTZmlcmHEgeV/BUK0dTgpN7LcWg1yn6NNNKy1ee15qAXqPVX1XzM3+UbwMQP/mOUs6JueazKIVoagi8THmBfKo05wz07Bt60Z4ifwtr6+pI7IAI3gGIZC3h46s3VcxOOwDQI+1S6vPqqKZU4JQ+s8P7OBuITO8yWcmd+aqwQJhrE7xcIHbuKHoWXY42jMyJSwMEurh+hlTrZ6SDQLjkyplWhMOp3O8hUp0wZYXigXZEPgYKvppfYEpxKrqBdvZXmbEftQex13oVVr91wjIIIibyt9Ya8N+7F1M8uzEBaIQ+L7yzPwF9Nhlrb2Pb/dXlLnHPcXiQsA7QbLc8CBPCSZQFTOE5Dc6h/qExCpWrTCP53gE4Yo6znAz0xr2c/wNe9d3Xj5W4bFJnl5TqSSJSvJNuIGETsbXg+iSgGcZ6s5jJQYy/hEDPNKBRwFoUsmxPO0CmfiEKexOIiUuosHIT9gopXVaKNPmelTCV/DYrhjaefJ5nOyWOY8Ak76/0+mkqMC1NJV8MYPtrLc2gYxBm8xc9x87Y/cGHQgh7ua+r/yWchfC8oWcQih072S4k3pXuU3LAnFOUYH67NsVkCTC0ePnpbv0cP9iC6ZgZzCcgrtrESg7YlOInvqPA/2M9T0hauhBMIEe9Fjuu4oWeYRxz9Tt/2ZLgQE4ikLbJQ9j4hh3CU6069+Sv15hBXpiBo5pRnPwTCmgsrIhHqePYKRnyTlBLX8RkQAsMdMo3YEV5Gf+sb76lUwldV6T4yEzHzB6KMjhjvGIOMibnxXHVxhQ8p6hctkPQyqvQ4dJ73AjiI+pD+Y1/MsLAg53D3onXLB+FDCQ+qPKSGWyx7eXiTMXITVBEtSjaqSQCGsBmMrI9k1OciPQc9+iqc6oggGZVpCRCIyulgh/iXkaf69y5QaMLYKFWJtBA9z/PJCnE6Q56efmrBU0IkI5Hf1Bpo/Njlom49fus5NxaS4mUb1gJ7T42ufsKem+zNCXOLVPjIKTStvJG1nUI2GAA2ExRkXHjaxVmnhJzYVz8QZoao/b1SZyhsm10ng6U8qpbnDLIDr5oyw6TWqNrCT/CS0W7ZKYGMaBRE0tCcPwtZevdwIzPBSqKwxKAu1Y0G5UxiVgmjvXdH0ocynAtFlI/gfh1fd88L2AvkvblUtwnC++HwPQDfBaAKtopeQNzaYR0iUIm6PtO+N99tzlelOSjH4y2J9x4A6zmIt5d3xwYDThkE6VOMvn3V+G+RVWvQs80/2dLdFebnSfzXpItMmXSucWQI1oM5J0wsRhdV0bmyy5cIK8DKolE844aYENV2KeYC8tXbz+typy5W5XPnXj80MH9wmsi7cw48y5s5EwGNAhgh4IHIz9dKxa+io2/73zzurCgz5b9Sug+BoxSE1NwExay8BAqKfhKh6ySTMnKQhMcIWcEkcappQLzBjGbVoBE21nnZ1+LV6cHTGrxelxlTCIP/1O+GYZd3QeNSoyRnWtqZCAB15Z/jwUyOc3azyMHwGjsSnExSlCjQgkMjxrAIIC2/5vS8TjOq5Cdeh729qk/hkNYz8WpDBxQndwdMchM5zyZRc0Yq0WxAH7Y8u9Dvn5WbFcF1lxQDsQ0meQ4SCS1meIeFZMD9HMbLxUtcUSphOPVyOkFuMkLqjFWEFy86Ka0dvN354U3hylb9987UqH6avL5d3pKX6oPhOqflixW9MAxE6KizeERAth2vB+o+wOtz70N8IKpWVhUpYbSRwX+MuphfWbIpvt69becpaJ/Kw/g2uSd7OLrH866EmzS+as6e3goq7XimsTnZX8Pf/4vLypY81/SPvrT5Pu3v25TOmJjdv///p5rDIJB100gAAAABJRU5ErkJggg==", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": 
"display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAApA0lEQVR4nAXBaYxk+WEY9vf+777fq/s++u6enu6ec09yl8vlYVreSIIVwJACxUFkhEhifUiCwAj8IQhgIB+SDzGCALIAO6ItObIcS6RJMSTF1V4zO7MzOzPdPX13Vddd9arq3feZ3w/+xx99b8LDSevuTlUKHt/8f3j6QFW3V/TXn377ePVl7Xe+TfzkuFlP3k63P775VG9332W+czG8yP/uf7Jy4Z8wUHZ8x3tT3vvqhUHYnyj9xq3/OvnVv/hXv3z5/f/9D1svdcKVFMyI1eV8MGt+8A4o3MukrsIMvdeZB1vTk0eHnQ1jG/kwwNEYhpjFfGSdSxHjv7C2/3A16t31gi9mqRS+Cqi7ojfvM0FxXjU3fmGq7xomsl4UsL/OHoIvU1hI93dc07QgxZu9947nYcQvPQK+8+gd7+vaz7/0oIUiR0+IBdws4V/igzluQF9Ifz2NcfZtm8Yyp+yNfZ4XbuzNfWxPef7kby/MvYqI/PnJMpkYWwNcYsbmLLuWofPiSHw8vXzJenF2bfaip+ubB/vyuxaqK71z0Zx3jGHaVy6W03hVUAFhQR0urRC+brQuAPrrV8Q0AhOuRs1+QyLCTQqNQzsom6ugzW1DTciG5eTUgLiM/VcIwyWr7nfZdDXa/h67gDmHoHVEmZ3Yv9YxqQ0LUF74h80SfRSGPTwknWptHk6OwvjBswg/y0wvCcLB+QZWjpz2ajqsm1eaMwO/slbHMLmvt4s7C0dwdQOFSZ3SbZ88YRqSREWD3SWC0db+Fi+AacxPn7BEm9XZC4di7kulkrzR0kjoIZVVaWpAJyFGBUKEOcXeEvCVNOcO6VeuD9VQtlCiGLWxsUEsNO2SmyVQsy622aRE6FAZfsjz+YL7i3Mngg+JcfVaDlEfo60xaHgCxXTuZKmNazYqiRsMp/RO5+TTEfVi6jljlj169+Wj1WVMQwMQychJkgW+B8rCjq4L+WkntMB6znlLmm9ysWWo+sO3SamsY9o1qn2Kp4GC4DYmlCOWvL71fGR14uMbrqO4iqUzg0QCYTSQvaKydRs7fuzNw2UGbsPyK//UDVgajpuWj0bvqatCo8ONAkaqkNiDFgs7X4VvHHxrxY7FhQqfvpZXuASWwN+mOBh+A/cmakdMUa1vvL4SF6/Ckb3M0N9aARmMatHozR6dxcaOgcEzHZ663VSIRVTTlc7lkPrC7S/Z3R1top9cX52frXvK79rPw/OPJRnujxmDi09WikhrrSo0M7X7js2rDwkKICzDwy/4b82gOxsFQqY07uwrF10QKacFlf3pps3rP/9womi7fnn21PUWc+gotPlv6ObRaT8PULSSWXtrATVjaqTRJ/a8+AnqihSSHj0dQofS3IlFg+03MHeFBJE87cBR1vPilIhSYULMFxjJ+/DNr0jRH31u4BjA53D1RXEWD4WKiGEnMvMCqvcGrVdLm0ESdgnSGgZ5MsMceVqfk2D+xVgG9ew338du0/8ayO/bBEzG7CLDkWlNzFI1y7mQL4RFnhwFsHsQLIfM2LQK3jqX0HQ78UtCKnmY1QtVJjRzmLOhAGV4dkwXo171Uf7dO73sLCD0sOAWAgW6ZuTFIJkchxhNwIQkcArA851bDqQrWk6J5Tn5euH6aKniFOf+wp9NLFSmHWjFZQv3mKVU8YE7O+uZfTp2yBAkJJ7YsauujG+BT4TBr6HsEURf/pSb/laMD9Hcej3VvTlu26dIsqbzHnUZE8M/u+HycZb/z4xrxSo9Q9UbcranzvAwRBwEypsZU1x0X3TDRe64o1fHv1lbPgyIcHE1Gz+14zQXRUP3Kp74mVlacnukrTBIygXogC5VQTJD3f5KFsBuwbBsI0DYwWZ7KfnlRRXI1fWyht8hlYyKwSmbQXRX0FBEXOMr3WyYQ+vPQSLNVvH/6Bv6g5J68iP8y6zjLbbiPIJFQe0OFJ0c+/OItHgtk8tqd2H8M6wsIdqBcdAOrUfQyKJYNNeMFm5H7j3LBZ1cOd36Hv/9ipsgP7VmREr2c7W51k8NSD/Tli0KLyudABr0rl4mg88I2LSehCabJn5WsKvC0TEtCEk6wNYVUEmpVh4qV6b9CYXZ3vwCXhToosV0yASQCZ3rv36oGUq/hgFc566H9zOVImfi2/fv7JUKAfXEpUMpLckwL9S3+TX8nrQo5pjZ/iS6qsCb77A8yx+FKQ2wehN2vV/chEkUjyaXJpatifT0JlquaBbk0fmCUNiImqxwkY5vYEwwrss7CM+rD3fb36ui7XaUb0H4NUYIWTz1qhfw319duIxrB1yMOi4y/VpHP/Uq6QqhCHNfhdNX6elMq8BO1fehJwIZgZxV1rfAClIIEpGFAurdtNi7tCybgjB61HK1XglXOPetIHXOJJefGqR9Lrcn06M/L25scNuMDVOMNkzp2VuGcgUtO3a0NJexNQxS/c+HLr1K5ktm5p5qRMyUzdC01mPu/u1nf1br3bC9LnaOO23J9i2JYqg0vXQ2l/NNrf8873ZozIciLA5sglK8e/F0OlVQA9IbflyJijjkX2hHz+BOmoqRpmKn42OU+JvLSOQNXcNf1hPknMbxKk65yQDnEmvuWfCTALYpi6fFOLBX+CybZh9OtEk6m9FpwyN4nLjT/+qPu8g0vMqERNxi4s+6g3UDtt9/f6ycBf+HT2/DmR8IlNOFz39Wy708rP5uZ3he8uKhI90ngo4WwhzEzI8CVaDDg3D3BMhFGdZs0Qt0Ecw8y+eIzFAVYpFu8aTfHyMbXBErSOOxmCMnzHd5MCtjiL3NFQqK89rvRulVbEkKMkqDwn0LEMCcc1MeeGIS+G2fyyvneXaXgJNrCMbKtlVazCafPsZGfn+miT6BOL98FLWFZFrIoheD6hU5rhLAQLyL0fwoxBFobbj57FLxjnkfKu0w3IAIyM2qP56uMzbdSseTRydWRW/SPgKYAGDADjAgvRXH5VV/UxIIH8AclywiJwyDpQP9vz6EXv1JAkrIbpB94SR4E5FSKewmPqySxIGOq0uUyCcYc4Xa09kQQ624jeWcXBj+aBYMC2KLjoDTC58fnzND10Oxwsyi0Rr+Fj/+8psk3bbN+tUmhVKMAl8GxyOHXLFN6JmAjrPXdhtHpBtYoDlWEBkoFIi/mWsxpNfe2d6Ygbgr+RhLAw4ADLqNk4MJO11Q/2K4sInY0cNQNN6GYj0Y+QUKg/buI2CO2eZyQYTqLKDOKXHtbgVigqpmDYEMIt+n3UKKW2dhnnNUazD+DhqzTTLgUJi0FlUVug71s5BBJQ5hHkIBryeyp9w/JqFrY/5S+nhZz/illUXARzjZAziMqNTka1c7fhZcpfp1n0SQkM9Xs3hppRyCMGdrU87EITWy1XJk86GufzUNCmWYwOk7+YiX3RdyuJLFj6OuNxWmURxjSuEa/Lam5BkNjXFGsNt44PXlSDeO1QZqbCDxdSXdYaW0lYkW6W8
i3sron3nRi8MHnHpkpJp2/PzZpN91XRrlIRsYfwAl6zFIx118MAJc5O9+9FuBLr1HtMptioeofLXQWQ8YA+JURBtqF8bJ6csTomeh3ggfxnaRRgksTn1WWgqoc96bj2VYT+TRK2sRwxHtOsNQercVhYsUDlbw3bprRjJsYlXg2jgk3EeSCkhpuKf5NJVGRi5fXMmNoXxBITOu4YDVqEBfg0ySuf2l+M43ruXmXVj5+KR/MtJEocwl/NmBDYl/ko8SPiFrkp/L5crVb2DKyxbOXWP/AL9TKhXyyuMZ+9QRPKwHvuxBM3y30O9O7eXZhd/0IBjrpH4VX4D846wQKLDjGVHLwfhqdd2LMKfKFgoM4UE0fvaJ4gQGecmuNjRxBjIxeCx+AZgEqy5EZ5mVEF5/czljrIIJiRXPITg5UhVR5aRyHwoGeAH/bkztuVvmaaSlRLSHCqmghngv+amgdpEqdclQZCIBHFxCjspuYjE3AenT24O3knezKzjEIRiREHyyHiFj/y+gsX50M+ojY62PDFQEXcTM+ty+3RMN2IKqlcKcbMZN5TKiLMqTjAwqkAlUaPwgh29ADm4OJTYpioEIdOd7+lgvWiGa0DPplgaeJrnqFmNAicUWAqSmcBxZNFs36YqcIyfQLLr4cvrYkT95Dl71lmuGtyCY87gWMSTNfQEPbEupYf7hpDLfR7RJsExylYT06anx6QjfTIlKViXJYJBHoB11cXaHfSMpu7r2NFPd0Lpj2LLZcUSRsOpfljJcY2OVarkWkQNJBxhUO8QKbGJ4RX+RMJlIeBfgfJb8KgEw3A1xOySdk2Bxz5fJDB0h85lCMxnE+rwZ0yjGW+65AQiTOtGSAv066ky+/Lez+6pZIOjXXqYReTXJua9P4mnNfzfLb9rqQZv3ijcdz/FoypipugLJSAVeQjvyQs6wLC5Yl5N4hLl6PN9ebqHFby/18Upzy57C53czHmrni28H5r4NCt6CkwydVXblEnK2BvwwLkLKl9wZ7DXc6dhTrv9GUIAGqDKLN81bBTK5JDXgglLpVkjHvkFBAmcMsJtV4tb+Lu7ZYGXkP/PxjkeTD/2/HHP7EjSwIYiNhHyo0EWY9sgTdaoq4ShFo0uBXDiGOtZVglRg5Ow/zKf9I5awTHcebTA1CcGcqmmKg6FdznzN/LyDRtuNbOFbxaJ4UZUyhZxHT/BOj+2heXKsXzXZkoRVXReW1m+9q/JiQ4L6LEjd/7K0Dv/eG/9N4X0kUpNvJtBpBFvrAivmWn33poTKvQyWnkORMiW4IYEF3t+uTW47y0VuyPYEx6C9vEexG2lPpgTIGs3CTIOkIVYeJlKWKs4WMVFN2sN2viRfxoPga4xqxArukTBMIiWzEpFTuGxnsfWsnIFWnEqeMqj3pWro++cpHuX5LX/5JwtwB/dqRtDNM4AHt1IAPI5jl6NnWaN1mbOyFu3r8F/+Lz861peaJqeoto+ty/nqBFXY8+FlGkUiQishEZsqF7GJ7SwSO9nw4CM8IEUokGEK5HKUgk/0TqGaClrBoEaqjuXYlOgmL6uzDeYbs0vv7m6smoY7sj3uro8NUV1uEfH52xnjUbJ9F8OXB/dKSVqMpY0VlCA4dgW8vBny5xt6ZbCFtGH3yLPnz7g274fQ4pV06xYqC1wrbC6+/H+cd9//AMk4wGuTaB5ZwnqQNj/Jyk+Pni+U66VDHUOWON0EpOPjMASXzlQoDqyYvL5NFtAQSXilTIWZ0ZQ2L4UwChPqu2A3KbSBCL+yfT8n77lcYmmt95YzSMMT/6ZN0PgZFWhrRPGI4skvojXfWEk+aJSXmQbJ322KJIGB3bA7eMU+jiNO2cxJUvLZjMf0Gn/HlfPjnZ2//w1Rj2Kxb50tz5kPNvYYZkDIPwFn6eiFfO3Xgxd0H3ylcLgL67hYWGS6JLzyc9l0/S4daE9baAFQLKNTcSDuFnJx5jdMae0FbEwEWoeuExv6pF0Z6xiH0jWlGrvF+TJLQW/g/t6qA9Fe5e9V6GITQzLGn2qPafpWhcw5eCJi2upbhfzabQZuaK7njyFZDuFoMwjJmSGEaqtCKVyaVq6Zrpo/TI8Dvw7Pva3MdySpKe7N5B7yS2sO//B/+LUx+fc8i4SYrzpBS2ou+h+D7HtKxiJvwsh1pU0B6QxAJqfJsl/IV6z41fyy1s9bt2FvpsJzyi4zRMels2K6iIS3gac1g3QqG7MCNlxvf8v1FtX9WwWOzq2snnT7FV14cf1XIV7PZFbe3sxH9C5iydAmhsppkIIhLjt2IhqeJJAqJgpkzet3jHSaPYHsrQpOWmkGbTkXo+VOVPpnufCDBf42/E9++D+f+a/IhQi/uSSPs7UPEvXH8IKjyCzm9Gwf7/PJVpLqUQEUIOdVYBvdcRm7H5FqPo7mqEkrQ4a/r7FmQC0vT1ZumQTauMln/ilJ/pXoZKu/GQlOxkSk3fxuQjsybwzP/whOfp/1c2iqkFzKkbmuY3AFSgpNxm5Mqavx1G+0t2K/i4RiRCaoEy+cAtOY4lOnN9BWcrcP2eFH48Z5/rWCM/eyNTByVDjmoQwKlA/QOEw/Q+RpShc3kH5CISHtbYGMOmiUl4t0OEWrL4xKfpXjLqFMzocTCd9F9+48ciFMpBHjfsGbWU1d9uDXzj+/jGbz/Q7kb3HrbwnN4pC5uZlNz//l/8bP6lmoyBEjlrEQLJrPz9ATZr407UHfbTH0OGwUfV+7iCZ4Kh1idM10xTI5BAoWnyJIFRZYZQ0Xrx5Oq/LK+6KwkAxwCFsAxj/INxNrxgTUsySlKog0xa+6sgmhtpTGYwhc6g0/zKYE/6EkwOnAlURLs4gdWw3n4+iNLAFfkUgU5EgOzkFFslMIbPN8QY0qs6/A/GKCP60jwTCpitLef5fd+ohGU+LNO24bG8fZGVdDP3Kq+OicbiPza6OcR6UmTO3p+9WTm7oG3YgVR8EUVxnZm2XpkDFM4Eem1Q87b1CHHnyqMeCH67eSsngRRc68q3Wd/MFJksvExa+q76CNDPg7SY3pufjCNzEvLkLN/Equ8UaSxxeaNxdnOGKXUimSORRT5+z8UmAvj62+0AQQLraloGLvvzOLqhDVYmjzB+SFdntfDNNI8dLhF5c319JK4bQFUmYROXPx4W7FWto8WHN7NpbOePXLbY5Z/N+kSunVCDbX6gzGkAdZqqESCAU5XaYfxX9hYa+eolPEKwUohnUqTM7Y4cAnAlIwGjwzOkbsajRtqBx/e/1gbiZi+jP9SrKV+pV0HLDfmLKnZcSjo4ZvIS7iZcIZXoDDuG9VM3cCumvKllS8t0jqWcjzC+cTRxVQYjayh5xflKPiJskjHJTemyXsh250iF6pxoyMOKv3NbPOQX6EJPlZSqM/WGKna73RYQNn0qIdfgLPuyK2kUOZNwA1Tb/PFJAtcHRZ4xcRDzhMjrzwGL7LstRw5kX5zTReUvqrywF20/5Ssz9D8Faq/vKy9EceW+B89SBWmgm+eGFZUbhpkyaCC0u95N+tarPlrJuiohcs9SheuqZlDLkSUudXKaky5jQ7OlCue3
jBMS40P++9/IoZP4aOw89hNbO+s5AMb5QONGfJkV5/M4WdoczsN1Tbcc5em5xP7BJq4TNqXZphKhz+q2I0B0z5LHh3fuFDNjIrJv+5P55GcelmFk8uRPWK0s94cYD3tB2qiDgK4LYzi4creR+Np0paOMpvwnfFGPc+K6ZE+p92N7i07FFIvlfbJlFQItwoHzOSxQolxx8WJ35AX2dTrGSZUmaPRn2r4lxEgeT67neKa2ijDDk/udwZIkgSclz4H7G8Q83SMLcV8lIsyPJjA87mgBfCufMd80+f7y/29/g/nCCfgnrBMl9mkNe6v9ZIpC4nMOnbOEWYXjjaabv139wtk7hbwVNzBEERG9S0sDODggEVxyr91JBGHpostvtKoD+Cx2k3KsYUVj6c2XSaobP5tALwUnppBTZy5j7OdYpwQCOC7uWuzr/wEwJvhIhHI6gapDF3zLKb1HoRZGtWxtW/JehCFaIS35kvBjlI2HhI8ckONA6hAIPQ3YwNpu48qUF/F8hndgg5/goGy7UAn6jKxDgGw3mmWv7hmNAX0wPVW3h6L3FqkZ324ZONnciV7NsygfpBoWLWndvFO5eMSDTGIYVHQA3NDPHdu6WDVq38zSy1WLno6BViRj/YKNyeoWZSl/kKEFw+j7BKMrPBC5Iz9zKM5Otnhoaqv6DdE4KmyZ01M2DHR3pQQ47j+gRUOsrPZ+p1oZ4PYLpv23pcom5y1t9MDoGW0ESMh5jHbC0t/rnkZgnVvJd3YvDvmDqeYFdxkxWyOR3rB1cBCzVKr1PTYSzM4XmAQBNb/vh+nI08B59xFTi/kY02rIALODEX3h+pCnLO85Bss/PDyxN1Yk+wnjl6wuFw8KI19xcZiPhOrCLhVUIwpY/u4Wt2EUbWp87kUhsvwxvoLao4gn5Q6mws5znrm2vvLD/PzajULWiJJX51Chz44Nu3QYF3T40ou1NN/KggVybgwLPRwzqHyPFFxzLc4y6+q8x4CsdWNlfJULyJEzFBMJRcqGHvsEKtlT+Zf6FuU9It+u0yCzWy9xQcb/GgsP8kemQv/hiyZCEMBM5p3qtu1OhR2S3Jc1X8nUdMJuugCOQVIZnTGzF+cYh+msYy9kr+3GzBtI9Z9GhmxLERZv5dRsWzVf6ymFXjCR4J2b31zsEaBObIJ+ugfxm0Mtj0PKVPI41aZ8Ku76flo32P0go1ThsOMxePrPycmuvXgTYnj+KAiOhyhA5/p8pvbxu+ixbdd7Z7s5yT/5TU4pOp9U6tESBkyptffOtR9h+z0qqTo2AIJ1+Xz36m7RFFE9Ggefdd8naKXnwJZ1h62rEvxKTEDVkOi82w9KaEY4BBGj/OfpowWbeb3KXEF3fGUSYQCCIYEj6lF5nPJABt4LU/ArVbeVRJA9delpPA7kdQJOqQDp74wXtAYduS8j5dEy8TolnOYNlgUFBy4c7gyg7Lf5bI2NUw9IYB63FnFeU78or7mIaRWC2EhX2x/jKztnwI+x5CMpNrIHMN01PfWhZ6irc8RAtawn0K9Po3X8YdUtslRUdXvGbimyoqNsl1NfTI2P6dwe8J3szmu+lI2u5nlUsDEAuyjKPkfDjOy6DvJ+hHwD71SjyWWbokIHJsPhZCe48ksSiLXcAkraUfPkkmeC3Qr1Uam2OQYS9DB3dzAsbRuHqVj8nEYobgIdecJtPZG+gP1rEDBioeQM17JWmN69u66VNMRRNVoy4fBuXHjScl0MBmxUL0G2Ym87LaD16puvk8TcR4IoYilr22MUOcFJMi1W3pGpcpcLmT2OzEEbLxhm/oAhN45K1/6zVgpoEU0yIINmMVxtKw26Cl9lvI5oRnEYkI30XbBSFYKJmbtFRJE/OMj2RDDYuiNHy19AsTI5585XEbJG6EaHJrCc9wg2s2k/pGnbG/LcJf5x+BAarDBSOmtaUahu5soAof8Bn8P9z+xd62npF/5n1tiAbv7ST+NkUYJOK5/FOWzXbb59T18mYhkngZSUJ/El0m+bojRSjR/6mlNvQlhN48/22RIg233Tk+BesP9jEEz916SzbnyvPsAjut+0mWf8UiqmIH/NLWtemRKecQopwTaY7mVrbY8jhPXzXakkoFM11IdXylTseW57ZKTg/NTbwIftItP0G7iVeHaIe2mMaq2MyVJdSdgfv83+vXRS3ZFwDv0FHu10jOWbJ/93yip155Hj5h6BWvXaWj+aXomMAJPDdvZdAigaVousOS8idgAerrHUsYw7vP6mEODE5G7GLkbECRx5zYJ2KxsVwHizMPCnASFfKbLThBW5nWm+IG3SQLTnTG3T2eETCck1OKhONKNE5Xw4kWsStFCVK3fBF653TprmS8Fs4guFkgxLiqLt1uXl4OBoBo8alR6LvwUzxqIUZ/tb5zUBujs9MmWUKytUJwJ6SUoOPvSQJ3TcOTNe52nsk0ZZ9ez2gzw62t/1fUbc1JbhEaDV8KQyILkm6vT46xT14HNQxJay9llKP48GE2jkkPQxrc3QrARSwKCX/m6uOKQkzORYruG6Py9OKFDIv5RqWLbnF7sbBdRe51VtSfu+it4dvT8cDNGl66RL9ygAj8x6f0aFTNmabs+XrJ92qiYkYPOS0ufBrcCmZCbxwemnZgxGmPcEmPsB2H1CHqL+LJHI1Ofcx9Ne4SPNw3XyxHRfJ4TB89qEMmjUxQZpE60jZ1X1w9zy33SdzSJD+IjS8i+E2p7qbcsFAtblY41wyup3gOS8Sh95qDdrl7S8vcYgyIFyRIRcFq4cC2voi4Dv8eIww27fRyxdcSNqKhB4ZPgV//drIlZ4p6AtpQZn4DCaKDhxU9nNhXzJ9SzD88i8SDEsre3OCUETFwLDL6l97OVrYYroB47olz02REN8qsLLG+O79iMm0oHWL3rlbPUWT71joNcq393Q3l2EJ9O4OEinj7+wxdZHP3Cl8NlXJPLOdHh6FndWPb78M2Lwo3nmIXZ5t6YeOhwDyIPcUWn5L91TSnmhAE2TFM5ygg4soSIlP91ZG+X2wU3smQT7tXvyjx34lsKi7KNu1iaPaM/90PMSdZ6gXtk6v6QcvNEyBnvXgdH+RGdk+KFHOSm5SxFka3N+1xQhZFMM5tpRp4aVlihuTBHl/LZ9Yw2bzprqBIdYvny+QeT32Tmq6Xs5MjrZv7YL/jtVNSFjnTjIJGUSQpEXeBpBeXtxhdvUGeUJ5PyBS0viiRQt3FEjGAmNRA9Z8z+U4S9XdaG6vyQbuj9OXV+H2ooCiiXInx6OoZrM6/SdlFW23MeL3y1g6oFrho5E5BuUqnp127aAd1u8wTkjab63z6GFmgQKus+a/Z5gta0pXLP/5LABfEcr0FrRKKM/f6uoLceMKQyOnJtjW9sCt/0FLOvTZ9HTOD0P+oDrIgEGhHd1kUTyXmikFkbAoDGtsIQN5F42nox36kTuEjLjMplfTi7cVPeNdscpmvH/3LE1o8rQVnUwhCmz4L1scfTKFffD3Wnl/F2htXhkN2GGNWSYZcTp0Z42S3EZ8GqEP2z3W1sLHa7CkfyjA2KDoORMNuhhQhovzDH
WDGwcw80742Rb5uiofqIcs5LmuLKL2Eahxy4yyU5RCNlgmLUM+BAxkCjBLR3pUYVvXsmnGWhXfkkF1buNbQfDFEFB9GMJubVCy9jN7JPFkKv/1OqfbKNRP/9xpFMrRcKLV8dQ6H1M9Bd1L/b9fyRU1KhzfbC3UV0pLFNF6Nlj6h1dDFktkyArRfIoUgHZx/rsARwkp4nfW9YdT9sRN6GqVqoEkiIKlV3xQQ6DzVbhO3ELfU9OhBZlHWDaB1FsVbbPj6NlDKKySa1Fk2DpVrZPbBUQEP+VQswUwnhxLnC6pYKO4LyYQcLPxmuoGCbfQM74KnuHVpvg76RVRwuDlO7H//YK+eSaY25bV3ufbRhS1PiztECZ3sZS71AApGCoXV2nTWvkGKKBYj5hrS3EscDvfi1ObVeTAAbVRaYv9ICcW13WcCUAr5OFo4hmVK5QJQHR3x5TljMJfMgBZDqVD1xuVc9rXdXiCog5FxFgOZNtUK/hHf+u+DuEpTuB0avSBHqPDXcx9k5wyLyTHHKGd22Wvg2348BjwpRVA+cmaaKQ+gDpqli7id5mJkrfJLluLliCIG5DChChFaXpfa4asxdcrWuQRnkyTHo37XY+B4R9GkYhkf2NOvOU7CjCriz/V3V4FnowgKBDvJBSaTy8E+hFNYGoUZ/9fuuAEBauF0jMd24DknPTwdlQqrLiuWTPox0bsOS+czSycosViMQgjkMbujdX0qXSMwldCSFENx3AVUjqcW89DDiSKanxqaAaMcUgSYFRV0+E1NiyDN90sEwQAsG7sxem2Q00Ex0vFpuOQpVHE/5fgAiDdF7iJnEuUAwEYMIVrZg6D+X/9zkNFHY+dGUSkT6yKIxVTJoarG5UQAvy9A00h8RiTL7N+Jq8TmdgNGbNodtrJ8mIUjsvU0xDAQraYjwzj5un/8vHOj6z1h8QllQB+fK7SojYJwE5C52TINbBLHCpXUwpxt0lu24Z5m64jqYT5XIDAXBDBAmeVImRgmNoEAvbfNNKM3w0hxl8m8wUGwpj0s1mpnIXsnomCaRiPk84Qjm9X/Arz0noFgiysQEKg+iozp0s8m22ye8eCQWs7Qzip4gflhLtRu0ttWsqUe4R70OqvZYKhq6a9BUrSvJ4hhl05Tq4hSzAqTioOxsX/QvFlCDQSPUaIq8AjCwf1nJy8tzwYfG1o6IhC6XvAxMsyGNyGLbtHYwl56Jp8LTPvFHUnUTPNE7pO1bMooFbWfwssHC/dXVoAOni6WSCRiGbbhLooX+DU42epLmamnqmTHvutuZz3gI4NxB7Vm/6dSDT0rdfZQEKdJ7b1h1E215B7uoreXFMOGSDmWxrRttA4cMhtvrkgHbiT4QuM2R1OHheb3vUKg08+HnQvYlfm2NH+zXy9pkV3LlG0rDMMfz6HQkYiIgbFozJolFGdjv4vmo98arwTKmJY2LXZap80XkMTz5+0xlHvQiEw4bNKAP2x1u4LtHq8aD8D+z1a8pAzVssYHQsrLAD33zCjVeFr44ehG8jlVYEUfw+3l1Po0WxE2mXD9hXdfpIX1cra4/2G5uOCSCUFlMwn7yqmIpmeELCvntJvPZl56tGS/g6BRFl06ZrW3h1FrMKydP44BQyFv3w2KVY63SzpKw+X5jxK/UCV3Q5m48l5iuZ1hcG0G0rnOPnC3cPl2ZgMPEBO2BHKLRp5/+9Al+mIrPW7baPNBB3E3nZuKMEYixCOwFivwGT28nHi06hrZxi0LuU4tVP83aoOfIefqUbRWsa8jtVRtrDOlIPlfU/tjhypGKpZ9S/V/RW3uJoMobFO32zR5x7MN1EEimC+TiBdhsx+Jxd8X3rTKzqAfxJCVMCkR+GeWaAjfFQMHKoZefR11r8mbLkMlWQD1A26SW0hL9eWXJMNxVONSel70H0wZf/0F3PnIOL4ACPj9AOD8fsQF5wG1hFhXvdaGGQVqYJCIIpBhGQxYqg724R7LrDjpGtssMCcrt8otgY+XaWF0nrhEBXN3axyuTsv/YDTrL0pk0U8Dbtyf3yjjVTExxXiKZiRuUPwDWB9CDuUpZZRimL/0DlswEM17RgIfN10HN3Ne9zo8DFSHN1F80LVn2JUU0g5VsATurcBPQXsIBf/XvLIkvfvLqeZXeWAGUzxO0hv5x7l1JCgh8a6Sr5qui4102DU4QsIKNcgrwY982gAOYwxm1+mohpyZkU5Ytm+Bm0SYdAeSmcGT/VxKvcnuE+x7/LO8DRGUDmWhSx3ChQz0ZKAsUd2A09WZbi5amk+mycM32Je3odz+OcTQtZNclPBGZzF/vs7qq0ZmGKsrMxoq4zFmhW4dvfnCRddtvLQ0U2MSjHwe319myzfwH/1P58zWqc6MMs6KS3Cu5+UWX8/t8nJ1sWts9DFd+5z3q0fKq6Jb+PggT2mjir09QqDcHtE48oMi2My0E1OFhGwjHR1jFSF1fMtdCYmXqe0wW1vGyXizmC67Ird2PtJg4K0QfD5KU5tKeF72Qg6PSDQKUysE2CReoGyuPoZB0V2a3Aan3iAt0dP1NHXRgKXm4iKztFCTovMAnbPEalAWQDyY2htQyNwDhB/r3RzyVC49JFYBwLetzVhdm1XfmOAsLK5dlBDiS4dWocf+T2Iezc7MsIpmM/y1lbUx2w2eHspVmvYUEBh4vScAgJREboTZHmwAWi7G8fLeZnxLCFDY6QZAY5ZXwY3JnAd4HGQbkygz5QoFGYR4QY/SR7TAEegygQl3aSsWRMpWjP/Fs+Jrx0L77FTyknR5Bpxz6MUjkPyPAXEBzMPek2uMRt9x/N2hfBJd4Nmz5dVEzp5zMUtyb7aENxHYVLPXfXvBvLqTeIvTpTKc3h/BkWDaRpZfgt2IXqAVvB6slKQgVHWQQaMe24FSXAvmArAMEE8aqb6m8WRyxbhxXlq7Rx1W5SrhLGdc70K2Qq5WrMbLPDmThl4CBz2O8NXF9cIVf5olxeRWhVlh3l/sGNsRq3DhA87KRuEUODcqUmGLtdF2y7Oh42iZEK6E/sB1mwZ9FPpp7DsAnCdpp5JKElQnaTRcnWvRMAHDJvx5xvPMytLVPQRwIZmDv4anjLV2w0/JrvVXCpvRxZjVUasceZGLuV64wGefaiT21pgOYHp5ir8zpV4Z8/UjauvWd8v+PIJUbfZPXB+hslxsZbp8xOc3WmVn36sv8Y0c3AxOeYHXHSRyc77DzaqNBtjPncUT1OHXWRafc5cncbj+Djb592efvVZq3CoV0lKe2U5Pa88mMFqmOkyXlyazMnyrgQkz6OG0WZeiZhNZa0/COkaKKJ24X/+b9dxDp3J3tawUnp4VsSejA2HMiQaaZmdcGN21D+avTSAXSu5lUe9s3BYLwfvXvacey9I6h8ToPy23jDelrL+X0KvVIOrF/Qy7vPSuZJo9CRcUlP8kTvbnKu9GefXTp/8/+l/BFs2tA20AAAAASUVORK5CYII=", "text/plain": [ "" ] @@ -541,7 +5417,8 @@ } ], "source": [ - 
"display(Image.fromarray(images_processed[0]))" + "display(Image.fromarray(images_processed[0]))\n", + "display(Image.fromarray(images_processed[1]))" ] }, { diff --git a/examples/progressive_distillation/utils.py b/examples/progressive_distillation/utils.py index 76b75b7e0bf4..0d3398722d28 100644 --- a/examples/progressive_distillation/utils.py +++ b/examples/progressive_distillation/utils.py @@ -10,7 +10,14 @@ Resize, ToTensor, ) -from diffusers import UNet2DModel +import torch.nn.functional as F + +import torch +from diffusers import UNet2DModel, DDIMScheduler +from accelerate import Accelerator +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel +from tqdm import tqdm @dataclass class DiffusionTrainingArgs: @@ -65,4 +72,98 @@ def get_unet(training_config): "UpBlock2D", "UpBlock2D", ), - ) \ No newline at end of file + ) + + +def distill(teacher, n, train_image, training_config, epochs=100, lr=3e-4, batch_size=16): + accelerator = Accelerator( + gradient_accumulation_steps=training_config.gradient_accumulation_steps, + mixed_precision=training_config.mixed_precision, +) + if accelerator.is_main_process: + run = "distill" + accelerator.init_trackers(run) + teacher_scheduler = DDIMScheduler(num_train_timesteps=n) + student_scheduler = DDIMScheduler(num_train_timesteps=n // 2) + student = get_unet(training_config) + student.load_state_dict(teacher.state_dict()) + student = accelerator.prepare(student) + student.train() + optimizer = torch.optim.AdamW( + student.parameters(), + lr=lr, + betas=(training_config.adam_beta1, training_config.adam_beta2), + weight_decay=0.001, + eps=training_config.adam_epsilon, + ) + lr_scheduler = get_scheduler( + "linear", + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=(epochs) // training_config.gradient_accumulation_steps, +) + teacher, student, optimizer, lr_scheduler, train_image, teacher_scheduler, student_scheduler = accelerator.prepare( + teacher, student, optimizer, lr_scheduler, train_image,teacher_scheduler, student_scheduler +) + ema_model = EMAModel(student, inv_gamma=training_config.ema_inv_gamma, power=training_config.ema_power, max_value=training_config.ema_max_decay) + global_step = 0 + for epoch in range(epochs): + progress_bar = tqdm(total=1, disable=not accelerator.is_local_main_process) + progress_bar.set_description(f"Epoch {epoch}") + batch = train_image.unsqueeze(0).repeat( + batch_size, 1, 1, 1 + ).to(accelerator.device) + with accelerator.accumulate(student): + noise = torch.randn(batch.shape).to(accelerator.device) + bsz = batch.shape[0] + # Sample a random timestep for each image + timesteps = torch.randint( + 2, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device + ).long() * 2 + with torch.no_grad(): + # Add noise to the image based on noise scheduler a t=timesteps + alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps, accelerator.device) + z_t = alpha_t * batch + sigma_t * noise + + # Take the first diffusion step with the teacher + noise_pred_t = teacher(z_t, timesteps).sample + x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1) + + # Add noise to the image based on noise scheduler a t=timesteps-1, to prepare for the next diffusion step + alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma(batch, timesteps-1, accelerator.device) + z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * (z_t - alpha_t * x_teacher_z_t) + # Take the second diffusion step with the teacher + 
noise_pred_t_prime = teacher(z_t_prime.float(), timesteps - 1).sample + rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1) + + # V prediction per Appendix D + alpha_t_prime2, sigma_t_prime2 = teacher_scheduler.get_alpha_sigma(batch, timesteps-2, accelerator.device) + x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2 + z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime + + noise_pred = student(z_t, timesteps).sample + loss = F.mse_loss(noise_pred, z_t_prime_2) + accelerator.backward(loss) + + if accelerator.sync_gradients: + accelerator.clip_grad_norm_(student.parameters(), 1.0) + optimizer.step() + lr_scheduler.step() + if training_config.use_ema: + ema_model.step(student) + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + global_step += 1 + + logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} + if training_config.use_ema: + logs["ema_decay"] = ema_model.decay + progress_bar.set_postfix(**logs) + accelerator.log(logs, step=global_step) + progress_bar.close() + + accelerator.wait_for_everyone() + return student, ema_model, accelerator From b76d0849a55df2621b60c77b8f6e45ea6c6c17f8 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 27 Oct 2022 15:50:33 -0400 Subject: [PATCH 100/133] equation fixes and comments --- .../image_diffusion.ipynb | 7187 +++++------------ examples/progressive_distillation/utils.py | 19 +- 2 files changed, 2044 insertions(+), 5162 deletions(-) diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb index acd3369df528..70832030b15e 100644 --- a/examples/progressive_distillation/image_diffusion.ipynb +++ b/examples/progressive_distillation/image_diffusion.ipynb @@ -2,9 +2,19 @@ "cells": [ { "cell_type": "code", - "execution_count": 13, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/lib/python3/dist-packages/requests/__init__.py:89: RequestsDependencyWarning: urllib3 (1.26.12) or chardet (3.0.4) doesn't match a supported version!\n", + " warnings.warn(\"urllib3 ({}) or chardet ({}) doesn't match a supported \"\n", + "WARNING:absl:No GPU/TPU found, falling back to CPU. 
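For context: the `distill` helper above performs one round of progressive distillation (Salimans & Ho, 2022), training a student that samples in n // 2 DDIM steps to match two consecutive steps of an n-step teacher. A minimal sketch of how it might be driven is shown below; it is an assumed usage pattern, not code from this patch. Only `distill`, `get_unet`, `train_image`, and `training_config` come from the patch itself; the starting step count, checkpoint path, and stopping threshold are illustrative.

    # Hypothetical driver loop (assumed usage, not part of the patch): repeatedly
    # halve the DDIM sampling budget, promoting each distilled student to teacher.
    import torch

    N = 1024                                    # assumed initial number of teacher timesteps
    teacher = get_unet(training_config)         # get_unet / training_config from utils.py above
    teacher.load_state_dict(torch.load("teacher.pt"))  # hypothetical pre-trained checkpoint

    while N > 4:                                # assumed stopping point
        # distill() trains an (N // 2)-step student against two N-step teacher steps
        student, ema_student, accelerator = distill(teacher, N, train_image, training_config)
        teacher = student                       # the student becomes the next round's teacher
        N //= 2                                 # next round works at half the step count
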
From b76d0849a55df2621b60c77b8f6e45ea6c6c17f8 Mon Sep 17 00:00:00 2001
From: Ben Glickenhaus
Date: Thu, 27 Oct 2022 15:50:33 -0400
Subject: [PATCH 100/133] equation fixes and comments

---
 .../image_diffusion.ipynb                  | 7187 +++++------
 examples/progressive_distillation/utils.py |   19 +-
 2 files changed, 2044 insertions(+), 5162 deletions(-)

diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb
index acd3369df528..70832030b15e 100644
--- a/examples/progressive_distillation/image_diffusion.ipynb
+++ b/examples/progressive_distillation/image_diffusion.ipynb
@@ -2,9 +2,19 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 1,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/usr/lib/python3/dist-packages/requests/__init__.py:89: RequestsDependencyWarning: urllib3 (1.26.12) or chardet (3.0.4) doesn't match a supported version!\n",
+      "  warnings.warn(\"urllib3 ({}) or chardet ({}) doesn't match a supported \"\n",
+      "WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
+     ]
+    }
+   ],
    "source": [
     "import torch\n",
     "from PIL import Image\n",
@@ -31,16 +41,16 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       ""
+       ""
      ]
     },
-    "execution_count": 3,
+    "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
@@ -51,7 +61,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -60,7 +70,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -72,7 +82,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -82,7 +92,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -91,7 +101,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -100,7 +110,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 8,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -110,7 +120,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": 13,
    "metadata": {},
    "outputs": [
    {
@@ -124,1013 +134,1012 @@
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    [~1,000 deleted tqdm progress lines elided here: repeated entries of the form "Epoch N: 100%|██████████| 1/1 [00:00<00:00, ~1.5it/s, ema_decay=..., loss=..., lr=..., step=N+1]" covering an interrupted first run (epochs 0-10) and a full run over epochs 0-289, with loss falling from ~0.3 to ~0.001, lr annealing linearly from 3e-4 to ~2.1e-4, and ema_decay rising to ~0.986]
-    "Epoch 290: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00122, lr=0.000213,
step=291]\n", - "Epoch 291: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00208, lr=0.000212, step=292]\n", - "Epoch 292: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00119, lr=0.000212, step=293]\n", - "Epoch 293: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00153, lr=0.000212, step=294]\n", - "Epoch 294: 100%|██████████| 1/1 [00:00<00:00, 1.17it/s, ema_decay=0.986, loss=0.000898, lr=0.000211, step=295]\n", - "Epoch 295: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.986, loss=0.000991, lr=0.000211, step=296]\n", - "Epoch 296: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.986, loss=0.00182, lr=0.000211, step=297]\n", - "Epoch 297: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00285, lr=0.000211, step=298]\n", - "Epoch 298: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00122, lr=0.00021, step=299]\n", - "Epoch 299: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00103, lr=0.00021, step=300]\n", - "Epoch 300: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00219, lr=0.00021, step=301]\n", - "Epoch 301: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.000659, lr=0.000209, step=302]\n", - "Epoch 302: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00182, lr=0.000209, step=303]\n", - "Epoch 303: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00211, lr=0.000209, step=304]\n", - "Epoch 304: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00161, lr=0.000208, step=305]\n", - "Epoch 305: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00117, lr=0.000208, step=306]\n", - "Epoch 306: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.0026, lr=0.000208, step=307]\n", - "Epoch 307: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.000813, lr=0.000208, step=308]\n", - "Epoch 308: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00292, lr=0.000207, step=309]\n", - "Epoch 309: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.986, loss=0.000847, lr=0.000207, step=310]\n", - "Epoch 310: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.986, loss=0.0027, lr=0.000207, step=311]\n", - "Epoch 311: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.986, loss=0.00138, lr=0.000206, step=312]\n", - "Epoch 312: 100%|██████████| 1/1 [00:00<00:00, 1.23it/s, ema_decay=0.987, loss=0.00158, lr=0.000206, step=313]\n", - "Epoch 313: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.00111, lr=0.000206, step=314]\n", - "Epoch 314: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.000876, lr=0.000206, step=315]\n", - "Epoch 315: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00136, lr=0.000205, step=316]\n", - "Epoch 316: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.000999, lr=0.000205, step=317]\n", - "Epoch 317: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00179, lr=0.000205, step=318]\n", - "Epoch 318: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.0014, lr=0.000204, step=319]\n", - "Epoch 319: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.000911, lr=0.000204, step=320]\n", - "Epoch 320: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00237, lr=0.000204, step=321]\n", - "Epoch 321: 
100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.987, loss=0.00172, lr=0.000203, step=322]\n", - "Epoch 322: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00081, lr=0.000203, step=323]\n", - "Epoch 323: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00218, lr=0.000203, step=324]\n", - "Epoch 324: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00149, lr=0.000202, step=325]\n", - "Epoch 325: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.987, loss=0.000931, lr=0.000202, step=326]\n", - "Epoch 326: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00207, lr=0.000202, step=327]\n", - "Epoch 327: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00303, lr=0.000202, step=328]\n", - "Epoch 328: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00138, lr=0.000201, step=329]\n", - "Epoch 329: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.987, loss=0.00241, lr=0.000201, step=330]\n", - "Epoch 330: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00181, lr=0.000201, step=331]\n", - "Epoch 331: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00197, lr=0.0002, step=332]\n", - "Epoch 332: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00125, lr=0.0002, step=333]\n", - "Epoch 333: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00183, lr=0.0002, step=334]\n", - "Epoch 334: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.00169, lr=0.000199, step=335]\n", - "Epoch 335: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00102, lr=0.000199, step=336]\n", - "Epoch 336: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00271, lr=0.000199, step=337]\n", - "Epoch 337: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00175, lr=0.000199, step=338]\n", - "Epoch 338: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.00101, lr=0.000198, step=339]\n", - "Epoch 339: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.987, loss=0.0016, lr=0.000198, step=340]\n", - "Epoch 340: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.987, loss=0.00195, lr=0.000198, step=341]\n", - "Epoch 341: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.987, loss=0.00202, lr=0.000197, step=342]\n", - "Epoch 342: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.000818, lr=0.000197, step=343]\n", - "Epoch 343: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00203, lr=0.000197, step=344]\n", - "Epoch 344: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00247, lr=0.000196, step=345]\n", - "Epoch 345: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.000914, lr=0.000196, step=346]\n", - "Epoch 346: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00253, lr=0.000196, step=347]\n", - "Epoch 347: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.000923, lr=0.000196, step=348]\n", - "Epoch 348: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.988, loss=0.00352, lr=0.000195, step=349]\n", - "Epoch 349: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.988, loss=0.0018, lr=0.000195, step=350]\n", - "Epoch 350: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.988, loss=0.000925, lr=0.000195, step=351]\n", - "Epoch 351: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, 
ema_decay=0.988, loss=0.00163, lr=0.000194, step=352]\n", - "Epoch 352: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00458, lr=0.000194, step=353]\n", - "Epoch 353: 100%|██████████| 1/1 [00:00<00:00, 1.07it/s, ema_decay=0.988, loss=0.00276, lr=0.000194, step=354]\n", - "Epoch 354: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.988, loss=0.00312, lr=0.000193, step=355]\n", - "Epoch 355: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00398, lr=0.000193, step=356]\n", - "Epoch 356: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00396, lr=0.000193, step=357]\n", - "Epoch 357: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.988, loss=0.00188, lr=0.000193, step=358]\n", - "Epoch 358: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00291, lr=0.000192, step=359]\n", - "Epoch 359: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00386, lr=0.000192, step=360]\n", - "Epoch 360: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00215, lr=0.000192, step=361]\n", - "Epoch 361: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00184, lr=0.000191, step=362]\n", - "Epoch 362: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.988, loss=0.00349, lr=0.000191, step=363]\n", - "Epoch 363: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.988, loss=0.00115, lr=0.000191, step=364]\n", - "Epoch 364: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.988, loss=0.00194, lr=0.00019, step=365]\n", - "Epoch 365: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.988, loss=0.00233, lr=0.00019, step=366]\n", - "Epoch 366: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00169, lr=0.00019, step=367]\n", - "Epoch 367: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00305, lr=0.00019, step=368]\n", - "Epoch 368: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00107, lr=0.000189, step=369]\n", - "Epoch 369: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00203, lr=0.000189, step=370]\n", - "Epoch 370: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0041, lr=0.000189, step=371]\n", - "Epoch 371: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00552, lr=0.000188, step=372]\n", - "Epoch 372: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.988, loss=0.00117, lr=0.000188, step=373]\n", - "Epoch 373: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0018, lr=0.000188, step=374]\n", - "Epoch 374: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0114, lr=0.000187, step=375]\n", - "Epoch 375: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.988, loss=0.00351, lr=0.000187, step=376]\n", - "Epoch 376: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0025, lr=0.000187, step=377]\n", - "Epoch 377: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00243, lr=0.000187, step=378]\n", - "Epoch 378: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00343, lr=0.000186, step=379]\n", - "Epoch 379: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00395, lr=0.000186, step=380]\n", - "Epoch 380: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00329, lr=0.000186, step=381]\n", - "Epoch 381: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0016, lr=0.000185, 
step=382]\n", - "Epoch 382: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00355, lr=0.000185, step=383]\n", - "Epoch 383: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00351, lr=0.000185, step=384]\n", - "Epoch 384: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0036, lr=0.000184, step=385]\n", - "Epoch 385: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00348, lr=0.000184, step=386]\n", - "Epoch 386: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.989, loss=0.00203, lr=0.000184, step=387]\n", - "Epoch 387: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.989, loss=0.00298, lr=0.000184, step=388]\n", - "Epoch 388: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.989, loss=0.00272, lr=0.000183, step=389]\n", - "Epoch 389: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00222, lr=0.000183, step=390]\n", - "Epoch 390: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.989, loss=0.00355, lr=0.000183, step=391]\n", - "Epoch 391: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0012, lr=0.000182, step=392]\n", - "Epoch 392: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00196, lr=0.000182, step=393]\n", - "Epoch 393: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00241, lr=0.000182, step=394]\n", - "Epoch 394: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00203, lr=0.000181, step=395]\n", - "Epoch 395: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.989, loss=0.00296, lr=0.000181, step=396]\n", - "Epoch 396: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.989, loss=0.00281, lr=0.000181, step=397]\n", - "Epoch 397: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.989, loss=0.00145, lr=0.000181, step=398]\n", - "Epoch 398: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00315, lr=0.00018, step=399]\n", - "Epoch 399: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00378, lr=0.00018, step=400]\n", - "Epoch 400: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.989, loss=0.002, lr=0.00018, step=401]\n", - "Epoch 401: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.00133, lr=0.000179, step=402]\n", - "Epoch 402: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00177, lr=0.000179, step=403]\n", - "Epoch 403: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00392, lr=0.000179, step=404]\n", - "Epoch 404: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00157, lr=0.000178, step=405]\n", - "Epoch 405: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0019, lr=0.000178, step=406]\n", - "Epoch 406: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.0019, lr=0.000178, step=407]\n", - "Epoch 407: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.989, loss=0.0021, lr=0.000178, step=408]\n", - "Epoch 408: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00132, lr=0.000177, step=409]\n", - "Epoch 409: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00142, lr=0.000177, step=410]\n", - "Epoch 410: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00204, lr=0.000177, step=411]\n", - "Epoch 411: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00119, lr=0.000176, step=412]\n", - "Epoch 412: 100%|██████████| 1/1 
[00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00166, lr=0.000176, step=413]\n", - "Epoch 413: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00215, lr=0.000176, step=414]\n", - "Epoch 414: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00121, lr=0.000175, step=415]\n", - "Epoch 415: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000937, lr=0.000175, step=416]\n", - "Epoch 416: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.989, loss=0.00132, lr=0.000175, step=417]\n", - "Epoch 417: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00114, lr=0.000175, step=418]\n", - "Epoch 418: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00109, lr=0.000174, step=419]\n", - "Epoch 419: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00107, lr=0.000174, step=420]\n", - "Epoch 420: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00119, lr=0.000174, step=421]\n", - "Epoch 421: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00207, lr=0.000173, step=422]\n", - "Epoch 422: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.00147, lr=0.000173, step=423]\n", - "Epoch 423: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.989, loss=0.000816, lr=0.000173, step=424]\n", - "Epoch 424: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.989, loss=0.00302, lr=0.000172, step=425]\n", - "Epoch 425: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00161, lr=0.000172, step=426]\n", - "Epoch 426: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000704, lr=0.000172, step=427]\n", - "Epoch 427: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.000804, lr=0.000172, step=428]\n", - "Epoch 428: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00169, lr=0.000171, step=429]\n", - "Epoch 429: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00261, lr=0.000171, step=430]\n", - "Epoch 430: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.0018, lr=0.000171, step=431]\n", - "Epoch 431: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.989, loss=0.00175, lr=0.00017, step=432]\n", - "Epoch 432: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00166, lr=0.00017, step=433]\n", - "Epoch 433: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.989, loss=0.000882, lr=0.00017, step=434]\n", - "Epoch 434: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.989, loss=0.00332, lr=0.000169, step=435]\n", - "Epoch 435: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00113, lr=0.000169, step=436]\n", - "Epoch 436: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00149, lr=0.000169, step=437]\n", - "Epoch 437: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00218, lr=0.000169, step=438]\n", - "Epoch 438: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00235, lr=0.000168, step=439]\n", - "Epoch 439: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000792, lr=0.000168, step=440]\n", - "Epoch 440: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.0015, lr=0.000168, step=441]\n", - "Epoch 441: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.99, loss=0.00163, lr=0.000167, step=442]\n", - "Epoch 442: 100%|██████████| 1/1 [00:00<00:00, 1.24it/s, ema_decay=0.99, 
loss=0.000747, lr=0.000167, step=443]\n", - "Epoch 443: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.00131, lr=0.000167, step=444]\n", - "Epoch 444: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00147, lr=0.000167, step=445]\n", - "Epoch 445: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000897, lr=0.000166, step=446]\n", - "Epoch 446: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00127, lr=0.000166, step=447]\n", - "Epoch 447: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00107, lr=0.000166, step=448]\n", - "Epoch 448: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00327, lr=0.000165, step=449]\n", - "Epoch 449: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.99, loss=0.000759, lr=0.000165, step=450]\n", - "Epoch 450: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.00118, lr=0.000165, step=451]\n", - "Epoch 451: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00208, lr=0.000164, step=452]\n", - "Epoch 452: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0024, lr=0.000164, step=453]\n", - "Epoch 453: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.00144, lr=0.000164, step=454]\n", - "Epoch 454: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.00148, lr=0.000163, step=455]\n", - "Epoch 455: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.00219, lr=0.000163, step=456]\n", - "Epoch 456: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00148, lr=0.000163, step=457]\n", - "Epoch 457: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00125, lr=0.000163, step=458]\n", - "Epoch 458: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00297, lr=0.000162, step=459]\n", - "Epoch 459: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00278, lr=0.000162, step=460]\n", - "Epoch 460: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00136, lr=0.000162, step=461]\n", - "Epoch 461: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00119, lr=0.000161, step=462]\n", - "Epoch 462: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00175, lr=0.000161, step=463]\n", - "Epoch 463: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00221, lr=0.000161, step=464]\n", - "Epoch 464: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000969, lr=0.00016, step=465]\n", - "Epoch 465: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00253, lr=0.00016, step=466]\n", - "Epoch 466: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00154, lr=0.00016, step=467]\n", - "Epoch 467: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00118, lr=0.00016, step=468]\n", - "Epoch 468: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00235, lr=0.000159, step=469]\n", - "Epoch 469: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00148, lr=0.000159, step=470]\n", - "Epoch 470: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00148, lr=0.000159, step=471]\n", - "Epoch 471: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.000965, lr=0.000158, step=472]\n", - "Epoch 472: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00146, lr=0.000158, step=473]\n", - "Epoch 473: 100%|██████████| 
1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00353, lr=0.000158, step=474]\n", - "Epoch 474: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00221, lr=0.000157, step=475]\n", - "Epoch 475: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.0014, lr=0.000157, step=476]\n", - "Epoch 476: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00186, lr=0.000157, step=477]\n", - "Epoch 477: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00184, lr=0.000157, step=478]\n", - "Epoch 478: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00148, lr=0.000156, step=479]\n", - "Epoch 479: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00109, lr=0.000156, step=480]\n", - "Epoch 480: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00261, lr=0.000156, step=481]\n", - "Epoch 481: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00158, lr=0.000155, step=482]\n", - "Epoch 482: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00105, lr=0.000155, step=483]\n", - "Epoch 483: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.0011, lr=0.000155, step=484]\n", - "Epoch 484: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.99, loss=0.00199, lr=0.000154, step=485]\n", - "Epoch 485: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.000702, lr=0.000154, step=486]\n", - "Epoch 486: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.99, loss=0.00129, lr=0.000154, step=487]\n", - "Epoch 487: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.99, loss=0.00196, lr=0.000154, step=488]\n", - "Epoch 488: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00197, lr=0.000153, step=489]\n", - "Epoch 489: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.99, loss=0.00176, lr=0.000153, step=490]\n", - "Epoch 490: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00125, lr=0.000153, step=491]\n", - "Epoch 491: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00136, lr=0.000152, step=492]\n", - "Epoch 492: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.000661, lr=0.000152, step=493]\n", - "Epoch 493: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.00107, lr=0.000152, step=494]\n", - "Epoch 494: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0022, lr=0.000151, step=495]\n", - "Epoch 495: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.000522, lr=0.000151, step=496]\n", - "Epoch 496: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00108, lr=0.000151, step=497]\n", - "Epoch 497: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00174, lr=0.000151, step=498]\n", - "Epoch 498: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00166, lr=0.00015, step=499]\n", - "Epoch 499: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00126, lr=0.00015, step=500]\n", - "Epoch 500: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000982, lr=0.00015, step=501]\n", - "Epoch 501: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000943, lr=0.000149, step=502]\n", - "Epoch 502: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00167, lr=0.000149, step=503]\n", - "Epoch 503: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00102, 
lr=0.000149, step=504]\n", - "Epoch 504: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00274, lr=0.000148, step=505]\n", - "Epoch 505: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00099, lr=0.000148, step=506]\n", - "Epoch 506: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00171, lr=0.000148, step=507]\n", - "Epoch 507: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00147, lr=0.000148, step=508]\n", - "Epoch 508: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00138, lr=0.000147, step=509]\n", - "Epoch 509: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00116, lr=0.000147, step=510]\n", - "Epoch 510: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000945, lr=0.000147, step=511]\n", - "Epoch 511: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00147, lr=0.000146, step=512]\n", - "Epoch 512: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000757, lr=0.000146, step=513]\n", - "Epoch 513: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00122, lr=0.000146, step=514]\n", - "Epoch 514: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00116, lr=0.000145, step=515]\n", - "Epoch 515: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00133, lr=0.000145, step=516]\n", - "Epoch 516: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00105, lr=0.000145, step=517]\n", - "Epoch 517: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00146, lr=0.000145, step=518]\n", - "Epoch 518: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000884, lr=0.000144, step=519]\n", - "Epoch 519: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000715, lr=0.000144, step=520]\n", - "Epoch 520: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00114, lr=0.000144, step=521]\n", - "Epoch 521: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00118, lr=0.000143, step=522]\n", - "Epoch 522: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00113, lr=0.000143, step=523]\n", - "Epoch 523: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00137, lr=0.000143, step=524]\n", - "Epoch 524: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00136, lr=0.000142, step=525]\n", - "Epoch 525: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000681, lr=0.000142, step=526]\n", - "Epoch 526: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.000633, lr=0.000142, step=527]\n", - "Epoch 527: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000716, lr=0.000142, step=528]\n", - "Epoch 528: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000834, lr=0.000141, step=529]\n", - "Epoch 529: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00118, lr=0.000141, step=530]\n", - "Epoch 530: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000867, lr=0.000141, step=531]\n", - "Epoch 531: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000871, lr=0.00014, step=532]\n", - "Epoch 532: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000786, lr=0.00014, step=533]\n", - "Epoch 533: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000995, lr=0.00014, step=534]\n", - "Epoch 
534: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000961, lr=0.00014, step=535]\n", - "Epoch 535: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000577, lr=0.000139, step=536]\n", - "Epoch 536: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.000829, lr=0.000139, step=537]\n", - "Epoch 537: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00071, lr=0.000139, step=538]\n", - "Epoch 538: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0011, lr=0.000138, step=539]\n", - "Epoch 539: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.991, loss=0.000875, lr=0.000138, step=540]\n", - "Epoch 540: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.991, loss=0.000768, lr=0.000138, step=541]\n", - "Epoch 541: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000979, lr=0.000137, step=542]\n", - "Epoch 542: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000753, lr=0.000137, step=543]\n", - "Epoch 543: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.000996, lr=0.000137, step=544]\n", - "Epoch 544: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000855, lr=0.000136, step=545]\n", - "Epoch 545: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000908, lr=0.000136, step=546]\n", - "Epoch 546: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000853, lr=0.000136, step=547]\n", - "Epoch 547: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000864, lr=0.000136, step=548]\n", - "Epoch 548: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000691, lr=0.000135, step=549]\n", - "Epoch 549: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000817, lr=0.000135, step=550]\n", - "Epoch 550: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000752, lr=0.000135, step=551]\n", - "Epoch 551: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000665, lr=0.000134, step=552]\n", - "Epoch 552: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000688, lr=0.000134, step=553]\n", - "Epoch 553: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.00117, lr=0.000134, step=554]\n", - "Epoch 554: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.000824, lr=0.000133, step=555]\n", - "Epoch 555: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00133, lr=0.000133, step=556]\n", - "Epoch 556: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000693, lr=0.000133, step=557]\n", - "Epoch 557: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00138, lr=0.000133, step=558]\n", - "Epoch 558: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000873, lr=0.000132, step=559]\n", - "Epoch 559: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000552, lr=0.000132, step=560]\n", - "Epoch 560: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00106, lr=0.000132, step=561]\n", - "Epoch 561: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000643, lr=0.000131, step=562]\n", - "Epoch 562: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000554, lr=0.000131, step=563]\n", - "Epoch 563: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000537, lr=0.000131, step=564]\n", - "Epoch 564: 100%|██████████| 
1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.000894, lr=0.000131, step=565]\n", - "Epoch 565: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000594, lr=0.00013, step=566]\n", - "Epoch 566: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00055, lr=0.00013, step=567]\n", - "Epoch 567: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00055, lr=0.00013, step=568]\n", - "Epoch 568: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00052, lr=0.000129, step=569]\n", - "Epoch 569: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00083, lr=0.000129, step=570]\n", - "Epoch 570: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000891, lr=0.000129, step=571]\n", - "Epoch 571: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000543, lr=0.000128, step=572]\n", - "Epoch 572: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.000482, lr=0.000128, step=573]\n", - "Epoch 573: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.000573, lr=0.000128, step=574]\n", - "Epoch 574: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00112, lr=0.000127, step=575]\n", - "Epoch 575: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00054, lr=0.000127, step=576]\n", - "Epoch 576: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.000415, lr=0.000127, step=577]\n", - "Epoch 577: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000805, lr=0.000127, step=578]\n", - "Epoch 578: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000666, lr=0.000126, step=579]\n", - "Epoch 579: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000628, lr=0.000126, step=580]\n", - "Epoch 580: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000482, lr=0.000126, step=581]\n", - "Epoch 581: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000538, lr=0.000125, step=582]\n", - "Epoch 582: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000516, lr=0.000125, step=583]\n", - "Epoch 583: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000563, lr=0.000125, step=584]\n", - "Epoch 584: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000631, lr=0.000124, step=585]\n", - "Epoch 585: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000715, lr=0.000124, step=586]\n", - "Epoch 586: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000908, lr=0.000124, step=587]\n", - "Epoch 587: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00047, lr=0.000124, step=588]\n", - "Epoch 588: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000568, lr=0.000123, step=589]\n", - "Epoch 589: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.000851, lr=0.000123, step=590]\n", - "Epoch 590: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000802, lr=0.000123, step=591]\n", - "Epoch 591: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00075, lr=0.000122, step=592]\n", - "Epoch 592: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.000662, lr=0.000122, step=593]\n", - "Epoch 593: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00119, lr=0.000122, step=594]\n", - "Epoch 594: 100%|██████████| 1/1 [00:00<00:00, 
1.50it/s, ema_decay=0.992, loss=0.000687, lr=0.000121, step=595]\n", - "Epoch 595: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00107, lr=0.000121, step=596]\n", - "Epoch 596: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000414, lr=0.000121, step=597]\n", - "Epoch 597: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00124, lr=0.000121, step=598]\n", - "Epoch 598: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000663, lr=0.00012, step=599]\n", - "Epoch 599: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.000533, lr=0.00012, step=600]\n", - "Epoch 600: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000639, lr=0.00012, step=601]\n", - "Epoch 601: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000883, lr=0.000119, step=602]\n", - "Epoch 602: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000731, lr=0.000119, step=603]\n", - "Epoch 603: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000668, lr=0.000119, step=604]\n", - "Epoch 604: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000899, lr=0.000118, step=605]\n", - "Epoch 605: 100%|██████████| 1/1 [00:00<00:00, 1.08it/s, ema_decay=0.992, loss=0.000871, lr=0.000118, step=606]\n", - "Epoch 606: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.992, loss=0.000599, lr=0.000118, step=607]\n", - "Epoch 607: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000679, lr=0.000118, step=608]\n", - "Epoch 608: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.00101, lr=0.000117, step=609]\n", - "Epoch 609: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000769, lr=0.000117, step=610]\n", - "Epoch 610: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000644, lr=0.000117, step=611]\n", - "Epoch 611: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000556, lr=0.000116, step=612]\n", - "Epoch 612: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000501, lr=0.000116, step=613]\n", - "Epoch 613: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000525, lr=0.000116, step=614]\n", - "Epoch 614: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000692, lr=0.000115, step=615]\n", - "Epoch 615: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000547, lr=0.000115, step=616]\n", - "Epoch 616: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000425, lr=0.000115, step=617]\n", - "Epoch 617: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000701, lr=0.000115, step=618]\n", - "Epoch 618: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000811, lr=0.000114, step=619]\n", - "Epoch 619: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000714, lr=0.000114, step=620]\n", - "Epoch 620: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000847, lr=0.000114, step=621]\n", - "Epoch 621: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000679, lr=0.000113, step=622]\n", - "Epoch 622: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000642, lr=0.000113, step=623]\n", - "Epoch 623: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000525, lr=0.000113, step=624]\n", - "Epoch 624: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, 
ema_decay=0.992, loss=0.000685, lr=0.000112, step=625]\n", - "Epoch 625: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000628, lr=0.000112, step=626]\n", - "Epoch 626: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000626, lr=0.000112, step=627]\n", - "Epoch 627: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000598, lr=0.000112, step=628]\n", - "Epoch 628: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000612, lr=0.000111, step=629]\n", - "Epoch 629: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000546, lr=0.000111, step=630]\n", - "Epoch 630: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000651, lr=0.000111, step=631]\n", - "Epoch 631: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000539, lr=0.00011, step=632]\n", - "Epoch 632: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000692, lr=0.00011, step=633]\n", - "Epoch 633: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000703, lr=0.00011, step=634]\n", - "Epoch 634: 100%|██████████| 1/1 [00:00<00:00, 1.36it/s, ema_decay=0.992, loss=0.000586, lr=0.000109, step=635]\n", - "Epoch 635: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00072, lr=0.000109, step=636]\n", - "Epoch 636: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000383, lr=0.000109, step=637]\n", - "Epoch 637: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000749, lr=0.000109, step=638]\n", - "Epoch 638: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000686, lr=0.000108, step=639]\n", - "Epoch 639: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00112, lr=0.000108, step=640]\n", - "Epoch 640: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000573, lr=0.000108, step=641]\n", - "Epoch 641: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000527, lr=0.000107, step=642]\n", - "Epoch 642: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000468, lr=0.000107, step=643]\n", - "Epoch 643: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000636, lr=0.000107, step=644]\n", - "Epoch 644: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000444, lr=0.000106, step=645]\n", - "Epoch 645: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000764, lr=0.000106, step=646]\n", - "Epoch 646: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.000418, lr=0.000106, step=647]\n", - "Epoch 647: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000658, lr=0.000106, step=648]\n", - "Epoch 648: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.992, loss=0.000562, lr=0.000105, step=649]\n", - "Epoch 649: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000902, lr=0.000105, step=650]\n", - "Epoch 650: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.000826, lr=0.000105, step=651]\n", - "Epoch 651: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.000503, lr=0.000104, step=652]\n", - "Epoch 652: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000645, lr=0.000104, step=653]\n", - "Epoch 653: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.992, loss=0.000551, lr=0.000104, step=654]\n", - "Epoch 654: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, 
loss=0.000545, lr=0.000103, step=655]\n", - "Epoch 655: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000848, lr=0.000103, step=656]\n", - "Epoch 656: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000561, lr=0.000103, step=657]\n", - "Epoch 657: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000609, lr=0.000103, step=658]\n", - "Epoch 658: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000722, lr=0.000102, step=659]\n", - "Epoch 659: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000832, lr=0.000102, step=660]\n", - "Epoch 660: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.000536, lr=0.000102, step=661]\n", - "Epoch 661: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000948, lr=0.000101, step=662]\n", - "Epoch 662: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00048, lr=0.000101, step=663]\n", - "Epoch 663: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000418, lr=0.000101, step=664]\n", - "Epoch 664: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00082, lr=0.000101, step=665]\n", - "Epoch 665: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000478, lr=0.0001, step=666]\n", - "Epoch 666: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000681, lr=9.99e-5, step=667]\n", - "Epoch 667: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000503, lr=9.96e-5, step=668]\n", - "Epoch 668: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000507, lr=9.93e-5, step=669]\n", - "Epoch 669: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.000528, lr=9.9e-5, step=670]\n", - "Epoch 670: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000361, lr=9.87e-5, step=671]\n", - "Epoch 671: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000562, lr=9.84e-5, step=672]\n", - "Epoch 672: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000445, lr=9.81e-5, step=673]\n", - "Epoch 673: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000528, lr=9.78e-5, step=674]\n", - "Epoch 674: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.000567, lr=9.75e-5, step=675]\n", - "Epoch 675: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000612, lr=9.72e-5, step=676]\n", - "Epoch 676: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.000957, lr=9.69e-5, step=677]\n", - "Epoch 677: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000549, lr=9.66e-5, step=678]\n", - "Epoch 678: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000412, lr=9.63e-5, step=679]\n", - "Epoch 679: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.000554, lr=9.6e-5, step=680]\n", - "Epoch 680: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000474, lr=9.57e-5, step=681]\n", - "Epoch 681: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.000442, lr=9.54e-5, step=682]\n", - "Epoch 682: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000918, lr=9.51e-5, step=683]\n", - "Epoch 683: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000711, lr=9.48e-5, step=684]\n", - "Epoch 684: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.000657, lr=9.45e-5, 
step=685]\n", - "Epoch 685: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000632, lr=9.42e-5, step=686]\n", - "Epoch 686: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000603, lr=9.39e-5, step=687]\n", - "Epoch 687: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000579, lr=9.36e-5, step=688]\n", - "Epoch 688: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000947, lr=9.33e-5, step=689]\n", - "Epoch 689: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00052, lr=9.3e-5, step=690]\n", - "Epoch 690: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00101, lr=9.27e-5, step=691]\n", - "Epoch 691: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000407, lr=9.24e-5, step=692]\n", - "Epoch 692: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000573, lr=9.21e-5, step=693]\n", - "Epoch 693: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000412, lr=9.18e-5, step=694]\n", - "Epoch 694: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000825, lr=9.15e-5, step=695]\n", - "Epoch 695: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000394, lr=9.12e-5, step=696]\n", - "Epoch 696: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000523, lr=9.09e-5, step=697]\n", - "Epoch 697: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00047, lr=9.06e-5, step=698]\n", - "Epoch 698: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000785, lr=9.03e-5, step=699]\n", - "Epoch 699: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000757, lr=9e-5, step=700]\n", - "Epoch 700: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00103, lr=8.97e-5, step=701]\n", - "Epoch 701: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000739, lr=8.94e-5, step=702]\n", - "Epoch 702: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00104, lr=8.91e-5, step=703]\n", - "Epoch 703: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.993, loss=0.000518, lr=8.88e-5, step=704]\n", - "Epoch 704: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000489, lr=8.85e-5, step=705]\n", - "Epoch 705: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000668, lr=8.82e-5, step=706]\n", - "Epoch 706: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00036, lr=8.79e-5, step=707]\n", - "Epoch 707: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000509, lr=8.76e-5, step=708]\n", - "Epoch 708: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000586, lr=8.73e-5, step=709]\n", - "Epoch 709: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000538, lr=8.7e-5, step=710]\n", - "Epoch 710: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000499, lr=8.67e-5, step=711]\n", - "Epoch 711: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000624, lr=8.64e-5, step=712]\n", - "Epoch 712: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000434, lr=8.61e-5, step=713]\n", - "Epoch 713: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000385, lr=8.58e-5, step=714]\n", - "Epoch 714: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000549, lr=8.55e-5, step=715]\n", - "Epoch 715: 100%|██████████| 1/1 
[00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000562, lr=8.52e-5, step=716]\n", - "Epoch 716: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000676, lr=8.49e-5, step=717]\n", - "Epoch 717: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000687, lr=8.46e-5, step=718]\n", - "Epoch 718: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000596, lr=8.43e-5, step=719]\n", - "Epoch 719: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000724, lr=8.4e-5, step=720]\n", - "Epoch 720: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000302, lr=8.37e-5, step=721]\n", - "Epoch 721: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.000407, lr=8.34e-5, step=722]\n", - "Epoch 722: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000545, lr=8.31e-5, step=723]\n", - "Epoch 723: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000629, lr=8.28e-5, step=724]\n", - "Epoch 724: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00062, lr=8.25e-5, step=725]\n", - "Epoch 725: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000615, lr=8.22e-5, step=726]\n", - "Epoch 726: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000504, lr=8.19e-5, step=727]\n", - "Epoch 727: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000501, lr=8.16e-5, step=728]\n", - "Epoch 728: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000615, lr=8.13e-5, step=729]\n", - "Epoch 729: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000698, lr=8.1e-5, step=730]\n", - "Epoch 730: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00056, lr=8.07e-5, step=731]\n", - "Epoch 731: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000537, lr=8.04e-5, step=732]\n", - "Epoch 732: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000828, lr=8.01e-5, step=733]\n", - "Epoch 733: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000592, lr=7.98e-5, step=734]\n", - "Epoch 734: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00058, lr=7.95e-5, step=735]\n", - "Epoch 735: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00114, lr=7.92e-5, step=736]\n", - "Epoch 736: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000936, lr=7.89e-5, step=737]\n", - "Epoch 737: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.000506, lr=7.86e-5, step=738]\n", - "Epoch 738: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000606, lr=7.83e-5, step=739]\n", - "Epoch 739: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000587, lr=7.8e-5, step=740]\n", - "Epoch 740: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.000703, lr=7.77e-5, step=741]\n", - "Epoch 741: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000478, lr=7.74e-5, step=742]\n", - "Epoch 742: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00107, lr=7.71e-5, step=743]\n", - "Epoch 743: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000574, lr=7.68e-5, step=744]\n", - "Epoch 744: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00083, lr=7.65e-5, step=745]\n", - "Epoch 745: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, 
loss=0.000726, lr=7.62e-5, step=746]\n", - "Epoch 746: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00109, lr=7.59e-5, step=747]\n", - "Epoch 747: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000596, lr=7.56e-5, step=748]\n", - "Epoch 748: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000546, lr=7.53e-5, step=749]\n", - "Epoch 749: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000592, lr=7.5e-5, step=750]\n", - "Epoch 750: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000427, lr=7.47e-5, step=751]\n", - "Epoch 751: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000401, lr=7.44e-5, step=752]\n", - "Epoch 752: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000474, lr=7.41e-5, step=753]\n", - "Epoch 753: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.000654, lr=7.38e-5, step=754]\n", - "Epoch 754: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000606, lr=7.35e-5, step=755]\n", - "Epoch 755: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000591, lr=7.32e-5, step=756]\n", - "Epoch 756: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000699, lr=7.29e-5, step=757]\n", - "Epoch 757: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00042, lr=7.26e-5, step=758]\n", - "Epoch 758: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000745, lr=7.23e-5, step=759]\n", - "Epoch 759: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000471, lr=7.2e-5, step=760]\n", - "Epoch 760: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000859, lr=7.17e-5, step=761]\n", - "Epoch 761: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000403, lr=7.14e-5, step=762]\n", - "Epoch 762: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000609, lr=7.11e-5, step=763]\n", - "Epoch 763: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000616, lr=7.08e-5, step=764]\n", - "Epoch 764: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000679, lr=7.05e-5, step=765]\n", - "Epoch 765: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000714, lr=7.02e-5, step=766]\n", - "Epoch 766: 100%|██████████| 1/1 [00:00<00:00, 1.36it/s, ema_decay=0.993, loss=0.000509, lr=6.99e-5, step=767]\n", - "Epoch 767: 100%|██████████| 1/1 [00:00<00:00, 1.09it/s, ema_decay=0.993, loss=0.000676, lr=6.96e-5, step=768]\n", - "Epoch 768: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.993, loss=0.00087, lr=6.93e-5, step=769]\n", - "Epoch 769: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000407, lr=6.9e-5, step=770]\n", - "Epoch 770: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00114, lr=6.87e-5, step=771]\n", - "Epoch 771: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000727, lr=6.84e-5, step=772]\n", - "Epoch 772: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000631, lr=6.81e-5, step=773]\n", - "Epoch 773: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000955, lr=6.78e-5, step=774]\n", - "Epoch 774: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000378, lr=6.75e-5, step=775]\n", - "Epoch 775: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000477, lr=6.72e-5, step=776]\n", - "Epoch 
776: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000454, lr=6.69e-5, step=777]\n", - "Epoch 777: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00102, lr=6.66e-5, step=778]\n", - "Epoch 778: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000726, lr=6.63e-5, step=779]\n", - "Epoch 779: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000676, lr=6.6e-5, step=780]\n", - "Epoch 780: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000697, lr=6.57e-5, step=781]\n", - "Epoch 781: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000577, lr=6.54e-5, step=782]\n", - "Epoch 782: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000761, lr=6.51e-5, step=783]\n", - "Epoch 783: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000664, lr=6.48e-5, step=784]\n", - "Epoch 784: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000908, lr=6.45e-5, step=785]\n", - "Epoch 785: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000575, lr=6.42e-5, step=786]\n", - "Epoch 786: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00103, lr=6.39e-5, step=787]\n", - "Epoch 787: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000537, lr=6.36e-5, step=788]\n", - "Epoch 788: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000975, lr=6.33e-5, step=789]\n", - "Epoch 789: 100%|██████████| 1/1 [00:00<00:00, 1.32it/s, ema_decay=0.993, loss=0.000614, lr=6.3e-5, step=790]\n", - "Epoch 790: 100%|██████████| 1/1 [00:00<00:00, 1.33it/s, ema_decay=0.993, loss=0.000519, lr=6.27e-5, step=791]\n", - "Epoch 791: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.000922, lr=6.24e-5, step=792]\n", - "Epoch 792: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000452, lr=6.21e-5, step=793]\n", - "Epoch 793: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00103, lr=6.18e-5, step=794]\n", - "Epoch 794: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.00055, lr=6.15e-5, step=795]\n", - "Epoch 795: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.000492, lr=6.12e-5, step=796]\n", - "Epoch 796: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0007, lr=6.09e-5, step=797]\n", - "Epoch 797: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000515, lr=6.06e-5, step=798]\n", - "Epoch 798: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.000492, lr=6.03e-5, step=799]\n", - "Epoch 799: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000683, lr=6e-5, step=800]\n", - "Epoch 800: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000911, lr=5.97e-5, step=801]\n", - "Epoch 801: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000765, lr=5.94e-5, step=802]\n", - "Epoch 802: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.000443, lr=5.91e-5, step=803]\n", - "Epoch 803: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000538, lr=5.88e-5, step=804]\n", - "Epoch 804: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000558, lr=5.85e-5, step=805]\n", - "Epoch 805: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000633, lr=5.82e-5, step=806]\n", - "Epoch 806: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, 
ema_decay=0.993, loss=0.000896, lr=5.79e-5, step=807]\n", - "Epoch 807: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000592, lr=5.76e-5, step=808]\n", - "Epoch 808: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00044, lr=5.73e-5, step=809]\n", - "Epoch 809: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000566, lr=5.7e-5, step=810]\n", - "Epoch 810: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.000402, lr=5.67e-5, step=811]\n", - "Epoch 811: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00035, lr=5.64e-5, step=812]\n", - "Epoch 812: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000434, lr=5.61e-5, step=813]\n", - "Epoch 813: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000437, lr=5.58e-5, step=814]\n", - "Epoch 814: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.000471, lr=5.55e-5, step=815]\n", - "Epoch 815: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000953, lr=5.52e-5, step=816]\n", - "Epoch 816: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.001, lr=5.49e-5, step=817]\n", - "Epoch 817: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.000994, lr=5.46e-5, step=818]\n", - "Epoch 818: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.000664, lr=5.43e-5, step=819]\n", - "Epoch 819: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000368, lr=5.4e-5, step=820]\n", - "Epoch 820: 100%|██████████| 1/1 [00:00<00:00, 1.15it/s, ema_decay=0.993, loss=0.000557, lr=5.37e-5, step=821]\n", - "Epoch 821: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.000528, lr=5.34e-5, step=822]\n", - "Epoch 822: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.000471, lr=5.31e-5, step=823]\n", - "Epoch 823: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.000539, lr=5.28e-5, step=824]\n", - "Epoch 824: 100%|██████████| 1/1 [00:00<00:00, 1.20it/s, ema_decay=0.993, loss=0.000422, lr=5.25e-5, step=825]\n", - "Epoch 825: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.994, loss=0.000801, lr=5.22e-5, step=826]\n", - "Epoch 826: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000615, lr=5.19e-5, step=827]\n", - "Epoch 827: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000855, lr=5.16e-5, step=828]\n", - "Epoch 828: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00066, lr=5.13e-5, step=829]\n", - "Epoch 829: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000446, lr=5.1e-5, step=830]\n", - "Epoch 830: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.000908, lr=5.07e-5, step=831]\n", - "Epoch 831: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0007, lr=5.04e-5, step=832]\n", - "Epoch 832: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000803, lr=5.01e-5, step=833]\n", - "Epoch 833: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000689, lr=4.98e-5, step=834]\n", - "Epoch 834: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00088, lr=4.95e-5, step=835]\n", - "Epoch 835: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000636, lr=4.92e-5, step=836]\n", - "Epoch 836: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000488, lr=4.89e-5, 
step=837]\n", - "Epoch 837: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000692, lr=4.86e-5, step=838]\n", - "Epoch 838: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000603, lr=4.83e-5, step=839]\n", - "Epoch 839: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000641, lr=4.8e-5, step=840]\n", - "Epoch 840: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000846, lr=4.77e-5, step=841]\n", - "Epoch 841: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000613, lr=4.74e-5, step=842]\n", - "Epoch 842: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000663, lr=4.71e-5, step=843]\n", - "Epoch 843: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000699, lr=4.68e-5, step=844]\n", - "Epoch 844: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000488, lr=4.65e-5, step=845]\n", - "Epoch 845: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.000481, lr=4.62e-5, step=846]\n", - "Epoch 846: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000711, lr=4.59e-5, step=847]\n", - "Epoch 847: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000442, lr=4.56e-5, step=848]\n", - "Epoch 848: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00045, lr=4.53e-5, step=849]\n", - "Epoch 849: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000655, lr=4.5e-5, step=850]\n", - "Epoch 850: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000447, lr=4.47e-5, step=851]\n", - "Epoch 851: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000564, lr=4.44e-5, step=852]\n", - "Epoch 852: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000695, lr=4.41e-5, step=853]\n", - "Epoch 853: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000633, lr=4.38e-5, step=854]\n", - "Epoch 854: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000441, lr=4.35e-5, step=855]\n", - "Epoch 855: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000479, lr=4.32e-5, step=856]\n", - "Epoch 856: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000678, lr=4.29e-5, step=857]\n", - "Epoch 857: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000531, lr=4.26e-5, step=858]\n", - "Epoch 858: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000714, lr=4.23e-5, step=859]\n", - "Epoch 859: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000549, lr=4.2e-5, step=860]\n", - "Epoch 860: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000813, lr=4.17e-5, step=861]\n", - "Epoch 861: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000403, lr=4.14e-5, step=862]\n", - "Epoch 862: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000564, lr=4.11e-5, step=863]\n", - "Epoch 863: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000376, lr=4.08e-5, step=864]\n", - "Epoch 864: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000616, lr=4.05e-5, step=865]\n", - "Epoch 865: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000898, lr=4.02e-5, step=866]\n", - "Epoch 866: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000903, lr=3.99e-5, step=867]\n", - "Epoch 867: 100%|██████████| 1/1 
[00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000471, lr=3.96e-5, step=868]\n", - "Epoch 868: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000582, lr=3.93e-5, step=869]\n", - "Epoch 869: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000514, lr=3.9e-5, step=870]\n", - "Epoch 870: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000874, lr=3.87e-5, step=871]\n", - "Epoch 871: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000388, lr=3.84e-5, step=872]\n", - "Epoch 872: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000687, lr=3.81e-5, step=873]\n", - "Epoch 873: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000657, lr=3.78e-5, step=874]\n", - "Epoch 874: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000506, lr=3.75e-5, step=875]\n", - "Epoch 875: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00049, lr=3.72e-5, step=876]\n", - "Epoch 876: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000539, lr=3.69e-5, step=877]\n", - "Epoch 877: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000532, lr=3.66e-5, step=878]\n", - "Epoch 878: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.000432, lr=3.63e-5, step=879]\n", - "Epoch 879: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000624, lr=3.6e-5, step=880]\n", - "Epoch 880: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000396, lr=3.57e-5, step=881]\n", - "Epoch 881: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000713, lr=3.54e-5, step=882]\n", - "Epoch 882: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.994, loss=0.000549, lr=3.51e-5, step=883]\n", - "Epoch 883: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000808, lr=3.48e-5, step=884]\n", - "Epoch 884: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00066, lr=3.45e-5, step=885]\n", - "Epoch 885: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000837, lr=3.42e-5, step=886]\n", - "Epoch 886: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000996, lr=3.39e-5, step=887]\n", - "Epoch 887: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.000508, lr=3.36e-5, step=888]\n", - "Epoch 888: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000392, lr=3.33e-5, step=889]\n", - "Epoch 889: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000657, lr=3.3e-5, step=890]\n", - "Epoch 890: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000709, lr=3.27e-5, step=891]\n", - "Epoch 891: 100%|██████████| 1/1 [00:00<00:00, 1.07it/s, ema_decay=0.994, loss=0.000499, lr=3.24e-5, step=892]\n", - "Epoch 892: 100%|██████████| 1/1 [00:00<00:00, 1.12it/s, ema_decay=0.994, loss=0.000525, lr=3.21e-5, step=893]\n", - "Epoch 893: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.000764, lr=3.18e-5, step=894]\n", - "Epoch 894: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.000778, lr=3.15e-5, step=895]\n", - "Epoch 895: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.000438, lr=3.12e-5, step=896]\n", - "Epoch 896: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00058, lr=3.09e-5, step=897]\n", - "Epoch 897: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, 
loss=0.000736, lr=3.06e-5, step=898]\n", - "Epoch 898: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000464, lr=3.03e-5, step=899]\n", - "Epoch 899: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000478, lr=3e-5, step=900]\n", - "Epoch 900: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00118, lr=2.97e-5, step=901]\n", - "Epoch 901: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00071, lr=2.94e-5, step=902]\n", - "Epoch 902: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000834, lr=2.91e-5, step=903]\n", - "Epoch 903: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000612, lr=2.88e-5, step=904]\n", - "Epoch 904: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000417, lr=2.85e-5, step=905]\n", - "Epoch 905: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000422, lr=2.82e-5, step=906]\n", - "Epoch 906: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000702, lr=2.79e-5, step=907]\n", - "Epoch 907: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00053, lr=2.76e-5, step=908]\n", - "Epoch 908: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000679, lr=2.73e-5, step=909]\n", - "Epoch 909: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000812, lr=2.7e-5, step=910]\n", - "Epoch 910: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000467, lr=2.67e-5, step=911]\n", - "Epoch 911: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000375, lr=2.64e-5, step=912]\n", - "Epoch 912: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000665, lr=2.61e-5, step=913]\n", - "Epoch 913: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000468, lr=2.58e-5, step=914]\n", - "Epoch 914: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000645, lr=2.55e-5, step=915]\n", - "Epoch 915: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000498, lr=2.52e-5, step=916]\n", - "Epoch 916: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000579, lr=2.49e-5, step=917]\n", - "Epoch 917: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000579, lr=2.46e-5, step=918]\n", - "Epoch 918: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000475, lr=2.43e-5, step=919]\n", - "Epoch 919: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000431, lr=2.4e-5, step=920]\n", - "Epoch 920: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000498, lr=2.37e-5, step=921]\n", - "Epoch 921: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000573, lr=2.34e-5, step=922]\n", - "Epoch 922: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000542, lr=2.31e-5, step=923]\n", - "Epoch 923: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0006, lr=2.28e-5, step=924]\n", - "Epoch 924: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000901, lr=2.25e-5, step=925]\n", - "Epoch 925: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00071, lr=2.22e-5, step=926]\n", - "Epoch 926: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000612, lr=2.19e-5, step=927]\n", - "Epoch 927: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000424, lr=2.16e-5, step=928]\n", - "Epoch 928: 
100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000468, lr=2.13e-5, step=929]\n", - "Epoch 929: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000517, lr=2.1e-5, step=930]\n", - "Epoch 930: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000382, lr=2.07e-5, step=931]\n", - "Epoch 931: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000414, lr=2.04e-5, step=932]\n", - "Epoch 932: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000621, lr=2.01e-5, step=933]\n", - "Epoch 933: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000435, lr=1.98e-5, step=934]\n", - "Epoch 934: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000459, lr=1.95e-5, step=935]\n", - "Epoch 935: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000627, lr=1.92e-5, step=936]\n", - "Epoch 936: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000698, lr=1.89e-5, step=937]\n", - "Epoch 937: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000822, lr=1.86e-5, step=938]\n", - "Epoch 938: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000613, lr=1.83e-5, step=939]\n", - "Epoch 939: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000401, lr=1.8e-5, step=940]\n", - "Epoch 940: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000901, lr=1.77e-5, step=941]\n", - "Epoch 941: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000415, lr=1.74e-5, step=942]\n", - "Epoch 942: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000399, lr=1.71e-5, step=943]\n", - "Epoch 943: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000459, lr=1.68e-5, step=944]\n", - "Epoch 944: 100%|██████████| 1/1 [00:00<00:00, 1.35it/s, ema_decay=0.994, loss=0.000592, lr=1.65e-5, step=945]\n", - "Epoch 945: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000599, lr=1.62e-5, step=946]\n", - "Epoch 946: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000634, lr=1.59e-5, step=947]\n", - "Epoch 947: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000678, lr=1.56e-5, step=948]\n", - "Epoch 948: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000868, lr=1.53e-5, step=949]\n", - "Epoch 949: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000835, lr=1.5e-5, step=950]\n", - "Epoch 950: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000487, lr=1.47e-5, step=951]\n", - "Epoch 951: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.00046, lr=1.44e-5, step=952]\n", - "Epoch 952: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000586, lr=1.41e-5, step=953]\n", - "Epoch 953: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00062, lr=1.38e-5, step=954]\n", - "Epoch 954: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000937, lr=1.35e-5, step=955]\n", - "Epoch 955: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000532, lr=1.32e-5, step=956]\n", - "Epoch 956: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.00045, lr=1.29e-5, step=957]\n", - "Epoch 957: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000611, lr=1.26e-5, step=958]\n", - "Epoch 958: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, 
ema_decay=0.994, loss=0.000543, lr=1.23e-5, step=959]\n", - "Epoch 959: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000702, lr=1.2e-5, step=960]\n", - "Epoch 960: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.000444, lr=1.17e-5, step=961]\n", - "Epoch 961: 100%|██████████| 1/1 [00:00<00:00, 1.34it/s, ema_decay=0.994, loss=0.000506, lr=1.14e-5, step=962]\n", - "Epoch 962: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.000765, lr=1.11e-5, step=963]\n", - "Epoch 963: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000722, lr=1.08e-5, step=964]\n", - "Epoch 964: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000384, lr=1.05e-5, step=965]\n", - "Epoch 965: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000624, lr=1.02e-5, step=966]\n", - "Epoch 966: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000567, lr=9.9e-6, step=967]\n", - "Epoch 967: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000467, lr=9.6e-6, step=968]\n", - "Epoch 968: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000478, lr=9.3e-6, step=969]\n", - "Epoch 969: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000487, lr=9e-6, step=970]\n", - "Epoch 970: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.000461, lr=8.7e-6, step=971]\n", - "Epoch 971: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000543, lr=8.4e-6, step=972]\n", - "Epoch 972: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000387, lr=8.1e-6, step=973]\n", - "Epoch 973: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000575, lr=7.8e-6, step=974]\n", - "Epoch 974: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000382, lr=7.5e-6, step=975]\n", - "Epoch 975: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000522, lr=7.2e-6, step=976]\n", - "Epoch 976: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.000483, lr=6.9e-6, step=977]\n", - "Epoch 977: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000612, lr=6.6e-6, step=978]\n", - "Epoch 978: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000398, lr=6.3e-6, step=979]\n", - "Epoch 979: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000622, lr=6e-6, step=980]\n", - "Epoch 980: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000591, lr=5.7e-6, step=981]\n", - "Epoch 981: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000533, lr=5.4e-6, step=982]\n", - "Epoch 982: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.000523, lr=5.1e-6, step=983]\n", - "Epoch 983: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000277, lr=4.8e-6, step=984]\n", - "Epoch 984: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000384, lr=4.5e-6, step=985]\n", - "Epoch 985: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000556, lr=4.2e-6, step=986]\n", - "Epoch 986: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000608, lr=3.9e-6, step=987]\n", - "Epoch 987: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00061, lr=3.6e-6, step=988]\n", - "Epoch 988: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.000377, lr=3.3e-6, step=989]\n", - "Epoch 989: 
100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.000397, lr=3e-6, step=990]\n", - "Epoch 990: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.00061, lr=2.7e-6, step=991]\n", - "Epoch 991: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00068, lr=2.4e-6, step=992]\n", - "Epoch 992: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.000407, lr=2.1e-6, step=993]\n", - "Epoch 993: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.000565, lr=1.8e-6, step=994]\n", - "Epoch 994: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00043, lr=1.5e-6, step=995]\n", - "Epoch 995: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.0009, lr=1.2e-6, step=996]\n", - "Epoch 996: 100%|██████████| 1/1 [00:00<00:00, 1.10it/s, ema_decay=0.994, loss=0.000464, lr=9e-7, step=997]\n", - "Epoch 997: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.000487, lr=6e-7, step=998]\n", - "Epoch 998: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00103, lr=3e-7, step=999]\n", - "Epoch 999: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.000533, lr=0, step=1000]\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "93ccd71935ce42d494cc2bb845c5593d", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/50 [00:00 62\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.21it/s, ema_decay=0, loss=0.111, lr=0.0003, step=1]\n", - "Epoch 1: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0, loss=0.577, lr=0.000299, step=2]\n", - "Epoch 2: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.405, loss=0.297, lr=0.000299, step=3]\n", - "Epoch 3: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.561, loss=0.144, lr=0.000299, step=4]\n", - "Epoch 4: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.646, loss=0.136, lr=0.000298, step=5]\n", - "Epoch 5: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.701, loss=0.109, lr=0.000298, step=6]\n", - "Epoch 6: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.739, loss=0.141, lr=0.000298, step=7]\n", - "Epoch 7: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.768, loss=0.181, lr=0.000298, step=8]\n", - "Epoch 8: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.79, loss=0.13, lr=0.000297, step=9]\n", - "Epoch 9: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.808, loss=0.145, lr=0.000297, step=10]\n", - "Epoch 10: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.822, loss=0.0777, lr=0.000297, step=11]\n", - "Epoch 11: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.834, loss=0.135, lr=0.000296, step=12]\n", - "Epoch 12: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.845, loss=0.0251, lr=0.000296, step=13]\n", - "Epoch 13: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.854, loss=0.0899, lr=0.000296, step=14]\n", - "Epoch 14: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.862, loss=0.103, lr=0.000295, step=15]\n", - "Epoch 15: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.869, loss=0.11, lr=0.000295, step=16]\n", - "Epoch 16: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.875, loss=0.177, lr=0.000295, step=17]\n", - "Epoch 17: 100%|██████████| 1/1 [00:00<00:00, 1.02it/s, ema_decay=0.881, loss=0.0393, lr=0.000295, step=18]\n", - "Epoch 18: 
100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.886, loss=0.0691, lr=0.000294, step=19]\n", - "Epoch 19: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.89, loss=0.124, lr=0.000294, step=20]\n", - "Epoch 20: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.894, loss=0.00799, lr=0.000294, step=21]\n", - "Epoch 21: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.898, loss=0.0227, lr=0.000293, step=22]\n", - "Epoch 22: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.902, loss=0.0237, lr=0.000293, step=23]\n", - "Epoch 23: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.905, loss=0.106, lr=0.000293, step=24]\n", - "Epoch 24: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.908, loss=0.0772, lr=0.000292, step=25]\n", - "Epoch 25: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.911, loss=0.0482, lr=0.000292, step=26]\n", - "Epoch 26: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.913, loss=0.0199, lr=0.000292, step=27]\n", - "Epoch 27: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.916, loss=0.0306, lr=0.000292, step=28]\n", - "Epoch 28: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.918, loss=0.0203, lr=0.000291, step=29]\n", - "Epoch 29: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.92, loss=0.081, lr=0.000291, step=30]\n", - "Epoch 30: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.922, loss=0.0611, lr=0.000291, step=31]\n", - "Epoch 31: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.924, loss=0.0573, lr=0.00029, step=32]\n", - "Epoch 32: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.926, loss=0.0786, lr=0.00029, step=33]\n", - "Epoch 33: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.927, loss=0.0159, lr=0.00029, step=34]\n", - "Epoch 34: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.929, loss=0.0153, lr=0.000289, step=35]\n", - "Epoch 35: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.931, loss=0.0368, lr=0.000289, step=36]\n", - "Epoch 36: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.932, loss=0.0754, lr=0.000289, step=37]\n", - "Epoch 37: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.933, loss=0.0975, lr=0.000289, step=38]\n", - "Epoch 38: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.935, loss=0.0682, lr=0.000288, step=39]\n", - "Epoch 39: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.936, loss=0.0511, lr=0.000288, step=40]\n", - "Epoch 40: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.937, loss=0.0171, lr=0.000288, step=41]\n", - "Epoch 41: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.938, loss=0.017, lr=0.000287, step=42]\n", - "Epoch 42: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.939, loss=0.00777, lr=0.000287, step=43]\n", - "Epoch 43: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.94, loss=0.0331, lr=0.000287, step=44]\n", - "Epoch 44: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.941, loss=0.0244, lr=0.000286, step=45]\n", - "Epoch 45: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.942, loss=0.0109, lr=0.000286, step=46]\n", - "Epoch 46: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.943, loss=0.0879, lr=0.000286, step=47]\n", - "Epoch 47: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.944, loss=0.0101, lr=0.000286, step=48]\n", - "Epoch 48: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.945, loss=0.0332, lr=0.000285, step=49]\n", - "Epoch 49: 100%|██████████| 1/1 
[00:00<00:00, 1.50it/s, ema_decay=0.946, loss=0.0564, lr=0.000285, step=50]\n", - "Epoch 50: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.947, loss=0.0193, lr=0.000285, step=51]\n", - "Epoch 51: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.948, loss=0.015, lr=0.000284, step=52]\n", - "Epoch 52: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.948, loss=0.0294, lr=0.000284, step=53]\n", - "Epoch 53: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.949, loss=0.03, lr=0.000284, step=54]\n", - "Epoch 54: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.95, loss=0.0312, lr=0.000283, step=55]\n", - "Epoch 55: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.95, loss=0.0136, lr=0.000283, step=56]\n", - "Epoch 56: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.951, loss=0.00601, lr=0.000283, step=57]\n", - "Epoch 57: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.952, loss=0.0304, lr=0.000283, step=58]\n", - "Epoch 58: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.952, loss=0.0226, lr=0.000282, step=59]\n", - "Epoch 59: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.953, loss=0.0758, lr=0.000282, step=60]\n", - "Epoch 60: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.954, loss=0.0207, lr=0.000282, step=61]\n", - "Epoch 61: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.954, loss=0.0224, lr=0.000281, step=62]\n", - "Epoch 62: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.955, loss=0.00999, lr=0.000281, step=63]\n", - "Epoch 63: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.955, loss=0.0616, lr=0.000281, step=64]\n", - "Epoch 64: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.956, loss=0.0178, lr=0.00028, step=65]\n", - "Epoch 65: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.956, loss=0.0291, lr=0.00028, step=66]\n", - "Epoch 66: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.957, loss=0.0345, lr=0.00028, step=67]\n", - "Epoch 67: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.957, loss=0.0112, lr=0.00028, step=68]\n", - "Epoch 68: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.958, loss=0.00766, lr=0.000279, step=69]\n", - "Epoch 69: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.958, loss=0.00512, lr=0.000279, step=70]\n", - "Epoch 70: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.959, loss=0.0349, lr=0.000279, step=71]\n", - "Epoch 71: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.959, loss=0.0333, lr=0.000278, step=72]\n", - "Epoch 72: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.96, loss=0.0223, lr=0.000278, step=73]\n", - "Epoch 73: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.96, loss=0.0185, lr=0.000278, step=74]\n", - "Epoch 74: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.96, loss=0.0047, lr=0.000277, step=75]\n", - "Epoch 75: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.961, loss=0.031, lr=0.000277, step=76]\n", - "Epoch 76: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.961, loss=0.00932, lr=0.000277, step=77]\n", - "Epoch 77: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.962, loss=0.0171, lr=0.000277, step=78]\n", - "Epoch 78: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.962, loss=0.0263, lr=0.000276, step=79]\n", - "Epoch 79: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.962, loss=0.00591, lr=0.000276, step=80]\n", - "Epoch 80: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, 
ema_decay=0.963, loss=0.0118, lr=0.000276, step=81]\n", - "Epoch 81: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.963, loss=0.0145, lr=0.000275, step=82]\n", - "Epoch 82: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.963, loss=0.0336, lr=0.000275, step=83]\n", - "Epoch 83: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.964, loss=0.0216, lr=0.000275, step=84]\n", - "Epoch 84: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.964, loss=0.00635, lr=0.000275, step=85]\n", - "Epoch 85: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.964, loss=0.0182, lr=0.000274, step=86]\n", - "Epoch 86: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.965, loss=0.0135, lr=0.000274, step=87]\n", - "Epoch 87: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.965, loss=0.0146, lr=0.000274, step=88]\n", - "Epoch 88: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.965, loss=0.0134, lr=0.000273, step=89]\n", - "Epoch 89: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.965, loss=0.00226, lr=0.000273, step=90]\n", - "Epoch 90: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.966, loss=0.0148, lr=0.000273, step=91]\n", - "Epoch 91: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.966, loss=0.0291, lr=0.000272, step=92]\n", - "Epoch 92: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.966, loss=0.0115, lr=0.000272, step=93]\n", - "Epoch 93: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.967, loss=0.00575, lr=0.000272, step=94]\n", - "Epoch 94: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.967, loss=0.00732, lr=0.000271, step=95]\n", - "Epoch 95: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.967, loss=0.0169, lr=0.000271, step=96]\n", - "Epoch 96: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.967, loss=0.00515, lr=0.000271, step=97]\n", - "Epoch 97: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.968, loss=0.0033, lr=0.000271, step=98]\n", - "Epoch 98: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.968, loss=0.00344, lr=0.00027, step=99]\n", - "Epoch 99: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.968, loss=0.013, lr=0.00027, step=100]\n", - "Epoch 100: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.968, loss=0.0212, lr=0.00027, step=101]\n", - "Epoch 101: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.969, loss=0.0102, lr=0.000269, step=102]\n", - "Epoch 102: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.969, loss=0.011, lr=0.000269, step=103]\n", - "Epoch 103: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.969, loss=0.00473, lr=0.000269, step=104]\n", - "Epoch 104: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.969, loss=0.0193, lr=0.000268, step=105]\n", - "Epoch 105: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.97, loss=0.00951, lr=0.000268, step=106]\n", - "Epoch 106: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.97, loss=0.0277, lr=0.000268, step=107]\n", - "Epoch 107: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.97, loss=0.00406, lr=0.000268, step=108]\n", - "Epoch 108: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.97, loss=0.0162, lr=0.000267, step=109]\n", - "Epoch 109: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.97, loss=0.0201, lr=0.000267, step=110]\n", - "Epoch 110: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.971, loss=0.00843, lr=0.000267, step=111]\n", - "Epoch 111: 100%|██████████| 1/1 [00:00<00:00, 
1.50it/s, ema_decay=0.971, loss=0.00398, lr=0.000266, step=112]\n", - "Epoch 112: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.971, loss=0.0185, lr=0.000266, step=113]\n", - "Epoch 113: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.971, loss=0.003, lr=0.000266, step=114]\n", - "Epoch 114: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.971, loss=0.0133, lr=0.000266, step=115]\n", - "Epoch 115: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.972, loss=0.0101, lr=0.000265, step=116]\n", - "Epoch 116: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.972, loss=0.00752, lr=0.000265, step=117]\n", - "Epoch 117: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.972, loss=0.0224, lr=0.000265, step=118]\n", - "Epoch 118: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.972, loss=0.00375, lr=0.000264, step=119]\n", - "Epoch 119: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.972, loss=0.0108, lr=0.000264, step=120]\n", - "Epoch 120: 100%|██████████| 1/1 [00:00<00:00, 1.06it/s, ema_decay=0.972, loss=0.0122, lr=0.000264, step=121]\n", - "Epoch 121: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.973, loss=0.0207, lr=0.000263, step=122]\n", - "Epoch 122: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.973, loss=0.017, lr=0.000263, step=123]\n", - "Epoch 123: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.973, loss=0.0122, lr=0.000263, step=124]\n", - "Epoch 124: 100%|██████████| 1/1 [00:00<00:00, 1.25it/s, ema_decay=0.973, loss=0.0117, lr=0.000262, step=125]\n", - "Epoch 125: 100%|██████████| 1/1 [00:00<00:00, 1.15it/s, ema_decay=0.973, loss=0.0484, lr=0.000262, step=126]\n", - "Epoch 126: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.973, loss=0.0149, lr=0.000262, step=127]\n", - "Epoch 127: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.974, loss=0.013, lr=0.000262, step=128]\n", - "Epoch 128: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.974, loss=0.00657, lr=0.000261, step=129]\n", - "Epoch 129: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.974, loss=0.0326, lr=0.000261, step=130]\n", - "Epoch 130: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.974, loss=0.016, lr=0.000261, step=131]\n", - "Epoch 131: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.974, loss=0.0053, lr=0.00026, step=132]\n", - "Epoch 132: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.974, loss=0.0174, lr=0.00026, step=133]\n", - "Epoch 133: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.974, loss=0.0231, lr=0.00026, step=134]\n", - "Epoch 134: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.975, loss=0.0116, lr=0.000259, step=135]\n", - "Epoch 135: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.975, loss=0.017, lr=0.000259, step=136]\n", - "Epoch 136: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.975, loss=0.0102, lr=0.000259, step=137]\n", - "Epoch 137: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.975, loss=0.0134, lr=0.000259, step=138]\n", - "Epoch 138: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.975, loss=0.0289, lr=0.000258, step=139]\n", - "Epoch 139: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.975, loss=0.00827, lr=0.000258, step=140]\n", - "Epoch 140: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.975, loss=0.00443, lr=0.000258, step=141]\n", - "Epoch 141: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.976, loss=0.0107, lr=0.000257, step=142]\n", - "Epoch 
142: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.976, loss=0.00598, lr=0.000257, step=143]\n", - "Epoch 143: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.976, loss=0.0284, lr=0.000257, step=144]\n", - "Epoch 144: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.976, loss=0.0107, lr=0.000256, step=145]\n", - "Epoch 145: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.976, loss=0.00326, lr=0.000256, step=146]\n", - "Epoch 146: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.976, loss=0.0062, lr=0.000256, step=147]\n", - "Epoch 147: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.976, loss=0.0352, lr=0.000256, step=148]\n", - "Epoch 148: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.976, loss=0.00939, lr=0.000255, step=149]\n", - "Epoch 149: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.977, loss=0.00641, lr=0.000255, step=150]\n", - "Epoch 150: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.977, loss=0.014, lr=0.000255, step=151]\n", - "Epoch 151: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.977, loss=0.0251, lr=0.000254, step=152]\n", - "Epoch 152: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.977, loss=0.00373, lr=0.000254, step=153]\n", - "Epoch 153: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.977, loss=0.0208, lr=0.000254, step=154]\n", - "Epoch 154: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.977, loss=0.0139, lr=0.000253, step=155]\n", - "Epoch 155: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.977, loss=0.00304, lr=0.000253, step=156]\n", - "Epoch 156: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.977, loss=0.0103, lr=0.000253, step=157]\n", - "Epoch 157: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.977, loss=0.0107, lr=0.000253, step=158]\n", - "Epoch 158: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.978, loss=0.0112, lr=0.000252, step=159]\n", - "Epoch 159: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.978, loss=0.0129, lr=0.000252, step=160]\n", - "Epoch 160: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.978, loss=0.00966, lr=0.000252, step=161]\n", - "Epoch 161: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.978, loss=0.0038, lr=0.000251, step=162]\n", - "Epoch 162: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.978, loss=0.00732, lr=0.000251, step=163]\n", - "Epoch 163: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.978, loss=0.013, lr=0.000251, step=164]\n", - "Epoch 164: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.978, loss=0.0191, lr=0.00025, step=165]\n", - "Epoch 165: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.978, loss=0.0131, lr=0.00025, step=166]\n", - "Epoch 166: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.978, loss=0.00729, lr=0.00025, step=167]\n", - "Epoch 167: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.978, loss=0.00868, lr=0.00025, step=168]\n", - "Epoch 168: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.979, loss=0.0274, lr=0.000249, step=169]\n", - "Epoch 169: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.979, loss=0.0101, lr=0.000249, step=170]\n", - "Epoch 170: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.979, loss=0.011, lr=0.000249, step=171]\n", - "Epoch 171: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.979, loss=0.0094, lr=0.000248, step=172]\n", - "Epoch 172: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.979, 
loss=0.00526, lr=0.000248, step=173]\n", - "Epoch 173: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.979, loss=0.0198, lr=0.000248, step=174]\n", - "Epoch 174: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.979, loss=0.022, lr=0.000247, step=175]\n", - "Epoch 175: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.979, loss=0.00827, lr=0.000247, step=176]\n", - "Epoch 176: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.979, loss=0.0122, lr=0.000247, step=177]\n", - "Epoch 177: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.979, loss=0.0132, lr=0.000247, step=178]\n", - "Epoch 178: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.979, loss=0.0198, lr=0.000246, step=179]\n", - "Epoch 179: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.98, loss=0.00994, lr=0.000246, step=180]\n", - "Epoch 180: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.98, loss=0.00594, lr=0.000246, step=181]\n", - "Epoch 181: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.0161, lr=0.000245, step=182]\n", - "Epoch 182: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.0164, lr=0.000245, step=183]\n", - "Epoch 183: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.00475, lr=0.000245, step=184]\n", - "Epoch 184: 100%|██████████| 1/1 [00:00<00:00, 1.23it/s, ema_decay=0.98, loss=0.0241, lr=0.000244, step=185]\n", - "Epoch 185: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.98, loss=0.0049, lr=0.000244, step=186]\n", - "Epoch 186: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.0096, lr=0.000244, step=187]\n", - "Epoch 187: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.0132, lr=0.000244, step=188]\n", - "Epoch 188: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.011, lr=0.000243, step=189]\n", - "Epoch 189: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.98, loss=0.014, lr=0.000243, step=190]\n", - "Epoch 190: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.98, loss=0.013, lr=0.000243, step=191]\n", - "Epoch 191: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.981, loss=0.0075, lr=0.000242, step=192]\n", - "Epoch 192: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.014, lr=0.000242, step=193]\n", - "Epoch 193: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00428, lr=0.000242, step=194]\n", - "Epoch 194: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.981, loss=0.00969, lr=0.000241, step=195]\n", - "Epoch 195: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.0108, lr=0.000241, step=196]\n", - "Epoch 196: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00461, lr=0.000241, step=197]\n", - "Epoch 197: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00542, lr=0.000241, step=198]\n", - "Epoch 198: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.0141, lr=0.00024, step=199]\n", - "Epoch 199: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00763, lr=0.00024, step=200]\n", - "Epoch 200: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.981, loss=0.00529, lr=0.00024, step=201]\n", - "Epoch 201: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00515, lr=0.000239, step=202]\n", - "Epoch 202: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.981, loss=0.0166, lr=0.000239, step=203]\n", - "Epoch 203: 100%|██████████| 1/1 
[00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00693, lr=0.000239, step=204]\n", - "Epoch 204: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.0113, lr=0.000238, step=205]\n", - "Epoch 205: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.0121, lr=0.000238, step=206]\n", - "Epoch 206: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.982, loss=0.00643, lr=0.000238, step=207]\n", - "Epoch 207: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00594, lr=0.000238, step=208]\n", - "Epoch 208: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0133, lr=0.000237, step=209]\n", - "Epoch 209: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00965, lr=0.000237, step=210]\n", - "Epoch 210: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.00478, lr=0.000237, step=211]\n", - "Epoch 211: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.00661, lr=0.000236, step=212]\n", - "Epoch 212: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.00655, lr=0.000236, step=213]\n", - "Epoch 213: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.00409, lr=0.000236, step=214]\n", - "Epoch 214: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0038, lr=0.000235, step=215]\n", - "Epoch 215: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00429, lr=0.000235, step=216]\n", - "Epoch 216: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00919, lr=0.000235, step=217]\n", - "Epoch 217: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.00561, lr=0.000235, step=218]\n", - "Epoch 218: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.00237, lr=0.000234, step=219]\n", - "Epoch 219: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.0161, lr=0.000234, step=220]\n", - "Epoch 220: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00742, lr=0.000234, step=221]\n", - "Epoch 221: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.0108, lr=0.000233, step=222]\n", - "Epoch 222: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.00452, lr=0.000233, step=223]\n", - "Epoch 223: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.00371, lr=0.000233, step=224]\n", - "Epoch 224: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00537, lr=0.000232, step=225]\n", - "Epoch 225: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.983, loss=0.00769, lr=0.000232, step=226]\n", - "Epoch 226: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.0109, lr=0.000232, step=227]\n", - "Epoch 227: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00542, lr=0.000232, step=228]\n", - "Epoch 228: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00505, lr=0.000231, step=229]\n", - "Epoch 229: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00254, lr=0.000231, step=230]\n", - "Epoch 230: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.0122, lr=0.000231, step=231]\n", - "Epoch 231: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00484, lr=0.00023, step=232]\n", - "Epoch 232: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00648, lr=0.00023, step=233]\n", - "Epoch 233: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00965, 
lr=0.00023, step=234]\n", - "Epoch 234: 100%|██████████| 1/1 [00:00<00:00, 1.30it/s, ema_decay=0.983, loss=0.00738, lr=0.000229, step=235]\n", - "Epoch 235: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.983, loss=0.00392, lr=0.000229, step=236]\n", - "Epoch 236: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00785, lr=0.000229, step=237]\n", - "Epoch 237: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.983, loss=0.00933, lr=0.000229, step=238]\n", - "Epoch 238: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.983, loss=0.00275, lr=0.000228, step=239]\n", - "Epoch 239: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00145, lr=0.000228, step=240]\n", - "Epoch 240: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00627, lr=0.000228, step=241]\n", - "Epoch 241: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.984, loss=0.0134, lr=0.000227, step=242]\n", - "Epoch 242: 100%|██████████| 1/1 [00:00<00:00, 1.30it/s, ema_decay=0.984, loss=0.00699, lr=0.000227, step=243]\n", - "Epoch 243: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.984, loss=0.00766, lr=0.000227, step=244]\n", - "Epoch 244: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00802, lr=0.000226, step=245]\n", - "Epoch 245: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0105, lr=0.000226, step=246]\n", - "Epoch 246: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0038, lr=0.000226, step=247]\n", - "Epoch 247: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00619, lr=0.000226, step=248]\n", - "Epoch 248: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00931, lr=0.000225, step=249]\n", - "Epoch 249: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.984, loss=0.00906, lr=0.000225, step=250]\n", - "Epoch 250: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00605, lr=0.000225, step=251]\n", - "Epoch 251: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00545, lr=0.000224, step=252]\n", - "Epoch 252: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.984, loss=0.0057, lr=0.000224, step=253]\n", - "Epoch 253: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00644, lr=0.000224, step=254]\n", - "Epoch 254: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00379, lr=0.000223, step=255]\n", - "Epoch 255: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00803, lr=0.000223, step=256]\n", - "Epoch 256: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00301, lr=0.000223, step=257]\n", - "Epoch 257: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00585, lr=0.000223, step=258]\n", - "Epoch 258: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00992, lr=0.000222, step=259]\n", - "Epoch 259: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.015, lr=0.000222, step=260]\n", - "Epoch 260: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00918, lr=0.000222, step=261]\n", - "Epoch 261: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.0134, lr=0.000221, step=262]\n", - "Epoch 262: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.985, loss=0.0054, lr=0.000221, step=263]\n", - "Epoch 263: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00842, lr=0.000221, step=264]\n", - "Epoch 264: 
100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00763, lr=0.00022, step=265]\n", - "Epoch 265: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00267, lr=0.00022, step=266]\n", - "Epoch 266: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00541, lr=0.00022, step=267]\n", - "Epoch 267: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.985, loss=0.00157, lr=0.00022, step=268]\n", - "Epoch 268: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00858, lr=0.000219, step=269]\n", - "Epoch 269: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00511, lr=0.000219, step=270]\n", - "Epoch 270: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00671, lr=0.000219, step=271]\n", - "Epoch 271: 100%|██████████| 1/1 [00:00<00:00, 1.15it/s, ema_decay=0.985, loss=0.00276, lr=0.000218, step=272]\n", - "Epoch 272: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.985, loss=0.0107, lr=0.000218, step=273]\n", - "Epoch 273: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.985, loss=0.00236, lr=0.000218, step=274]\n", - "Epoch 274: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00436, lr=0.000217, step=275]\n", - "Epoch 275: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00556, lr=0.000217, step=276]\n", - "Epoch 276: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.985, loss=0.0107, lr=0.000217, step=277]\n", - "Epoch 277: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00438, lr=0.000217, step=278]\n", - "Epoch 278: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00679, lr=0.000216, step=279]\n", - "Epoch 279: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00569, lr=0.000216, step=280]\n", - "Epoch 280: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.0027, lr=0.000216, step=281]\n", - "Epoch 281: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00789, lr=0.000215, step=282]\n", - "Epoch 282: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.985, loss=0.00515, lr=0.000215, step=283]\n", - "Epoch 283: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00417, lr=0.000215, step=284]\n", - "Epoch 284: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00606, lr=0.000214, step=285]\n", - "Epoch 285: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.986, loss=0.00334, lr=0.000214, step=286]\n", - "Epoch 286: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00412, lr=0.000214, step=287]\n", - "Epoch 287: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00311, lr=0.000214, step=288]\n", - "Epoch 288: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00683, lr=0.000213, step=289]\n", - "Epoch 289: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00135, lr=0.000213, step=290]\n", - "Epoch 290: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00642, lr=0.000213, step=291]\n", - "Epoch 291: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00219, lr=0.000212, step=292]\n", - "Epoch 292: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00455, lr=0.000212, step=293]\n", - "Epoch 293: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.0066, lr=0.000212, step=294]\n", - "Epoch 294: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, 
ema_decay=0.986, loss=0.00516, lr=0.000211, step=295]\n", - "Epoch 295: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.0061, lr=0.000211, step=296]\n", - "Epoch 296: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.0107, lr=0.000211, step=297]\n", - "Epoch 297: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00535, lr=0.000211, step=298]\n", - "Epoch 298: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00516, lr=0.00021, step=299]\n", - "Epoch 299: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00546, lr=0.00021, step=300]\n", - "Epoch 300: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.986, loss=0.00399, lr=0.00021, step=301]\n", - "Epoch 301: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00729, lr=0.000209, step=302]\n", - "Epoch 302: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.986, loss=0.00428, lr=0.000209, step=303]\n", - "Epoch 303: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.986, loss=0.00244, lr=0.000209, step=304]\n", - "Epoch 304: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00618, lr=0.000208, step=305]\n", - "Epoch 305: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00182, lr=0.000208, step=306]\n", - "Epoch 306: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00253, lr=0.000208, step=307]\n", - "Epoch 307: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00659, lr=0.000208, step=308]\n", - "Epoch 308: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00238, lr=0.000207, step=309]\n", - "Epoch 309: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00218, lr=0.000207, step=310]\n", - "Epoch 310: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.0112, lr=0.000207, step=311]\n", - "Epoch 311: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00349, lr=0.000206, step=312]\n", - "Epoch 312: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.987, loss=0.00668, lr=0.000206, step=313]\n", - "Epoch 313: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00488, lr=0.000206, step=314]\n", - "Epoch 314: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00399, lr=0.000206, step=315]\n", - "Epoch 315: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00553, lr=0.000205, step=316]\n", - "Epoch 316: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00679, lr=0.000205, step=317]\n", - "Epoch 317: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00106, lr=0.000205, step=318]\n", - "Epoch 318: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00586, lr=0.000204, step=319]\n", - "Epoch 319: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00444, lr=0.000204, step=320]\n", - "Epoch 320: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00427, lr=0.000204, step=321]\n", - "Epoch 321: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00272, lr=0.000203, step=322]\n", - "Epoch 322: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00705, lr=0.000203, step=323]\n", - "Epoch 323: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00491, lr=0.000203, step=324]\n", - "Epoch 324: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00222, lr=0.000202, 
step=325]\n", - "Epoch 325: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00628, lr=0.000202, step=326]\n", - "Epoch 326: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00575, lr=0.000202, step=327]\n", - "Epoch 327: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00222, lr=0.000202, step=328]\n", - "Epoch 328: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.00289, lr=0.000201, step=329]\n", - "Epoch 329: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00263, lr=0.000201, step=330]\n", - "Epoch 330: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00715, lr=0.000201, step=331]\n", - "Epoch 331: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.987, loss=0.0106, lr=0.0002, step=332]\n", - "Epoch 332: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00782, lr=0.0002, step=333]\n", - "Epoch 333: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.987, loss=0.00829, lr=0.0002, step=334]\n", - "Epoch 334: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.0107, lr=0.000199, step=335]\n", - "Epoch 335: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00272, lr=0.000199, step=336]\n", - "Epoch 336: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00998, lr=0.000199, step=337]\n", - "Epoch 337: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00608, lr=0.000199, step=338]\n", - "Epoch 338: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00245, lr=0.000198, step=339]\n", - "Epoch 339: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00666, lr=0.000198, step=340]\n", - "Epoch 340: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00714, lr=0.000198, step=341]\n", - "Epoch 341: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00794, lr=0.000197, step=342]\n", - "Epoch 342: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00383, lr=0.000197, step=343]\n", - "Epoch 343: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00394, lr=0.000197, step=344]\n", - "Epoch 344: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.00854, lr=0.000196, step=345]\n", - "Epoch 345: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00279, lr=0.000196, step=346]\n", - "Epoch 346: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.988, loss=0.0157, lr=0.000196, step=347]\n", - "Epoch 347: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00348, lr=0.000196, step=348]\n", - "Epoch 348: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0171, lr=0.000195, step=349]\n", - "Epoch 349: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.0162, lr=0.000195, step=350]\n", - "Epoch 350: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00321, lr=0.000195, step=351]\n", - "Epoch 351: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.0051, lr=0.000194, step=352]\n", - "Epoch 352: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.015, lr=0.000194, step=353]\n", - "Epoch 353: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00557, lr=0.000194, step=354]\n", - "Epoch 354: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00373, lr=0.000193, step=355]\n", - "Epoch 355: 100%|██████████| 1/1 
[00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00768, lr=0.000193, step=356]\n", - "Epoch 356: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00467, lr=0.000193, step=357]\n", - "Epoch 357: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00981, lr=0.000193, step=358]\n", - "Epoch 358: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00396, lr=0.000192, step=359]\n", - "Epoch 359: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.988, loss=0.00286, lr=0.000192, step=360]\n", - "Epoch 360: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00579, lr=0.000192, step=361]\n", - "Epoch 361: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00653, lr=0.000191, step=362]\n", - "Epoch 362: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00633, lr=0.000191, step=363]\n", - "Epoch 363: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.988, loss=0.00818, lr=0.000191, step=364]\n", - "Epoch 364: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.0044, lr=0.00019, step=365]\n", + "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0, loss=0.216, lr=0.0003, step=1]\n", + "Epoch 1: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0, loss=0.29, lr=0.000299, step=2]\n", + "Epoch 2: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.405, loss=0.3, lr=0.000299, step=3]\n", + "Epoch 3: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.561, loss=0.181, lr=0.000299, step=4]\n", + "Epoch 4: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.646, loss=0.178, lr=0.000298, step=5]\n", + "Epoch 5: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.701, loss=0.12, lr=0.000298, step=6]\n", + "Epoch 6: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.739, loss=0.112, lr=0.000298, step=7]\n", + "Epoch 7: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.768, loss=0.104, lr=0.000298, step=8]\n", + "Epoch 8: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.79, loss=0.0821, lr=0.000297, step=9]\n", + "Epoch 9: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.808, loss=0.0716, lr=0.000297, step=10]\n", + "Epoch 10: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.822, loss=0.071, lr=0.000297, step=11]\n", + "Epoch 11: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.834, loss=0.0533, lr=0.000296, step=12]\n", + "Epoch 12: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.845, loss=0.059, lr=0.000296, step=13]\n", + "Epoch 13: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.854, loss=0.0566, lr=0.000296, step=14]\n", + "Epoch 14: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.862, loss=0.058, lr=0.000295, step=15]\n", + "Epoch 15: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.869, loss=0.0461, lr=0.000295, step=16]\n", + "Epoch 16: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.875, loss=0.0433, lr=0.000295, step=17]\n", + "Epoch 17: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.881, loss=0.046, lr=0.000295, step=18]\n", + "Epoch 18: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.886, loss=0.0327, lr=0.000294, step=19]\n", + "Epoch 19: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.89, loss=0.0371, lr=0.000294, step=20]\n", + "Epoch 20: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.894, loss=0.037, lr=0.000294, step=21]\n", + "Epoch 21: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.898, 
loss=0.0345, lr=0.000293, step=22]\n", + "Epoch 22: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.902, loss=0.0264, lr=0.000293, step=23]\n", + "Epoch 23: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.905, loss=0.0291, lr=0.000293, step=24]\n", + "Epoch 24: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.908, loss=0.0318, lr=0.000292, step=25]\n", + "Epoch 25: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.911, loss=0.025, lr=0.000292, step=26]\n", + "Epoch 26: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.913, loss=0.0319, lr=0.000292, step=27]\n", + "Epoch 27: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.916, loss=0.024, lr=0.000292, step=28]\n", + "Epoch 28: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.918, loss=0.0292, lr=0.000291, step=29]\n", + "Epoch 29: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.92, loss=0.0304, lr=0.000291, step=30]\n", + "Epoch 30: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.922, loss=0.0263, lr=0.000291, step=31]\n", + "Epoch 31: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.924, loss=0.0219, lr=0.00029, step=32]\n", + "Epoch 32: 100%|██████████| 1/1 [00:00<00:00, 1.19it/s, ema_decay=0.926, loss=0.0242, lr=0.00029, step=33]\n", + "Epoch 33: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.927, loss=0.025, lr=0.00029, step=34]\n", + "Epoch 34: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.929, loss=0.0266, lr=0.000289, step=35]\n", + "Epoch 35: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.931, loss=0.0218, lr=0.000289, step=36]\n", + "Epoch 36: 100%|██████████| 1/1 [00:00<00:00, 1.18it/s, ema_decay=0.932, loss=0.023, lr=0.000289, step=37]\n", + "Epoch 37: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.933, loss=0.0203, lr=0.000289, step=38]\n", + "Epoch 38: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.935, loss=0.0187, lr=0.000288, step=39]\n", + "Epoch 39: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.936, loss=0.0178, lr=0.000288, step=40]\n", + "Epoch 40: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.937, loss=0.0187, lr=0.000288, step=41]\n", + "Epoch 41: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.938, loss=0.0194, lr=0.000287, step=42]\n", + "Epoch 42: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.939, loss=0.015, lr=0.000287, step=43]\n", + "Epoch 43: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.94, loss=0.0187, lr=0.000287, step=44]\n", + "Epoch 44: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.941, loss=0.0163, lr=0.000286, step=45]\n", + "Epoch 45: 100%|██████████| 1/1 [00:00<00:00, 1.25it/s, ema_decay=0.942, loss=0.0163, lr=0.000286, step=46]\n", + "Epoch 46: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.943, loss=0.0161, lr=0.000286, step=47]\n", + "Epoch 47: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.944, loss=0.0154, lr=0.000286, step=48]\n", + "Epoch 48: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.945, loss=0.0155, lr=0.000285, step=49]\n", + "Epoch 49: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.946, loss=0.0158, lr=0.000285, step=50]\n", + "Epoch 50: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.947, loss=0.0139, lr=0.000285, step=51]\n", + "Epoch 51: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.948, loss=0.0152, lr=0.000284, step=52]\n", + "Epoch 52: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.948, loss=0.0157, lr=0.000284, 
step=53]\n", + "Epoch 53: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.949, loss=0.0135, lr=0.000284, step=54]\n", + "Epoch 54: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.95, loss=0.0131, lr=0.000283, step=55]\n", + "Epoch 55: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.95, loss=0.0125, lr=0.000283, step=56]\n", + "Epoch 56: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.951, loss=0.0123, lr=0.000283, step=57]\n", + "Epoch 57: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.952, loss=0.0123, lr=0.000283, step=58]\n", + "Epoch 58: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.952, loss=0.0123, lr=0.000282, step=59]\n", + "Epoch 59: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.953, loss=0.0121, lr=0.000282, step=60]\n", + "Epoch 60: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.954, loss=0.0118, lr=0.000282, step=61]\n", + "Epoch 61: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.954, loss=0.012, lr=0.000281, step=62]\n", + "Epoch 62: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.955, loss=0.0119, lr=0.000281, step=63]\n", + "Epoch 63: 100%|██████████| 1/1 [00:00<00:00, 1.31it/s, ema_decay=0.955, loss=0.0117, lr=0.000281, step=64]\n", + "Epoch 64: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.956, loss=0.011, lr=0.00028, step=65]\n", + "Epoch 65: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.956, loss=0.0127, lr=0.00028, step=66]\n", + "Epoch 66: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.957, loss=0.0102, lr=0.00028, step=67]\n", + "Epoch 67: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.957, loss=0.0109, lr=0.00028, step=68]\n", + "Epoch 68: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.958, loss=0.011, lr=0.000279, step=69]\n", + "Epoch 69: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.958, loss=0.0101, lr=0.000279, step=70]\n", + "Epoch 70: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.959, loss=0.0104, lr=0.000279, step=71]\n", + "Epoch 71: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.959, loss=0.0103, lr=0.000278, step=72]\n", + "Epoch 72: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.96, loss=0.0105, lr=0.000278, step=73]\n", + "Epoch 73: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.96, loss=0.0107, lr=0.000278, step=74]\n", + "Epoch 74: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.96, loss=0.00957, lr=0.000277, step=75]\n", + "Epoch 75: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.961, loss=0.0101, lr=0.000277, step=76]\n", + "Epoch 76: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.961, loss=0.0104, lr=0.000277, step=77]\n", + "Epoch 77: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.962, loss=0.00956, lr=0.000277, step=78]\n", + "Epoch 78: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.962, loss=0.0108, lr=0.000276, step=79]\n", + "Epoch 79: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.962, loss=0.00959, lr=0.000276, step=80]\n", + "Epoch 80: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.963, loss=0.00939, lr=0.000276, step=81]\n", + "Epoch 81: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.963, loss=0.0092, lr=0.000275, step=82]\n", + "Epoch 82: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.963, loss=0.00854, lr=0.000275, step=83]\n", + "Epoch 83: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.964, loss=0.00849, lr=0.000275, step=84]\n", + "Epoch 84: 
100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.964, loss=0.00874, lr=0.000275, step=85]\n", + "Epoch 85: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.964, loss=0.00868, lr=0.000274, step=86]\n", + "Epoch 86: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.965, loss=0.0094, lr=0.000274, step=87]\n", + "Epoch 87: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.965, loss=0.0092, lr=0.000274, step=88]\n", + "Epoch 88: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.965, loss=0.00842, lr=0.000273, step=89]\n", + "Epoch 89: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.965, loss=0.00902, lr=0.000273, step=90]\n", + "Epoch 90: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.966, loss=0.0083, lr=0.000273, step=91]\n", + "Epoch 91: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.966, loss=0.00898, lr=0.000272, step=92]\n", + "Epoch 92: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.966, loss=0.00839, lr=0.000272, step=93]\n", + "Epoch 93: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.967, loss=0.00847, lr=0.000272, step=94]\n", + "Epoch 94: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.967, loss=0.00837, lr=0.000271, step=95]\n", + "Epoch 95: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.967, loss=0.00852, lr=0.000271, step=96]\n", + "Epoch 96: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.967, loss=0.00819, lr=0.000271, step=97]\n", + "Epoch 97: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.968, loss=0.00878, lr=0.000271, step=98]\n", + "Epoch 98: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.968, loss=0.00894, lr=0.00027, step=99]\n", + "Epoch 99: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.968, loss=0.0075, lr=0.00027, step=100]\n", + "Epoch 100: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.968, loss=0.0079, lr=0.00027, step=101]\n", + "Epoch 101: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.969, loss=0.008, lr=0.000269, step=102]\n", + "Epoch 102: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.969, loss=0.00811, lr=0.000269, step=103]\n", + "Epoch 103: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.969, loss=0.00759, lr=0.000269, step=104]\n", + "Epoch 104: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.969, loss=0.0079, lr=0.000268, step=105]\n", + "Epoch 105: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.97, loss=0.00784, lr=0.000268, step=106]\n", + "Epoch 106: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.97, loss=0.00723, lr=0.000268, step=107]\n", + "Epoch 107: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.97, loss=0.00807, lr=0.000268, step=108]\n", + "Epoch 108: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.97, loss=0.00732, lr=0.000267, step=109]\n", + "Epoch 109: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.97, loss=0.0073, lr=0.000267, step=110]\n", + "Epoch 110: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.971, loss=0.0081, lr=0.000267, step=111]\n", + "Epoch 111: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.971, loss=0.00795, lr=0.000266, step=112]\n", + "Epoch 112: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.971, loss=0.00826, lr=0.000266, step=113]\n", + "Epoch 113: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.971, loss=0.0074, lr=0.000266, step=114]\n", + "Epoch 114: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.971, loss=0.00958, lr=0.000266, 
step=115]\n", + "Epoch 115: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.972, loss=0.00818, lr=0.000265, step=116]\n", + "Epoch 116: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.972, loss=0.00897, lr=0.000265, step=117]\n", + "Epoch 117: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.972, loss=0.00738, lr=0.000265, step=118]\n", + "Epoch 118: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.972, loss=0.00775, lr=0.000264, step=119]\n", + "Epoch 119: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.972, loss=0.0101, lr=0.000264, step=120]\n", + "Epoch 120: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.972, loss=0.00772, lr=0.000264, step=121]\n", + "Epoch 121: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.973, loss=0.0105, lr=0.000263, step=122]\n", + "Epoch 122: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.973, loss=0.00784, lr=0.000263, step=123]\n", + "Epoch 123: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.973, loss=0.0086, lr=0.000263, step=124]\n", + "Epoch 124: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.973, loss=0.0082, lr=0.000262, step=125]\n", + "Epoch 125: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.973, loss=0.00789, lr=0.000262, step=126]\n", + "Epoch 126: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.973, loss=0.00833, lr=0.000262, step=127]\n", + "Epoch 127: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.974, loss=0.00724, lr=0.000262, step=128]\n", + "Epoch 128: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.974, loss=0.00795, lr=0.000261, step=129]\n", + "Epoch 129: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.974, loss=0.00776, lr=0.000261, step=130]\n", + "Epoch 130: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.974, loss=0.00744, lr=0.000261, step=131]\n", + "Epoch 131: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.974, loss=0.00775, lr=0.00026, step=132]\n", + "Epoch 132: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.974, loss=0.0073, lr=0.00026, step=133]\n", + "Epoch 133: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.974, loss=0.00737, lr=0.00026, step=134]\n", + "Epoch 134: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.975, loss=0.00795, lr=0.000259, step=135]\n", + "Epoch 135: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.975, loss=0.00785, lr=0.000259, step=136]\n", + "Epoch 136: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.975, loss=0.0076, lr=0.000259, step=137]\n", + "Epoch 137: 100%|██████████| 1/1 [00:00<00:00, 1.27it/s, ema_decay=0.975, loss=0.00791, lr=0.000259, step=138]\n", + "Epoch 138: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.975, loss=0.00769, lr=0.000258, step=139]\n", + "Epoch 139: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.975, loss=0.00709, lr=0.000258, step=140]\n", + "Epoch 140: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.975, loss=0.00779, lr=0.000258, step=141]\n", + "Epoch 141: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.976, loss=0.00683, lr=0.000257, step=142]\n", + "Epoch 142: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.976, loss=0.00707, lr=0.000257, step=143]\n", + "Epoch 143: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.976, loss=0.0069, lr=0.000257, step=144]\n", + "Epoch 144: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.976, loss=0.00666, lr=0.000256, step=145]\n", + "Epoch 145: 100%|██████████| 1/1 
[00:00<00:00, 1.48it/s, ema_decay=0.976, loss=0.0066, lr=0.000256, step=146]\n", + "Epoch 146: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.976, loss=0.00717, lr=0.000256, step=147]\n", + "Epoch 147: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.976, loss=0.00669, lr=0.000256, step=148]\n", + "Epoch 148: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.976, loss=0.00669, lr=0.000255, step=149]\n", + "Epoch 149: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.977, loss=0.00712, lr=0.000255, step=150]\n", + "Epoch 150: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.977, loss=0.00665, lr=0.000255, step=151]\n", + "Epoch 151: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.977, loss=0.00693, lr=0.000254, step=152]\n", + "Epoch 152: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.977, loss=0.0066, lr=0.000254, step=153]\n", + "Epoch 153: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.977, loss=0.00688, lr=0.000254, step=154]\n", + "Epoch 154: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.977, loss=0.00601, lr=0.000253, step=155]\n", + "Epoch 155: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.977, loss=0.00671, lr=0.000253, step=156]\n", + "Epoch 156: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.977, loss=0.00608, lr=0.000253, step=157]\n", + "Epoch 157: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.977, loss=0.00631, lr=0.000253, step=158]\n", + "Epoch 158: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.978, loss=0.0063, lr=0.000252, step=159]\n", + "Epoch 159: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.978, loss=0.00704, lr=0.000252, step=160]\n", + "Epoch 160: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.978, loss=0.00627, lr=0.000252, step=161]\n", + "Epoch 161: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.978, loss=0.00624, lr=0.000251, step=162]\n", + "Epoch 162: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.978, loss=0.00714, lr=0.000251, step=163]\n", + "Epoch 163: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.978, loss=0.00625, lr=0.000251, step=164]\n", + "Epoch 164: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.978, loss=0.00639, lr=0.00025, step=165]\n", + "Epoch 165: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.978, loss=0.00672, lr=0.00025, step=166]\n", + "Epoch 166: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.978, loss=0.00651, lr=0.00025, step=167]\n", + "Epoch 167: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.978, loss=0.00617, lr=0.00025, step=168]\n", + "Epoch 168: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.979, loss=0.00651, lr=0.000249, step=169]\n", + "Epoch 169: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.979, loss=0.00635, lr=0.000249, step=170]\n", + "Epoch 170: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.979, loss=0.00653, lr=0.000249, step=171]\n", + "Epoch 171: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.979, loss=0.00623, lr=0.000248, step=172]\n", + "Epoch 172: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.979, loss=0.00634, lr=0.000248, step=173]\n", + "Epoch 173: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.979, loss=0.00607, lr=0.000248, step=174]\n", + "Epoch 174: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.979, loss=0.00583, lr=0.000247, step=175]\n", + "Epoch 175: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.979, 
loss=0.00648, lr=0.000247, step=176]\n", + "Epoch 176: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.979, loss=0.00647, lr=0.000247, step=177]\n", + "Epoch 177: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.979, loss=0.00608, lr=0.000247, step=178]\n", + "Epoch 178: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.979, loss=0.007, lr=0.000246, step=179]\n", + "Epoch 179: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.98, loss=0.00581, lr=0.000246, step=180]\n", + "Epoch 180: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.98, loss=0.00684, lr=0.000246, step=181]\n", + "Epoch 181: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.98, loss=0.00593, lr=0.000245, step=182]\n", + "Epoch 182: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.98, loss=0.00655, lr=0.000245, step=183]\n", + "Epoch 183: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.98, loss=0.00602, lr=0.000245, step=184]\n", + "Epoch 184: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.98, loss=0.00647, lr=0.000244, step=185]\n", + "Epoch 185: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.98, loss=0.00588, lr=0.000244, step=186]\n", + "Epoch 186: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.98, loss=0.00644, lr=0.000244, step=187]\n", + "Epoch 187: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.98, loss=0.0064, lr=0.000244, step=188]\n", + "Epoch 188: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.98, loss=0.00597, lr=0.000243, step=189]\n", + "Epoch 189: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.98, loss=0.00635, lr=0.000243, step=190]\n", + "Epoch 190: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.98, loss=0.00607, lr=0.000243, step=191]\n", + "Epoch 191: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00575, lr=0.000242, step=192]\n", + "Epoch 192: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.981, loss=0.00625, lr=0.000242, step=193]\n", + "Epoch 193: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.981, loss=0.00554, lr=0.000242, step=194]\n", + "Epoch 194: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.981, loss=0.00577, lr=0.000241, step=195]\n", + "Epoch 195: 100%|██████████| 1/1 [00:00<00:00, 1.30it/s, ema_decay=0.981, loss=0.00617, lr=0.000241, step=196]\n", + "Epoch 196: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.981, loss=0.00554, lr=0.000241, step=197]\n", + "Epoch 197: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.981, loss=0.00587, lr=0.000241, step=198]\n", + "Epoch 198: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.981, loss=0.00524, lr=0.00024, step=199]\n", + "Epoch 199: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.981, loss=0.00621, lr=0.00024, step=200]\n", + "Epoch 200: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00546, lr=0.00024, step=201]\n", + "Epoch 201: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00602, lr=0.000239, step=202]\n", + "Epoch 202: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.981, loss=0.00569, lr=0.000239, step=203]\n", + "Epoch 203: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.981, loss=0.00551, lr=0.000239, step=204]\n", + "Epoch 204: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00574, lr=0.000238, step=205]\n", + "Epoch 205: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.982, loss=0.00564, lr=0.000238, step=206]\n", + "Epoch 206: 
100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.982, loss=0.00519, lr=0.000238, step=207]\n", + "Epoch 207: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.00601, lr=0.000238, step=208]\n", + "Epoch 208: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.982, loss=0.00623, lr=0.000237, step=209]\n", + "Epoch 209: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.982, loss=0.00592, lr=0.000237, step=210]\n", + "Epoch 210: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.00547, lr=0.000237, step=211]\n", + "Epoch 211: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.00568, lr=0.000236, step=212]\n", + "Epoch 212: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.982, loss=0.00595, lr=0.000236, step=213]\n", + "Epoch 213: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.982, loss=0.00641, lr=0.000236, step=214]\n", + "Epoch 214: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.982, loss=0.00649, lr=0.000235, step=215]\n", + "Epoch 215: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.982, loss=0.00573, lr=0.000235, step=216]\n", + "Epoch 216: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.0061, lr=0.000235, step=217]\n", + "Epoch 217: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.982, loss=0.00609, lr=0.000235, step=218]\n", + "Epoch 218: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.00531, lr=0.000234, step=219]\n", + "Epoch 219: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.982, loss=0.00553, lr=0.000234, step=220]\n", + "Epoch 220: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.982, loss=0.00658, lr=0.000234, step=221]\n", + "Epoch 221: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.983, loss=0.00536, lr=0.000233, step=222]\n", + "Epoch 222: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.983, loss=0.00616, lr=0.000233, step=223]\n", + "Epoch 223: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.983, loss=0.00587, lr=0.000233, step=224]\n", + "Epoch 224: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.983, loss=0.00537, lr=0.000232, step=225]\n", + "Epoch 225: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.983, loss=0.00617, lr=0.000232, step=226]\n", + "Epoch 226: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.983, loss=0.0055, lr=0.000232, step=227]\n", + "Epoch 227: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.983, loss=0.00528, lr=0.000232, step=228]\n", + "Epoch 228: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.983, loss=0.00547, lr=0.000231, step=229]\n", + "Epoch 229: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.983, loss=0.00522, lr=0.000231, step=230]\n", + "Epoch 230: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00548, lr=0.000231, step=231]\n", + "Epoch 231: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.983, loss=0.00569, lr=0.00023, step=232]\n", + "Epoch 232: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.983, loss=0.00557, lr=0.00023, step=233]\n", + "Epoch 233: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.983, loss=0.00559, lr=0.00023, step=234]\n", + "Epoch 234: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00552, lr=0.000229, step=235]\n", + "Epoch 235: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.983, loss=0.00516, lr=0.000229, step=236]\n", + "Epoch 236: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, 
ema_decay=0.983, loss=0.00548, lr=0.000229, step=237]\n", + "Epoch 237: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.983, loss=0.00574, lr=0.000229, step=238]\n", + "Epoch 238: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.983, loss=0.00503, lr=0.000228, step=239]\n", + "Epoch 239: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.984, loss=0.00526, lr=0.000228, step=240]\n", + "Epoch 240: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.984, loss=0.00549, lr=0.000228, step=241]\n", + "Epoch 241: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.984, loss=0.00561, lr=0.000227, step=242]\n", + "Epoch 242: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.984, loss=0.00511, lr=0.000227, step=243]\n", + "Epoch 243: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00587, lr=0.000227, step=244]\n", + "Epoch 244: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00533, lr=0.000226, step=245]\n", + "Epoch 245: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.984, loss=0.00575, lr=0.000226, step=246]\n", + "Epoch 246: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.984, loss=0.0053, lr=0.000226, step=247]\n", + "Epoch 247: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.984, loss=0.00547, lr=0.000226, step=248]\n", + "Epoch 248: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.006, lr=0.000225, step=249]\n", + "Epoch 249: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00567, lr=0.000225, step=250]\n", + "Epoch 250: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.984, loss=0.00532, lr=0.000225, step=251]\n", + "Epoch 251: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00558, lr=0.000224, step=252]\n", + "Epoch 252: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.984, loss=0.00576, lr=0.000224, step=253]\n", + "Epoch 253: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.984, loss=0.00557, lr=0.000224, step=254]\n", + "Epoch 254: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.984, loss=0.00574, lr=0.000223, step=255]\n", + "Epoch 255: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00547, lr=0.000223, step=256]\n", + "Epoch 256: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00559, lr=0.000223, step=257]\n", + "Epoch 257: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.984, loss=0.00496, lr=0.000223, step=258]\n", + "Epoch 258: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.984, loss=0.00567, lr=0.000222, step=259]\n", + "Epoch 259: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.985, loss=0.00526, lr=0.000222, step=260]\n", + "Epoch 260: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00578, lr=0.000222, step=261]\n", + "Epoch 261: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00514, lr=0.000221, step=262]\n", + "Epoch 262: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00551, lr=0.000221, step=263]\n", + "Epoch 263: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00504, lr=0.000221, step=264]\n", + "Epoch 264: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.985, loss=0.00504, lr=0.00022, step=265]\n", + "Epoch 265: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00514, lr=0.00022, step=266]\n", + "Epoch 266: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.985, loss=0.005, lr=0.00022, 
step=267]\n", + "Epoch 267: 100%|██████████| 1/1 [00:00<00:00, 1.36it/s, ema_decay=0.985, loss=0.00506, lr=0.00022, step=268]\n", + "Epoch 268: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.985, loss=0.00491, lr=0.000219, step=269]\n", + "Epoch 269: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.985, loss=0.00495, lr=0.000219, step=270]\n", + "Epoch 270: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.985, loss=0.00499, lr=0.000219, step=271]\n", + "Epoch 271: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.985, loss=0.00506, lr=0.000218, step=272]\n", + "Epoch 272: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.985, loss=0.00489, lr=0.000218, step=273]\n", + "Epoch 273: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.985, loss=0.00503, lr=0.000218, step=274]\n", + "Epoch 274: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.985, loss=0.00528, lr=0.000217, step=275]\n", + "Epoch 275: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.985, loss=0.00465, lr=0.000217, step=276]\n", + "Epoch 276: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.985, loss=0.00494, lr=0.000217, step=277]\n", + "Epoch 277: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00509, lr=0.000217, step=278]\n", + "Epoch 278: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.0049, lr=0.000216, step=279]\n", + "Epoch 279: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00477, lr=0.000216, step=280]\n", + "Epoch 280: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.985, loss=0.0048, lr=0.000216, step=281]\n", + "Epoch 281: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00509, lr=0.000215, step=282]\n", + "Epoch 282: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00487, lr=0.000215, step=283]\n", + "Epoch 283: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00492, lr=0.000215, step=284]\n", + "Epoch 284: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00468, lr=0.000214, step=285]\n", + "Epoch 285: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.986, loss=0.00499, lr=0.000214, step=286]\n", + "Epoch 286: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00467, lr=0.000214, step=287]\n", + "Epoch 287: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.0049, lr=0.000214, step=288]\n", + "Epoch 288: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.986, loss=0.0047, lr=0.000213, step=289]\n", + "Epoch 289: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.986, loss=0.00488, lr=0.000213, step=290]\n", + "Epoch 290: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00469, lr=0.000213, step=291]\n", + "Epoch 291: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.986, loss=0.00469, lr=0.000212, step=292]\n", + "Epoch 292: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.986, loss=0.00493, lr=0.000212, step=293]\n", + "Epoch 293: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00475, lr=0.000212, step=294]\n", + "Epoch 294: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00459, lr=0.000211, step=295]\n", + "Epoch 295: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.986, loss=0.00509, lr=0.000211, step=296]\n", + "Epoch 296: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.986, loss=0.00472, lr=0.000211, step=297]\n", + "Epoch 297: 100%|██████████| 1/1 
[00:00<00:00, 1.44it/s, ema_decay=0.986, loss=0.00492, lr=0.000211, step=298]\n", + "Epoch 298: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.986, loss=0.00454, lr=0.00021, step=299]\n", + "Epoch 299: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.986, loss=0.0048, lr=0.00021, step=300]\n", + "Epoch 300: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00477, lr=0.00021, step=301]\n", + "Epoch 301: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00467, lr=0.000209, step=302]\n", + "Epoch 302: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00489, lr=0.000209, step=303]\n", + "Epoch 303: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00446, lr=0.000209, step=304]\n", + "Epoch 304: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00488, lr=0.000208, step=305]\n", + "Epoch 305: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00441, lr=0.000208, step=306]\n", + "Epoch 306: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.986, loss=0.00461, lr=0.000208, step=307]\n", + "Epoch 307: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.986, loss=0.00468, lr=0.000208, step=308]\n", + "Epoch 308: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.986, loss=0.00443, lr=0.000207, step=309]\n", + "Epoch 309: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00451, lr=0.000207, step=310]\n", + "Epoch 310: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.986, loss=0.00443, lr=0.000207, step=311]\n", + "Epoch 311: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.986, loss=0.00486, lr=0.000206, step=312]\n", + "Epoch 312: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.987, loss=0.00462, lr=0.000206, step=313]\n", + "Epoch 313: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00467, lr=0.000206, step=314]\n", + "Epoch 314: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00465, lr=0.000206, step=315]\n", + "Epoch 315: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00458, lr=0.000205, step=316]\n", + "Epoch 316: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00469, lr=0.000205, step=317]\n", + "Epoch 317: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00443, lr=0.000205, step=318]\n", + "Epoch 318: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00457, lr=0.000204, step=319]\n", + "Epoch 319: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.00453, lr=0.000204, step=320]\n", + "Epoch 320: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.987, loss=0.00454, lr=0.000204, step=321]\n", + "Epoch 321: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00451, lr=0.000203, step=322]\n", + "Epoch 322: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.987, loss=0.00435, lr=0.000203, step=323]\n", + "Epoch 323: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00453, lr=0.000203, step=324]\n", + "Epoch 324: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00449, lr=0.000202, step=325]\n", + "Epoch 325: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00456, lr=0.000202, step=326]\n", + "Epoch 326: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.987, loss=0.00434, lr=0.000202, step=327]\n", + "Epoch 327: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.987, 
loss=0.00459, lr=0.000202, step=328]\n", + "Epoch 328: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.987, loss=0.0044, lr=0.000201, step=329]\n", + "Epoch 329: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.987, loss=0.00467, lr=0.000201, step=330]\n", + "Epoch 330: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.987, loss=0.00443, lr=0.000201, step=331]\n", + "Epoch 331: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.987, loss=0.00442, lr=0.0002, step=332]\n", + "Epoch 332: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00468, lr=0.0002, step=333]\n", + "Epoch 333: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.0044, lr=0.0002, step=334]\n", + "Epoch 334: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.00443, lr=0.000199, step=335]\n", + "Epoch 335: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.987, loss=0.00461, lr=0.000199, step=336]\n", + "Epoch 336: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.987, loss=0.00439, lr=0.000199, step=337]\n", + "Epoch 337: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.987, loss=0.00466, lr=0.000199, step=338]\n", + "Epoch 338: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.987, loss=0.0044, lr=0.000198, step=339]\n", + "Epoch 339: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.987, loss=0.00425, lr=0.000198, step=340]\n", + "Epoch 340: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.987, loss=0.00454, lr=0.000198, step=341]\n", + "Epoch 341: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.987, loss=0.00407, lr=0.000197, step=342]\n", + "Epoch 342: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.987, loss=0.00472, lr=0.000197, step=343]\n", + "Epoch 343: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.987, loss=0.00422, lr=0.000197, step=344]\n", + "Epoch 344: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.987, loss=0.00429, lr=0.000196, step=345]\n", + "Epoch 345: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00425, lr=0.000196, step=346]\n", + "Epoch 346: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00436, lr=0.000196, step=347]\n", + "Epoch 347: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00437, lr=0.000196, step=348]\n", + "Epoch 348: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.988, loss=0.00514, lr=0.000195, step=349]\n", + "Epoch 349: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.988, loss=0.00431, lr=0.000195, step=350]\n", + "Epoch 350: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00512, lr=0.000195, step=351]\n", + "Epoch 351: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.988, loss=0.00445, lr=0.000194, step=352]\n", + "Epoch 352: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.988, loss=0.00534, lr=0.000194, step=353]\n", + "Epoch 353: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00432, lr=0.000194, step=354]\n", + "Epoch 354: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.988, loss=0.00447, lr=0.000193, step=355]\n", + "Epoch 355: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00457, lr=0.000193, step=356]\n", + "Epoch 356: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.988, loss=0.00475, lr=0.000193, step=357]\n", + "Epoch 357: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.988, loss=0.00452, lr=0.000193, step=358]\n", + "Epoch 
358: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.988, loss=0.00526, lr=0.000192, step=359]\n",
- "[Epochs 359-998 elided: one tqdm progress line per epoch at ~1.5 it/s; loss decreases from ~5e-3 to ~1.5e-3, lr anneals linearly by 3e-7 per step from 1.92e-4 toward 0, ema_decay rises from 0.988 to 0.994]\n",
- "Epoch 999: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0014, lr=0, step=1000]\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": { - "model_id": "0e5c2fa8fcec4feb8e30865591686739", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/50 [00:00 31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0, loss=0.189, lr=0.0003, step=1]\n", - "Epoch 1: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0, loss=0.725, lr=0.000299, step=2]\n", - "Epoch 2: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.405, loss=0.693, lr=0.000299, step=3]\n", - "Epoch 3: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.561, loss=0.153, lr=0.000299, step=4]\n", - "Epoch 4: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.646, loss=0.244, lr=0.000298, step=5]\n", - "Epoch 5: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.701, loss=0.432, lr=0.000298, step=6]\n", - "Epoch 6: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.739, loss=0.42, lr=0.000298, step=7]\n", - "Epoch 7: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.768, loss=0.491, lr=0.000298, step=8]\n", - "Epoch 8: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.79, loss=0.127, lr=0.000297, step=9]\n", - "Epoch 9: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.808, loss=0.329, lr=0.000297, step=10]\n", - "Epoch 10: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.822, loss=0.128, lr=0.000297, step=11]\n", - "Epoch 11: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.834, loss=0.181, lr=0.000296, step=12]\n", - "Epoch 12: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.845, loss=0.253, lr=0.000296, step=13]\n", - "Epoch 13: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.854, loss=0.178, lr=0.000296, step=14]\n", - "Epoch 14: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.862, loss=0.173, lr=0.000295, step=15]\n", - "Epoch 15: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.869, loss=0.093, lr=0.000295, step=16]\n", - "Epoch 16: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.875, loss=0.291, lr=0.000295, step=17]\n", - "Epoch 17: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.881, loss=0.195, lr=0.000295, step=18]\n", - "Epoch 18: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.886, loss=0.132, lr=0.000294, step=19]\n", - "Epoch 19: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.89, loss=0.12, lr=0.000294, step=20]\n", - "Epoch 20: 100%|██████████| 1/1 [00:00<00:00, 1.08it/s, ema_decay=0.894, loss=0.187, lr=0.000294, step=21]\n", - "Epoch 21: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.898, loss=0.247, lr=0.000293, step=22]\n", - "Epoch 22: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.902, loss=0.116, lr=0.000293, step=23]\n", - "Epoch 23: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.905, loss=0.0518, lr=0.000293, step=24]\n", - "Epoch 24: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.908, loss=0.13, lr=0.000292, step=25]\n", - "Epoch 25: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.911, loss=0.131, lr=0.000292, step=26]\n", - "Epoch 26: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.913, loss=0.113, lr=0.000292, step=27]\n", - "Epoch 27: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.916, loss=0.147, lr=0.000292, step=28]\n", - "Epoch 28: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.918, loss=0.146, lr=0.000291, step=29]\n", - "Epoch 29: 100%|██████████| 1/1 
- "Epoch 521: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0194,
lr=0.000143, step=522]\n", - "Epoch 522: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0254, lr=0.000143, step=523]\n", - "Epoch 523: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0352, lr=0.000143, step=524]\n", - "Epoch 524: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0675, lr=0.000142, step=525]\n", - "Epoch 525: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0449, lr=0.000142, step=526]\n", - "Epoch 526: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0225, lr=0.000142, step=527]\n", - "Epoch 527: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0279, lr=0.000142, step=528]\n", - "Epoch 528: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.016, lr=0.000141, step=529]\n", - "Epoch 529: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0792, lr=0.000141, step=530]\n", - "Epoch 530: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0558, lr=0.000141, step=531]\n", - "Epoch 531: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0282, lr=0.00014, step=532]\n", - "Epoch 532: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.0677, lr=0.00014, step=533]\n", - "Epoch 533: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0302, lr=0.00014, step=534]\n", - "Epoch 534: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0295, lr=0.00014, step=535]\n", - "Epoch 535: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0104, lr=0.000139, step=536]\n", - "Epoch 536: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0709, lr=0.000139, step=537]\n", - "Epoch 537: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0374, lr=0.000139, step=538]\n", - "Epoch 538: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.0441, lr=0.000138, step=539]\n", - "Epoch 539: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0346, lr=0.000138, step=540]\n", - "Epoch 540: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0799, lr=0.000138, step=541]\n", - "Epoch 541: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0266, lr=0.000137, step=542]\n", - "Epoch 542: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0307, lr=0.000137, step=543]\n", - "Epoch 543: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0547, lr=0.000137, step=544]\n", - "Epoch 544: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00683, lr=0.000136, step=545]\n", - "Epoch 545: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.991, loss=0.0602, lr=0.000136, step=546]\n", - "Epoch 546: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0254, lr=0.000136, step=547]\n", - "Epoch 547: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0465, lr=0.000136, step=548]\n", - "Epoch 548: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0517, lr=0.000135, step=549]\n", - "Epoch 549: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.035, lr=0.000135, step=550]\n", - "Epoch 550: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0241, lr=0.000135, step=551]\n", - "Epoch 551: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.023, lr=0.000134, step=552]\n", - "Epoch 552: 100%|██████████| 1/1 [00:00<00:00, 
1.50it/s, ema_decay=0.991, loss=0.04, lr=0.000134, step=553]\n", - "Epoch 553: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0363, lr=0.000134, step=554]\n", - "Epoch 554: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0673, lr=0.000133, step=555]\n", - "Epoch 555: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0308, lr=0.000133, step=556]\n", - "Epoch 556: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0555, lr=0.000133, step=557]\n", - "Epoch 557: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00592, lr=0.000133, step=558]\n", - "Epoch 558: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0361, lr=0.000132, step=559]\n", - "Epoch 559: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0714, lr=0.000132, step=560]\n", - "Epoch 560: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0816, lr=0.000132, step=561]\n", - "Epoch 561: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0203, lr=0.000131, step=562]\n", - "Epoch 562: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.035, lr=0.000131, step=563]\n", - "Epoch 563: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0912, lr=0.000131, step=564]\n", - "Epoch 564: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0218, lr=0.000131, step=565]\n", - "Epoch 565: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0316, lr=0.00013, step=566]\n", - "Epoch 566: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0165, lr=0.00013, step=567]\n", - "Epoch 567: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0507, lr=0.00013, step=568]\n", - "Epoch 568: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0183, lr=0.000129, step=569]\n", - "Epoch 569: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0215, lr=0.000129, step=570]\n", - "Epoch 570: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0182, lr=0.000129, step=571]\n", - "Epoch 571: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.0363, lr=0.000128, step=572]\n", - "Epoch 572: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0369, lr=0.000128, step=573]\n", - "Epoch 573: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0299, lr=0.000128, step=574]\n", - "Epoch 574: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.0371, lr=0.000127, step=575]\n", - "Epoch 575: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0305, lr=0.000127, step=576]\n", - "Epoch 576: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.047, lr=0.000127, step=577]\n", - "Epoch 577: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0308, lr=0.000127, step=578]\n", - "Epoch 578: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0455, lr=0.000126, step=579]\n", - "Epoch 579: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0362, lr=0.000126, step=580]\n", - "Epoch 580: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0341, lr=0.000126, step=581]\n", - "Epoch 581: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0745, lr=0.000125, step=582]\n", - "Epoch 582: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0186, lr=0.000125, step=583]\n", - "Epoch 583: 
100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0169, lr=0.000125, step=584]\n", - "Epoch 584: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.03, lr=0.000124, step=585]\n", - "Epoch 585: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0226, lr=0.000124, step=586]\n", - "Epoch 586: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0323, lr=0.000124, step=587]\n", - "Epoch 587: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00292, lr=0.000124, step=588]\n", - "Epoch 588: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0697, lr=0.000123, step=589]\n", - "Epoch 589: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0417, lr=0.000123, step=590]\n", - "Epoch 590: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0922, lr=0.000123, step=591]\n", - "Epoch 591: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0118, lr=0.000122, step=592]\n", - "Epoch 592: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0394, lr=0.000122, step=593]\n", - "Epoch 593: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0317, lr=0.000122, step=594]\n", - "Epoch 594: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0126, lr=0.000121, step=595]\n", - "Epoch 595: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00654, lr=0.000121, step=596]\n", - "Epoch 596: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0195, lr=0.000121, step=597]\n", - "Epoch 597: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0584, lr=0.000121, step=598]\n", - "Epoch 598: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0259, lr=0.00012, step=599]\n", - "Epoch 599: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0283, lr=0.00012, step=600]\n", - "Epoch 600: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0433, lr=0.00012, step=601]\n", - "Epoch 601: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0426, lr=0.000119, step=602]\n", - "Epoch 602: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0288, lr=0.000119, step=603]\n", - "Epoch 603: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0521, lr=0.000119, step=604]\n", - "Epoch 604: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0193, lr=0.000118, step=605]\n", - "Epoch 605: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0431, lr=0.000118, step=606]\n", - "Epoch 606: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0313, lr=0.000118, step=607]\n", - "Epoch 607: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.052, lr=0.000118, step=608]\n", - "Epoch 608: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0249, lr=0.000117, step=609]\n", - "Epoch 609: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.0353, lr=0.000117, step=610]\n", - "Epoch 610: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0308, lr=0.000117, step=611]\n", - "Epoch 611: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.0227, lr=0.000116, step=612]\n", - "Epoch 612: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0791, lr=0.000116, step=613]\n", - "Epoch 613: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0183, 
lr=0.000116, step=614]\n", - "Epoch 614: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0219, lr=0.000115, step=615]\n", - "Epoch 615: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00682, lr=0.000115, step=616]\n", - "Epoch 616: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0651, lr=0.000115, step=617]\n", - "Epoch 617: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.0409, lr=0.000115, step=618]\n", - "Epoch 618: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0203, lr=0.000114, step=619]\n", - "Epoch 619: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0346, lr=0.000114, step=620]\n", - "Epoch 620: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0201, lr=0.000114, step=621]\n", - "Epoch 621: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0309, lr=0.000113, step=622]\n", - "Epoch 622: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.0284, lr=0.000113, step=623]\n", - "Epoch 623: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0216, lr=0.000113, step=624]\n", - "Epoch 624: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0432, lr=0.000112, step=625]\n", - "Epoch 625: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0735, lr=0.000112, step=626]\n", - "Epoch 626: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.018, lr=0.000112, step=627]\n", - "Epoch 627: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.021, lr=0.000112, step=628]\n", - "Epoch 628: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.992, loss=0.0287, lr=0.000111, step=629]\n", - "Epoch 629: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0403, lr=0.000111, step=630]\n", - "Epoch 630: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0196, lr=0.000111, step=631]\n", - "Epoch 631: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0397, lr=0.00011, step=632]\n", - "Epoch 632: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0201, lr=0.00011, step=633]\n", - "Epoch 633: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0295, lr=0.00011, step=634]\n", - "Epoch 634: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0189, lr=0.000109, step=635]\n", - "Epoch 635: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.0264, lr=0.000109, step=636]\n", - "Epoch 636: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0396, lr=0.000109, step=637]\n", - "Epoch 637: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0161, lr=0.000109, step=638]\n", - "Epoch 638: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0284, lr=0.000108, step=639]\n", - "Epoch 639: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0641, lr=0.000108, step=640]\n", - "Epoch 640: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.017, lr=0.000108, step=641]\n", - "Epoch 641: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0216, lr=0.000107, step=642]\n", - "Epoch 642: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0367, lr=0.000107, step=643]\n", - "Epoch 643: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.046, lr=0.000107, step=644]\n", - "Epoch 644: 100%|██████████| 1/1 [00:00<00:00, 
1.51it/s, ema_decay=0.992, loss=0.0172, lr=0.000106, step=645]\n", - "Epoch 645: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00879, lr=0.000106, step=646]\n", - "Epoch 646: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0416, lr=0.000106, step=647]\n", - "Epoch 647: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0259, lr=0.000106, step=648]\n", - "Epoch 648: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.011, lr=0.000105, step=649]\n", - "Epoch 649: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0163, lr=0.000105, step=650]\n", - "Epoch 650: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.013, lr=0.000105, step=651]\n", - "Epoch 651: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0224, lr=0.000104, step=652]\n", - "Epoch 652: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00738, lr=0.000104, step=653]\n", - "Epoch 653: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0164, lr=0.000104, step=654]\n", - "Epoch 654: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.0428, lr=0.000103, step=655]\n", - "Epoch 655: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0402, lr=0.000103, step=656]\n", - "Epoch 656: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0114, lr=0.000103, step=657]\n", - "Epoch 657: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.992, loss=0.0332, lr=0.000103, step=658]\n", - "Epoch 658: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0163, lr=0.000102, step=659]\n", - "Epoch 659: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0112, lr=0.000102, step=660]\n", - "Epoch 660: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0444, lr=0.000102, step=661]\n", - "Epoch 661: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00755, lr=0.000101, step=662]\n", - "Epoch 662: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.0232, lr=0.000101, step=663]\n", - "Epoch 663: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.069, lr=0.000101, step=664]\n", - "Epoch 664: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0254, lr=0.000101, step=665]\n", - "Epoch 665: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.045, lr=0.0001, step=666]\n", - "Epoch 666: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.023, lr=9.99e-5, step=667]\n", - "Epoch 667: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.0267, lr=9.96e-5, step=668]\n", - "Epoch 668: 100%|██████████| 1/1 [00:00<00:00, 1.09it/s, ema_decay=0.992, loss=0.0414, lr=9.93e-5, step=669]\n", - "Epoch 669: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.0481, lr=9.9e-5, step=670]\n", - "Epoch 670: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.0641, lr=9.87e-5, step=671]\n", - "Epoch 671: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0257, lr=9.84e-5, step=672]\n", - "Epoch 672: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0178, lr=9.81e-5, step=673]\n", - "Epoch 673: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0222, lr=9.78e-5, step=674]\n", - "Epoch 674: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0153, lr=9.75e-5, step=675]\n", - "Epoch 675: 
100%|██████████| 1/1 [00:00<00:00, 1.08it/s, ema_decay=0.992, loss=0.0167, lr=9.72e-5, step=676]\n", - "Epoch 676: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.992, loss=0.0573, lr=9.69e-5, step=677]\n", - "Epoch 677: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0652, lr=9.66e-5, step=678]\n", - "Epoch 678: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.0695, lr=9.63e-5, step=679]\n", - "Epoch 679: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.0251, lr=9.6e-5, step=680]\n", - "Epoch 680: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.0141, lr=9.57e-5, step=681]\n", - "Epoch 681: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.0311, lr=9.54e-5, step=682]\n", - "Epoch 682: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0226, lr=9.51e-5, step=683]\n", - "Epoch 683: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0165, lr=9.48e-5, step=684]\n", - "Epoch 684: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0312, lr=9.45e-5, step=685]\n", - "Epoch 685: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0166, lr=9.42e-5, step=686]\n", - "Epoch 686: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0245, lr=9.39e-5, step=687]\n", - "Epoch 687: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.026, lr=9.36e-5, step=688]\n", - "Epoch 688: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0406, lr=9.33e-5, step=689]\n", - "Epoch 689: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0286, lr=9.3e-5, step=690]\n", - "Epoch 690: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0395, lr=9.27e-5, step=691]\n", - "Epoch 691: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0272, lr=9.24e-5, step=692]\n", - "Epoch 692: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0283, lr=9.21e-5, step=693]\n", - "Epoch 693: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0184, lr=9.18e-5, step=694]\n", - "Epoch 694: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0159, lr=9.15e-5, step=695]\n", - "Epoch 695: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0219, lr=9.12e-5, step=696]\n", - "Epoch 696: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0174, lr=9.09e-5, step=697]\n", - "Epoch 697: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0518, lr=9.06e-5, step=698]\n", - "Epoch 698: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0406, lr=9.03e-5, step=699]\n", - "Epoch 699: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.0397, lr=9e-5, step=700]\n", - "Epoch 700: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00677, lr=8.97e-5, step=701]\n", - "Epoch 701: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0367, lr=8.94e-5, step=702]\n", - "Epoch 702: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.029, lr=8.91e-5, step=703]\n", - "Epoch 703: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0614, lr=8.88e-5, step=704]\n", - "Epoch 704: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0137, lr=8.85e-5, step=705]\n", - "Epoch 705: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0399, lr=8.82e-5, step=706]\n", - "Epoch 
706: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0305, lr=8.79e-5, step=707]\n", - "Epoch 707: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0597, lr=8.76e-5, step=708]\n", - "Epoch 708: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0184, lr=8.73e-5, step=709]\n", - "Epoch 709: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0412, lr=8.7e-5, step=710]\n", - "Epoch 710: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0388, lr=8.67e-5, step=711]\n", - "Epoch 711: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00576, lr=8.64e-5, step=712]\n", - "Epoch 712: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00649, lr=8.61e-5, step=713]\n", - "Epoch 713: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0409, lr=8.58e-5, step=714]\n", - "Epoch 714: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0281, lr=8.55e-5, step=715]\n", - "Epoch 715: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.032, lr=8.52e-5, step=716]\n", - "Epoch 716: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0272, lr=8.49e-5, step=717]\n", - "Epoch 717: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0173, lr=8.46e-5, step=718]\n", - "Epoch 718: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0357, lr=8.43e-5, step=719]\n", - "Epoch 719: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0562, lr=8.4e-5, step=720]\n", - "Epoch 720: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0469, lr=8.37e-5, step=721]\n", - "Epoch 721: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0316, lr=8.34e-5, step=722]\n", - "Epoch 722: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0465, lr=8.31e-5, step=723]\n", - "Epoch 723: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0298, lr=8.28e-5, step=724]\n", - "Epoch 724: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0511, lr=8.25e-5, step=725]\n", - "Epoch 725: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0275, lr=8.22e-5, step=726]\n", - "Epoch 726: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0583, lr=8.19e-5, step=727]\n", - "Epoch 727: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0264, lr=8.16e-5, step=728]\n", - "Epoch 728: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.028, lr=8.13e-5, step=729]\n", - "Epoch 729: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0259, lr=8.1e-5, step=730]\n", - "Epoch 730: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0292, lr=8.07e-5, step=731]\n", - "Epoch 731: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0156, lr=8.04e-5, step=732]\n", - "Epoch 732: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.0427, lr=8.01e-5, step=733]\n", - "Epoch 733: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.027, lr=7.98e-5, step=734]\n", - "Epoch 734: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0179, lr=7.95e-5, step=735]\n", - "Epoch 735: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00878, lr=7.92e-5, step=736]\n", - "Epoch 736: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0322, lr=7.89e-5, step=737]\n", - 
"Epoch 737: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00901, lr=7.86e-5, step=738]\n", - "Epoch 738: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.0224, lr=7.83e-5, step=739]\n", - "Epoch 739: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0505, lr=7.8e-5, step=740]\n", - "Epoch 740: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.0357, lr=7.77e-5, step=741]\n", - "Epoch 741: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0398, lr=7.74e-5, step=742]\n", - "Epoch 742: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0279, lr=7.71e-5, step=743]\n", - "Epoch 743: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0142, lr=7.68e-5, step=744]\n", - "Epoch 744: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0199, lr=7.65e-5, step=745]\n", - "Epoch 745: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0544, lr=7.62e-5, step=746]\n", - "Epoch 746: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0189, lr=7.59e-5, step=747]\n", - "Epoch 747: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0286, lr=7.56e-5, step=748]\n", - "Epoch 748: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00491, lr=7.53e-5, step=749]\n", - "Epoch 749: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.017, lr=7.5e-5, step=750]\n", - "Epoch 750: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0162, lr=7.47e-5, step=751]\n", - "Epoch 751: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.028, lr=7.44e-5, step=752]\n", - "Epoch 752: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0566, lr=7.41e-5, step=753]\n", - "Epoch 753: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.039, lr=7.38e-5, step=754]\n", - "Epoch 754: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0361, lr=7.35e-5, step=755]\n", - "Epoch 755: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0382, lr=7.32e-5, step=756]\n", - "Epoch 756: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0165, lr=7.29e-5, step=757]\n", - "Epoch 757: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0509, lr=7.26e-5, step=758]\n", - "Epoch 758: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0168, lr=7.23e-5, step=759]\n", - "Epoch 759: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0374, lr=7.2e-5, step=760]\n", - "Epoch 760: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0353, lr=7.17e-5, step=761]\n", - "Epoch 761: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0494, lr=7.14e-5, step=762]\n", - "Epoch 762: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0193, lr=7.11e-5, step=763]\n", - "Epoch 763: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0506, lr=7.08e-5, step=764]\n", - "Epoch 764: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0327, lr=7.05e-5, step=765]\n", - "Epoch 765: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0371, lr=7.02e-5, step=766]\n", - "Epoch 766: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0371, lr=6.99e-5, step=767]\n", - "Epoch 767: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0347, lr=6.96e-5, 
step=768]\n", - "Epoch 768: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00566, lr=6.93e-5, step=769]\n", - "Epoch 769: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0355, lr=6.9e-5, step=770]\n", - "Epoch 770: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0274, lr=6.87e-5, step=771]\n", - "Epoch 771: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0257, lr=6.84e-5, step=772]\n", - "Epoch 772: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.0376, lr=6.81e-5, step=773]\n", - "Epoch 773: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.016, lr=6.78e-5, step=774]\n", - "Epoch 774: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.0589, lr=6.75e-5, step=775]\n", - "Epoch 775: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0286, lr=6.72e-5, step=776]\n", - "Epoch 776: 100%|██████████| 1/1 [00:00<00:00, 1.09it/s, ema_decay=0.993, loss=0.026, lr=6.69e-5, step=777]\n", - "Epoch 777: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.0375, lr=6.66e-5, step=778]\n", - "Epoch 778: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.038, lr=6.63e-5, step=779]\n", - "Epoch 779: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.055, lr=6.6e-5, step=780]\n", - "Epoch 780: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0306, lr=6.57e-5, step=781]\n", - "Epoch 781: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.0202, lr=6.54e-5, step=782]\n", - "Epoch 782: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0363, lr=6.51e-5, step=783]\n", - "Epoch 783: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0402, lr=6.48e-5, step=784]\n", - "Epoch 784: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0449, lr=6.45e-5, step=785]\n", - "Epoch 785: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0449, lr=6.42e-5, step=786]\n", - "Epoch 786: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0464, lr=6.39e-5, step=787]\n", - "Epoch 787: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0238, lr=6.36e-5, step=788]\n", - "Epoch 788: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0257, lr=6.33e-5, step=789]\n", - "Epoch 789: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0352, lr=6.3e-5, step=790]\n", - "Epoch 790: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0152, lr=6.27e-5, step=791]\n", - "Epoch 791: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0435, lr=6.24e-5, step=792]\n", - "Epoch 792: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0263, lr=6.21e-5, step=793]\n", - "Epoch 793: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0164, lr=6.18e-5, step=794]\n", - "Epoch 794: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0245, lr=6.15e-5, step=795]\n", - "Epoch 795: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0272, lr=6.12e-5, step=796]\n", - "Epoch 796: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0131, lr=6.09e-5, step=797]\n", - "Epoch 797: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0439, lr=6.06e-5, step=798]\n", - "Epoch 798: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.026, 
lr=6.03e-5, step=799]\n", - "Epoch 799: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0542, lr=6e-5, step=800]\n", - "Epoch 800: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.0171, lr=5.97e-5, step=801]\n", - "Epoch 801: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0454, lr=5.94e-5, step=802]\n", - "Epoch 802: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0427, lr=5.91e-5, step=803]\n", - "Epoch 803: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0164, lr=5.88e-5, step=804]\n", - "Epoch 804: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0293, lr=5.85e-5, step=805]\n", - "Epoch 805: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0288, lr=5.82e-5, step=806]\n", - "Epoch 806: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0255, lr=5.79e-5, step=807]\n", - "Epoch 807: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.0423, lr=5.76e-5, step=808]\n", - "Epoch 808: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0378, lr=5.73e-5, step=809]\n", - "Epoch 809: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0296, lr=5.7e-5, step=810]\n", - "Epoch 810: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.0442, lr=5.67e-5, step=811]\n", - "Epoch 811: 100%|██████████| 1/1 [00:00<00:00, 1.08it/s, ema_decay=0.993, loss=0.0565, lr=5.64e-5, step=812]\n", - "Epoch 812: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0444, lr=5.61e-5, step=813]\n", - "Epoch 813: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0378, lr=5.58e-5, step=814]\n", - "Epoch 814: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.0238, lr=5.55e-5, step=815]\n", - "Epoch 815: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.993, loss=0.00857, lr=5.52e-5, step=816]\n", - "Epoch 816: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0221, lr=5.49e-5, step=817]\n", - "Epoch 817: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.014, lr=5.46e-5, step=818]\n", - "Epoch 818: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0197, lr=5.43e-5, step=819]\n", - "Epoch 819: 100%|██████████| 1/1 [00:00<00:00, 1.32it/s, ema_decay=0.993, loss=0.0338, lr=5.4e-5, step=820]\n", - "Epoch 820: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0274, lr=5.37e-5, step=821]\n", - "Epoch 821: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.019, lr=5.34e-5, step=822]\n", - "Epoch 822: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0293, lr=5.31e-5, step=823]\n", - "Epoch 823: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00845, lr=5.28e-5, step=824]\n", - "Epoch 824: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.0436, lr=5.25e-5, step=825]\n", - "Epoch 825: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.0422, lr=5.22e-5, step=826]\n", - "Epoch 826: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0177, lr=5.19e-5, step=827]\n", - "Epoch 827: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.0328, lr=5.16e-5, step=828]\n", - "Epoch 828: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.0148, lr=5.13e-5, step=829]\n", - "Epoch 829: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, 
loss=0.00843, lr=5.1e-5, step=830]\n", - "Epoch 830: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0363, lr=5.07e-5, step=831]\n", - "Epoch 831: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0418, lr=5.04e-5, step=832]\n", - "Epoch 832: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0213, lr=5.01e-5, step=833]\n", - "Epoch 833: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0172, lr=4.98e-5, step=834]\n", - "Epoch 834: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0253, lr=4.95e-5, step=835]\n", - "Epoch 835: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.0168, lr=4.92e-5, step=836]\n", - "Epoch 836: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0057, lr=4.89e-5, step=837]\n", - "Epoch 837: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0192, lr=4.86e-5, step=838]\n", - "Epoch 838: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0251, lr=4.83e-5, step=839]\n", - "Epoch 839: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00503, lr=4.8e-5, step=840]\n", - "Epoch 840: 100%|██████████| 1/1 [00:00<00:00, 1.15it/s, ema_decay=0.994, loss=0.0246, lr=4.77e-5, step=841]\n", - "Epoch 841: 100%|██████████| 1/1 [00:00<00:00, 1.23it/s, ema_decay=0.994, loss=0.00764, lr=4.74e-5, step=842]\n", - "Epoch 842: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0358, lr=4.71e-5, step=843]\n", - "Epoch 843: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0139, lr=4.68e-5, step=844]\n", - "Epoch 844: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0529, lr=4.65e-5, step=845]\n", - "Epoch 845: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.0361, lr=4.62e-5, step=846]\n", - "Epoch 846: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0231, lr=4.59e-5, step=847]\n", - "Epoch 847: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.0308, lr=4.56e-5, step=848]\n", - "Epoch 848: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0265, lr=4.53e-5, step=849]\n", - "Epoch 849: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0128, lr=4.5e-5, step=850]\n", - "Epoch 850: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0346, lr=4.47e-5, step=851]\n", - "Epoch 851: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00754, lr=4.44e-5, step=852]\n", - "Epoch 852: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0277, lr=4.41e-5, step=853]\n", - "Epoch 853: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.025, lr=4.38e-5, step=854]\n", - "Epoch 854: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.00659, lr=4.35e-5, step=855]\n", - "Epoch 855: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0479, lr=4.32e-5, step=856]\n", - "Epoch 856: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0363, lr=4.29e-5, step=857]\n", - "Epoch 857: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0267, lr=4.26e-5, step=858]\n", - "Epoch 858: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0184, lr=4.23e-5, step=859]\n", - "Epoch 859: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0263, lr=4.2e-5, step=860]\n", - "Epoch 860: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, 
ema_decay=0.994, loss=0.0299, lr=4.17e-5, step=861]\n", - "Epoch 861: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0265, lr=4.14e-5, step=862]\n", - "Epoch 862: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0205, lr=4.11e-5, step=863]\n", - "Epoch 863: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.0274, lr=4.08e-5, step=864]\n", - "Epoch 864: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0534, lr=4.05e-5, step=865]\n", - "Epoch 865: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0253, lr=4.02e-5, step=866]\n", - "Epoch 866: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0629, lr=3.99e-5, step=867]\n", - "Epoch 867: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0253, lr=3.96e-5, step=868]\n", - "Epoch 868: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0184, lr=3.93e-5, step=869]\n", - "Epoch 869: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00641, lr=3.9e-5, step=870]\n", - "Epoch 870: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0316, lr=3.87e-5, step=871]\n", - "Epoch 871: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0405, lr=3.84e-5, step=872]\n", - "Epoch 872: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.0405, lr=3.81e-5, step=873]\n", - "Epoch 873: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.025, lr=3.78e-5, step=874]\n", - "Epoch 874: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.017, lr=3.75e-5, step=875]\n", - "Epoch 875: 100%|██████████| 1/1 [00:00<00:00, 1.10it/s, ema_decay=0.994, loss=0.00389, lr=3.72e-5, step=876]\n", - "Epoch 876: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.994, loss=0.0265, lr=3.69e-5, step=877]\n", - "Epoch 877: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0269, lr=3.66e-5, step=878]\n", - "Epoch 878: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.063, lr=3.63e-5, step=879]\n", - "Epoch 879: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.018, lr=3.6e-5, step=880]\n", - "Epoch 880: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0197, lr=3.57e-5, step=881]\n", - "Epoch 881: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0345, lr=3.54e-5, step=882]\n", - "Epoch 882: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00762, lr=3.51e-5, step=883]\n", - "Epoch 883: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0589, lr=3.48e-5, step=884]\n", - "Epoch 884: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.0326, lr=3.45e-5, step=885]\n", - "Epoch 885: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.0373, lr=3.42e-5, step=886]\n", - "Epoch 886: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.994, loss=0.0261, lr=3.39e-5, step=887]\n", - "Epoch 887: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0365, lr=3.36e-5, step=888]\n", - "Epoch 888: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0163, lr=3.33e-5, step=889]\n", - "Epoch 889: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00711, lr=3.3e-5, step=890]\n", - "Epoch 890: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0324, lr=3.27e-5, step=891]\n", - "Epoch 891: 100%|██████████| 1/1 [00:00<00:00, 
1.49it/s, ema_decay=0.994, loss=0.0436, lr=3.24e-5, step=892]\n", - "Epoch 892: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0352, lr=3.21e-5, step=893]\n", - "Epoch 893: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.038, lr=3.18e-5, step=894]\n", - "Epoch 894: 100%|██████████| 1/1 [00:00<00:00, 1.19it/s, ema_decay=0.994, loss=0.00791, lr=3.15e-5, step=895]\n", - "Epoch 895: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0266, lr=3.12e-5, step=896]\n", - "Epoch 896: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0284, lr=3.09e-5, step=897]\n", - "Epoch 897: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0471, lr=3.06e-5, step=898]\n", - "Epoch 898: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00819, lr=3.03e-5, step=899]\n", - "Epoch 899: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0178, lr=3e-5, step=900]\n", - "Epoch 900: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0251, lr=2.97e-5, step=901]\n", - "Epoch 901: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.054, lr=2.94e-5, step=902]\n", - "Epoch 902: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.017, lr=2.91e-5, step=903]\n", - "Epoch 903: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0238, lr=2.88e-5, step=904]\n", - "Epoch 904: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0153, lr=2.85e-5, step=905]\n", - "Epoch 905: 100%|██████████| 1/1 [00:00<00:00, 1.07it/s, ema_decay=0.994, loss=0.0329, lr=2.82e-5, step=906]\n", - "Epoch 906: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.994, loss=0.0267, lr=2.79e-5, step=907]\n", - "Epoch 907: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0458, lr=2.76e-5, step=908]\n", - "Epoch 908: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0349, lr=2.73e-5, step=909]\n", - "Epoch 909: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0322, lr=2.7e-5, step=910]\n", - "Epoch 910: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0341, lr=2.67e-5, step=911]\n", - "Epoch 911: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.994, loss=0.0136, lr=2.64e-5, step=912]\n", - "Epoch 912: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.016, lr=2.61e-5, step=913]\n", - "Epoch 913: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0261, lr=2.58e-5, step=914]\n", - "Epoch 914: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0203, lr=2.55e-5, step=915]\n", - "Epoch 915: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.0177, lr=2.52e-5, step=916]\n", - "Epoch 916: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0143, lr=2.49e-5, step=917]\n", - "Epoch 917: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.0563, lr=2.46e-5, step=918]\n", - "Epoch 918: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0257, lr=2.43e-5, step=919]\n", - "Epoch 919: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0143, lr=2.4e-5, step=920]\n", - "Epoch 920: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.0339, lr=2.37e-5, step=921]\n", - "Epoch 921: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.0529, lr=2.34e-5, step=922]\n", - "Epoch 922: 100%|██████████| 1/1 
- [... remainder of old tqdm training logs elided: Epochs 923-999, loss ~0.004-0.065, lr decaying linearly from 2.31e-5 to 0 at step 1000, ema_decay=0.994 ...]
+ [... updated tqdm training logs elided: Epochs 366-930, loss ~0.0027-0.0047, lr decaying linearly from 0.00019 toward 0, ema_decay rising from 0.988 to 0.994 ...]
lr=2.07e-5, step=931]\n", + "Epoch 931: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.994, loss=0.00278, lr=2.04e-5, step=932]\n", + "Epoch 932: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00292, lr=2.01e-5, step=933]\n", + "Epoch 933: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.003, lr=1.98e-5, step=934]\n", + "Epoch 934: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.003, lr=1.95e-5, step=935]\n", + "Epoch 935: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00294, lr=1.92e-5, step=936]\n", + "Epoch 936: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00296, lr=1.89e-5, step=937]\n", + "Epoch 937: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.003, lr=1.86e-5, step=938]\n", + "Epoch 938: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00294, lr=1.83e-5, step=939]\n", + "Epoch 939: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00288, lr=1.8e-5, step=940]\n", + "Epoch 940: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00277, lr=1.77e-5, step=941]\n", + "Epoch 941: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00287, lr=1.74e-5, step=942]\n", + "Epoch 942: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00302, lr=1.71e-5, step=943]\n", + "Epoch 943: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00292, lr=1.68e-5, step=944]\n", + "Epoch 944: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00289, lr=1.65e-5, step=945]\n", + "Epoch 945: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00304, lr=1.62e-5, step=946]\n", + "Epoch 946: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00286, lr=1.59e-5, step=947]\n", + "Epoch 947: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00272, lr=1.56e-5, step=948]\n", + "Epoch 948: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00274, lr=1.53e-5, step=949]\n", + "Epoch 949: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00297, lr=1.5e-5, step=950]\n", + "Epoch 950: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00288, lr=1.47e-5, step=951]\n", + "Epoch 951: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00301, lr=1.44e-5, step=952]\n", + "Epoch 952: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00281, lr=1.41e-5, step=953]\n", + "Epoch 953: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00284, lr=1.38e-5, step=954]\n", + "Epoch 954: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00286, lr=1.35e-5, step=955]\n", + "Epoch 955: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00295, lr=1.32e-5, step=956]\n", + "Epoch 956: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00283, lr=1.29e-5, step=957]\n", + "Epoch 957: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.994, loss=0.00282, lr=1.26e-5, step=958]\n", + "Epoch 958: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.0028, lr=1.23e-5, step=959]\n", + "Epoch 959: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00292, lr=1.2e-5, step=960]\n", + "Epoch 960: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00291, lr=1.17e-5, step=961]\n", + "Epoch 961: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, 
ema_decay=0.994, loss=0.00294, lr=1.14e-5, step=962]\n", + "Epoch 962: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00281, lr=1.11e-5, step=963]\n", + "Epoch 963: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00283, lr=1.08e-5, step=964]\n", + "Epoch 964: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.003, lr=1.05e-5, step=965]\n", + "Epoch 965: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00292, lr=1.02e-5, step=966]\n", + "Epoch 966: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00295, lr=9.9e-6, step=967]\n", + "Epoch 967: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00298, lr=9.6e-6, step=968]\n", + "Epoch 968: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00276, lr=9.3e-6, step=969]\n", + "Epoch 969: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00295, lr=9e-6, step=970]\n", + "Epoch 970: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.003, lr=8.7e-6, step=971]\n", + "Epoch 971: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.00304, lr=8.4e-6, step=972]\n", + "Epoch 972: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00284, lr=8.1e-6, step=973]\n", + "Epoch 973: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.00283, lr=7.8e-6, step=974]\n", + "Epoch 974: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00294, lr=7.5e-6, step=975]\n", + "Epoch 975: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.994, loss=0.00277, lr=7.2e-6, step=976]\n", + "Epoch 976: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00307, lr=6.9e-6, step=977]\n", + "Epoch 977: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00297, lr=6.6e-6, step=978]\n", + "Epoch 978: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00287, lr=6.3e-6, step=979]\n", + "Epoch 979: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00295, lr=6e-6, step=980]\n", + "Epoch 980: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00292, lr=5.7e-6, step=981]\n", + "Epoch 981: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00287, lr=5.4e-6, step=982]\n", + "Epoch 982: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00294, lr=5.1e-6, step=983]\n", + "Epoch 983: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.00287, lr=4.8e-6, step=984]\n", + "Epoch 984: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.00293, lr=4.5e-6, step=985]\n", + "Epoch 985: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.00279, lr=4.2e-6, step=986]\n", + "Epoch 986: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00295, lr=3.9e-6, step=987]\n", + "Epoch 987: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.994, loss=0.00288, lr=3.6e-6, step=988]\n", + "Epoch 988: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.994, loss=0.00288, lr=3.3e-6, step=989]\n", + "Epoch 989: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00284, lr=3e-6, step=990]\n", + "Epoch 990: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00303, lr=2.7e-6, step=991]\n", + "Epoch 991: 100%|██████████| 1/1 [00:00<00:00, 1.10it/s, ema_decay=0.994, loss=0.00277, lr=2.4e-6, step=992]\n", + "Epoch 992: 100%|██████████| 1/1 [00:00<00:00, 
+       "Epoch 998: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00294, lr=3e-7, step=999]\n",
+       "Epoch 999: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00288, lr=0, step=1000]\n"
       ]
      },
      {
       "data": {
        "application/vnd.jupyter.widget-view+json": {
-        "model_id": "0a5a9fc78e7e40ee89ea570462bcf557",
+        "model_id": "347efff4a4d94f589c12b3c17dea3c13",
         "version_major": 2,
         "version_minor": 0
        },
@@ -5251,10 +2179,12 @@
     }
    ],
    "source": [
+    "teacher = UNet2DModel.from_pretrained(\"bglick13/minnie-diffusion\")\n",
+    "N = 1000\n",
     "distilled_images = []\n",
-    "for distill_step in range(5):\n",
+    "for distill_step in range(2):\n",
     "    print(f\"Distill step {distill_step} from {N} -> {N // 2}\")\n",
-    "    teacher, distilled_ema, distill_accelrator = utils.distill(teacher, N, train_image, training_config, epochs=1000, batch_size=64)\n",
+    "    teacher, distilled_ema, distill_accelrator = utils.distill(teacher, N, train_image, training_config, epochs=1000, batch_size=64, gamma=0)\n",
     "    N = N // 2\n",
     "    new_scheduler = DDIMScheduler(num_train_timesteps=N)\n",
     "    pipeline = DDIMPipeline(\n",
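The hunk above drives progressive distillation: each round trains a student against the current teacher, halves the step budget N, and wraps the result in a DDIMPipeline with a DDIMScheduler built for the reduced step count. utils.distill is a repo-local helper whose body is not shown in this patch; the minimal sketch below illustrates the objective it presumably optimizes, assuming the Salimans & Ho (2022) recipe in which one student DDIM step is regressed onto two consecutive teacher steps. All names here (ddim_step, distill_loss) are illustrative, and the call convention model(x, t).sample follows diffusers' UNet2DModel.

import torch
import torch.nn.functional as F

def ddim_step(alpha_next, alpha_t, x_t, eps):
    # Deterministic DDIM update (eta=0): recover x0 from the noise prediction,
    # then re-diffuse it to the next (less noisy) level alpha_next.
    x0 = (x_t - (1 - alpha_t).sqrt() * eps) / alpha_t.sqrt()
    return alpha_next.sqrt() * x0 + (1 - alpha_next).sqrt() * eps

def distill_loss(teacher, student, x0, alphas_cumprod, t):
    # One student step from timestep 2t is regressed onto two teacher steps
    # (2t -> 2t-1 -> 2t-2), so the student needs only half as many steps.
    a_t = alphas_cumprod[2 * t]
    a_mid = alphas_cumprod[2 * t - 1]
    a_next = alphas_cumprod[2 * t - 2]
    x_t = a_t.sqrt() * x0 + (1 - a_t).sqrt() * torch.randn_like(x0)
    with torch.no_grad():  # the teacher provides a fixed regression target
        x_mid = ddim_step(a_mid, a_t, x_t, teacher(x_t, 2 * t).sample)
        target = ddim_step(a_next, a_mid, x_mid, teacher(x_mid, 2 * t - 1).sample)
    pred = ddim_step(a_next, a_t, x_t, student(x_t, 2 * t).sample)
    return F.mse_loss(pred, target)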
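The hyperparameter columns in the training log above also decode cleanly. The lr values are consistent with a linear decay to zero over the 1000 steps (3e-7 per step, i.e. a base rate of 3e-4), and ema_decay is consistent with a power-law warmup of the form 1 - (1 + step)^(-3/4). Both formulas are inferred from the logged numbers, not read out of utils.distill:

def lr_at(step, base_lr=3e-4, total_steps=1000):
    # Linear decay to zero over the run.
    return base_lr * (1 - step / total_steps)

def ema_decay_at(step, power=0.75):
    # Power-law EMA warmup: approaches 1.0 as training progresses.
    return 1 - (1 + step) ** -power

print(f"{lr_at(809):.3g}")         # 5.73e-05 -> matches "lr=5.73e-5, step=809"
print(f"{ema_decay_at(810):.3f}")  # 0.993    -> matches the early epochs above
print(f"{ema_decay_at(900):.3f}")  # 0.994    -> matches the later epochs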
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAVfUlEQVR4nH16XZfjRo7lvUAEKSmzsspltz3tnt152bP/aZ/2cf/2TG+3Pe3PclWmJDICuPsQpJRl9yxPnkwlKVEI4OLiAkH+7//1f96+m06nOddmXlpblAJ0XRY3mw8HkjR3t97DwMPDI4Gvvv328ektwYyY5qm1S18jhX98/7ef//aDCt5/+f7p3fvpcDo8Pv7y88/PP/z03//n/1jXq5NPX75vy/X88bxcL9NcSqnXl3OC13VZLmd3f3g4ff3nvxjtejm/PD8v1yvI1lpxp7uZzacDIm2ez+dfispS5yMMQdIQEs0AJeHFZUzJkBm5tLWWarkolNSyNlFOQ8+X5wvNajn2NcJSxOX64m3CNPF8+cffvxPX6Ti13i59OayxtrzG2nI1GYBGeS3o1qJf21Wmp1id0yp1Mt1S2dRBGi0hy1TmnFQvNs/TcT44zYwESLibkUarpVR3SMrxk25evJBuVgAU91oqoED06Ou6XpfVzCleljW7qtfn8/OvH38BSdg8TdHay8undb1GdpnB3Nx8qsWLmZkXApFKURAILw7zFBKwUuo0mzsAQb23iLTjodLZomdG661HSNkzUmlmgiJDylSk0twIAAIS6tWdRFsu7jb51Nu1ryuAde2X8yWWRdH+8f3fl8t5nkusCylDrtfnaIsZ5mlyNyidjN76enVDqV5rUW9tOfe2SIlMRRCYaj3MByeVSZIQyXI8nMxMEmkRAdCttNYBujsJkqRF75mAEJECJUKklcjecp0Oh8rjp4/P67JO1ZRYW/v42yfxxx9//CElt+l6uVp1mvcWVphKEQCSDOlyOb+cX0oppc7Gcnk592ggBLbee0ZmEualCFBkKQWkiFJrTUkSCSndaMaUAIGSYKSRmQlJUvQGqxzLIkUvZZp8Isqnj596rIfp4AaQ5/N1aT9dz6tXW5br+fp8tIdaJ1o387Zm7y2iOY1WYm3Ru3sxOpCX86eU5sNJkZAICMrMzJQSpHkxkFKB0NbWWxjYW7ibJGUCVEJKgAIiE4KUmZwmNzOALRoEuvdAxOXXX39tvfVWIzqB69q0NsCQ/PTb88PDp1onr5NBBgJURCgTdM8NsZltXeUmZCmTm2cECaMh1da11Gtm1Gl2c6VgLJkZEZIkZKo4SAAwM5IjAgSU4khy82ma3YuZZ+YIVkqXl/Ovv/5WPdbWemstYl2jR5DF6Mt1fXl5eXg41VTvDUBkjO9NqEePCC+VQO+NLKUWL5VmG3soM6JHW9sqsJQKIDMAGAkCRhuGDysBkNgSwEiSoNEAQKIxowOC0NVTYWbL+WVZlhEaIyC1tS1L67333tfeL9flcrlmdADKVApghnrvra2SzCwie2+AvFTSokcKEqKHJJIQaqm1TlJm7yCLu0eYuVGwYStBElv6EgABM5ZSBnJqrRAykkCPrgQmfPzwW6p7KaWyhFsEiBG73nR9uX4sv9FAs8eHxxRCQVISQKMHIiPG8kg3WGtrZJCEFL1LA9rpJqWitx5BsNCNZjY8Z0ajmbm7IDODkMqBqFIrzKCc5oO7ZzbzSnr2/uuHn7777vve2vkZOWdkxoivGUFIvffr8yUjCstUSgqtL8WdZhAoRF/XZSml1GkimJGtrRZ2ODwkldFBQIreCLa2rOuiBMgiQRIIYpQymZmbDbtHBCC521SrwAA4ssPkXpD54ddf/v3f/+Pjpw9UvpzXZe0DdW4Dn5QUqbWFeV8uL58+fqjzKbLXUorbsvbqxcDem7sTXNcVpDK1oQAAxqWMSIu2LhHdrGJw/7B/q8PaivFAKmkjcKWUUosZR/ak5GWi+eX5/Ne//vWXX35CYC6F4KijoLm7u42qXou7W0qX8+XDh9+MKO7upZQSvafCitVaa60EI5qU7k66pMjIDJKDbyJ6a2umzJxgiYhhNQEDxlLcPSIBFLfWAKCUQpJmhYWjNMiuz+f/+x9//eXXnyOjGEvxRBeSknJIAQkAMdViZIs8Xxun1YsXn+b5CKY+qUUrXo6nk1uJDBrd3b2CNBtOBWnmhtzTkma0UN+q73DtYCRAxYsXJ1lqNTMvpU6TICt+OB7cS4TWy/KP7//zhx9/kERhKzGQBAGZQ30oJTdOpbgbwZ79er1++PBhaauZKaFEX7uRXqceXcoyVZjlbpCZuRdzM9tyJgev9tZ7K6msXswoxWD8yOSW1TD3keGl1nVdzNxLsVLV8duHD99///26LgYKTGV1phgth1oizMzceDqUudp5iYSy5/KyfPe3757evRE4l4mJiJ6Zgi6Xc53q7HNb1t6bFZ/no5FeyrDe96QEOUpDycwRssj0UoTMzFpqrSMZnO5u5maSQPk8PZ4eIfvur+eXT89KEBDlzsNx0tJaJA3V/DBPtXohp2prjx7diGLeQ5eXdV0/XJ7Xx6eHOhdIhy5QvXWzEqYeLXqvpDLLPI8aJWm4BoCbKUlj6RESRrbXUnu0jPTZvJTMGCqbZqOqWfFSJoG9tU/PH6FuQ0Jl0rHrI5Ksk799e5yrQ2mAznGqzGKXFgBBi56//Pzh1w+fnt4+PJyOznI8Hk+nk7mn0r3M81zrPOigmqdydFbDnuoFYs9WRgIbTTSfPJfMbO5GLz0Awop58fHbYet1+eE/v/vw84eff/4pIp2WgEgBQ5GMPBrab7tC1MmM03mNXDIlIiPTrH5xenj0qS3xopdSeDwcZRZrO51O8+Eg4XI9SzHNh+EaL4WgIKMRtGQxbIm7ka4ZWeCVboY091Kru5MU2Hter8+Xy/XDhw9tXZ02YmpGKJfWekiAxJTWtQ+tn8B1ieh5XQOy6tZ6i+gPc33/9FBoy/Uiyc1LPURGsE3Ho/u0Lku06BbTTJpJisjMiN4Jjh6oFLdNzw2+O8x0n+Y5lYkECVqE1rWtSyfiel1ezi+tNYADjzYYLeJ8aREJEI4OtbW512meY41Pz0vrcvf5OLuV5xdhbQ6QjOhe+eZP7x6//KrW4/nyEtnm08npAmpfzc3rNAScm0uZGQSLT6DK6fEREoZorLXMh1qrm1+vF4AR6i17j4hY14bE5bqsS4uegkIy0syLXMzIiJTTAItQC02Rred1jXWBaLV4770pCJr5NM3VLNWnN49f/Pkvpzdvq0/TYXqp5enprdFPp5jn2no7PZyKV0iDG0f3YnClytv3X14vL31tEHpEXNdMAP16uRJy07q2iMjI3nprfbleo8doMlIpkbJa6lTqtV3jek3mkPsRQtqy9Ot1LZXmtZhFX2n+8HB8fDi8O5y8wObp3bd/fnh8W6d5ng4wrWuDTIS5T/NRgFtxL5kd20FtdMRSfMo8X5eVQms9pTrNZlzX1c2MtiwLpEz11tdl6b1rK68YPUQARpvrNGlerbfoK5KwjlqPB6OTfkq0rr520udpOs3zXPzheDq8OR3fvX368k80AxKQcrv5KOJDiowzpEEChlKG0QCWT59ePn58Xs6X4h49UwmR5Louo2q33o2U1KO36JkJgYA5TUyp9x6RAhx+nB6srW
tfQ9HX3lo/HvnF+4d377+m/OeffvnPf/y4LKtBONa37/707s/fnk6PtUyRnUJf2vnlpfUmpVsZ/QrJjAwGMBpdbqxPo6F8+vRpuVwiw2ggCCgjUq11kkZkJojMzAztnbGwtzuyVKRiWa9Tmeb58HA4rtnOy6WYGWyepvf/8uev//Lf5sPp26V/8/fvfv3l5zJNx+Px3ZdfHY8PcBIwMlOtt2VdRPRIMGkU4KWY265LtfdbRiMTZbmcI4JARB99pVr0yNbaEBGZGZBSo2mCEBGppDD0Go1K9b7JgaeHN4/TqRz84fHx22+/eXh8PD29M5sz4fP89b/+5f03X5s7CKZFBCSWQjODHHp6eiIxTzMoEihO4+g2N/gTEIevQZbW2mbe3vQDiIiINDMzjl5+fGjzAqEcim1XPWAKGcrrNZXFS1c8vHl899WfHh7fGIsyM9JF81LMIEqZ6JFdCQC1VHMD4fU0AiJoU522G89dSGyVC5TKQJU2qZ25d27bSEJDK0iS2x19u6a92U8zjmnG9dqEjoiX40tbmo5g3ZTMoD8oSXNawin23vranM5iKRkpIBV45XJ+Zv/4T4IIltG4jGNbAZCZAJgkYWBKozUeaeLaBhakIKUyczRDQ0oTIEt9evt0PByABHNvjyMjU+mlYChEczOs6wIKm+uAQSMb6seIShxi4fUhCChKDZrfuuYBjpvuk8bCx+0JkjI323plZSLH7InU5hkmNNXy7ssvprkOeNpoQ3MbVFpaAlsfQoIcI3FI2HhUQ/prs3XwxhYIgrc/ZQiMm7W3NJF2qTAWu0dvNJ8S1Lc7Z246VymN8RdQzefDofUAaSn61upEhiADmJIyInpvvXeIt6ntgOdmsbT7RQPAe4xBEQNCm3G70znwtRm1wQKvzpA3TA8SYowp5ygyNGUeDtPpdCxWZDd/8BWCd8dmYsRnGwBsyNgN2s3flrAPGXb8QCo7re+fkzSIRwlRzIQNYZwpsx1cOxy3UQA1yMTcS6mZ8ebt43w8WikbY2SOtnXw8ja6hGh0uJdCGvEKvMNv41u2zlpblg0AbDkxkli7129x2BJHKdkQDvvIa4uEbr/HN8Fo5pzmejxMhP70zft5mkkIyi2gGpOszJEVo08y7iyZmTefvypZW2qM/zffbl2gNHIAW2w0SsRrUI3EHmpkj9LrBFdKAGv1wzwdqh0PZS5W5sNXX39dvIxxMGiSFEkyIyI6x+hGxs0PoxXP3dMGs8/MuKGIxMbF26kycEAjZGZCcm8wsU/zQEuCyC2IIw63ularn+b69Hg4Hbw4zezxy29OD0+Ceu8AaLfA6uavAZfM3b17ZACYkZuXdmreomH3nNiTuQxsGSHD2LbQEExE3tIjYIZkYqgrIwSakDTDYa5Pj/Pbx6k4e08/Ph7ffkkrPXpGgLQR/D2BOSaYpDRGvFuCfpaN9xhrT+nNp/c8hiSUcVsjZVRykMYtYEPsDPYwWGTI9pK1hZSHqT4e57mWHhn0N2++OMyPBHJgFchxI9zyflPIW+HZ0/K2gBvDbO8Z6N888Hkt2yB0N3Pj+4ENs5EXiJSTtbj6naTHnZw8HMphLiBbojy+Pb390r1ou6VJuWGG+xqGZNzZYGP17au1Q//O3p/Ze/fu9qcMctjGWLbl8+DSIbCjBzKL2TxNkbmNN8jxRppVN8Jal82Ht198dZiPIKTQHfS3uqPbsdccbgy4X3qFkFe28p+9BgSUjGCKEkEf1IZ90DBIPlUmPj09HA+HyH5d1sH5KfVQ8THummwuD09fPDy+MzPo1jbc+UM33GXejeBO8puO2BY9ILV/8Pb2PaHHzBuiWDKz0Kgx2d0SbL81ABXz4+Hw7os3oPmzaVGEIPTIjKx1ng/z8d3beng8HB7cPLNvNmTusb7rgdcmERBy4/cb3QiZOcTiHpxNxuzMNKrB1ncWCe5Wi5dSxreOIjZ2u81gZseHRyc/fnxel56pcfPIpPF4qPM8HU+PdX6kWWbHlh6bUt1Cvcdi38G6nX1F6Tcv45bNNx39B1ztJ0opPh+mN188Pbx5U6xkxLIs67KMPaU6TdN8mKbTbz/++Px86V2ZUEYoW8/jXKdiEAb6MiMzbcwh78pEnxv3qkKNZeJ3S+AdbVs93hXFHfm8qaXy1Z+/efv26end+8Ph4OYAeo++LiDNvdZSazWV6/OzJBgzMyNbRArT5GWbhXL0k3cLMu4acRfk2HP3BuTNmbxh7fMslV65/5ZOA3QDKCj/8q//djjMxSqkFEspc52n+WhGo5PyYkhGj9569MhIkqAV52Gu5E3r50YzA4QZGHR8s/cuaW5ZsMMf9yXslr/m/rvl3EavdwIup9OjIqN3EpYKsBQaOTbnEqmeucZyfhmMbkZ3S6BWn6spQ1sZSroNGTt0wYiNbhbditPrYxi158KtLL8eQNzD+PrVjqOSPTM7oeIFQEYmw8xhIweTxlgXMg/HCjdrMjMvmqq7YRi6tV1AIu91dN943mlfe1P4mWDkPZNvUOFNLt3XJW7vvMVBAFCydyHuW8S7hEMONSDS6Jzn6eE4Gxu0kpyqT8WZCSa4idJUDgYkx0jk3gN95vpb2R327lplz4LNWO1F//9DQQCKlKNNvDUN+xfvIjFSkk+1VkIOuUDzCkVkl1sIKYw8zojRnWCv2VsTeHfk3p/cLeErvfk7oOCWxK8K9KsrQtHN6OEW2vYCNnpnYRhcSRyPU/GxIrucl8wOlL629XqBGNmFPBwfWHkvqGOv+27XTUbhfnK78k98/Xqdvz87IISdxggQNrbt7z7iWIp7mUCYW6mVERkDpsXg62U5//ZbCj2aF3crY78fe1Hav+4WAX5mw14KNoJ5bShfGfdfrKPoVWWnbfutwtCLo6HKFBPau54h4wXQSjEy2tLXhaWalzLNdB/syB0buler190U/2jNPRDC3jX97urrfwSwvLafW4XbJqFbQzbGoJE015CoxVsMqqO7mcsLptPsdSp1cndsw8ub3/ay+pmI+/zMnsj/5bHT2qtqAGCMFvdg3URjZoxmA0gpo/fleo7eQRnS6mF6mN7UOs3zNE1erMyHUidij+AI/ee657UH94zkLR5/MP8zvt/Xv115Zf/ekd3cpZs0z42+lYq2rtczUjK03ovH9FDr6TBNcynVjLRi7kqNXVlsDxftXdJePl/Rx249brOgP6i6z5exy4fdzP3dZQwh8Vq34qa7de9Vle4Fyt6DkbMVljqeKBzzQTNLiPuGCm4DGGBDFHgrtfg9tPeD//TstszXH7u9KEPJ4Gan9sYJYyYS2CbWMneNXc4U6YT13jUs83CvY93kUI93ctZdJXzmXX3m9dfle+Sx7kLpVpNvYmhfQ+EdO3lTsbvoGs+GDZmsbdcgE4SV4laipyKTuT+GsT+mNuy/z4X/CTIw7OIrn0q72bxd/z1P/YGZSuYuTbcHwO4POQ0OUkqRCsXao/VYs6+9XRfORmA8VcipjgmAboX8phRejRhu1t9Fz51S72n+WTMgAPlZCXilVEUUMce0a/PXTqQb8Alzs1K81r6uCdGY6/L84
cd6ePD5QPNaq8+zlbLL5L03GPrMbrb9IRD38fctNwb7a1v6nk5jhHD7FDhWYMws7DlygLb7YOxZRI5wFa9lKsfDm1xaXxvproxPv7Gt8/T14fh4fHxTp2k8InljGOVdee6j11v/jntD9ftj28LBhsXh1wFNbftbGpuLGEWpsEykRIK+8SaUIy6gubNUeJTDqVwudW2eYKYc8+mLh6ev59ObMleQiRzySaOMM7eEBjZBLW3C6h6B34ki7emz5dJ40GH4GyRgu/9dJFPXXv4f5OYDPTDfgv8AAAAASUVORK5CYII=", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAOw0lEQVR4nHVazZLcPHLMLIA9kh3rCD+pT47wY/uwu/40DWT6UAUQbGmpUA+bTQL1m5UFkP/9X//zn3/r7fUfjglAGgQBTJlk790AAdCSI6JfV0T/8fNn6122bYLyoCz7//7xz++/fiHw9ePr+vrR2xURv/76S3P++PefBiBH74A0JZkEDNu2p2QpWutX/3p9gdSY7/GWJgzDJEmCbNEJtKv/9ff/7VfIzdY/KRqmzCCAJpHkNADCtiCzNWuIbQYxumt6jTkimg2839S3NedfI+y4bOj7H3+PFhwvS9LEHLY1JwCTtA0CxhjWnCTf15gGacvjbcmpJchgkJPDU/P1utD619/+DS3GGxENsOFAABYU0YyQDcCasolLBqN3B02CBOVUMKwp0+jTYTWKIWvqe6pHFxoYsjwB0w4ARBgGYNtoYNgWYzqFINxgAJCd9jdoAzTHW5z9egUNBdiUQ0XAAInWifQJMGFI4ARBgHnCAGkNhklMSZigYdmGhia/39/T49Ve5DQMyhAI0zkAABjWBAUIBClg2jTskCHbgBmNAVswZE0QwR6tecoWBGdEAJaQiruMoylZlk0DYUMGCdimyYChOS2RIKyp8R62v7//skRgSgRAZsQDGSQpPiQrJyVzOpDlGTkDCEB+AiQIAVQPxrQME7RR09/S5/i2ZcmSCdAwIIMAQDMjec5hz6icT33meL9JzDnmeLfo3GMahkUtBWQJAAOpCio+tRUwTcIwUzYCQF8/Yqvp8gIUpjdILFuZ6XgTsksFYM453kNTbHQqKKVgERzvOfpkD5IVNNsyuIEIzPRY5yC8brUsKggAcQ/S/TgEwI6lklzxnGZAjbseNzMWBMScc4xhTTJse1rW1JANxBxjjHcwyED6M6cgdpzg1snLvjmRJckiGQoQXA4k3NPkPO5fLl531alJwsddmQ5ISLDmlCbL9TahOeec+cSI0d7viGjRSaaLDdYUh4tzjhUk+dtDGO7cAAh0ZkRkjahnvSxc7sMuIpVBsVRYNpOlASuDa2tYCCnNMd/xJgMXeus1zrr11ghL+FUbll+WGgRNG5YQNNBTdhMkAR6O2HM4z8jyU8R2Bg1rSnOM97cSym682FEBTI33e+lPpiCF64bSpD5M41MM76gQRAUoKRg0etqZy/cfcbQfJoiUOnDGp6Txfs/xHmNYmktVOjEzY5wANOcERrAFo7XUIRhe2LfFkByRz+8RlrtsANJcUce+8v3WtWyXicWKhAygxYtQSGxozPH+nnNIylxIO5hAkN6hRsgTijFGxKs1MoJB0siSzoj8+vuRsUje3rUlB2H07awMIdgorGYWh9uQyBocqcy0Nec7pZ8jZfaSF3c8Z35l4HqOMVpcK6m26ZKlcQm8QKXMX/J4DY6VM2a3DBsVfCtydkZnYpBYbCRasIrlHO/vmWTLO2ozHzOu001EI0HJSDaXkIrIsGQBYpX1hUAZvwK4/x0eqfBwwigOQFj5cxugVEitF2ZJnu8x3m9NeZs7QJW/t+G59MfQFCDPMd+/viH3frXoVX9IAxmBIDmnAZLBsG8hzgBLl/b9fQl7Rzq5oqZcjAi2iEhKpTnntJMLrJgnTAOOYIsWLQM95hgKhymEhfF+a87v9r761aIBiAiQ1SFUcQGQnG8DN+5ELPNkEldBBBfuBe7g5yq9ZctkY7IlWJtuFUJVvoGM1lvrLdgIZD2fADQlaHrOgfeYbbbeg2y9996DBAM0yIjY+RF3siyyQ0dEBHupedTuwi/kUxUGeWZjzin9sjTmWxJxkKhd99Y5yQjCYES0bMiWzlIwIohpQWS4g9GyqLXWIsKGNAEzS88qS1l8SDKiRwVfRr6ryDzgrOq6bKYdPQ1Jc5Eis5o2r4pDy5JUDMdjbloJAkoFWrSIdHwEgxGRZSExKixlggeKKGQ9uGHO6NFCQ6uQp7ey0mpx08VV5MSSVSa8wKIwqmifAVBV/AEU15eUhCoCmpv/mEC06L233iOaNGVkTSj8hQuGkqm7alNEI9Fba5Z25USiG+hio/skKybtqpHLBCsy8+uqY9attmaWTrjYWVqF1YEE2tX769WvHow5QSNa4+I2siIi688iBjYRERHo0S6Oke1JTpfSaQFZOazsDVtgrHxZRTAiCNhn7FmGIC9OysAK1Ii4Oq/eW4u42uvrq189IoJhyzMSO8AzA9eESUIWVemJwkkE5IxsruxZxbEQNYtUwvEummX8VWu4qa6c7mS0iAjJc8wsTRGMHq/r6q9Xmp8RiY67f6khlwIr7p04vZOzv8eYc2YPmt7m7YolefDW5cztVdvtLKXMLJRlIHvQ1nrv19Velt6/3r9+/ZoaIKO3/vW6vn5Eay1aSSnls39kRJVwPFg+0OcYRe7S6d5NmEAAizrzFnp/W5U/+x2ACAbACI85g8yFsOt6XV9fBL7e8/Xr1/v9DSJau16vFg2LKQGVbGvkbajtAZStjqNr4YoXCfA6YCA2I6qxNs+qhoCL9yR3pVsEwQZfr+vrx89+vVrvwQa4Xfwi2tWLFAbvvMrgMYJxs+ia9eijVy2+FVjX66PgZjVIuVBRuVd9yB5sE87du3liuii7XvHV+tVaJwithYNFm6vNUmZVjY21VHRLmd45NECR9dKwc/ef5PLOQVs3ReSKIe74Wd+88C17JivzB/s2ZEbdIiVC2lo5LyDOfmox6GSQtyNKLK4SA3cGIax2xbcDaqBCGGI79iDxVVgW31z9BoxorfUeEaXbzrxtGW6vbyGdAb5Nt1VetnCptfmy0XO1cYla5j9ceHC4xQKPUTf1Xq5LomI3srWOJ3LZPtPz2fcuUrW7mttMh3H2xTVmP8eqalmewyob3JY/m6hKKR/iHbBKcucoltHqqYcO5XE/8e0W9FPRTSsrdWNffgD853FAs5933EG1hLQBtN5b9LsOldn+MDRqgGd0rqH8mBuPUwBgr2brTtcHuqxndhWoGHQhyiKGPExAtoh+dSahXwD12VBtybEap4+w/5z9TOi1cAT0O0Pu4SsW6vKaq0a4F6HK+bVatsKGZLR+Xa9CDT7MeZpg85M16IeCieMPoF6t2n1jf1j9ltVb3C39edNSsPqIiKQRQTqitdfXdV2rkfb58GYe5CGKfSTLkS0PN29UeWRK38//MTx3N7ADYGWasUpR3LXJDEbr1+ur5e5ITn2vl
yx8YBXJY5rPs1vlmq6QmF60YXvgiJIjiTbm5p7OYoFcAJykqYTONSmZwdZ7JIBuqx6uXIWUxzx+iP0nMx5/7e0kw3Dsu4izzvIEkD+gXvE9VZuzqEBEY7Qsi8urn8hZ+b4j5Tfpz5L0VOMR7al7bEJ/IM2Ge94zFmvIGSUt8TcDhEEy+lJg9SQ3vpyBXp/GiZ1H8fy4cFx/KMe1wZHfav+u2sZalFoqxR3Jq8mUdK94gdFatFb58Jz8Pr+pwmrRH/fc0fY7rJw4vOts11yLaffPAWhHRfLHFEvVHLviR2KEkQjUUgFG0TLWrsvz2AGwMd3HJTwzxh8l9vfR3DUnNqUlyQABBWsfOGG9t95sU6ymZycAkKK3djEiGMQfQb3m25a71Tm9wMfN99Xj/IReAD3XD4JRi2EEDIWQaxuNEa21TtKaFpzbrVJua0Vky9si2nb+IpcfZn/0sk+6+MDPj7g6StHqDUpv2OjRGu3gdV0NYO2naRjIZjWit2i5gZfi53sOkmqFfx3HnL8BiW/5jxA3wYfgH78X4/WD0z791H/+/Amo8XVdnbkwJknTQJSALffeB97ZL6eKtsnGvWLDfx2yPj6PwnSizQ1QDzGP/94bpNtjJti/fvyUR7hHa7nek7tWtnN5lQxPL2TX2g1YwFTNDp9SHv5f+XDkpT/ODpH3jVnvjmqxBH/ej956w1jbbLoDLHtF0LfVV5wSjMz2Ld5dp0/2WJl2F67fWpht/6en+HHT+f0G02KjS4BzvlvtvZdLRNBiRMoT3KRi0UXsP9W/Pi37R4lO0e8q99vdOdzRH+8C2esiDzOtB1cAgkREtBZWBAP1poFLR9tyLsAgN6vIw6jr/I4lFxvyIetux9ZiQV6rov6saqdBOlmvUBy/Yk+4qWuQDCZoUiAsr71OWVKsOh8IxjHhEefF3NZCiG+L3jNiE4ZPUFobeZs5k1gb3VXHdognsbkfJxNtIteyAW/5Jc05ORwb+Nr2cKbig+scYHtIfrC9E4f2o+cu36MfqD0yLrK2iRc+DqYOFqNR+4UFSJhjGiPCld0RvKMAm97saFnxcZSIe5oy32ObdVvzUQgraiK9c1T2T+E/yidPw4AAVlkQjHtP8n7iFDYHXIsxp5AL4x96etluqf37qkA/zj/WDVdSohrfG2JyMyga6Vw6r6Oo6AaoNDx/d+i+xN1n3SdYj+JZFv5UIm8FVg+yofHWANkASN7vrkX0Ws+IFoyI3iOyKudHZRXq9RluUxzc4RHguJf9dzCnQw+l8PwLoJZVSB/iHli6C40tQbWNRyB6Y8tmOIVubI3g2knYgXRAd3UNRci2SVevTW/jP7LwEHtzouPHXm3MWip54vL6eyChZTTExqV8kzPa4h3H8t0jlbEITSXKqj8Hdvq3EOfT6GeQLNmqEh/SrrdZ71Uc1KpzlSxFvfJ5G6osc7h7V+STCz1fmlmLg7dVbyjZ/inQ93HDk3B0CKtGu96Zw6mkAVTwaL+4yMPYJUARQax02ravBaDFlAyQCylLNm6s3eb4TNql7JnVJLInlkA7d1R3sp0xdQs/PacMcky3JNuIxpKwZF0vE4N3eTojVzZZWwhl08faM/mnnm7h1Fqorz/dVL1dTANa/tLOCtq0CMHTHvbU1KAb1NDZOrGw82O6ezdrg3BtQrLE28h5vAq6JP/MB+dt5X6z9mY6na8rtIgJ697pqMwgghQEypj5n5YdMCM60YOtNmqO2gHvonRET73piLWW46MCPOraR/6fkVW2n0RrRMf1Ixp+vWdDO5ZiK4lZHElWUMQbRMBmNPav+PqB6+XWVLu8j9mOqCgkoI9e81+sxt1B/uhRf79JDvz17v8PEUkXOeHoISIAAAAASUVORK5CYII=", "text/plain": [ "" ] @@ -5318,58 +2248,7 @@ }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAdUklEQVR4nC2ayZJlS3aW/9W4771PFxEZkTdvV42qZBJIaCLMMJjwBIDxxAwZAoYhBKhU0q26XbbRnHN2474aBlmP4GZu//r+hv79v/vPX31z+9u/+fX50/PLeb57OM0txZfW7cg7nJhj+OLwarFoapTjfp/zpw/f/OKvp92pHE+P7e3p4Zdx/ti2fPfpD+++++Ht9z/tTT7e//wf//V/Mrr/Ia+3l/Pyu5fy9a0XOPQV7vElvfzf7+TV3XhXe9s2HfbX9scf/jlIR1pPxy9ufnt/N9zZ0/rSL0ZuKdrXutmnlx8/7E6/ufn6OJCp/O//+l/08K2ebkt7fmltvr/dxeU6iSRxa1vsSu0FYrNfwaEtPNZrF4dgSMfiV29/PPP4ePnws2A40fT9y0uum9y+OTzZj48/3wo/iP39f/9/cuR/++1fPF9eLvN5vCFfN9t1au/78+6wn+zDT3Q8Dhzz8/vH7NM46PJKSrbSaTXbGlGuW+sswfuHVGWbmxerb77+l3z76m6/O7GAmaQojcxE8KzDMNRhHKehqBRtW9u8k0gZaxn3ohrC4xH7h83nzS3WQq3FFn2o07a+JMXycTtN8vbT8++evze46DiOA9p2nj89Pn4IkNSdu0NrHXaqu3TGUKY6ejABGZnMMirDt96ufZYbno6HaRzTclmW5puNrIeh7PaleTd39TzPvVYtpbDsiwoRCCxgZgqn/fGkRbvBe69lEmMZX/eYk8suyvv+vD5uQuN1eb6DnOeX37/9w/d//Jn6eWROvsgApt6WZwcUQHM51SJCktv8tNm2302xeZ+vvW3bet08YJxBRJyepUkdxtZXYhJiFa6laAY3W3vPQqVnpPtQ9gQE92m/a63Xca9DRdNBNQHAk70/9/Er92QZ+OVpGXdjGcbLd1fLdSzaV38Gf/j9x5z15XnZyT3TdHm8cJl03Jtnu8yWnQ7UFhuCe/fHTx9ezi/T+IDKVcpyXba1a1U3X627bcv16ne3x9tbe/EMtDWHyoftHcvFmEtrWx1KN5924zjsJdjXXuQw7A4M6nNDJonAbJ1X6zm+PjCRsBYexjqNumOvz58+epIUlMJeO+j6eH65vLxc2xaxPb9/kqFIqYOW21d3BUqe2vJ6Ofdt27alKLelRQSpLNe5zbOZWTglIsBFwDLWCgC8jvtBhyHpaz58tc8IAhOz9w6wFnZyLmxtmcZTqkqVolW9kJYUUdFaCswMzbYPUiXC1+3x+x9+6H3xzcmzb81Nt20B247rz4+Pz1gCre72u2lPqihi29aX+en8uCwvSIhKizVhvXVrq0VXLQIiBhFVnWy18/Nl3VaY7A6HymIH4hrq5sHo8xoZlBDiTBZUnqASaTHUPZmecamj1FKLqLBAKjw9ahAs+XK9np/ewjC3rffmq3v1x8en5ujdaKZ3f/h4+fmDrdvj/O758ZNWzoh1W7Z1QYR7ipTwDMO2LFxGqdNYjwlEOjGn9R5t3c6R8MN+KHXpbd0WvrQWFEICYqbii4PZPFRUcmzhmNs0PQQDy2ZL31oXKmAIZ2xoRJQ8DseXp8cuOyadClFkrWnPvlqH99X6TGsdbn56fFqWyzR+UVRzxebu3mWQdd28NxJ084SY+v44Kku3q7sTJCPcLNy31o/Hw0M9Ne9tne+PO21mp92OyFgoto0OFcmRNtTSeleCvqo9H7PFNN0FiybVw277aZ3eDJ02X53D1r68e/+jZgwj3RymdH/ureNShClJ1J7ev+T6dzcPd9t1/ou/OmVLVqCFSqEi8+XqMLHSwzyXsY6+tBbR2qrjRMjreW7r1prLMBJx823bFvdFd19qG12YEswqS1+ncSROpUKM5GTh3mDa+ChDOSQB8C9O9/22NduU+XjaPX18PH/8/cflOVq/XrBOdVBF68rSPT2yzZ1EPz5dAsYNTw+fnteXV8dDRLJom1uE+batCI9gS3Lemi2+1jreTl+c+9PaL9PdUAbO1ba8+KVHRSZ3Kvyrb/82AAzM4IBSZF82UpmXNSOIhMjNjM2rEKKl5NLO5kkpIky+v1zn3/3dd2//6SVB18v29sPlw+MZTpkqzBwchL6aEz+f7Wl5uVzemlH3HmGq2NZZSIqO52VGMwet62rRzQycy/zRrXUD98JRknKzrWHdrnN09Dxzb7WByZWYlHm+zt3XaTeZGTKi23pdWjcmyshMKJWtdUQKq5P2+dN3//j48/sfOXwQFS3Nc8lISxUJAgsEWgapGap4ma/v3z4OStu2CVFRtrYlPCKDYhoLU0aaA6PWGuooFo0RItn7Mi+XdV56b9u2OMU8X1jmD8pElOEe7t261HFfattauJGke0hob0GCkIE4M5MVBFrX6//5h9/9+PPvE61kSpGAuXeP3NLTA1ALV+VhGMaxes9m9P681GFIYDocxnEUzev6Ara73WF/czQ3wAun1jGq1CJplp7b0gPp1lkozECI7p+eL/z7yz96N3Z3M/dOQgRq3rKvXBRJUti5cdVoQX3RRsnhPbe2vf3up5++/9n63K/eN4fBg9KyXaxHs3CmqGVwot2kdRjKoPB+XtqPHz5tG01ln+CECspQh2nYL2tPwjTs17lt1hGcnpk0DIUIxE4KImzzbBn9uu4vofTTY7yp0IGUmYiI1mUmZq51lBK9eWT0Nu7263ztbtP9BC7W/Pn58Q9//OG8vJBlOiD9OJbe5NqW6TT5lg15GkaqdKejCi/L1jNEWK3//N0Pn/aa3ksVJmYWVq0qTz99kuN4o7rOG9pm27q1ERlDGVgYIUokop265yaF4yhqXskwHsvWUks3a5vbJLtpuq2en+Y5LS2z1Fz7RiIkepoOVIY/fPdP83wJA5LAEOi4KyOyeU/jOuL1cbrd77v3UfX5spGUCiv6Zm2Ps1/oJa6X9XSox/1hti5Z1at5imGdl+7etr4XKqXUYeBgp4iMRLRlDTOQRJFt3nT37S06GJR9M3h4H4dBVUulj5eLHibdfN2aR2NFKVVEm3d4PD19ir7Z3ECpgsJi0CKtyBCch8Pum69enyZu6wIXdBuKjJFrLi3Zm3frz5/69azLyWot7/tT+bLe3b9KIDinYX9zOymLk5FQOY5qEZFgCJPuJiUZptKdFGf2HQhkmfB1mI4+Wx46afUSt9Pdp+19khMxnJLy5dPz+eN5viw/fv+j9ZaabMQsc+vHtHTN9CqaxmGeAeESlONpx8vaTLfrDDNOWJAmnQ7HwRFzL1OhHrvD3iLBcf/wsBtv5+X8cn4k4ePuxt09HBR1GJx7e2pa5ZZ/Vs5OIS+XmYCxHD2p9cjku2GU4B5zt22kUlhffL5cn85zS2vX83x5ORdmDnfoZZmr6PXSjZwBkPb0ZZ4pStXikS9Xv7wsvVN2gLh7I0Qpen8zUIhv7ViLDuN0PIZjXS9SlBTuYc0aLb2MLOpb7xzZ4+OHd5WnhjavXaPCw7aIUYYUFfKHV6+NVFm2lsbOIkh9++7xer1Eb0/zStGXdRVl74nkIlR48LDVER
QZlG49sK461jruj8/X9vHDjxczBVRVlcwsYgXZpOizUYm7m+Ppq9fH083Lx+e1odYqOgzjMAyjuaHKyFVPFRkiGXGPXdmnDfe/1VfjsfuGCIMR8/HVm/vbL57ef9z6kiLW4vKyEl3Pl82W1r37pfNO00JCt1xqKZSko6arZ4cnp4KZKFJZx7os9vbpPM8dld06CGFRSK6BvRYKWfvyq2+/vnvz5tXdl6IDbikjD8e7adwPZciM+fK004mVRilFxKztbrXqtD29q7c/6eHV3fL02Jr13peXxzq+ebu9W9eXZV2LSAYu5zns0laOUrbVbvb53Lu7OXlSGmI36r7e9mbn9rJui0ohztMwbJbZ8+3ytDwvuttZZAhRRlD/6rcT/iBfHsa+2un+9Iu//nMdx1J52k+Zba5XJU3riZx2R98amEVGym6W3SHOMnqXD+vjL/Xlrb3ElQ3Xde2b9X/+XRyGo2jaccHTuvTVmncQWRpv2T4siHUjZQmO7j2uHZiGU92NgPkSDX2ncnHbh3A90OXy5c0D/QLzPy8/fdxWrAPx+hg34/6vf/lnj+3l7ldfMA/DuMvM7kksxNR7D2akFymtB0iLoi0I3hgZiOdPfZx+fZ6etd3NP/3PD8cqibIuObenqf6Ke93vH+erL+ule88kFQmfIzys1yqJzOpppMPQw8/Xl/F0GOrhzevd83omMktLrUHr7rC7f/jm1cNxmfr++x9/+P7vl49xoZdvvz7ZLd344fU3vxhEhvHoibi083yx2fKBKmqMtFyes1Ch2OYWxdEvFMN1/hmyL/E6nm/0/T8992Y9OMiDiXRUeeYY//HHj68mFd7N/ZMgtYo5YXaAmnkR3S7LzXS7SLbr5rTaix3Hm5vjzel4Y+Tvzh/XHGLY39y9/ss//6thv6OxfvvrL3/6/uHHl7c1x9PXh6+nOx2P+5v7vvXeVk8NbNvWoACpu3fz5e15f3tS1XP2vQ7ztVOpu/23XErR4suij89LUgaiu7fMSaQta+sbKLZwoQ3JKdnWprXWoZhZZpBk0TpqzAgd4R5zM8oLV/7m62+HukvQw9+8+bPdL1llPI1ckq2Vm9tfvzn94vyvejb0TZJarNfnZ9lPEsJVQ/h42tzKpGQRWWL85qbMFqRDOifpVAYdPawt3TmO00lf5g8H5Q4xSrLIUdeX1SPTcY22184Jt3BvUxWCEDUhCc8QfFgvSE0tcNKUgFzmy8/v3g5Tbc779Xj3m6+HEpml9xWJqipOS21jTiGam2f3q6/HOmU5MK3MtD/dK8IXkx36hqrUqoC7CDmsFAa1l/nDaby3mD+cX1RjpZy6d2GWIfvWIixBQtK7r0TEmi2kytbcIxhDhCUiO5rTuC/oTIVIWFgIuazz+XxGleflg0cEI83Q0jXI0hmFwExBuoUpFXXufYEFChloyMmwkrK3YEemoFKmkIIR1jdjnuTIWkfsdsdZ7++/6m1JadqH2VaN1YNYaiIomRzNehVOw0wmTAhJzjSQ6r4opUaxcK5FxSN3rCF1rPv9Ybd7Lbp5GxTwQuneexOVCJciYVspY3IdCNfLwpqeyZEmL7ZA9uFbcNFw12QqJbfWLaCFSffTYKtBzRp0aR0OabL4VYUzZShiGrGaEFOVIQJJSUZJgVQJguz2U+uUjOYbHFzACbDk5jwwV+Xd+PDqWGnvuboLOJNCpIRbd08kcU1F7WzRN+pMk2iFrwGVybxFIpg4ybObcN3SiZKIGZTByBCplzrxu08/9GUmIpUqILOotI/NCGAhRpABlOEdQUzMRKoSRMmebJzERXxLp6yFkcXDFFY48kzN+9a3Rmu6Izk5ew8PT5IMyrNf2+U8fxD38G7b2i1iW7xbrMZSwpMCfYkt1mSSosqVBAhLVnLZPz7zkY+1jhSBSAdpYU9jAELpaS2gKZxExZAZFJmUbJa9R7SUMihP064MeofM06uyGypCMvsXXz9oCWKinmGOdO8REQKCg8M3dN9My25zjwADnJSgQKJwYSaPlk575pS0Dg8IB5JYiwopWVbWYczMHqCAmWVDcCfALXsEgHBsa7q7ChKWCMsIuBCx0O4oY00tXGTjQXeQ07RLdr17La+mvmxkTFSJODKBqDv9NC/dl54kRes41GF/U3dFJclAaJSiiqS2tQ7TFEmJTIBB1J8WylAijx7uzJWFzc3cgijNTFgicaTBkem+bmHuVEBMSIoIBiLSLRPg0DZna8bQUXGclDym23p/9/pf/PYvcWnWLbKZr917650hbq1df86WiYQHUUHvwZxgTkmgEMgpwi06IwFvyznSkZlIPsBbWEQmETJZ9c30F28v/5DFM8tQ9DAMH5ZzqCKcRDIikrI7kKAEUfcgArFEBhX03sah3B6nm9vDxEqVx91uOt2fdlNkS8owYspkj23rhaNjN76GIDKidRAJZzJli87GjkB22kiIeUiPZCIpytIjwg1RSS0pMxNEA3V9fvjBzqFgpkjWx8tFQMFGKQ5SokggnJgoAGb3AMVYJJJEKBP7abi9mR4ebnd1WgxV5XB7n9GD4S0hQGRPJEQV0VxFenf1jYlAFiDqWLNLEMI360OtSCJgnZvuJInMuveOInWP7CXNM4Fkp87xv3jzgKcj3T3Fh/FUsE+Qh1kgwghgBdi990xjRXoyc+tdBjkM5eH2y4l1XdrWFpqOohMibbOICLetr5IQGaITODlZVQlACjJ7b83fk4VbX20VnUApTB6eJQAQcXqQCJPkCnNPApMA6Vn5rf0ojYVTCwmBIO5zaGPB58ALyQEK49V6ZBDVSgohd+/d2VFK3Y0RyT1s+xBlPBTK8Ij0hLkHs2QiCb60BJcqDOpILsLpS1vDiDXdt4ikbAw169a2ABEogECqcFo4WfqsrCCLHv70E99+ec977HZvQhBmYG5h1kxIQZTIpOw9KGngYhGbexICFM2KFut5PA4S1DbrLsdf31etfe6dHEGepKIJSnOKFsWJSEjcTETYIwGKFyknCq110lIoI2CWRpWqlohARqRHhH1uy3iKhF0pBymvvmT9BgVYtw8MD2F4V9IExqKFiii13iKCGSoihfZKoRK9lWEwQhlYSRZEE0zHw83xITJiTIU0IiZ2GAGZsVlXLWzFGJlRed8yLKLo6xBJ8sxEZiY8MriM5SRgs57Wi4yZyQyCpGSKy57Y0o35h//xeN6WcItrBODBzbvU0qJJhRBXHYap7HbD7Wk/TVMPYwQLhXOY1aR5Ww317uEXx8OrOoyqhS4D1ClMWDglexhS1pKRxt3X7ms41mZrAGUYKiQzulk4gkCJCupt3nzd2jsIQwzgTEQLBMEJoCCnmnp+/nh4dbfCBiVKBudUNcODu1DVoQx1N4zlcEyk6Corg8QktYe523TcHU6vbh6+Ohxve99E0K1fC4aWQgzKjHRDGTkP4U4sGcK842jOTCwU0cMjAgLKCAvTkdJpA5iylK9Uua/OQxZWIKK7M1EiQ6x3PtUaBueFpBLTOElQIqjSxFyGqseD/uJXt4XHZXZrAZJ2wbaFpQvr/d3pcLqbyk5YiKh3C8LEs
V1Vh5qbJUL3ZGtPQ6CFBYsiM8gBoZB0R6EinAEX11KISmfUyiRVlaw7OCg4nNIpHCTChYkJVPTh4c+U4v7Nfr8fq8i2bNd1PmrpxBI+Trtyqm/uviF69+792+bBBDPnSm1pD7c3wkmSPGjAQQhy6hEileOydc530u6H/c61ORqCaCSO6JbCzD1p4OysKd2ssxcqgmLpQvAWEAQyORmCpEgHQgYlp+6hHKWK/pv/8Lf1iuPpNFbsxmO37Ovl/Okj1RJabka9ffUF6jj/t3XLfwDMupGnZwB0dzOWzCIDGGSZCQo4pRLxSOhJ5Y2ghLtZlFLAHKsZTCAi2ntDOFEGZRIGVKRv2biHVE0CQxHGIGIGkJTMTAFoMAJE2VxPenv7zYkBaVamcVDONu1f3bIwRZXm42GygIFypRRJ78fX49N7m/bDNI37XU2AtnClcPMIZkXC4aJMLOR9edfKayHLTCKAVWmOYDeK6pxEmcFCCXNLcktmYo7Mnk6RgWQkEZJEXCIzyCIJkX39g37xzS+xPWfjHLxtjTtJGYdaxJgGWrMv2+rbenl5S0IwGaTwXLXY3X682ckyr0MYSvFt7VuXQVgoIrO3VCXAmexUR9a2zhgMSIHGziJNgUQg01sS8bZtMqiWoaUJOPDZi7F7sDAYBUPAMh3MDT4YLT/f87V9stWQnVgy/0TcMHa3trRrM2T2re3AuwNORxn2erjn+9Ph4XSK3tdtKdOhSk1KSCLJuodFEmV+rtW24yhkiRKUKaVQhm+WW2QyhAkgYha4vXAAFAI274kkfMa5zIj2sgX1JDAYKWI9A/tXlXfnkghDMMJ7CMLJrK2O8OyjfhZdDPvh/nC6GYdRczC+O07H/ajMzLUEln6NdAJ/xpsgkLAIBYxckWFhQRARIY7oRJ41CUA4gcDw7so3kb1dmkh67/5sQSAJMw9zHiU84OEAKIvuheRKAzOvCaBlJIgpHN57hLu36O7pzTdEkJahYDeV16dxvxtf3ezGWpe19e5LmyNJygCiSEuCEnMczEE5Jon3yHAKz8CyXC0CopUGgMIigAhHhKi0+QNNiGbZWx4CmdmTmEiZiZGZCCIy62XkkMjSdd6ajgVCoD8BX0aEMoOAJGZJ0DiWWkvR3aRFh4ykosv10zJfx8P+fJmHw5U6LutLWoyHPVhJAu6g9HSmZBW0GnBm0bHmZ8+e4URExKAU4UjRu+XdOr4e6v4YCaakot2MiECEz66MECTek1gedtAYbomu4cFJrIUAp2QSIXI1SgQnBxETExWVWsWbE+rz8olEs/P10xOBKeLDy1NRfc1f546GlGCy8ESkCAKWTsksLFFbzBQBpkwgMjLNXShD5PjFFJnpLmVw2wRM4MzkyGQKgSaYya0x4erO68v3EeBRWZiQICKAkUS09o0yemvmLZiZhNI5A8hAuAfJIEX69Xm5PC9tE5Hd/lD2JRGsmaCEEaiSRFpaCjOB3JZEBkAkDIoMAEUVYNUKcCYlg2AsJfNzVc5GHtFrGQFQUHiYbUKqlfdSVCEiZM25EAuFe+uNnHt2/ywoIaKjey+lRKBDkrjuJ++2u9uR0P60P9KNlsqOSO9sTAQplNm9Z4CVwkKY/E9UrMgMpICCGPCIzPAGlFpAcAstEhH9GrIXTpXgvvUWfVcGpspZbV603FQFMt0dxJQeHhFpCaREBiF6a21dL5ZOaVX3W9Iw3rw+vtpNY1LsDkeYJ4JpIBFCiCgzp4dWMbOIZCZKzoCFJaBcieE9GQRmwufIIpLAADNHRwLhARE5EII4A4J0G6qQgIKQGWXQaBJDMjMlQPCIIA8HEcFAFK21Nl/a+pTBZn5eljKehptXO0UVoalM0NUMFhAurJ7xeYYCigQAqHISYwsqRCkQpkzrYPm8JWG3TkSBFC6ZkREBJ5IAEzk7R3gKB4I/I5EjJRSokrzP6v7ZSCRBPJOChMTdgOzeSEhKKaJl0EI8X9YeRgC6hQW2XJYtM8owqJTPuFJKYXAmgBDlJCYPLmLuUFKVyGROApHDfc0ABL51+nyzegegRbhQbGFhmYzP0ZowE6F6XxYSCWGuu0kUlJSRTt17d+sWPVv0sAQARcIC2bzUghrWSZ16733rEYlgBBHos0irqqgCxMLMjGRJgYdzsjCB3TITTBKeiPAkQmQnqVVYw43BoqM7cs1mTspMCbPFWjIQRE5BTki35LNfAA6kRfgWGRkJBqhyejITtmbrNi+rowUXe3kRBY8Yj7fDOMLC05AF/PmQFGIlEsjnB5RClGjNnpWzkjIxwjK6hRMjAQaIlCgJiAgwr2czsQxD/bwB2CfcrWkdi2uncIpaxsxEAac185YRmdH7FhkRaRHuTsnWvPfNM5DBrfs6a3Jb1nbdBuYM6jZHNK0JqrUU1QHJGSGq3kEEy0wR3d1SECjD3TPAylIATmQmJaWAV/KeBuTu1X6gHRUGITwiNhLhUkWzo6cnbUmhkcIUbI+LW3d3CIE/fwMEIskDFh4LxzhNu90xdYcQL0Nfzx8//fH88cO8XYirlkm4SAaxBjuhQ4gypbBnQqTormDMkN4amLkKE8HNrqtdTSBJbrGqMrtpIUjfHi9zDzKFEANJZOfNLQgQkeadRjAsr1fdfN7lDXOy/QnXMyIjCMiIZLodXxXg5u6L+fKhvbR07DnXp/eoA9Ht4f6+1FGFkZnCtv6k8pqLyJ9GLEQshGiEommenPEnWDafbo99vjBFb1FEp/TtGbSzZbNpoJ1MRWGuEJcOvh0YlJ5EnfSJTTvQ2fjr1795xgTItl1ZuKVDsViPtaCIHm5EKwvr3eQY+PjtUO/l+JvTeDsNX4w3X07lFL0AmsnS8yfc7trhfN1tbbRlNNTcaOtsWmvdrcMgoj/Nufygvm7URxtGqeWqhXfx+DPkF7GSaVQv0+qFRB61KeTCnKnk+lyAGCJ//dhfNOX167/8/7HDsXUoV5n5AAAAAElFTkSuQmCC", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Distilled image 2\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAj90lEQVR4nAXB19Jl2WEQ4JXX2vnk8+fQSROkMSMky9gGY6jC91A8Bi8Aj8E7UAUXcAEURZVtwNgYbJnB0mh6pnP3H0/eea+8+D74L/71v/zh1v8cj3abx8MMPnNPOKweqm9U8kVgvZXro8mLDdC2WtvSnL/48dtv98tny1w27kkxDKW+Sa9p07qiH165KOk1y7+dw/j/uX/4+8Of3f/i7xarfvjO9X8U5Q9/Rf/6yYcXlz+/zvobLOX9YEO7mEQZYA+/Oqx+Op3t7f5V+6hWP/35uIh/lk96pVX9Aazw/bxdvBm15FbNrmCyHuLp2eF6cvPSP/7RKRIfhpN9X7tHLPj4Xtf9WxlGnixdueWtxS7pHsvpY5qLfAyz+vXfnNgD6L/BRJ41Ffr+4zIrO2Z6+1ZhTCO++/7u8fmf33xeDP/rf5OL97vEkwidgPDN453/J/bzuDOr14GatPJEPuBGuw5ra8Hv5pfAoYjqokoi19nMDv/RNCTzvEpKjr3Ddbz6U0I/gV8dDhxaLKPdh2f0/d/5t/8eveR+kjAsIQhKjEs3Us++2EdpqtKABOIEj4tIkm15sB1HWJzDPEvHUxCEwXF+dJmzcd9bJzmUpWqsOMawZC82Lyy1wcbEhDbQ+826C6EQI3HxM5+Alx82jdwbtEIjAaHr2EwMhfkBhbaPqfAMYTdg9jvjSLT9cguFsJPKdaq6iuBl9Nl0VMz2phs3+UCPWXaE4BsAWAQzwyKo0zGtxF/+TROsykCSOM7xSMkuEBpnYZGIyfgnHTC3HyOk4129Tno8SAuwZbGnYelrTxTaNsyOvt3BwGCsiXz/8BiczDJmZVIMfBTHqjkAqSB+luMiCBglIIXaTz8eyleq2+fLpNXQUnlb7wbw7Yk83IBCge9MGvIvk5zNXNfbzW6/VYx3pj6g69/+UbgCBrJVLUPNDbHXp/kBsyY41xTJVzMrCMU8DdyG+cr8UFBw+uOIzxXPVmTMjLCJKRDPXKQCUBCZ8Y/od6UtNOiDWh+qce7H0YRR4UBlKJRiMStmgWQQBNIagOi+vAfgsnMzjTKEs2XO4JugD7YcmqF1+8N21t2D7px4Oiop8ME7V7NgT3lVda8oQUseps1PYUmmRZwxn4S0hJQr7SwcPQfw+zLrjxBAah3v5YfMIaixf6XqfSDt55CNuUtsEmkZCAPW6aDQ7JG4vpWxC60E1u8O/V3v+jI46j3eFLISkzhiII/RCu3UZg1qt/fvHOtBhXDe2ypR10PZd6CXnZGj0RUUvQtOxKyT7YCVRGExnicqYUgf/Uaj18PLbfu/tW4zIt4fqgOTWS0lpTP42SoCGaBVutqLPaA7JLk2NZggNpJFbpzAgXUw2tPapEPUOdd5qVkfjmEKRZQkUezAxruqRmhgMX19kDGec5Ya6yBhUFPusXQpRCE99PKtMjniNK5QWUzXPe4jqinOSOS9dTxNjDf7SjluQB3FQaJUchkVpwFlZTy8rmBbtpIyTTOXD3aYMuDZ29HjwcX0EMMTOdYmRDi4QKdqDpoLDSMxS6ChGEzdnJSjPmr5UFpbqUmTRgnZffDb0G7lijLKFGutjF3tfPWKu2bf48CkcpTRrems7UpT+hmaGz5gJN8GXE8LvR2wEAzDFinPonFEnBnCRlXDYkkVZzsFFdUVl0hBjq8Le7n8PHVMmE7tWweN8sSDbdus+zLbWJklSRdnB1hcTbah7aZdnHHQ6ra5Q7JToR/RmOVWkMDHec2l13p0Wfme9VrIHiRYYdLj8nD7IK8/NdfnF4PR2iB6LJLBNJUaEIo8CtcJ7m/MAuwlF/mleyuV433mgUHyUweUgUTl+BinkPtzbP/sZITWpwf0oIy6+MRK/MNKVjGkoBmNGNobQ/a5GKcgplQkQ/ATKk5S93+jffk/xUFGCucBs2juvM18h5GuNYQCMZDHJ3QxPc3MzHTe8YFkTw9H6PB2qLdUO+hun8PaAKVzKvUjBWngPMG1d8GpT/uMPKNUzUdLgnh77LztQW2KjBOMOjieyM9CYh/+xhaAFe4Xhz04MhdkmUv861M09UaRaZnQOO4qpTPkfco4wR6SgDpckSkwmyN3uZf3IZxyw1y52xjA1W2anOOXDzeRMdBGijfb17DFLHZIAjCJua/u2YbirO91JQ/Y5zeH38yGqxquMUmbCBOGqcOIMvjweuW+QtOuiDzSnrrepwwNeCiNkMhkk2e9/aDedfgzKEeHzae7iGSFEcjgPMkiq0iECMsxhAaQDEHMLfcU26AOtmJKs7ptoxuzGqb0FzM5RtIO+lMqk3wx/mG1LQbd4FbbNt3CnlgXu4Si0SxhWoTQEdpVjYXMjLj8hpiPM8HkJD07SiaB8Mhjj9O2NZ/gOMWbCQBYQr+Wq5iTrSk9ymIIbNDjw4P0prpGUB3UxwrAodfaHY3RpJvKoAi4o7ZQ3jRaUe4iQFjEVg/rupYJZk1p91JhO/DQOC3tsE4wzuY/WbyIGvXeVOt+qFJ/EoE2jLthxyKAQkwpwAkBPBZD14/TYiqOq9XqTJR/eG7IZQJ0E+HEM2xMeNBxDeZ8jvIZHFpXNjvxmPG8wBzR8dSbOgirTzq6Qcn/U48t3RY6BD6Ol3XToSZsB11Sd9THG9wDawCyHxx2wGnohghSKJH1BlFmQzAMspjWejACIX24fdjetduhbXrg3e69mByrOMvTwLcWhCeK+DZ45B0QBcl9MyqdvBu26PX7Q7S3yckoHs9GWRIQnBh6NZ7kuyRDOJqKlIf8jIXGT5G4sJ0aOv9oYPPccqCX3RIyvvYeaoPrtJPow+2bFJ9uBecaCx9ERFt94ruobl4xCkEcWqQQ9iMxCX0wEIpEUkblPtRVfnd7gHV/Mp1by0KaBxOAt02wugrV5qWQeNA6dOXYIujpF4fBp9NE5k0GmrZLaJwKlAnCgIPyXpBldEIGoEljIPSOBopi+zRdA8UCjRYZ8t9rHwAWgx0UBpu+Hpxjp0dk/PQYy+GoDuEYN/2+IAsnh2jxdFNdxM0PMWSdbyEwaUxaKdkJR2WUpvFQYd3X9bZSB+NPQaE5nIILkX4YFDM9WCyyGNjArQL8JCooyeHlfuiZewTgsPnYd3lTvWt/9MUcsiJmxUBgb1fAX63aX00Ra0GSdh0j/v27Lt6tXuTPfyPLXiJUuHPG7wNl0ETiytXG+ndo6iGmkhfJpgNTf97y/gCkSM/Hux9sZIc5i0wfPyls1+g6la8AiLizLD+f940XwvupQACB1BOCW66ZYAxNijiXj3w+Tp8s8iU+fjH+AiDdHN4tMoFsAntbtv7T325++d/fPn44aIFbi6h7H9pPrpTHg4jnUAVDKLD3ihrQUptPgU5o6oyXvlGdj5qFwwrZh8Eh/7lYqvSg1LSzqoiiYZRMU/XpL1z8xkyO/P43/n4ZqS8em9+ATEXXHFvMr0W/Wh2QRGAUxQxqRhhyjcUAj1iUHiEdpL/yJ8dnx+fnk5Okh0HgMp5MLFW6cDEnC5edjtDm7X5I/+
rm5q586LrwAi+jWRPfTdjDq1YDOnRmLvJAzsyIAcVGXdDBVew8oRFX5wl7xyMBkyfo8ntUQw0wHJ+OcwtU2aAKdaqDYMF3W5dch58Ev38n5s+iiBcma8Ph8M3N+uaNb/eq68eecSBNgDh4znkwRNbYQTnRWOtBPbad9J4PoSsmy4StKHepCp4myIguSSL44Zi59Y+FfF+vT2zDL6ju78eUzIvZxWzqMbMgP9xX3o7g8SifXGFwWMbpmQD36GpYBcED+vU43mqTSP1y3cfk1kWU84i+KPLRSZZRchiksybX2eVIStB4KVX87uFhpbu4SEHqLVJRnBDGMKPcKQs9GTNCiBPeAVvHVa7a/cP7UqiqhvrCQdmP86vtpmtLuiwWachG+Pm7dH4s3bv6r1RMcZxpp9n4cBAQwSHU99XwHQiaO3AhwKas++C/L/d6f0NF92MUI/Ox6VwJnhdxr9bvsjQf8QwIneUXY+M5G6cpgkrh/p1H2Hc18NsWGgpGGCoXe8oLOngnMCeU35WmaiXU6MLHYUCqOSAwwvkkYkf1rtut79jwIzqOny4wLxoddRkdbE550iMNkxMJ0q+TSRrFIrEg6DjaHbgQ6VFE2CXOAitYI8DlJBaTnIpodny9eP40LhLyRxfh16vJcNtfHuUhHYeUB7oSZLQ7dG0PRDLyoXbGo0F5jyjuVBJggmIXLBYBD7rDnCMdcBInIcgIaAvoA1fMMRoXyJteIojJ4bGyI4CGH3pkm8cNntBFMzmQ/VmPeyyWp2l6esZN4qlToezCIwUWLUez5fPH7/8yg2mqwgZuFrRYnM3LUqMsGo1OsB+2qkG7o380HV1mTtkMqrSEpK8GrdMhqCZwKw+V0WqQwwHogGznXUSSJYuDF2RXuZbpXmd5MpvECWAEUaliZu14clwgHga4l8CF5r6tRIpoiAaC9m2zOWKjuECMRHTmOgGK4zyJ/S5oK8WAo6gYT5Zak+ptH61/aQ5teDROmBxnRsF9L3dBWj1Id5DBjGRDqu9WAMm9dviN+2jrHwmzf1cepkZmalTwRIJdGLiEkxT0LoVNZ9N+kKzTcnGW82KiDJG0oS1gJzmu0/kRtMJ0XXBc8LDP3MwGm1gXzbLd1nfD/oTEsq7XPj2PIk5Z8bwYUp2IRSfvyRC5JQul35bVUXFMrybm0LnU1rAhOuqpOUHE1VtLQTG+ds39mifQZ0j1j/ubR7PGpt4VqYFQoic55TL21n5a3e+rUCO8RLwiotrPp7ggzDfNhAmCJ4h2al81d/pQt6HTi+t0lEcEIyNvhkS//vDnJDbbXlJootPRM5YdF198avuX9/VnhLq+tKlrRtfL9BSTsSvH95iSjV8/3MWkrfb3Hm50IpmnYx/RQRZai66u+vuo72Gz3X4YCtbm/REppz3urEtRhL2qi4ECTCTD/LHZz4q0t5j7AoTbbjLmJI0t2XYSJSnft9ns+W5X41jOAt8jo/XgtqmlJKcZn38Vf/9Wo9/qUciQYtPF0ckTN/Hw8B2kXzXyriSBAiPY/JpJX7Fa39GlTTu1qnTd9zxNM/eO7CaPxkVC1cpbpxlP3sLOjM4ucdoqYi6jPJsn6P8S+HHoBkhCcGAuSTXNctWvKvhjTm4mYKIiM/J4fyBFYpWmA/A+lozWVXEyHz44aTWMVAwn5LrWW3m4Tcg8uZ5NJVtnyWw6y1gN8DHNGRj6sH3Is6fkReN3v9XQNdLHKd/d/48C/B6annh3a6zsJmlqyikJfpzMy0EZ0GR8XiDZOQgFmBh2eJWYM0pFGHViaKaGDUhQb10HcU3CkIUHMCAtiR4+QT4phabQ1eZDTBnBkYU6B5SJROxnOf1iywjJQJYEkCZ62QYAet9Rnun9+K56+PC4ExBG7CwWcaQnDrCBjyitkE/JQsfwEHO2+u4o+QrXjdp9jANlRT5TXMALyJF5ZzoT4clo5vwc+IgLBnrnEf/yQlrTEowbsqHdm41zyBMUi5igQrIOmrObpsYYxYBK70xvggSRWLA4NsoyhyCnKYr9PEP1jTeWmtinPHMY3cHg+e9Mltzvmu0rU3aJt+Vm2ynCCVS96YZOgrDb003QtGaUXybJePJlGLy3njD2iKHT1nUfmgIm4OlUUJgAbh2l9tHnGmiM5aewBOsGNszbpkzgbMui4+cTZOspAixoZA8Ht3Y8wgBFMPMjEPcKxXFEIbDeKScxgSjyiHmJIBaKipxmNO6j0ur+3s7P/+F7FQ8xOpqiOCb7GEUoSlNhoOex2UPIUIinyawDsqh6UECwQ0xQAqZpf7dnrXIGOjCO1rIm60Nenw8Wo+GeMywfq2Aa2V/yO9IDM0Is0PEeaNrPJv/5T9Cbt//HuYFAqOBWJxoggyKqTEehcdAJizH2kCBu0ywfJUAYxxOtERHe6AQxQhVUe3fpEfg1nB+ZBj/ebpr9/lTGRydZxJ2lzItDof+ySHDoh5ervzXrlqxvJSWMTKMA2IZ8njCMNOw15ooOaqe0ild9Z51mN9tHRAsuFiih69tqxJEDTZ80CxRhcvc/kEM034eYUizZ9GfjZfCDZYPs7MhCtrzKNQJdSlPD3JgOOyMJyHWvbOz9jjDeU90CELJZgITY0HZ/i7Rz59qSAS+8Rsn+zQN53D2spEG/XXdYSTklGQKczfO9Iah/v9aVQehB7W43chGgawkOgZFMBgOzZvOmjvLZePKjYEsDV/yo32sTslY8ts6069V7Fv0CRb//exlMrVlh3mOYpYXvKvuTEaFJrN6ELqjEA6MgowQXvg/vygrEJ5jpE+yxudekCZ1xYw7lXqpKDEgX5VEz8OAR8D09O5KTSKCM99YMACAOo6AP5SBlUpZ1ryyxmQOubCB4tWZIig10DLjWqqaxevT8qL3/pZTve2ugW3gRQwL9GgVxXEdiym7ytELJQ2l0H47PI4I7INWAjo7/8GPbj3xFoyQdqK9soKY77Azkgj1Jjp7GLmLceaxAJsVRDB2UFjgWGLVfpGywPQ09QTOc5CgAoo8R7JWtjejM4pHQGZtOyUU8BDdJUqaZyo0bzU7wz1dqJ3RBi3yoEaTZVGMS8Kj4cvuuCtShvAuOc4CAjGvcjL9TbvSPu/4MjQStgQKrQHdc9zIVm3fn/wV5dOjx3m0PwITKHJVR1CNuh0K5Ty/fNn3Zot5CMjBBkF3GcXEdAW287Fay9kCrmsSfF9ys5Ha16d96RGO8KHqmPxzPAOkHJl+3MJgDQDwiZfeaL+KukLTzSeSaar/xDaGgCcEghQ1iEWzb7NWnPfoECFItbhkayrM+BxZ3GokHyimu2nLgPVvMiX8C3+ZJidp9zawN1umcf1sMukC+TTZVz68HBDHD0bC5KwDYvWnjGX38WAcGspSh2Zwc/NWL5Zcbpn0KFiLWHmhbY196h/GbA2hz+8d1uWoZihn1vfb4ON9R3PXM0NthZuv+kqduYwDwva0b4sPIL9OaeoQuQe98lIwiFou40Gh8lXv0bmk9lunZ7Bml+A6oMFxFCT76OrmKnTfQ9BIY175JeepZ3TX9CKW17WvQjotnChAQ366+241wN
B/NaXJ93Lv4sphNfuseNg+Vlu8a4wYAXIt37U1J8ud7F9rsBchzoWNTVlZR2MoOPNJAkl1Q9pU/co4hPIpc1dnGOf0A0LPe0KdpQmxhVccUk4MDlaaoLYNCWZ4Em6IdenmDb7CpBnDo9kY/POwDRhgbih0BHna3+93jR5JTSwWzLNRD55o1UvHZsy8ujyfZuKvvxfvVh30bYL/p3jR+WBGCjoWKI9e68YbmF5wkLP6075A9+UII7N/aXsyhhxjIogqRXKYxKELJyqaOQAeOAZ9QKua6/lVwscEpwlrKWIc96A1Iba9dM4rQDK7Ism9FR67UKS+RB8FgALqZyKMgVgX29Q5V0wFrIXKpZd01pgGKUi/CgmSqI1DWhEwUHtVPEObA6aPYQ2v9lx1Cd2fERElEfUo32sl3GzuOMUjalxvbXJ/Eo2GnGUo7sncaazdwqwB0Xjy2E4Q8Mz0CzKUs2q0eD2DYK8gixtlP3UiEdMUFoxCh9Zvk7k25DGOTjWwQU64L7jfkA7O+Fs3ZmbJzmn4Nx5gGOjKYEWVZAROIsAmyNKHtH9E5oPdiJ9D2aetLxBWKjQ5+mG7t7FU8Ay2sEKiw/kGzenp0RJvYNCiiH4K6SjISCUM8ZLCuw94ZCR0l4SuxTaMqMGi8RH3oJnkqwo71e8kAZm/lAQp5jUk4nnYonoSTeV9FLd18jMdfGxirafZb4gzN3ATF8IO4W/eH11qjMZPUlqGVPXCW4jioNYys9tiRAQepZ/sgXop8IhxOiIbMNeVDLz3u1DAET2MLnjAxl+3O+wEfjU1inH5b+LxF2tVOQz+SaZzxQLs4896vRIv7CcPQZKwoOZ646ejzmD4GH3hSOEDAYGP8mqC7d8A/PN3JsFxdyfYOYl1/7F6Hh7Db7g5Dm8YTjDD2cm5fPB/FGcIcew2BlYYn5bf3OArGb9Z1dAXqye476bgumNlO6Zov4+vR5KTa9j6G2LD5JME8IkFACJtaGY9UZF/vbjeVx0Wc86iNd4Pm3kGz3dcq66c4bl40Q9JTHBsl9Yl9xNGCa601pNZg6pt22aLr3QU7kan6tJ/C+w+rVpNhbt0bELlTgqRfH4CLGY2PDO489tgIe9kH3S+4biWYsCya9g/AohDE+XD+u/AwS4nQRegj4PgouA4BVMRJIB2ODKg2VkVM8CQPfCCMiqFxZ1Geop8NdR/5qe8q5wAKJCmmcRRly1tGBhA8taWltw42PQMOEuIUNGCDpBoKNDDersdn6Oj7Bs9PgqCj6HvIvgglqnk6i/fmiKURayodbl/ee2mk+yi9rd9U5YHLODKkuf7iYgKyfiys8XS6Hb7Z7p1CK80oM6Z10BM3sQ7grVacON4YiTmAXkDE/Xg5AVNvzF8r6lxuOuATjjj9EtFWggztIINx1AmUZdgGF/AizmKOYk78gh2ni9OnCTp8/teWDeWIf3HplsvF+AjOX4zjKoGwji8gvsx4Fmp4Mc9CDx7VGLgUS9lWauW9nUYYW6h8TVLkBsRBxliKv0TCweTJl3I4fNqoNCu8kVK3b5Wn1lATRYmwveveBy4VxqXfPL5TVfyjzG5QLkYaAZ28HXxUHd7XANJUeGS0Iq4AABVI+7rRK01Q3dvVcPNtS2B2Nfe6SuOr8dExCO/7ejyNVDfB2amu7PGpXkanRzG8+e6dISN957GRUREYiaweZnycUeKdRdR5CT2UftubWS5ITc2HTlfHZAwBIFB5Fj3hiWmRTizyHgEEviJDZRPLZBM/y8ThjfLCUICNdWBTcdD7URpZYHYSYYujAvIwCmZQ+t6Zr+zIxUqRRu8ylP5Vj4/yn52nQ9XqiCcCwUjMT7KiyLLJWByf6KMlidkdo0a/Qna4F4j1eOdTLMigiav32lrYgSgFlgYUx4wqALOGBIZzJFyP/ABCHogxULPvYaikc8Hgfm1QTSUdg9lPOtP6Zl+EMaBeW6kTgT4rOEytIyEzTriVLydkCaBt+ur5LEfJMFiFfUjHPYr++VcvSI5ULpJ+X+27QSZdpBG1ELKIuJbsq/vdw7ti1w52pgY/60MjbeFajLOTo0hppIeGjcnmQaFaxQnVlhhfOxcwwcHhiPhJ4L3Gla9gfEXjKCOBxtEs6pFDjfo1iL9JI85oKbtudXugeVZksXurgB4AdjBghMmTPIN6c/vDKory3BerPuA46BJETqNv/tvbQ93JuvUuc9owBkwcHutVkHSAxofe4cp5G7Ma80SIaJzB4owH3E25DSDUoJnFEzoAPy4QPQcWo9rvvgNcUWMCT4FuusabNAdEhtZAILk2WBKlbP5wqidP/yA65Ep7TC9IwfhpQQamKoMiifsAiA0Eq4CxAeuuYlf9ZJY0oBTpAxgQH4X9vEF/8M/GBvuh2Y3RNBvHYKCgxznEBADfNA92NT1M88ULBmdn5ws6QIfQsIM5HCWEH27WRELFmLGG9y2UZQcVKfT0OkGx8cB4BaD9VEtDCefUj6mwzneH980vy8+BxJWofn07mOqRegghFjzH2HSl7mrhL0nh+gEBkPBg2q7n58UU/V6160VOQj0GGmNSsE/v0NlLAMUQzUeEd+6jdUMz0Hvmxc7cvlzZq+Lr2tqhVWaOr7waXcIgK9EMo5hbPxiiQgFoo9vgDEW6lLbTSlHMmXUAQOhBkPA8y2bCMiFS7waeDWiA8d+f36P0EtuoAZbxcx/afg87wRuTUkpFIcOdwwliGujWIJLlqX+1IbBSwZrBqtAIkUIko59/gZrbhocAON0f/uuTI2qaX7pQhG0VFPvpdGL1TWm3KVKHUv1wcDgrxtmiWI4xj4xSagOqOmJIulSFTSPznsuSOtMFo5wxGntAMjHRsirtdqUlwCo74I/4ePxgvPui8wGcQmTAvh0oz6PkaIdsCNlZTCz0iuqMQIcy0gjdqag4pthBB7w0ghSU99rLv3hIkLrovcbzHAf89TqnbPb7Ana3Qk+XJyKlP6xWEu6wyI6iRLoH3huaW0S1le+NTj2MEsN/fb87/Pohg/v93ae2tvtd6RsZsXwqMJCmtL1LnAI48b4rwYDAJUUqSrz9NqQ90rLtdwWfIFn11Q+YYFMs7omxnREqAiLL50CMSkjFgMpK7QCE3ouB+TnMoWU/Nlu0dAs+mfZbBJjerV6CfYyLy9MkCmXVUnWRTJ/Ov4LcKOQyPiVUwjYQQXQXGyvXdW/Qx0YFGYZPDvnA2pQKMdBRcEBVztYjB2zstcp0ES9nPLWYExh1OexQZpghPXdpUmCgQTQKBWZhiEij5B0uOIAOKVC9+m/GVsJLeO8sYSOeMMymHb5VZXCBtsfogHCgmCKdkTkXn01/UlBHSDEfhAPKd3bQfaUGSYFFMTTWI2+QYVJRy1RcQOhERHdC0EzYNIuSWIyTH7PG4R286eykJyMkwUNsBeDGDc1eOeCZPUjsmQuWT3lOE2yxdUMFpDLGo6FBfozKnlCClIxmYzbCxg/mQsDAGGFQDIP+a5d6W/YsXZPkYgY3+8FPbNHSATX2wLY1FjpoUokmIZk0GPZ1rWo0WAdMDISjmgni4Jhd+jwaqbYT
MeEIDwZwCjVWDnKWgyd6xGaL/fDIWJv2sKOPBqyweAJhkaWnlbkjDIbeGdMzgmGCMcMAzBVrGWToyTP8adfvOXj62169JRjl+QT3qsaIuO5e/L2vA9lOy5bP0ZObsnndaHgDJAAiDn0EMr3Vwk3T2GYGOkv6vt7jA2lsANpJGpQOfHooog9TnIrStBRGXmCK83k8Wo4cdXQUQeZoD7VdC2CDi5qFVGh8fP0PQO8iOunQnr6mbsdLaBFGLpklPEHV0pe31hX9gNDNbmMlvvyQoJbDBeYZlcqCELoSdckVQLY5pnBeeIL+DCvzJBJxjghFwGHuFUhCCib8BKcIFsTDABT0MUgTQGjCXC2wGNoZjc4YqdEJOQ6AJikRLDIXWsLAWK0YwUQtAR2Cs4+IQVmH/mB5nfLiFEkPd6bNNZr78+i81TapD9a0xcWBzdJRHtIkMhx0EcrF39Ot0RriiJXdBiFvUwJTpMS2AZ9EYvPGoDOWmeJpb0UAdTfUrjIYBAAsevg3ZvMJdQPAyQ7hdCrwHDAS9dbXpsyKJZyee3bhyqu0PTP9UFlg2E1oUoriZ9iHgALQWPaMXmQoRNQn071S0j/WhDcDtslRVkST1j6kAxk0ZHDUrxG2uOlb7kuwdUeCIdMbAngEiJrE2ZL3CJvYlS4iU+Yi13uZvkb4s/kJWoOhh9llHE5cBNaqDvX9I/mnZkkOqwYom4XU3L9pX/e1fbRa8VmyOH9SIAbpwOO1yW3PDDIZZAkhDgG6PQIhYhioMH9GXGgg5mQC5PkqjVcn9JcPNYpi17v6/m0aqC0wQQE66rgsicf+oG0KU6qsMKbBVnhPQlzCXpkxIFmTjC6JYpZ6Z/9S8yv0n/72rVIDEQjAnSIP1pB5LQRbXrt7UV8nY+GGHWz6QyuSUYwRx75gqtt+UxEmbb3rZIlTl412109GOXR2dJC+DHuSU47ZUqxLa+BsETSXNNGTi1Cx9hcX86w9vN4e4DpfywbXAqUp5cTQeBGYABcefRsxOpHSIkggRJxJaUEeY0t5JVxbeypSzTX8R+2uQtPy5vWnxleZWd/09XbOUpNEPLNvmZZsrfeffFz6OYiW+Yic2KDSGTpIrej7h9dlWdPYCVc1JP/DnTN9fOwrs3BJAqJho0A49BMjON4/HM564YOfgf5pv9C71rLZ6bJIPkPjeI6SgRLpobFan/BEcqNn136qmw9d4IhT4ChDoWUaQU4kRSZtAWubncXgT1L2HP6rf/cf/Lsmm1jvmwjY13/68eR3XkCcY92aIVSoWhQUrdm63W7Ma2THCQYAmGBxxCKWLuc/OkMDjDAaME3n8/LmdR5DeZCV6IwbxvQJAaByrdnFk5y04E8Y/B0PLCVUP1ow6s2gXQAsBm2L0zRjznvkK+O2tr+6WtB9NBzt2X1KMwJlsBgAA1DKvEMsIFfujpIB/Qd89fHv6Pur6T66fvzWjv7pwv7xn7OPpqcLdMyYnMMPGc0DHU3j/MvV15hkgr+I+OyrLDsR56dqR81qEfIL/SC//4uPd9OAYd5eFH+j4g/s63v/5BuLD0Py4999/zwenHqWobNiJuRM9/RGow8oX79bb7vNr5r3uO0ZGJ8ScXh39fHZr0aHG3q3ffPdR+zddcTjQdja18SQBhm3h1P4RjFqy+L/Aze7FsSORaSNAAAAAElFTkSuQmCC", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Distilled image 3\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAWOUlEQVR4nE1625Icy5Gcu0dmVc8McA7IpbiS2Zq+Ss/6Cf2ybEUul3sOMLeuyoxwPWQ1SACGaZueS1ZmhN8i+b/+9//5wx/0tG8eE7HRBywEzzED7E8tz9u+3QO9YDN5+2Obb7/8+V9vt91AHtFv9jSURvz297/+7f99L50vL9++/fplf/5FL/vn999/+/f//PO//ffzfrLXt69/OsZ9fPz2/u62b0+3Pj4Ps+7HvL8f0tluL//j3/4nVH57//3t8PH6zja+v1mpQpm3592F2/70sX9vcfsIPDtHOcOeOcWIkmpAjaOa3qtUnokMc+8fY56ROe6nCBs+2/3+3voW0fJI4cyZeX89d3Q1zfn93/9yzvtTa44jx8R5cIxxWHU2g5OuEa2HXfl5HOOpgPONiFGjaX6EtjHvvrfUtEmghDSq4TvU4yn6JtGkCZIKQTDVe9MWFkFZBVutqUkRpSBBqTdZMOqcec7jfh5luHiOc46i9HF8fH9/pSTF09Pm9Mf5MXIUgR6MYAttXa1BIAijUNNAE1pv27ZpO+esKkTse48miCVMz6xq0YrIMafL01lpUoCrUoTgqoIsyqjW1MVpBEpgl7I8xwdB2GN8zuOo8jlGlcf5kfP8/tt/jvP+6y9fKg/R5Jz3TwWboL6FJFQAcx7j+ARKQoiYY1bWnFlz5JjzyHTf2bf++TnrHCQrC3C7NYmRmDBhABY1axIRERIlSc0ugFLYhsKgQUDGnJl934P76+vvY0ySKGTW+/uh9v2311cwWr+NOVuXomcWSlUDkKtgp/35cXy+fzgi+ibGx/unYZCZNc5zzISLUrRu36dr67sUhFrEJpEAyHKJFAmbLBIAQqGIzCJJ0WVJpERQIqO3rUcH4/PzyJrBHsEsHjP5/pZjSnKOWSPwtN1uNSfB46jKadjVSFaOc9YWLYK2P97fFLHfnqvSthQg7QIAuzX1vrWIc6KBrTwME4SLip/PAwooSgrlRFAiDbTeJBrOSsARMafB++vH65ilVrABVvkcB7zJ8zjGcY5te4poAFiWmOmqQg1IKLcQ7DwnSFgR0Vp4OsQQAZxjxjgN922/3XabatE6RpVpiiwwSIlrgyUCoQgpSDJEBcTeOkmQtmG7UOU5z+P9swpIG4adM2sSKiCyMmflTBLjPGzPrKqElZWeI+HYOsqZGa31bW/bFopPEkbZVXZW5Wyt3V5epFZVkjREkVobvz5K5M/nkEIKmZQaruqiM2nQqKpkqsmo+5lNYjAEiFkemZmVyKw6x5hz2AskKEJqRqUrcxJo0WBkZvS+325BEQZp0lm2EQJj3/fb7TlaoNyjt+i9VUpk0S5JilVFksTrKRhS65skV7I1g+WiOSsBxN7G/QQymnqLcvOcAwBAOGcex/Hx9orKL19/ve1bzTnLJWg6KUUvT7sMrMohdZ6zdLA24545AYoECuoEZs7MclNr0YMWyGA61vevSpWCEgADlNSayXT11iPknEVKnNOfb69//ft/zVknBqrKsEECEACUcuTn532MabP98VvOPOc9tFdEL5bsY47zaK1v+1MwnJ55aOytRR8tEyQB5SwxxzjP86hy9q1RoAMqkVJbO99CZQMgWS6UFQrJZVQRJp2wWkPFx8fvf/v7f328vdI+z6wygwQjmm0AtjPJMwGcx/3jx/fYn8veGqV+HGdXnyFnaVOLNsZQCKANiGUYDgWcM6mocd7HMXvrosXyOmqSQRu4ioasKoCZmbOoEAm5jMx0Ua2L7Tzuf/v7bz/eX51oEuCCYZKKUKwfBEgA6fQ4j++vb61Ha1uP2PpGF4i+t/3ptu83STNnEdu2MeiqrFmZJKuQOWfO4ziqJiN6RjuqwpawGtcwxWjNc8IWtaAm1O2ioSYDpgGe9+Pv//H3t483lwVGhF1cWF0GgVV/UIuQkPbncd6k27Znix67lO+fLTHbfntpLdzmPGOLbduktn4MUFh7rNXRcJVNMazRPICOhfC8frklKlQuhkAGGNErBxS9NUljpLN+//7b72+vLmCiXAyCgFfT+HphNHHvzbYzszxmvv748fT81J521xmhyuoRLfq4fzB06y+Klll2hSRKAGGJTi6Apnmcd1Y1YICNDNcEWK45JyFA9iRJUyHJ9zMbAAXBnPn6/uPH9+9ZM6zipB0hMKqsJfVIkk2xbeoR92OeObPsz/GXv/7l69cv/9p6j95bH3UQyMr7ce/73mM/Pz7f31/RtPWdUISaJGkJGFcZMY95RjZzyUpmMSJgVFWL1jrnaQIgEo4MexRjb+32dKvCjx+/jXOwVChRbPV022bN8yg19oiXl6dt3xpF+zzO+zEEhZ2F+8c4x9t5/7/fvn3RtqXdCoJnloY9j/P4uB9H802YW2tPt6fKWZkuk6TQe6MYipbzqNYNEFQLz3KWOqO1mkkDME0oFb23iNhslucc0wtgCRMR3HrnZMWIHs+3/qf/9q33jpGV89V5G4GaH8VkGVGZv3//8f3t7evXl/3pCT73vb88PZV1/3yn9OXrL9u+H/dPKvbbNk65JntrPaLF0+2ZihOjVS7SismM6JyjqgAQBFFwkNF6b6zqdMzz8/f7/fh8fX19pbFIG3IQITiYoZCw7IVtscTb81bwmXOeR8F01UCT/nB7uaHP+/l2nPz1qbdtwc4vX37t247y59uPZO2359vzzVURjYqqbJLVbkSTk4ArnUCUzZIucyOpqbsrwgwis8Y4x32M8/5ZlRGrbdlcBNJpmAqQBR4j7aLadNzP++fncRxTpqgjZ1Vubf/ydKPtY0xm+akKrrRn692V4/M8zgF430tqszDGzHk/z1Pk09PXuLFJMSuVTqCL2/OTIlo0OFsXCTMq7RpzTgNzJuZY0oV2SCAqWznHWekSZKMyc4zktu/N5/z++9uPt0/D27aBMT8+Rs7KOuekK+mXb19eXr5g4n4Mlyk50/K2bQpRIcW2d1F23nKX+tafILXnfQ8GqX2P7fa0bVuou8Z53AFmLtqyyayiWU6Xl/UhyUCLnlFj9vQsuLeGaHBWzao47/Pz8/72fs5CV0tnZRpCMSjBWbm/7F9+/WXfb85KsO0ft21j8LbfImKcR+u99aZoUtgFF9i6+kS22x//2OaoHGLMc8yRimHnHKdNeKJglxBFLwNFokmMcNWS2c9901P7PN4+j8qqQIJ5jtn77fN+//H6SqJHgzHPYWjfo/Xn59aKqU3Pv37dtMHe+2bP9x89y0JBihbjBJcrLGdNY+1dTaaYrUfPMY5jALMy02itRQScYkxoIRcEoWWdoiiZJjEnCMAJ9dvLc4LH/ftxfoKExJhfegtkI/Ye96PmnAuvAtEDitC2PX95fro9l+3KjOaqctnOMlEUqUXpF8mSgAKEpAm333988ng9x9jbjjIBhmWnXU64DEtBYCUXIHJ5m8u4oaru49DZWsTTy0u98RhnoT59vr3+MBwdf/r6p/Oc3397/fH+OsZsnJL3P3378u3rvu+k4OHRznl/f32fNQCT4q
…[base64 PNG payload omitted]…",
-      "text/plain": [
-       ""
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Distilled image 4\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgo…[base64 PNG payload omitted]…",
+      "image/png": "iVBORw0KGgo…[base64 PNG payload omitted]…",
       "text/plain": [
        ""
       ]
@@ -5392,12 +2271,12 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 12,
+    "execution_count": 11,
     "metadata": {},
     "outputs": [
      {
       "data": {
-       "image/png": "iVBORw0KGgo…[base64 PNG payload omitted]…",
+       "image/png": "iVBORw0KGgo…[base64 PNG payload omitted]…",
        "text/plain": [
         ""
        ]
@@ -5407,7 +2286,7 @@
    },
    {
     "data": {
-     "image/png": "iVBORw0KGgo…[base64 PNG payload omitted]…",
+     "image/png": "iVBORw0KGgo…[base64 PNG payload truncated here]…
AAAAElFTkSuQmCC", "text/plain": [ "" ] diff --git a/examples/progressive_distillation/utils.py b/examples/progressive_distillation/utils.py index 0d3398722d28..2e9c8b8db78d 100644 --- a/examples/progressive_distillation/utils.py +++ b/examples/progressive_distillation/utils.py @@ -75,7 +75,9 @@ def get_unet(training_config): ) -def distill(teacher, n, train_image, training_config, epochs=100, lr=3e-4, batch_size=16): +def distill(teacher, n, train_image, training_config, epochs=100, lr=3e-4, batch_size=16, gamma=0, generator=None): + if generator is None: + generator = torch.manual_seed(0) accelerator = Accelerator( gradient_accumulation_steps=training_config.gradient_accumulation_steps, mixed_precision=training_config.mixed_precision, @@ -118,31 +120,32 @@ def distill(teacher, n, train_image, training_config, epochs=100, lr=3e-4, batch bsz = batch.shape[0] # Sample a random timestep for each image timesteps = torch.randint( - 2, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device + 0, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device ).long() * 2 with torch.no_grad(): # Add noise to the image based on noise scheduler a t=timesteps - alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps, accelerator.device) + alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps + 1, accelerator.device) z_t = alpha_t * batch + sigma_t * noise # Take the first diffusion step with the teacher - noise_pred_t = teacher(z_t, timesteps).sample + noise_pred_t = teacher(z_t, timesteps + 1).sample x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1) # Add noise to the image based on noise scheduler a t=timesteps-1, to prepare for the next diffusion step - alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma(batch, timesteps-1, accelerator.device) + alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma(batch, timesteps, accelerator.device) z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * (z_t - alpha_t * x_teacher_z_t) # Take the second diffusion step with the teacher - noise_pred_t_prime = teacher(z_t_prime.float(), timesteps - 1).sample + noise_pred_t_prime = teacher(z_t_prime.float(), timesteps).sample rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1) # V prediction per Appendix D - alpha_t_prime2, sigma_t_prime2 = teacher_scheduler.get_alpha_sigma(batch, timesteps-2, accelerator.device) + alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma(batch, timesteps // 2, accelerator.device) x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2 z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime noise_pred = student(z_t, timesteps).sample - loss = F.mse_loss(noise_pred, z_t_prime_2) + w = torch.pow(1 + alpha_t_prime2 / sigma_t_prime2, gamma) + loss = F.mse_loss(noise_pred * w, z_t_prime_2 * w) accelerator.backward(loss) if accelerator.sync_gradients: From 89009211240801a418b7926e914727edc12c67da Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 27 Oct 2022 16:29:33 -0400 Subject: [PATCH 101/133] correct beta schedule --- .../image_diffusion.ipynb | 2628 ++++------------- examples/progressive_distillation/utils.py | 6 +- 2 files changed, 618 insertions(+), 2016 deletions(-) diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb index 70832030b15e..9e1666638a17 100644 --- 
From 89009211240801a418b7926e914727edc12c67da Mon Sep 17 00:00:00 2001
From: Ben Glickenhaus
Date: Thu, 27 Oct 2022 16:29:33 -0400
Subject: [PATCH 101/133] correct beta schedule

---
 .../image_diffusion.ipynb                  | 2628 ++++-------------
 examples/progressive_distillation/utils.py |    6 +-
 2 files changed, 618 insertions(+), 2016 deletions(-)

diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb
index 70832030b15e..9e1666638a17 100644
--- a/examples/progressive_distillation/image_diffusion.ipynb
+++ b/examples/progressive_distillation/image_diffusion.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "import torch\n",
     "from PIL import Image\n",
-    "from diffusers import AutoencoderKL, UNet2DModel, DDIMPipeline, DDIMScheduler\n",
+    "from diffusers import AutoencoderKL, UNet2DModel, DDIMPipeline, DDIMScheduler, DDPMPipeline, DDPMScheduler\n",
     "from diffusers.optimization import get_scheduler\n",
     "from diffusers.training_utils import EMAModel\n",
     "import math\n",
@@ -47,7 +47,7 @@
    {
     "data": {
      "text/plain": [
-      ""
+      ""
      ]
     },
     "execution_count": 2,
@@ -120,7 +120,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 13,
+    "execution_count": 12,
     "metadata": {},
     "outputs": [
      {
@@ -134,1012 +134,314 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0, loss=0.127, lr=0.0003, step=1]\n",
      [... roughly 1,000 similar per-epoch tqdm progress lines deleted by this hunk (the new revision keeps ~300); across the deleted lines shown here, loss falls from ~0.26 at step 2 to ~0.0057 by step 485, lr anneals linearly from 3e-4, and ema_decay ramps from 0 toward 0.99 ...]
- "Epoch 485: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.0055, lr=0.000154, step=486]\n", - "Epoch 486: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00511, lr=0.000154, step=487]\n", - "Epoch 487: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.0053, lr=0.000154, step=488]\n", - "Epoch 488: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.0053, lr=0.000153, step=489]\n", - "Epoch 489: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.99, loss=0.00505, lr=0.000153, step=490]\n", - "Epoch 490: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00491, lr=0.000153, step=491]\n", - "Epoch 491: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00513, lr=0.000152, step=492]\n", - "Epoch 492: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.99, loss=0.00564, lr=0.000152, step=493]\n", - "Epoch 493: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00514, lr=0.000152, step=494]\n", - "Epoch 494: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.99, loss=0.00486, lr=0.000151, step=495]\n", - "Epoch 495: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00515, lr=0.000151, step=496]\n", - "Epoch 496: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.99, loss=0.00573, lr=0.000151, step=497]\n", - "Epoch 497: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.99, loss=0.00522, lr=0.000151, step=498]\n", - "Epoch 498: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.991, loss=0.00563, lr=0.00015, step=499]\n", - "Epoch 499: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.991, loss=0.00563, lr=0.00015, step=500]\n", - "Epoch 500: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.005, lr=0.00015, step=501]\n", - "Epoch 501: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00546, lr=0.000149, step=502]\n", - "Epoch 502: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.0051, lr=0.000149, step=503]\n", - "Epoch 503: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00566, lr=0.000149, step=504]\n", - "Epoch 504: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00476, lr=0.000148, step=505]\n", - "Epoch 505: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.00489, lr=0.000148, step=506]\n", - "Epoch 506: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.991, loss=0.00507, lr=0.000148, step=507]\n", - "Epoch 507: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00525, lr=0.000148, step=508]\n", - "Epoch 508: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00536, lr=0.000147, step=509]\n", - "Epoch 509: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00444, lr=0.000147, step=510]\n", - "Epoch 510: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00534, lr=0.000147, step=511]\n", - "Epoch 511: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00541, lr=0.000146, step=512]\n", - "Epoch 512: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00527, lr=0.000146, step=513]\n", - "Epoch 513: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00556, lr=0.000146, step=514]\n", - "Epoch 514: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00524, lr=0.000145, step=515]\n", - "Epoch 515: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, 
ema_decay=0.991, loss=0.00549, lr=0.000145, step=516]\n", - "Epoch 516: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00493, lr=0.000145, step=517]\n", - "Epoch 517: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00494, lr=0.000145, step=518]\n", - "Epoch 518: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00445, lr=0.000144, step=519]\n", - "Epoch 519: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00488, lr=0.000144, step=520]\n", - "Epoch 520: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.991, loss=0.00541, lr=0.000144, step=521]\n", - "Epoch 521: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00492, lr=0.000143, step=522]\n", - "Epoch 522: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00514, lr=0.000143, step=523]\n", - "Epoch 523: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00515, lr=0.000143, step=524]\n", - "Epoch 524: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00472, lr=0.000142, step=525]\n", - "Epoch 525: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.0051, lr=0.000142, step=526]\n", - "Epoch 526: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.00471, lr=0.000142, step=527]\n", - "Epoch 527: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.991, loss=0.00472, lr=0.000142, step=528]\n", - "Epoch 528: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.991, loss=0.00492, lr=0.000141, step=529]\n", - "Epoch 529: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00455, lr=0.000141, step=530]\n", - "Epoch 530: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00497, lr=0.000141, step=531]\n", - "Epoch 531: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.0044, lr=0.00014, step=532]\n", - "Epoch 532: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00508, lr=0.00014, step=533]\n", - "Epoch 533: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00504, lr=0.00014, step=534]\n", - "Epoch 534: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00471, lr=0.00014, step=535]\n", - "Epoch 535: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00456, lr=0.000139, step=536]\n", - "Epoch 536: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00462, lr=0.000139, step=537]\n", - "Epoch 537: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00516, lr=0.000139, step=538]\n", - "Epoch 538: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.00496, lr=0.000138, step=539]\n", - "Epoch 539: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00509, lr=0.000138, step=540]\n", - "Epoch 540: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00478, lr=0.000138, step=541]\n", - "Epoch 541: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00441, lr=0.000137, step=542]\n", - "Epoch 542: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00428, lr=0.000137, step=543]\n", - "Epoch 543: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0051, lr=0.000137, step=544]\n", - "Epoch 544: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.0054, lr=0.000136, step=545]\n", - "Epoch 545: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00473, lr=0.000136, 
step=546]\n", - "Epoch 546: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.00593, lr=0.000136, step=547]\n", - "Epoch 547: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00498, lr=0.000136, step=548]\n", - "Epoch 548: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.00479, lr=0.000135, step=549]\n", - "Epoch 549: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.0068, lr=0.000135, step=550]\n", - "Epoch 550: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.991, loss=0.00541, lr=0.000135, step=551]\n", - "Epoch 551: 100%|██████████| 1/1 [00:00<00:00, 1.33it/s, ema_decay=0.991, loss=0.0046, lr=0.000134, step=552]\n", - "Epoch 552: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.991, loss=0.0068, lr=0.000134, step=553]\n", - "Epoch 553: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00518, lr=0.000134, step=554]\n", - "Epoch 554: 100%|██████████| 1/1 [00:00<00:00, 1.34it/s, ema_decay=0.991, loss=0.00494, lr=0.000133, step=555]\n", - "Epoch 555: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.00636, lr=0.000133, step=556]\n", - "Epoch 556: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.991, loss=0.00579, lr=0.000133, step=557]\n", - "Epoch 557: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00459, lr=0.000133, step=558]\n", - "Epoch 558: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00524, lr=0.000132, step=559]\n", - "Epoch 559: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00546, lr=0.000132, step=560]\n", - "Epoch 560: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00545, lr=0.000132, step=561]\n", - "Epoch 561: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.991, loss=0.00474, lr=0.000131, step=562]\n", - "Epoch 562: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00455, lr=0.000131, step=563]\n", - "Epoch 563: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00511, lr=0.000131, step=564]\n", - "Epoch 564: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00539, lr=0.000131, step=565]\n", - "Epoch 565: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00554, lr=0.00013, step=566]\n", - "Epoch 566: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00459, lr=0.00013, step=567]\n", - "Epoch 567: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00518, lr=0.00013, step=568]\n", - "Epoch 568: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00496, lr=0.000129, step=569]\n", - "Epoch 569: 100%|██████████| 1/1 [00:00<00:00, 1.20it/s, ema_decay=0.991, loss=0.00478, lr=0.000129, step=570]\n", - "Epoch 570: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.991, loss=0.0049, lr=0.000129, step=571]\n", - "Epoch 571: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00444, lr=0.000128, step=572]\n", - "Epoch 572: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.991, loss=0.00536, lr=0.000128, step=573]\n", - "Epoch 573: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.991, loss=0.00481, lr=0.000128, step=574]\n", - "Epoch 574: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.991, loss=0.00469, lr=0.000127, step=575]\n", - "Epoch 575: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.991, loss=0.00491, lr=0.000127, step=576]\n", - "Epoch 576: 100%|██████████| 1/1 
[00:00<00:00, 1.44it/s, ema_decay=0.991, loss=0.00483, lr=0.000127, step=577]\n", - "Epoch 577: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.992, loss=0.00484, lr=0.000127, step=578]\n", - "Epoch 578: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00446, lr=0.000126, step=579]\n", - "Epoch 579: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.992, loss=0.00446, lr=0.000126, step=580]\n", - "Epoch 580: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00466, lr=0.000126, step=581]\n", - "Epoch 581: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.0046, lr=0.000125, step=582]\n", - "Epoch 582: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00432, lr=0.000125, step=583]\n", - "Epoch 583: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00463, lr=0.000125, step=584]\n", - "Epoch 584: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.00462, lr=0.000124, step=585]\n", - "Epoch 585: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00464, lr=0.000124, step=586]\n", - "Epoch 586: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00468, lr=0.000124, step=587]\n", - "Epoch 587: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00464, lr=0.000124, step=588]\n", - "Epoch 588: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00447, lr=0.000123, step=589]\n", - "Epoch 589: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00441, lr=0.000123, step=590]\n", - "Epoch 590: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00445, lr=0.000123, step=591]\n", - "Epoch 591: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00455, lr=0.000122, step=592]\n", - "Epoch 592: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.0045, lr=0.000122, step=593]\n", - "Epoch 593: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.0043, lr=0.000122, step=594]\n", - "Epoch 594: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00481, lr=0.000121, step=595]\n", - "Epoch 595: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00475, lr=0.000121, step=596]\n", - "Epoch 596: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.992, loss=0.00467, lr=0.000121, step=597]\n", - "Epoch 597: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00442, lr=0.000121, step=598]\n", - "Epoch 598: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.0042, lr=0.00012, step=599]\n", - "Epoch 599: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.00477, lr=0.00012, step=600]\n", - "Epoch 600: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.00468, lr=0.00012, step=601]\n", - "Epoch 601: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00466, lr=0.000119, step=602]\n", - "Epoch 602: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.992, loss=0.0047, lr=0.000119, step=603]\n", - "Epoch 603: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.00468, lr=0.000119, step=604]\n", - "Epoch 604: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00423, lr=0.000118, step=605]\n", - "Epoch 605: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00448, lr=0.000118, step=606]\n", - "Epoch 606: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, 
loss=0.00451, lr=0.000118, step=607]\n", - "Epoch 607: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00416, lr=0.000118, step=608]\n", - "Epoch 608: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.992, loss=0.00463, lr=0.000117, step=609]\n", - "Epoch 609: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00401, lr=0.000117, step=610]\n", - "Epoch 610: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00425, lr=0.000117, step=611]\n", - "Epoch 611: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00413, lr=0.000116, step=612]\n", - "Epoch 612: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00436, lr=0.000116, step=613]\n", - "Epoch 613: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00426, lr=0.000116, step=614]\n", - "Epoch 614: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00519, lr=0.000115, step=615]\n", - "Epoch 615: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.00448, lr=0.000115, step=616]\n", - "Epoch 616: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00395, lr=0.000115, step=617]\n", - "Epoch 617: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00461, lr=0.000115, step=618]\n", - "Epoch 618: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.992, loss=0.00448, lr=0.000114, step=619]\n", - "Epoch 619: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.00408, lr=0.000114, step=620]\n", - "Epoch 620: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00437, lr=0.000114, step=621]\n", - "Epoch 621: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00407, lr=0.000113, step=622]\n", - "Epoch 622: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00431, lr=0.000113, step=623]\n", - "Epoch 623: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.00421, lr=0.000113, step=624]\n", - "Epoch 624: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.0045, lr=0.000112, step=625]\n", - "Epoch 625: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00415, lr=0.000112, step=626]\n", - "Epoch 626: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00456, lr=0.000112, step=627]\n", - "Epoch 627: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00402, lr=0.000112, step=628]\n", - "Epoch 628: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00449, lr=0.000111, step=629]\n", - "Epoch 629: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00376, lr=0.000111, step=630]\n", - "Epoch 630: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00413, lr=0.000111, step=631]\n", - "Epoch 631: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00406, lr=0.00011, step=632]\n", - "Epoch 632: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00404, lr=0.00011, step=633]\n", - "Epoch 633: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00426, lr=0.00011, step=634]\n", - "Epoch 634: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00426, lr=0.000109, step=635]\n", - "Epoch 635: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.992, loss=0.00452, lr=0.000109, step=636]\n", - "Epoch 636: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.00435, lr=0.000109, step=637]\n", - 
"Epoch 637: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00413, lr=0.000109, step=638]\n", - "Epoch 638: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00413, lr=0.000108, step=639]\n", - "Epoch 639: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.00384, lr=0.000108, step=640]\n", - "Epoch 640: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00416, lr=0.000108, step=641]\n", - "Epoch 641: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.992, loss=0.0045, lr=0.000107, step=642]\n", - "Epoch 642: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.992, loss=0.00427, lr=0.000107, step=643]\n", - "Epoch 643: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00432, lr=0.000107, step=644]\n", - "Epoch 644: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00415, lr=0.000106, step=645]\n", - "Epoch 645: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00432, lr=0.000106, step=646]\n", - "Epoch 646: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00417, lr=0.000106, step=647]\n", - "Epoch 647: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00415, lr=0.000106, step=648]\n", - "Epoch 648: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00406, lr=0.000105, step=649]\n", - "Epoch 649: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.992, loss=0.00407, lr=0.000105, step=650]\n", - "Epoch 650: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00414, lr=0.000105, step=651]\n", - "Epoch 651: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.992, loss=0.00422, lr=0.000104, step=652]\n", - "Epoch 652: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00426, lr=0.000104, step=653]\n", - "Epoch 653: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00398, lr=0.000104, step=654]\n", - "Epoch 654: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.992, loss=0.00429, lr=0.000103, step=655]\n", - "Epoch 655: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00387, lr=0.000103, step=656]\n", - "Epoch 656: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00428, lr=0.000103, step=657]\n", - "Epoch 657: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00416, lr=0.000103, step=658]\n", - "Epoch 658: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00438, lr=0.000102, step=659]\n", - "Epoch 659: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00411, lr=0.000102, step=660]\n", - "Epoch 660: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00402, lr=0.000102, step=661]\n", - "Epoch 661: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00385, lr=0.000101, step=662]\n", - "Epoch 662: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00399, lr=0.000101, step=663]\n", - "Epoch 663: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00398, lr=0.000101, step=664]\n", - "Epoch 664: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00426, lr=0.000101, step=665]\n", - "Epoch 665: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.992, loss=0.00393, lr=0.0001, step=666]\n", - "Epoch 666: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00385, lr=9.99e-5, step=667]\n", - "Epoch 667: 100%|██████████| 1/1 [00:00<00:00, 
1.48it/s, ema_decay=0.992, loss=0.00379, lr=9.96e-5, step=668]\n", - "Epoch 668: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00412, lr=9.93e-5, step=669]\n", - "Epoch 669: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00387, lr=9.9e-5, step=670]\n", - "Epoch 670: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.992, loss=0.00385, lr=9.87e-5, step=671]\n", - "Epoch 671: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00376, lr=9.84e-5, step=672]\n", - "Epoch 672: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00409, lr=9.81e-5, step=673]\n", - "Epoch 673: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00393, lr=9.78e-5, step=674]\n", - "Epoch 674: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.992, loss=0.00396, lr=9.75e-5, step=675]\n", - "Epoch 675: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.992, loss=0.00414, lr=9.72e-5, step=676]\n", - "Epoch 676: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.0039, lr=9.69e-5, step=677]\n", - "Epoch 677: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00372, lr=9.66e-5, step=678]\n", - "Epoch 678: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.992, loss=0.00391, lr=9.63e-5, step=679]\n", - "Epoch 679: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.992, loss=0.00393, lr=9.6e-5, step=680]\n", - "Epoch 680: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00397, lr=9.57e-5, step=681]\n", - "Epoch 681: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.992, loss=0.00399, lr=9.54e-5, step=682]\n", - "Epoch 682: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00391, lr=9.51e-5, step=683]\n", - "Epoch 683: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.0044, lr=9.48e-5, step=684]\n", - "Epoch 684: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0039, lr=9.45e-5, step=685]\n", - "Epoch 685: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00396, lr=9.42e-5, step=686]\n", - "Epoch 686: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00414, lr=9.39e-5, step=687]\n", - "Epoch 687: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00376, lr=9.36e-5, step=688]\n", - "Epoch 688: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.0039, lr=9.33e-5, step=689]\n", - "Epoch 689: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00387, lr=9.3e-5, step=690]\n", - "Epoch 690: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00403, lr=9.27e-5, step=691]\n", - "Epoch 691: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00391, lr=9.24e-5, step=692]\n", - "Epoch 692: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00391, lr=9.21e-5, step=693]\n", - "Epoch 693: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00376, lr=9.18e-5, step=694]\n", - "Epoch 694: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.0038, lr=9.15e-5, step=695]\n", - "Epoch 695: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.00409, lr=9.12e-5, step=696]\n", - "Epoch 696: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00425, lr=9.09e-5, step=697]\n", - "Epoch 697: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00389, lr=9.06e-5, step=698]\n", - "Epoch 698: 
100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00371, lr=9.03e-5, step=699]\n", - "Epoch 699: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00366, lr=9e-5, step=700]\n", - "Epoch 700: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00402, lr=8.97e-5, step=701]\n", - "Epoch 701: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00386, lr=8.94e-5, step=702]\n", - "Epoch 702: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00389, lr=8.91e-5, step=703]\n", - "Epoch 703: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.00436, lr=8.88e-5, step=704]\n", - "Epoch 704: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.0039, lr=8.85e-5, step=705]\n", - "Epoch 705: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00377, lr=8.82e-5, step=706]\n", - "Epoch 706: 100%|██████████| 1/1 [00:00<00:00, 1.35it/s, ema_decay=0.993, loss=0.00352, lr=8.79e-5, step=707]\n", - "Epoch 707: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00411, lr=8.76e-5, step=708]\n", - "Epoch 708: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00387, lr=8.73e-5, step=709]\n", - "Epoch 709: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00394, lr=8.7e-5, step=710]\n", - "Epoch 710: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00389, lr=8.67e-5, step=711]\n", - "Epoch 711: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00393, lr=8.64e-5, step=712]\n", - "Epoch 712: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.993, loss=0.00373, lr=8.61e-5, step=713]\n", - "Epoch 713: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00387, lr=8.58e-5, step=714]\n", - "Epoch 714: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00384, lr=8.55e-5, step=715]\n", - "Epoch 715: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00398, lr=8.52e-5, step=716]\n", - "Epoch 716: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.993, loss=0.00384, lr=8.49e-5, step=717]\n", - "Epoch 717: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00398, lr=8.46e-5, step=718]\n", - "Epoch 718: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00371, lr=8.43e-5, step=719]\n", - "Epoch 719: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00373, lr=8.4e-5, step=720]\n", - "Epoch 720: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00383, lr=8.37e-5, step=721]\n", - "Epoch 721: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00375, lr=8.34e-5, step=722]\n", - "Epoch 722: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.0037, lr=8.31e-5, step=723]\n", - "Epoch 723: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00383, lr=8.28e-5, step=724]\n", - "Epoch 724: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00408, lr=8.25e-5, step=725]\n", - "Epoch 725: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00403, lr=8.22e-5, step=726]\n", - "Epoch 726: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.0038, lr=8.19e-5, step=727]\n", - "Epoch 727: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.00407, lr=8.16e-5, step=728]\n", - "Epoch 728: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.00389, 
lr=8.13e-5, step=729]\n", - "Epoch 729: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00382, lr=8.1e-5, step=730]\n", - "Epoch 730: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00387, lr=8.07e-5, step=731]\n", - "Epoch 731: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00357, lr=8.04e-5, step=732]\n", - "Epoch 732: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00387, lr=8.01e-5, step=733]\n", - "Epoch 733: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00358, lr=7.98e-5, step=734]\n", - "Epoch 734: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00413, lr=7.95e-5, step=735]\n", - "Epoch 735: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00385, lr=7.92e-5, step=736]\n", - "Epoch 736: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00359, lr=7.89e-5, step=737]\n", - "Epoch 737: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00382, lr=7.86e-5, step=738]\n", - "Epoch 738: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00361, lr=7.83e-5, step=739]\n", - "Epoch 739: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00395, lr=7.8e-5, step=740]\n", - "Epoch 740: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00366, lr=7.77e-5, step=741]\n", - "Epoch 741: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00398, lr=7.74e-5, step=742]\n", - "Epoch 742: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00397, lr=7.71e-5, step=743]\n", - "Epoch 743: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.993, loss=0.00401, lr=7.68e-5, step=744]\n", - "Epoch 744: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00384, lr=7.65e-5, step=745]\n", - "Epoch 745: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00414, lr=7.62e-5, step=746]\n", - "Epoch 746: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00397, lr=7.59e-5, step=747]\n", - "Epoch 747: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.00359, lr=7.56e-5, step=748]\n", - "Epoch 748: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00366, lr=7.53e-5, step=749]\n", - "Epoch 749: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00377, lr=7.5e-5, step=750]\n", - "Epoch 750: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00338, lr=7.47e-5, step=751]\n", - "Epoch 751: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00354, lr=7.44e-5, step=752]\n", - "Epoch 752: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0038, lr=7.41e-5, step=753]\n", - "Epoch 753: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00413, lr=7.38e-5, step=754]\n", - "Epoch 754: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00394, lr=7.35e-5, step=755]\n", - "Epoch 755: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00344, lr=7.32e-5, step=756]\n", - "Epoch 756: 100%|██████████| 1/1 [00:00<00:00, 1.26it/s, ema_decay=0.993, loss=0.00369, lr=7.29e-5, step=757]\n", - "Epoch 757: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.993, loss=0.00371, lr=7.26e-5, step=758]\n", - "Epoch 758: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00377, lr=7.23e-5, step=759]\n", - "Epoch 759: 100%|██████████| 1/1 [00:00<00:00, 
1.48it/s, ema_decay=0.993, loss=0.00379, lr=7.2e-5, step=760]\n", - "Epoch 760: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00362, lr=7.17e-5, step=761]\n", - "Epoch 761: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00377, lr=7.14e-5, step=762]\n", - "Epoch 762: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00368, lr=7.11e-5, step=763]\n", - "Epoch 763: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00394, lr=7.08e-5, step=764]\n", - "Epoch 764: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00384, lr=7.05e-5, step=765]\n", - "Epoch 765: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00363, lr=7.02e-5, step=766]\n", - "Epoch 766: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00394, lr=6.99e-5, step=767]\n", - "Epoch 767: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00378, lr=6.96e-5, step=768]\n", - "Epoch 768: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00355, lr=6.93e-5, step=769]\n", - "Epoch 769: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00378, lr=6.9e-5, step=770]\n", - "Epoch 770: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00393, lr=6.87e-5, step=771]\n", - "Epoch 771: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.993, loss=0.00365, lr=6.84e-5, step=772]\n", - "Epoch 772: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00364, lr=6.81e-5, step=773]\n", - "Epoch 773: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00357, lr=6.78e-5, step=774]\n", - "Epoch 774: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00375, lr=6.75e-5, step=775]\n", - "Epoch 775: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0039, lr=6.72e-5, step=776]\n", - "Epoch 776: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00384, lr=6.69e-5, step=777]\n", - "Epoch 777: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00382, lr=6.66e-5, step=778]\n", - "Epoch 778: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00387, lr=6.63e-5, step=779]\n", - "Epoch 779: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00372, lr=6.6e-5, step=780]\n", - "Epoch 780: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00377, lr=6.57e-5, step=781]\n", - "Epoch 781: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00354, lr=6.54e-5, step=782]\n", - "Epoch 782: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00414, lr=6.51e-5, step=783]\n", - "Epoch 783: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00356, lr=6.48e-5, step=784]\n", - "Epoch 784: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0037, lr=6.45e-5, step=785]\n", - "Epoch 785: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00363, lr=6.42e-5, step=786]\n", - "Epoch 786: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00372, lr=6.39e-5, step=787]\n", - "Epoch 787: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00368, lr=6.36e-5, step=788]\n", - "Epoch 788: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.00357, lr=6.33e-5, step=789]\n", - "Epoch 789: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00364, lr=6.3e-5, step=790]\n", - "Epoch 790: 
100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00358, lr=6.27e-5, step=791]\n", - "Epoch 791: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00379, lr=6.24e-5, step=792]\n", - "Epoch 792: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00349, lr=6.21e-5, step=793]\n", - "Epoch 793: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00379, lr=6.18e-5, step=794]\n", - "Epoch 794: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0034, lr=6.15e-5, step=795]\n", - "Epoch 795: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00392, lr=6.12e-5, step=796]\n", - "Epoch 796: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00367, lr=6.09e-5, step=797]\n", - "Epoch 797: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00369, lr=6.06e-5, step=798]\n", - "Epoch 798: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.0039, lr=6.03e-5, step=799]\n", - "Epoch 799: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.00412, lr=6e-5, step=800]\n", - "Epoch 800: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00352, lr=5.97e-5, step=801]\n", - "Epoch 801: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.993, loss=0.00351, lr=5.94e-5, step=802]\n", - "Epoch 802: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00349, lr=5.91e-5, step=803]\n", - "Epoch 803: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00355, lr=5.88e-5, step=804]\n", - "Epoch 804: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.993, loss=0.00354, lr=5.85e-5, step=805]\n", - "Epoch 805: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00381, lr=5.82e-5, step=806]\n", - "Epoch 806: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00355, lr=5.79e-5, step=807]\n", - "Epoch 807: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.00334, lr=5.76e-5, step=808]\n", - "Epoch 808: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.00352, lr=5.73e-5, step=809]\n", - "Epoch 809: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.993, loss=0.00404, lr=5.7e-5, step=810]\n", - "Epoch 810: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00374, lr=5.67e-5, step=811]\n", - "Epoch 811: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.00347, lr=5.64e-5, step=812]\n", - "Epoch 812: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.993, loss=0.00374, lr=5.61e-5, step=813]\n", - "Epoch 813: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.993, loss=0.0036, lr=5.58e-5, step=814]\n", - "Epoch 814: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.993, loss=0.00353, lr=5.55e-5, step=815]\n", - "Epoch 815: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00379, lr=5.52e-5, step=816]\n", - "Epoch 816: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.00376, lr=5.49e-5, step=817]\n", - "Epoch 817: 100%|██████████| 1/1 [00:00<00:00, 1.16it/s, ema_decay=0.993, loss=0.0035, lr=5.46e-5, step=818]\n", - "Epoch 818: 100%|██████████| 1/1 [00:00<00:00, 1.26it/s, ema_decay=0.993, loss=0.00364, lr=5.43e-5, step=819]\n", - "Epoch 819: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.993, loss=0.00371, lr=5.4e-5, step=820]\n", - "Epoch 820: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.993, loss=0.00363, 
lr=5.37e-5, step=821]\n", - "Epoch 821: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.993, loss=0.0036, lr=5.34e-5, step=822]\n", - "Epoch 822: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.993, loss=0.00379, lr=5.31e-5, step=823]\n", - "Epoch 823: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.993, loss=0.0037, lr=5.28e-5, step=824]\n", - "Epoch 824: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.993, loss=0.00356, lr=5.25e-5, step=825]\n", - "Epoch 825: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00354, lr=5.22e-5, step=826]\n", - "Epoch 826: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.994, loss=0.0037, lr=5.19e-5, step=827]\n", - "Epoch 827: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00365, lr=5.16e-5, step=828]\n", - "Epoch 828: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00363, lr=5.13e-5, step=829]\n", - "Epoch 829: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.994, loss=0.00337, lr=5.1e-5, step=830]\n", - "Epoch 830: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.994, loss=0.00359, lr=5.07e-5, step=831]\n", - "Epoch 831: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.0036, lr=5.04e-5, step=832]\n", - "Epoch 832: 100%|██████████| 1/1 [00:00<00:00, 1.37it/s, ema_decay=0.994, loss=0.00368, lr=5.01e-5, step=833]\n", - "Epoch 833: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.994, loss=0.00363, lr=4.98e-5, step=834]\n", - "Epoch 834: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00393, lr=4.95e-5, step=835]\n", - "Epoch 835: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.00359, lr=4.92e-5, step=836]\n", - "Epoch 836: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.00343, lr=4.89e-5, step=837]\n", - "Epoch 837: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00359, lr=4.86e-5, step=838]\n", - "Epoch 838: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00357, lr=4.83e-5, step=839]\n", - "Epoch 839: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.00382, lr=4.8e-5, step=840]\n", - "Epoch 840: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00357, lr=4.77e-5, step=841]\n", - "Epoch 841: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00376, lr=4.74e-5, step=842]\n", - "Epoch 842: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00361, lr=4.71e-5, step=843]\n", - "Epoch 843: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00347, lr=4.68e-5, step=844]\n", - "Epoch 844: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00361, lr=4.65e-5, step=845]\n", - "Epoch 845: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00393, lr=4.62e-5, step=846]\n", - "Epoch 846: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00352, lr=4.59e-5, step=847]\n", - "Epoch 847: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00366, lr=4.56e-5, step=848]\n", - "Epoch 848: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.0035, lr=4.53e-5, step=849]\n", - "Epoch 849: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00358, lr=4.5e-5, step=850]\n", - "Epoch 850: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00366, lr=4.47e-5, step=851]\n", - "Epoch 851: 100%|██████████| 1/1 [00:00<00:00, 
1.49it/s, ema_decay=0.994, loss=0.00372, lr=4.44e-5, step=852]\n", - "Epoch 852: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00339, lr=4.41e-5, step=853]\n", - "Epoch 853: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00345, lr=4.38e-5, step=854]\n", - "Epoch 854: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00336, lr=4.35e-5, step=855]\n", - "Epoch 855: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00366, lr=4.32e-5, step=856]\n", - "Epoch 856: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00343, lr=4.29e-5, step=857]\n", - "Epoch 857: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0034, lr=4.26e-5, step=858]\n", - "Epoch 858: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00328, lr=4.23e-5, step=859]\n", - "Epoch 859: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.994, loss=0.00326, lr=4.2e-5, step=860]\n", - "Epoch 860: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00368, lr=4.17e-5, step=861]\n", - "Epoch 861: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00367, lr=4.14e-5, step=862]\n", - "Epoch 862: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00376, lr=4.11e-5, step=863]\n", - "Epoch 863: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00356, lr=4.08e-5, step=864]\n", - "Epoch 864: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00374, lr=4.05e-5, step=865]\n", - "Epoch 865: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00358, lr=4.02e-5, step=866]\n", - "Epoch 866: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00387, lr=3.99e-5, step=867]\n", - "Epoch 867: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00355, lr=3.96e-5, step=868]\n", - "Epoch 868: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00331, lr=3.93e-5, step=869]\n", - "Epoch 869: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00355, lr=3.9e-5, step=870]\n", - "Epoch 870: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00376, lr=3.87e-5, step=871]\n", - "Epoch 871: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00364, lr=3.84e-5, step=872]\n", - "Epoch 872: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.0035, lr=3.81e-5, step=873]\n", - "Epoch 873: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.994, loss=0.00365, lr=3.78e-5, step=874]\n", - "Epoch 874: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00373, lr=3.75e-5, step=875]\n", - "Epoch 875: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.994, loss=0.00364, lr=3.72e-5, step=876]\n", - "Epoch 876: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.994, loss=0.00362, lr=3.69e-5, step=877]\n", - "Epoch 877: 100%|██████████| 1/1 [00:00<00:00, 1.30it/s, ema_decay=0.994, loss=0.00342, lr=3.66e-5, step=878]\n", - "Epoch 878: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.00382, lr=3.63e-5, step=879]\n", - "Epoch 879: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00374, lr=3.6e-5, step=880]\n", - "Epoch 880: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.994, loss=0.00355, lr=3.57e-5, step=881]\n", - "Epoch 881: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00363, lr=3.54e-5, step=882]\n", - "Epoch 
882: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.0036, lr=3.51e-5, step=883]\n", - "Epoch 883: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.994, loss=0.00341, lr=3.48e-5, step=884]\n", - "Epoch 884: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.994, loss=0.00382, lr=3.45e-5, step=885]\n", - "Epoch 885: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00361, lr=3.42e-5, step=886]\n", - "Epoch 886: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.994, loss=0.00355, lr=3.39e-5, step=887]\n", - "Epoch 887: 100%|██████████| 1/1 [00:00<00:00, 1.15it/s, ema_decay=0.994, loss=0.00368, lr=3.36e-5, step=888]\n", - "Epoch 888: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00367, lr=3.33e-5, step=889]\n", - "Epoch 889: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.994, loss=0.00353, lr=3.3e-5, step=890]\n", - "Epoch 890: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00349, lr=3.27e-5, step=891]\n", - "Epoch 891: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.994, loss=0.00361, lr=3.24e-5, step=892]\n", - "Epoch 892: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.0035, lr=3.21e-5, step=893]\n", - "Epoch 893: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00349, lr=3.18e-5, step=894]\n", - "Epoch 894: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00396, lr=3.15e-5, step=895]\n", - "Epoch 895: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00364, lr=3.12e-5, step=896]\n", - "Epoch 896: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00361, lr=3.09e-5, step=897]\n", - "Epoch 897: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00364, lr=3.06e-5, step=898]\n", - "Epoch 898: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00331, lr=3.03e-5, step=899]\n", - "Epoch 899: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.994, loss=0.0037, lr=3e-5, step=900]\n", - "Epoch 900: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.994, loss=0.00352, lr=2.97e-5, step=901]\n", - "Epoch 901: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00387, lr=2.94e-5, step=902]\n", - "Epoch 902: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00357, lr=2.91e-5, step=903]\n", - "Epoch 903: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.994, loss=0.00358, lr=2.88e-5, step=904]\n", - "Epoch 904: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.0036, lr=2.85e-5, step=905]\n", - "Epoch 905: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00349, lr=2.82e-5, step=906]\n", - "Epoch 906: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00384, lr=2.79e-5, step=907]\n", - "Epoch 907: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00367, lr=2.76e-5, step=908]\n", - "Epoch 908: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.994, loss=0.00364, lr=2.73e-5, step=909]\n", - "Epoch 909: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.994, loss=0.00353, lr=2.7e-5, step=910]\n", - "Epoch 910: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00351, lr=2.67e-5, step=911]\n", - "Epoch 911: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.994, loss=0.00368, lr=2.64e-5, step=912]\n", - "Epoch 912: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.994, loss=0.00362, 
lr=2.61e-5, step=913]\n",
- [87 deleted progress-bar lines: "Epoch 913" … "Epoch 999", each 1/1 at ~1.4–1.5 it/s, ema_decay=0.994, loss ≈ 0.0032–0.0040, lr decreasing by 3e-7 per step to 0 at step=1000]
+ "Epoch 0: 0%| | 0/1 [00:00 {N // 2}\")\n",
- " teacher, distilled_ema, distill_accelrator = utils.distill(teacher, N, train_image, training_config, epochs=1000, batch_size=64, gamma=0)\n",
+ " teacher, distilled_ema, distill_accelrator = utils.distill(teacher, N, train_image, training_config, epochs=300, batch_size=64, gamma=0)\n",
 " N = N // 2\n",
- " new_scheduler = DDIMScheduler(num_train_timesteps=N)\n",
- " pipeline = DDIMPipeline(\n",
+ " new_scheduler = DDPMScheduler(num_train_timesteps=N, beta_schedule=\"squaredcos_cap_v2\")\n",
+ " pipeline = DDPMPipeline(\n",
 "
unet=distill_accelrator.unwrap_model(distilled_ema.averaged_model if training_config.use_ema else teacher),\n",
 " scheduler=new_scheduler,\n",
 " )\n",
@@ -2209,7 +811,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 14,
+ "execution_count": 13,
 "metadata": {},
 "outputs": [
 {
@@ -2231,7 +833,7 @@
 },
 {
 "data": {
- "image/png": "[base64-encoded PNG omitted]",
+ "image/png": "[base64-encoded PNG omitted]",
 "text/plain": [
 ""
 ]
@@ -2248,7 +850,7 @@
 },
 {
 "data": {
- "image/png": "[base64-encoded PNG omitted]",
+ "image/png": "[base64-encoded PNG omitted]",
 "text/plain": [
 ""
 ]
@@ -2276,7 +878,7 @@
 "outputs": [
 {
 "data": {
- "image/png": "[base64-encoded PNG omitted]",
+ "image/png": "
z8XN/Q11o+UUAwZ6lklJdKdXVHVL/yNq4S2WoSpP2rL+ZfZ82otjyPqcer6vNGs1KIKfiefWfhhV3vMM3Y67SZq91dU3twi+InwaI0v0KBjVo/k0FMqJIE05vIFKFwa73OToPpQNd0zz9dhwiV3ZtAJMWT1H8KzyEHxrWKsJTYhLoiAE3nEowtDTPNkiJDgErNi+NwYUBAM4SAo4nTctCc4xV6yrZqGgj5DrB740Kz1RXLx+ZRPkoMRvEu5H5Gui4//2tNqNXw8OCjtpu44lSdMPTIFTkyQwBWsgRTSfcJy2nfI8RDhL4tH/+fGG80yMNegHuzwbCrbSA/l5WiJpKPRaS7s60Gn3DnzCBxUazwsG8emolcnTs6JFGCBjgrdZpSl3B7esGM+NnAgpjXQyN7rZ4WFTdUfj19irtJMPgtNf062/MBjz7IHpbtZNlzfdUO92FN+c6qnZ5ORs7c5YlB5ygYITSQxzJJjVBI7/A7nJWX9EKybrZOH6vdvwZIOJRHEuVoRKNfzmzOxgta459IOwMv+Pu3/aUncxgVJU+ALeBh7nh8d9xpjF8BRMVojPt6Zu7hExzvwGeBRrQKAhT9sahXV9kYnz1uPzb95P1Z08J5h9rEl+ZkFG1SY9KjfvXsYneT50Rvj3ybO77Oufs4HS00m3VWKsVHY8SaBmAgyELjEok7wZHrmeB+OySmcP36zaE1YkuW8F8DgzJ6EMvwvcqEbmDoaAFPrIcP0A8JtfdpjrFiReXosyQ9nwU79NT4k+7PJJ2Tf6VdurSn3ixRGvTZtexpkPm7NV5XaGXqcTYxXxW77EjZqMjU4hE2jCJc6R8FpWzzyeQOgDeQioX938W1ld50LvV/nbzVRGubVo2qHMV7abyFnZoXDqZnVBob/OF//ls+WjhVZnjvm/AytAKLuqfF9CGUFhkIzz1pd+C/tKJtE7UWMVX9DaPFv3c/tR3t9JIaI8T7DqCVXH0du0IWN1B728761lzylmDZX9e7HFfsug6cf7uD+wBAeCYlpj7UiFBHpsTYIJ2NPUW9IC2X9clsYvZ4DvaajsWeHBziGthFWaA2etJ9rv5356Gg/5uQYtccWmUfaJ2XrKiQCt2FSAETndXpQPjBF22Ogz2HEJ+cntjvE/wqhLd/wSLyvVhdTMrl8S1EnH+0Z+j6pXHc0dSG+7J6L6+DVzEU86zdGJnT9Gw4Zegirs2j99x6X7QjKymwMlOTKpcGsIp7XdZo5g0ODqh602nmlV3yG4Mc8vhl5G8exF4zIFgxb83f2oIqeyLl90fIzPQaRD0fLK1gTadtUOGy4L++TQFTMQGlhzj0ytE/A+l52OQ7H5+6Y1yBB0NyoMNiBopVja3Wq0dUvT+XhOU02HP7ny8GBmz6rtD2Ck8i17EbP7Nfn6pyJmrl0rtMOITMGAUUftRv3brPJ3RYQ5A773gV8xMQhn3r05l4Oej0PzgrAi8GXfe2Oxj9IuD805AtbVBE07q54qstPMn8F2zxB+MwKUmo1JgZZ3Cs/9Eb1YyDcX2zUK4Qu7ECVR2UINiWiFwb0vD/TKjlxN0L3D7ZqKuMFb5jCA6xh4VlNIL1CApN1OtU/ZQCBjn4qYeLUGnDcDhXrpiLzqu7lqVTOuHcFcBlildTBPD8x5OQD7Luq1EgioYv1alLgEt04Aob5G5yaNK8yDgMAVLcdGodHwGqr3TLNJ1vVvPaIW77gwrvIiv3LVHGYlrG5dCJL1sPJSinrJ6UrsMJk3OkOeb0yHFwEFwf/3w5ceOsRqKFBohGQSCqWfaRLhy+1Wzw2xTeMCpoFTY2LfGiKQyFthLUna9skRRNkaN9MYTK3dNbzziOKy6ow5bZvNKTuO0sinW4e+/tGqMDnFkYuHT+O63mndCVpt1A9drkcZ7UeN4YYbb3uhavTK7l8a7t+ZWx5NsgG4WRSqIwdwUs45ajxY9+wg5t3PzvapeWP8Za2jeyPHZ64l31BhCnxxzoB7rn7DwW4j+/e/TQ0CgNhh29W71wgeG10J2ygiaemitJ3de7LDtl7oKlmPPRtEi3cSZUpW6CwXCavaD0kg96fxvWCinwc4KrqO+NczKazw5BUgTvLCFPEJYdBDtk0J95fDnmObAMp8RWkxc+pqdSleXMov6K/07832PGLxrF9oj0psmBfDS7+RsDrIa8ODwWd1pNe3DqUHX8Y1qx1fvHNGwKRwHZtlAr0S5ry51gQ5tgq43n36etCLrwnfjKQ+74HbFc/A9Et3goozPg9Jglj9kBse106FVQ4z+CYEOnbuaijV7BtmyOhpsbaNC520Cjtt8+x8mJTKi7LIaom3ePk1hm14mAdZ8hgG3Cdqy5JlNme2tbc1dfqpk3TiloJtrARSNK9Ou/a085zQPtux+5P0itr0NiDpaxiEI1aXoWlf3d82m5IMDr/SSWRvp8C1sv0+PiU4PX+hssDWJDF6ax0mEtzq6vwNhR76AYk1xWr3f5jvG2LnniLeMY9hvouy45WQpvWZ/Q/jD8WaEeKQrKO3Zu/T877f7PqyQAboqe523vdkD+i7Ko3p3b53dmr2tzKksmYV3YUWlDW0MNSO1aghq4yDzodPs39s5KZmpRh6EHfNbVKzuivx6pQB3B+PqutY9AMp+rAwpkp5uPo4CEn7cFl5tThAyIOpKW9aRB/2y0obiAi9Yy87dBng7x1+QscYokatlDEeEizVX2PVsgn6thV92lxtr0k34275MkhayQ1Yr59k66/Ld5s/vGbYFhuUKjHLLx87bidZVkatwA0qumyM/AYCTC7+G3u1uR1vZsQ7A5aWzL68WQDmridA4OVv/r2+Mj2k/ebLT70Pd3s6vVfP911z6ioxmyTysZ7zQcn7sKoD3ocl7ts52Ov5rt6sJLlEl7Sm38azMllsTBU5oPhB6w+xJ9+c/3j6AP4tjgK+Q6RW+tvlP/wXPz/hmo+5+Row6oAAAAASUVORK5CYII=", "text/plain": [ "" ] @@ -2286,7 +888,7 @@ }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAUpklEQVR4nGV6a5PcRpKku0cmUN1NPWZm1+b+4tn+8TW7lTSUyO4qICP8PmSimmNbFMtaVU0gMx4e7p7g//2v//p103a7nccZ4uM8aAO+Px4E+rYpQoyIGOdw1b7vgP72639ur68SE45QjuMcw/bXf/2/P//nj6y83W7729vL7U2NX79+zzz++Y//OAcieHt7hXmeZ1Vtt12K8zwAPB6PcRwUX15e/uOf/6eAx+P+8f37+XhUIcdJmAoAvXfafd8/8nuzz+xvJ3EAnTjKIVXVvRxiVblKSp488mRnVlTz21ZUUiGyhCNHQWQ7T3yUyzXOR52h3scjf//rX7feK5hVSQdAYtDFEi04SYoWT1SO4VMfmTBH2mApCpmELAouUDCrEWm219vL674XjBYSm9RbHKcD7AyCWWWXwUre2NV7S3TdpNb2TSBcjIhi1ajMJo0RdVYemf18vz8ej+Nl2yPaLp5ZORiNal1ore8RUguCzqqq44SiiaSIrQE3k2OcVdXa3roqS03OSAhme7lt0VqNQdJVBgBkZpaDBiozJZbhMtklh1pvEdF6NNg5Rkimxvk4jkfleJynRxKk+Oe3j+M4CYDV9FI+jBOOkKRooghQVWlXSFvvW++oTMzlGAYMitsWfduPx0GUGaJJtn27qRGDIkcChsvjHJVZgAtZ5VK6KpEtcwz1MEWJIWcW3LYOxPevX4/jTGae4zhGVR05vt9PGDny4+N4femkqlwCbcJlu1D2eZ7ncVKK3sl4HI+qIoVyVVaibJIRAcGmQmzgydZ7N2a0BCRB2yOzspIswlUgRhWKVa4sblRIoZAMl0Omy/f39+M8VFWVVeN+5Fk10i1ijPP++Li9fIkWQYq8FjZIgirbcEikDJ+Pw3Dvu22ADGPAIOd/YkQjDKmRzLRh2JU13yurqjJhAkChKouQbdgRTQoQVQWbRhnjPP76/q1yVGVmzoulAYYLj2Pc7/fMo/dXQiRZc6MAGQIARoCsKhIFUyEpXRQFwajMzAE6+tYY9hDZCFQVCi5Ull12VZXtmtsHbJcrOO9zYavWfsqoqsf946/v72OcMwBZHpWsioClMfJ4nI/HA63RQa7yoQ3NO1kKALYBqLVQUEISAIy531EDYESgMLfaKBIkBaRtVxlVvl4ACICYWwFgRATs9T/XLx73+/1+hCurbBgoe6JfkWPE43G8f3xntBYbNa8FzBzOe6ysFNSlABaoTHSBTZJkhFpEOQ2TbFJIEUJSBAx4LeBa5HyRMyQUW2tz7TBclTlof/v2feRBjolkFFizoSozz+N4/66IQMSX159CnRAw40OSM5cAKIokMHKUixSMXBelbZEwKqvKJBue19AMBEwINEmuHgAgMYISSfZtkziTXVU1xuP9/X9+/y3PezJRcBFXWg27amTe7w8TkGTtL6+hiAhRgElm5hhntBatA6yszLTde5CqqrmrygKROUYOV5FsZdhroQRoEJRYM2PrU4bZIkIgKQkECJZd9fj4+O///u+vf32tGiPTE2IlSYRhA+XEWSD4Ht9bk4Jue0SQyKQkIrMqABjnebq1WQUUKZCIaKTsKiBzLJCVlXna5jOVpEgpuFpjIdecW9EiFNDcF20cj8fvv/322x+/5zkIlVl+Yt3K6sSAqnGe4+P98f2v9+PxAEohSUbaRam33nSBChAMQi5UVZZBenVnjZFVNVuizWlHicKsPlGtqRwARBQgqLWIFpImBAF2ehzH1z/++O333+/H3ZV47liz/mYTzc4CAFed53jcx3mceHVrIQrD5YqILXaRVRWhmF8Bs3JISKIIxzULKCpZrSpJErz6BxBbtIklQaUdUu9NZGttf3mJaC6PzPf3b7/9/sf9fq9jZNYCkzVuABEGbYgzXUZV5nke7+/v+76/vb1JG+zK0VqQypGAo81qgTGZHzWbj5zYMTs4M8vVCmikqJUSkkA0hYNGhORqEb01SGqt7zvJzLp/vP/rt9/++vZXjnNeMQTNeEkgBUKeEGM7s2y48jyOb39+IxBb+/L6MzARqECO81CLcD+PY4wB0tvs/CbFQgbYNsFyZWarMkRJlKI1DQHu0QDa1VsbmRFq/a34aH1r2x7Ri3U+Hl+//jmOc4zMKsCz7iesR6hHo+AqVI1MG1UoI6vu97PwfQCPX863L18UEWkxR45G0SPHyEpFVFVrrfeOOVjsld0JJHArr3uDjAgq7IxoDJXdWyDZ1PrmUb3tm0SjRBwfH/f7o/LMPKvSRsSqIZFNsW+9Nc0R+jgPAZk+07MXH49x/vHXedaZ+fLyZsTLvm99ZwhV0VvXrhCwEM2epBRSkAgKRMNoE6xmW0SLiKhy61EOu1prCIVa2zeMDOp4HJl/eZx//PGv87jnODNrNkzVbOJ5MYTQQwyVy6gAj5GJzATsTARjQ4uBfJyHzn3beu8ghut223vbynUch529b5QWUAJVRVAEGU2AjTJsg5wiK1oXUK5ojRWKpmgqVNYYH1Ufx8f7t/dvlWOOm8kls0qL3wOkQRNNQgEIw6AjTCILJHpvb9vLrq5iA0hB4RqZCclAjhznmWJEE+VJdSpzDEGtCcEWEUZVjXQF2Hqn2FpkJTw5Ng1mVuZsxTGO8+P9+zjPzMqyvVhFZs0phloUiWS0VpnGCUrBrqgCRhYQgkQCvcfrl9ft9RaK80wOtAhRCkXI86IRFAQaVdkARQhC2/d91CCoFkExQkEC46gql0fWJFvMUVKNMfI4juOszHIuvkfSk8+4FpNDgWRIgTQgi6iiLbF3pdWjt4gI7l/eXn7+qW09FBEu123fSbYWIRxnttZaD0wSYLslLYaMai+vb4/jY5wD9shzZCoF+/54TGgrWAySNYn2GJXDlZhkGl7jhqrJtxYuEqYUjKbitgPnyFFmKqK1LcDXbd9v7C/7yy9fWm+SWgtX0KwsAAYokWOqSnsWfxmUIReIZmJUfRxnnY/zOM/zFAngPE+SDIHqjRFNoidcLnq6xuuilJiZw8S7MsDYttvt9XU3Xs56/3h8fP848lSLvm1v3G4vL6+/3Paff769vV3kHM5CeekCYg7HpYsNM7k+FmAC7f37+8f7+/k4MvN8HOc4p94bI6kpHeN6Uy0UhghpkVNgfazFvAQEQFARbd+2vm99f8mq71+//fH7Hx/HB1m41f7319d//P2231q0moNijOPxqMqJmE/K7ZoSfWE0CbJmGNvH+/v9foddU0pmFjyyMktiOCgtOi3RhiI1Jv2e1H2NWgJAKCIao0eLLaK13rfX159/efvyFluvf+Y/vv7+119/2o5t+/Ll171vS/LZlXXmuN8/svw4ToqhZmPRUayUeObEqxLa8fjIMQgcj2MOcFeNKhsRghiTcBNSqItE5qnrFVK5FhDTRinYe+tb//LT2y//+MfbL7/ury9qEUDb2s+//v319afZ8IQqyyxYuJjgfrvN9p0zl1oezsXtL7azNoNWWRNbj8fjPM6RZ5XLnumThIUrc0KIwNA5dRynmLOrko
btUcWRimxo2769/fTTbX8RVZknwRIBhFBAVdbISgyUHFM+QLtuk/PYNcWhwCX/SHrCxKWzgEbQVed5jnOMMRWJ7aUJF8ovrJQiYKu1aC2itWgZVa7LEKONTI+sNgYtIWDAJqYvVoAFQzAJ0MVC5XkylvKaCn4VyBOiYV4ffHJ0w0RbonWcmZk1MsdkYzKvMruksahoErbcMM2TypmuygUVS8eRId1uN/WwXJ6qZpZaFa5fklqnM89MLH2amB0Bi8JVLUtZGOAs58nTbaNlzbhfZlAB9Po3vla/hu3SQH3bXdXP4xxnZo4xUiJdC/ulYGvx8vJGTbAjfNkzVVWpy8C6dAJqzpyqJzRPnWsY1MTRaY3Ymk7FTHib8nkaQZ9p+wF/fVGQOXdn5CKaIiKirR9cNhLT/RPV1drW5uSBVrhcNcU4AMLlcnmMcZ7H5POwL8dl3Z6WfU2Cuay5cq9aal66+6oy8uoQX/BWi0DliBpQXA2h6Sz01lDMqkET7C0k7re93268UHaudflOl+NU65V43pBcEtSeZihYLPmSpbTNWdwTyNy4Fv38e7X39OimzzYyz5HtzGi4SP8cBRGKaO5kFcqh2PoWxNvPP8Xeqc/KmfN1XnjK2MX2ol0+31O0rEm8Ev5cElm0jLLpMulyuyg8Jak0M2bDE7zKlc6ctuTIHLwstItRgKEOdrDgHm3vrbX+099+DUa56HQ5c7hMoqoys+hZxcsWXI7DZOXL0ZzbkASUZ2NKQJUdpYJoFNymBTAh11cQJkub9T8LqMa0nk4YE9cnAhWg0Lb3fdsktlCT+suXLz99IT0NEdiTi3OePOSgQiIoT51eq04XCb+8OtAuQCiVPF0yslAiYBcdbtFa7w3kQktiOpQuzjCUneVZSIMHqypeXKO8Irb1/tPby+vry2QWam1//Xtr21XhfnIFe26ghNnahatD5w4WZsBTQ9oACwYHkhUlsMqik5BRLrdo0VqAWj1sJHOAhXk5zDldqRxjDpTGe2W50gbBl9v25cvb1vcqFND21753wDlHbRWeW1iVvBoA9rhwb24Qly70E0MwkFGwqJJJ6OncmkY1RURvjIvlAUqCWcnlqM5bzPON0zJBjHPkGGMMSfu27dtNaokEI/omxTXAF3xyVuYKKhfS1CIpuGAWFwt4ut4lxuzXFU5e02PZ0I3R2lY2Q7OypvU5kjkLeKKTAA2mPTjw8DmGXZkV4kxgprPQ9h6tG89awHNVWGddhulaQDltkrW8uv7BykfBRmnMacfltJrzbYFVU4joZBRDEMDMkDRGppZgJxZtyzFcHjVy5JzqBhFK24ZatN6l5Un/+D7DdsFhuSb3q+egt6tc82dd4Z9pWicUzyisXazSa5PGRmuiBJmokVJEnK4q+zyHyJeXvbVWVSOz7Mw0cKZ7Dw8WFNvWem9943UOgh+c0eXxrlZega41X3MF84LmlR1fTWmAqLJYgFiTQwAuAM3lMhuDEjdtQrVUjz6ay1W59dGivX55sX1/HI/jkemsynKBITGi7fu+v87Yr1lbrko/GYovxT979Tn+sRT0NfYBuDhpgUFULbt/TlZonj0YRgFltyzLhILLDo4cpxlSTUvWRus9xPv79zHyHKPSZZ/piOgtWovetogGr1OJqyKeo8U/vC4uyWtT1+BfshcACpaLUAIA5eWum1cJPZusoRy9b6+vvbVQ5BjjOB6Puysj5pjvwbh///Z4fB1zDp2Zxpl4i9Zi+d/XzKsr1uX/tfyazbe6p37k6s9UYP7CVHhFr/XDBq+yfL4bbl9+/uX28vL600+SWgTsHDnOY3r2CjY2pOt4zNE7RubI0yizB0NXfVd+rt7XXPflAni27Ip3XZ3w+foBqnhV1DJtJwPFRXEueWAbdPvlb38PhShUeZ1uhHQzpsyDTKtEX4RiYgWC6m0dggBYwuyaplW1lMCq8cnBvLRUXSBzNfkFuqs5JuP0lL0LOq+c6YfftFtrHZUewzaqrJiMXItGYhSqxvk4Z30Q0z1ED7XAnMefhOAiZfMQd5ETAywW1tbX+xVHXFBbn9H9bIsZ/TJo02DRwnU7E62cqJqLnnfXda4zQWCBsdxba9FaCxom9x6TXXIKXlxtkHUhBufRuJ+geq3tc434tz75bGbq0o6XgoFR5ASohQWA3SqLlWCsdBvrWYxVvOvIq/d+2/sx2hjNtsytxRW2ee/lbrj8eWL7Yw//UPK8pC2uol7D7ioOIGHOU55ZNwSlIoTnoDFMtCsE624EYNYE4pUVwlYomvbecuuZNcn57MVFq4mpGUgK7XJCPuP+WRrPpMzNGJ/65RLkc2Bp1cGzg4U1xFZby27zSsuSBc1/v9XVZlQouO/N3scYWTXGqESdeT4eandA53EfY/RtV4SXIJqHJ5/d+r9fa5sLKWdWSMAqQwR/oCFeanBWFWywXT01w78k3MVQLlvAYGhapa0HaSbHKMJl3D/u6e9lnMcdAKlofRXKvxX8FQvS/PyOplEzEbPmV/lK025dR5TLiNV1+L6S1yYxtGfNXE33Y/XiE9oBRwBgUQigNsPHcRYfYgPUelNryxgQr9NEX4V/ecPzK1tArUG4ojwDOJXwsl8lXpbi+pyfVdLsoq8b8MeF1/NHwDlG2QCmI4rGdkLF1hgRLdD3Dmyt92gxl7pOu+uzdnjZgn7uZgJgfUr3ywC9FqTnoRtXOa7HIfzcwA9G4xqe16NgmBTRVXk87jlOOmFH77f+pv7Wom9B9qbWom2VqcuV0XVYD9GlVb7GOkD73FMsj0PA8ovWnxmAaTg8t3X9hPl0B4HGH5rLzwnjWmpjIvs4j/u9sgRXDrW+3fauiN6DnGgfivFpzDy9BsOWrqa7igRXkZJGyTBLxc8m+dzB3MXn+tfVDc9mbZ9O0DUjryx8smDAmSmJhRwZlYpQ37VsEQBUROBZ6lef2VfWaZCs5/p/6HDNXOva16qfICHMhzieJXTt5WpjNl0dcTHyi0ktR/aTn4XCa9h6JhflBXXzstIzwGsoSk+aMft0eiaX/UbQYpG0qhKLhq4UgAuD1gMMVw9DpGFRAhsuv3ES1udUW9Gfjs1yftNVrvX4oxTrMRpbEZT0BBt/giZMuJ5PTl2BW9UOExA0Z2dQ6Ymxno/q8HrK49r3swu0Tsmar1qfZuu/j5wLRctl5KjJRmtUnifZ51fkhKbpG69CxBqK+LcrPvvgqTSxasykYj7ZkytNq1WeNjPRrt5db6LYLCAnBb9u40UsMQksMc+ZB5EwyDyP73/+0V/O1l8obX1rvStmQgQmnuJ2rbWutPyg7Pm5E4KW5rEdHcbzwR1D+Ay/xc+vgPUcWRYWo3wKPM9HTApAWRLU9rZXPHKcEuUxvn1V5v5r3/vrtvVQX+SHebH1yyXBD131OZX5TPL8cxlxi6JhuUUTgw1/AhDMIA1KKLJlk8ycR0/XiVJKhSKMIAMeodstxhmjwmx0bLF/+fLy8y99u80HuMrTK5lUWsB0mofdrEuL2Zfv9glTmGazDa2cL3xaj7vp8koBaSJRrq3wcfL/A4sO9SdnU3NtA
AAAAElFTkSuQmCC", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAjE0lEQVR4nAXBx65lWYIQ0O3N8efa58JkpKnMMtBINEiICeILkPgFxnwMMz6IFgy6q9pUp42IFxHPXX/s9nuzFvxf/+N/H8mv2R/+dH7QBphlA+zphPL8t5//7t9V23szLDavBG3FYvn5n/7FQzTfgteDspHkP0D7K8oZf6jJeZwIsa+ems4/6Yx1vz1sfvc9ou2yQbnoz39pPhfPy++XWToPPy2/+re1O44Pv37c0fY/fN/EpxCXyHdqWtxdF1eeDj8+/fXm7d9sWlU98d3xgDfHX36E3//xv4B8IE9jd/4g7rZQ5y1v3eJH8qH9rXizrtgT0Zm7nuZzopIod2rzdVffLqdOQPnJ9+gnZbypedFf4O7TTP90W12uweI0GrTRBJ3H5Eu3NuYZ0xHrZdLzLAqhfJHO1efiMaOo2bccEtuC/uNlL+J6wbBbftyPmzpFX2CWoeC68y+Jg+YC3f0T6SuNJlOtV8mJNy8v45+XHwq1KXFTj1qyqHfIrT5u0a17K8zNNCDWzCeXMZl5fUHIrbP8qzyaqgotX7vmCCexuIl3czUc7JuUY5JFuyBfNcX1mTo76p42c2cBhUYjQW45FSyqAfVdGbiDyaesZsC7CwCo8HQ+rBd/giUrhwzI716vrtC6LkzvEAadt3C62S66qp0ww3F+mZdK3TD0yN89dGgXmF9VDqQhPe93zWf05x/qxpq0t2d/XsVHY7ToNmV29TmWus9Cd368yPv8L+s06HkfjuWQSEXvNsUrJ/HIf9HUZllKt9m2ykS90gZ76lvKvLkoECsl9p+mjvia8QHcn8uXosEwtbJ91WVo0XblHRWD/vxwwQl0HqNEXOgWm98nw8Vo9PESnU3WLRVc7JcT+AP++DHUpdU4nnxsJ4JrlJtD9qJeKplIfppQ7PvhZphfXKvNnhjW1vT8ofjcHhDEdiI08jw5aDrdP+J4Bbciltk8lqwFhYlHZa2BxHfIBh+ly+rGIvhQwhkULe4LZF/fmtySieLk5t6TJa9WoIm2oMDPEa/meIppGLKOVh76c988QW9rEI398RyTGi98c4M+D33sDmUbHNQzRvDL57+nv3DVE77FDluQSk8F2uVrpSm0bmxhSITmTqqT57YIfu9PuKQFCdPj+XMmTYh/vG5vKC8u6WLT0Ip200oPIqb73XuyOW58yGHo8oI3XLzMv87HC3LJBI1MPE/HGF3JdZ5QQUycZcb/0Bqw+/I0gOq6+i5bMY2srO5CGaE2ZaoM41icZX5O2uM1Q4uvvmvpAhprp3vrFMoS9XCoBjNj0D1FiPfCIJ7ieS6vAbNnuPwdkVPf74u9SOD6YZ7j7A3CUo9Iwaxt6R1193S7ovELWf1uMb+aPcZLhKdEp996Yv9QmZthChZSKqL0UQcUOD2b3MtJ5ClG0425ph4oV4Ol6lGWGMCCBEqHNLeRgmfwk4AW3RZGRICIVckjFDM/YR9Dit63Lu4anxankNi+93POxYoQi0Wzx5A9W8JrtJDyWmyyMqrxIN7DwY7WnPwcjEcnkN2+7oZqj9XDTw+dosbuP/ZxUFOx/iO/QpfqCU1HT5J16OKjxyT6MrIuHJEIxYj7vCRV3rTS7dRZJKAkZZ6nZEl9Gj+9nLUAb/Ps2Pz9Uf/0D39BNQuxEsbPKvTeGxgATUvGKFTP2Uh+E3hRo6UlcZmq53epN6M9FYBabw0IA+kxjkVWVAkYTzFMAaa4EdTdm2nsJT89TSnM1xbNdgBwb74+o2xPjz5sy7IJmT1zHHiwjOYQZk4q46dIUxYDjfnZqaEfbOlqL8Q6GegGcyNc6THMPmbmzbl4dq+XW/TzJ+XMJASAimGaCcGS7w3HE4A+x7dOXvaLUA54b7tX3RkfcEIzM7TEk97ZZK0LyPq9GboPn9ws292dPG3PrI9kTmicMoc16iM6dKN6DnDv0oXz7QKCyj2HCUSvVHDJnD4QmJ+n9xmzCJKXKaZigCH1MEZEFEv3n409NGscPI1l+5V8fX34uXNZ/vM/vyc4hpxIEzXPERIiJH/WFKToEuI+tVnzGzHkiTq0EtOjxPU48Xaqmv77Wf5GDJEs84fHn9iMkOTp0sXu9jQgxznM2OkI2TgPNAZEZBkJOKnnJccqZDIgqHOKE1oyOOhYi9B3K/jGJN7wumyEP08fMH4duJ+Sm7tNHn3VXuy+L9Kyuz9Vw/W2Hd0Xnm5QG14Ol10YlELwoKd+d4DcY7Lvp0EK1oleRvVS/waujUDXNqYK6vzq0eK/Pxx9nzQ0p3l4Uu93kuUWG9amf27sr/jwPF/SAlrmfIyePifTz5bzlHaDMSAwgfulM/QyP3zpwuOghpQOkQJIWsrFaZwvH05vOCJmxBixDIKUsth5LRfQQrdTs+JArvCb23JCk6yZAVrFyYXWZaC4cP1yeuiEpCD5EZFw3yezpqk/0t2GE9xgD2/PiPPyxAE6fno+1Bmmei1GXBdpMa+ffTvmt1nNQXmKgZuJoFcVhAXTx+MjYoeaJEPg0hYF/1rKpR9TfTHqMrUo8MZbrcW6QN+2XJIzF27Wbkg2STMpcEu8kpV/Q2zliDNJj4VEjL2TmBKe94Ao1btxNdvV+u6t5zhluTR6ypyJUV3K7ATHyJw+1Gi4/lIle6O7B8tp/8scqfl4GmqswdngMgzI43rduKmdkrxKc6p9xsMA9Xk6dKnLgkzZzNTVHKb5M7XVLLmQSwWUmS9DgEm5doCTBgJHKiAqicfTSKjbh2XB9/VWCG8nxDCiokbB7efCjXHa2iljXFlwvGAAQCtLa/vZQZ4LPXq5zGeqsJ+X2+/2Pb4vLkTvXqb54fE5q6y70KXMVJ1hi7vBbd/QiX/+BHFkC++uivPsR5zc3BfZa1f7E3FGyeD3yxnABSliNCSw7Livkc04tmqc5gX0s+VQYpdGo7NUwgg8g3szUXdENECkUdnkFKNn/z7tdX0rspaeTagZKCtPMr2fDrGDxEZk0nYhE/4cURqEn88q+AlG8zD0+ikvWaUvKM58Fzv7YTCUsoxO5zg/g40XzjhanummzSHrueGX9NP5qb4e/KTiZPWMF2vJOETQoWCuVpnNcffQY36xKXjMbZhOzi5EHkjBV3ArE1kxbmYHUsxX7kW2zxa9mm6eJpc/VzSVa4mH21yf+8sRx3NhuY/JsaIc0JEcfVCGa4ehnhyLDl+JIlRfQm4hSFUx6Q5BKYEZxQypU1VpVZFROVEVBA+6nnL0OiskJ3L886Bqc/CWGxugIHMRKPBIgRUGh4ei8iaV4cIVmLr+LBe1pyPqB+4qa1A1EsQLHTr+MJDv45frJdK++7ZYgKtutP5kju63X+Q6i0SUwvAEecHC1IMzUugrz+ur5tZ7bcLTlYgYoNqnMFAbuIGEJVRDHl3+5DyaqNfN2J1v6VLKVSTPtaaBY5TFMkzPx741GInmaOdnXZTrGnfo4uXp+UsM2PGWTTBrQQYBIUVQQNoWNdKbgyP9ZdjFu
qX0qrkB8/1pyx/QUZA9937wlUhluBayUpTkWvGmoBRjs5Jv85BTjvoWgQnMIUmr6/uj73o7khuGIwwDjyxUabZjqmnSrIco6r4UgcPb7HqB+/VVvamGrv9XNcYCrDOQnVvAdefq8PRs/nUkauguC9WiTUGRC8x2l12YyPK2KB20stdmimLVoEXMbvs4Ya12OpIGTv4OrZwL/fOT9VOvd4tO9gWcp16bLBHKcy0eLMAIomVcxtZOo43EkF131k+jf+lHA2EUTGCGI7A2wjhqWuIcUkwIE8tYP1+cecpPz77/V3V0ueuIo5f49BE8j0eec4EyfaGme/qP379z19ztEg12nju9F4Yn+9swp9FOyNpnGPsx6kXw6xFO+FIwjy3IuEFh57L6uzQRpW08QLPmdNKHJgHnj+pCn4rzR08dU4VyNsMOdiOMOC+gOHWsmp0yNLCggg5DcAZigZBJtCbIYdDA8ejPP12Wz2M3nrOEoxiJPlo063EEp6eM4y6N4Lx7/fUbQrIMoJklA00IV9/e5PWn87ENO0LgOJXiao0hIfQgBp330Jlusj4kqAAKN623GhpPkEA8VDPjG7E8R8dnHCEuxhITg4LZszAWZ9fBInkXK1MsUHTYa3BgrGakUcEh6JhBRiR19D1N8dxTUbGr1+jffC+AyyeGdGMinFiWg9K08Ln/ET1d6uUfLybXl0tEZNM2JSLXNbuo0f/+e34//LHXc3yUuFaiTtWpMiWwYpW/rmWd0boHFDkSypTF9gxm5aMQC3l8SsOUJrcAVuUJDmi8zIcZzTE9E5LnkdBSfRH9kfm9T1kqodPU2U3D85QCDxPuqnpNoQ0TcURXH6I+7Rq90q8w4b4ym7LJdoM6/QS9emfKIsuMGC5TiQJhpchAWA7ufBo64D/Rb/79uIHCfYOvXfQKna5jDUQBIQznZKbnX6QQSIcT1sPr53J8U9Vb8un4otPOFQGMcUzk3oc4z1zniBrvQMSkU8o6E3tdaf5VNt7ijWQY8jgvhNzU5jKYgxjUEZDU4XMMyb8zS/ONeuXkcGGewXV//Yy9firIx2i/eACmRPmWoSkKWts4kywiBGMmpJIakn7g7g3CxzRaqIDm+8pE4i26URPL2uOvf0U3x+wxTB9r1zo4Hm1CPhpUPIVYjloNRgXgKMLKDzAgTGdTAoIoX2YMVUYiYUU/AaNtIh9F70crl6hI2Vwf2Hmxx81VufQ+5Z+mLsC4hivG6niIL4RXfXY7T5iGgZ1AbAuGfIBHrbEN3fOHxyV6dbaZPt5v4IwOftQCCwwZQHeJqN64ycgy0c3i6i387//tf9I6VDDPZZiNs1DPkK8C3ntU6ZSCgRVKIEGIvNcKQunjumQ40qNXXaIkTN5C4IUgNkHb9/SHbNXzJ1BVkHE38wgeyZiRdcFdCQKEKRn7cYghhqy8Eu2iXKQFQbuQX2NROgDHfUTTS4K0WmRF3ej+yVYNgBACR8CW465Xc10uE5zQnH6iFSpekeTohGNn+sQi0nCBCOBwhYK+67OyXK5GJhNGMcuyVkISsomQS/SJ2Dyoa1lgTRe5NATMDucV0CtB1q+sAQtYFwu2YC3iy5BlvuV4Qz6M7ymqDGgMQPPzbFU+vZyHcC3yBVMaJ8hqCyIT5B1cXl08fzxLfuKqm8gxx0pT56gohacpWQPQH4eIWpxkNgl7YpCFCEGGQHAGEESj2AuwJB+HTDpoGUkI2keZrSpgy5uvbxjVhVbShOWVDeGS47gpSiTcAIIN3roAcigpf9dcw+sehSTBI3g55eWiS322w9uWoixL08nJuc65ZyUAIxhndzwrvHOvdEU/10tcNqGHD8u8mBmPVduzprwEBwA6+GG2o6Ao55VkCxhyUhBvvfDAJ18LoBJXEuF5fO2yxdV3JNhZp/ZdMetHauKXj7u6b9ym6cuKlaXDJclJjJSTltjLAC54JFZLZ5TKuDN1IZug4kWCAjKI8lipwaslQhxifpU5qdR0Mr50wEcncbHK5jHP8PLnT4N+gVn+EG2dXVA6v8mgzefc08HnGXRL1CPjfGSBLngcMrlO3MCiWcV5iuqIfMIUiZt8GHaMyxVQX+aXsBf8d12V5UPNsKJH9dL7jBUQBx/T0IAsJI0+m8XvzLn4x849Hncjyb3v9p4ueYCOdRH5VIICNDMnebVAl9BfPOr25iWNgeWN5oHlvj30+f9Fu9zeiIktfB6SHm2Ygf3ENrPuzPGvIOcn1CP8oSI25cDGTFeze3P1Nn1VrlYUr16BICmvtAmB8IApareb9TebP14XL3XwDsYECKGRi8fTy68nhKoRghSAgcV0mR/2PP51hdx6B3AxMeJ6LiiH1KmAlcn5Fpo5BN0D/hSxU0fvi9XvJm3u2/r7rvRKdLg7goTp63wHzxF88Qpey/40d//Om5fTY/HmrpQ0OYHqv/XCiMA4Mm7Em/dpEHYPDzITScJhsiqhGOFIBpUW/GuMxLT7jBglGmGKU8ojdpXY3lIPKsHQizvB7khlnpUNgLiuFmVBAZZZ+8p0YPaj54Y2HFZDCHkGpR1g+5xHY8OL+ucX8YO7Gfr3WW+1m1EISG1Gcy9AHZt3omCXfo9je06gDyXJuD9xQjxyDyXZQO8dwSJHseB1PF8/l0OaTyiXvMnx6CihnqPTWQ/gaETeKkdgkxqheKRN7WHjDHbmnHPIXbIExkUBdidUU1+MuR9lhQPHhiNU55JsCrIB90rcUYp4XkW87c002G16U7gfxZUYCVrVUoGHyWz7sVVvViJBlRLgSb4Jwb+YTmQeHNBc9xAs0bzQeioAY57ZidNJ7Vdzh+ECzDTbitKLxBHygEFentw4O+BwKiCmEJ9J3TYaxowOqfAB6MmSAEIEkjmP5IRwkGPW8RCAhnoqYuQTIzjYOPrrms9YAQI9H8wQQI639uFM5+rDS9pVHbESZzmf7zhllylBmUYQcLCQwngtYMBsEgdks5fwAT30ofhGlEXaNMWdzGWiB4rSydOlaDAoSJkyAKEDjJdLYQN3wEYMYCI1c8lVbPBmsDJr0UxZWMQ8lUGupGAM1YRo6yXu6cEkhqMdAEHRjiQADJQpaBJK2RfW/gAaebq3OEzfjtbNcyomGxAWZVWtBK5ETkKvz7Cj0PmQ7kkGxCiFwFy3p2/Qq9cs608RLfhyebmMoQiA0nIFwOz6S7IxRiwYpJIEk5QeLY8EIs54HImP85k2SC7ojayWN3VeqoYWcoEVGnCWNctCDBMJjalZmmx36JHyas6FRCLCXE3QuTFp0J39kKghtA/TSiyv30IiEKfm8GVAugeI2eShYjMkU7wQfL0XV0PQaXEawv38jDJm1bRNfo6XzFMLHcqBp4SRBEFGH6RamtrxjEJGAhUVsucDFQ/KIpbnHmroEcnz/XgBqB3NDlPZyDTPLwzlbMDniq8nSkyTlxnmJImuEIFBmJJVIBBOCsRogXmwVMR8uZx/TmbypvOVGZYn6g87ThcBUBcorZYHyrfBe3ax9VZGxLW9ziGa4eWrTJNq2alPFeJ5tiVFgfK8WDkk5WohAA1xhkkYGQUTS1DT
At2lkgLlYMSi5ITnAICUXpDdMErm0VyvviWz7STMGDgPQeGjP5jEeQqk9Gg0LAYCEknaQje/vzwPydpx+rX7OMCqeAcoDHZVzH+7qvItMS87N2cCTzFeLYH3KiUABMYqTAfQUYvML+mv5nz8+ccq36JVjSuCk0kE4u3WP87p6GNusUyM18fBgSrsQuFYT6aU5Ossj84R2vmswnyIIOrL80fZ1IjWoKIh6N15nHiIl/m3yyEaB3ChlxjgRGVRSdQlN0ZOnvf6c6JNllO3LV/YwBzAaMoup6xm0FpkYeZg8a3J+8EBlAsKYwpPF7z6vta0J++up/c7HiOZcqd6c+O1stbhOZ0RvislnqCtUB2jHWRFDk9+U0Z3YCxNhlJgIZB5pB2Ly5T3dDjwq699RAQD67GPECC7SPKkLQ/30b5KYo/CpHs0J485lDmjZM0XX5+yk/6oaHuGpMKTi4lFlJZYf/C332xj3E9VkV3cPkHnkydEegs3hXLPfANq9H7O53RZgZpewHoGI4JhUQmYJkarRz9P3LjZDp3IvK58g6x0C4TyY8zJ3ts8B96Fz9lp0mcA0OZmZQsY7flyRhAxoeg0XKKaCaTu95eGm0F0+abgf8jFFoKMoevj55+n4ZfIE1yhenwzta//OcacguAUmOkdnbppKrMY025CmECfkQqLqhSSb4qyAbSWaH6GdcxN7tORCp6JPEkXZuQzF906Zb4blZ8KfznzVspMMIjR4M+oJHSbPI98BO61q+tQsrryoMuPlNDFZouZsR62sjUhUioyPOdUtNu8MRhWJ0zYffnV6dmV7WZSeabqzO7Ppb/Me6YYCSYvGizhJCHWbA7+44FwNJNQA0IQTAjqPmEFQGMZqmLB8jzbLJoNnd5toCXBEC4LhhJMbN00jYzjvyAs5M0oA2UhO0aC26h9oPkxduwT0j28oJypSuARs4xXDic4yzAiwVmxEhQ5tw0V1MMYgXCzcScxpZcPZXk+pxe8hKauUv11xuwGlk1iL6wQRFFWkAiXxc4T8fU286HngHuqu2QkLsu4IFlhR4HY18H7kTDbyX76+GK174NKCaGyyXip0JbIZXNDolePfA+H08sU5CyMly0k8GmkQ4XImK8aMegjn8r5YALkKGptPA8zBRqwgD1Hb2vZRqOtKQTS2REVt9hSMg1kDZcL00/q6AWOpWPYvCrRybIEdxtb6PPC7wbVg1HmYzppKwsPNI4udnDu7RVEt3JjQQrHhxuFqmVj+IREyUVeEIEZGPx5FGG1zBtWpoRi5a7IljVjFsdfx2n4UwYyn6E1xWCCSwxIQFmUxroBN6uCZVngaT+zaun8pfOzL7CIaLQlyBJ5gcCKm+uGiUu+ppaLtvHYAla5y3wSbgy+OYuuL8X6Ftrp4d1uCXzBrSEVQqjJ1W9OsloQdNidTMCbxX8dXi+v7EzZ4rZpNvVr23omYVngSveibAONPISkBJB27NBHPXB3gh9gkZMMsSnExRKNowNe4qRvmpZgCCnVwWZ1yM+VLvqwsNzBAQUQTHIqFSnLQWmz3lX2+Vy5edrXmUKTx1EUEncgQDAJXtldL9u7775gHIJjLDKLU+juZVuK0s4UIdnfrZXb7Ksv3UmQ63orY9zvD0RBWWCBr0q4ouXircpMjyDkWs/HNF7TlcHV7J8iLjhIIiRNRZGLIpc5Yr3KCKBg2jFZe5ORr2ZjkXPSYrdISGQB4DITC4EEzkle3BatuC3/GF3+6H/eTNlSZYb9QISMELvZCCp2xT3lfSlC57EC6mG3v2UVuGQ1Zui124bwFTyKc9a4Mfa+H7okt68ZW9gXKJTbIQy+7Du2+PHy4JVqEr85+cEf5dH/UL8GkCuPYVvAnFIscVWolNT5Eryy9aaUnqWF+jJDvJAzbRY292fLs7IE0AJWs0xIGkdD5EC64sq9Kr/RSzJwF/1LnGaCRuy5L4biU48ZemZhFQ60Z9U+8JTste6iQYdyZEj7qAjwLQrn444sjub053Cv5q7v5LBZ4LEYPzf/59vVNr9a+TaUG26X/Lo6Pwm+DahPgANs+n6HIvMyQEaLFMAsdNmdIvg9xjVrQcrYBturMxCIRYpYvuFZXWJclKs6cKShihpFLiavOClyVjsKBqcda5njQb71Q6yGAx2K/dDX3/H94y/rfTgXjJw6SDdyzfMUwA6FO/Kfkxxae3iU90Rc504kQPEwmukqy848QB1JbwoxpKndvlmH8V++FLfXbjL7Y2zbTOkdQ5xWy2QddzrdLpqOBUsdQ5imQVgeLQUZlgIEcj5KDbqbAKu6jdHQmPMCzKNAIHlwwIGgHBO/r1357M9tvvinB/wIdzVeNGnjbsnE5+2ZokVbF4H1vZ+n56arsQxeT+cBVPL7Zd7aQ3C4AFLcJdzZCiNIvBMlIQRkVXkY/V7cBUEiI+3XhRiwr/NEoQEcYCYrXlGq7WgyEVDqx2OpQFHdRBtIvoY4guy4hFHBERtkgcRxwudQcLtlWZcqfrkdSWZMcUaPqew+vb9/25ZVVtev1qOe3dnpYf3rckIjYFGy3BHOrlxmBvfZ2SDLPZgvoxkX74g0c5PnpgW5BM49MIDypSiIOD1OZHKLsovaSYeTOuEVo53SCQiCFsiQhC5956kHIZQYV/W6oEtfBBiMJ7Pt9yzM/JoX2x+QHOOsOsCKlfFAjN4JHfXXUzFhx3oW3tHTm+r332SNXqPEMeryoXpdGjG93SNEujO2+HitKiisjX2AMBYJXEFYZRHO02xCeD9PwZAMYnQpXeAJBwOWlPl51N2cEBTH7su8n+LloEPwfkZz9IjOxqUnECMNmARRYIRN4czg0GKT2cSWtxFibEi633HTVLRtF5vTpQKKjjpLwoZ/vdDJMxQU71EzFKFMY01LSbVp5sX9ZcoHxFefEGogQzMAcJ91ki/WUYiVPhB/9y38HBw/v9owm6OceCwAcyXHGfQdKOnEE3H8wGUak3o5WStxStI4GEISAj/xXCuZxD0MJ5mySOArfdFKcRSX0AJoc5Nw+opjUNwYX0baIKQvCLd1HV/ijK24LNMF5CIxDAOhAmd7VGmQFzb6menvQIux8uYK9R0dAsbGlLy+XYyT3+txzkrwfvehQn68O33ycUOVzWZbZnpGUmKWCwqAEqldfzPMYNYf4BXEuKZSCxIEzXWcwW/ztgZ8dUPzesAken1quVMpTht1YS4TnqGiTZ6UsypDwCAEG3CqeqV8o3XMSZ4IJnNkCkVnbQxalqx089lkMGAekIWcJMHQpC/j05RbxM7peUZcygLUOEket4xfhxcAx+6zQTXJ1DjyRZcmCkY8taJprsW09K9f3eR3f7r629eB9ZeJr2pZQsvw9vXw2GtZNADz25j8lNqJBY42pZ4EAYYAhhkjxAVE2zkhgULDBI3JsIRqrPcna38F75+0XKUkCSQTGDWw1fpVK5gcZZ941CHOAH2/eVuWxIDlzFWaI22XQZyEYQ7MrJrzqot515K1wlNVgjry99On2LjaFoR7mBXfZscFJxM3ZzTN4buML2hAagR2wIs1l8ALZ07Mtqg5jbg
cLjCLrEQQMWzCOEwyr6dCvaoDAlUoHPQAT5CGhNndd+1X/JsrevQgmqhCTX0fbJr7D7qmORg+7yzG8pqjz5Tgq7qkevwY/Bqxbg99bWEf3ezV4dNoOF7DitN5Mc3JmvYm/xoYGTnlqLXB4/CDkRs47QOB7679mhVZeV2wQMiSAxojirohYqnzHpLP4O6tPSHuOQyTSEOOmjG5OoFhziftJa4glgVJmPpG6kNghVCp0SiDXlivGItuZ2388MlZ278REYjut2fEWRABazXTq479ZCNZKaAKxYZE3Ud7xVdimv1phtlABWGb7EIN7/YRmZBMsUFQTypQnDns81DQWbg0UnEjZEEI5Xido7wv3KXbTaa5wwOcc7OtixBFkG/fh78UJk4TR9yJEs5gIsE+YUpRPKkYIIlIirFAKmGgIvERFHUi1ZZq2W30qsWGjgmR+5OHVs1Eyr+BjeuHmfoGN7CVNf+6HkxtWcP9xwnDxHUEcx6JykTwAZIqwxxEp7HqD/PqjiFSaTvDtXIXBCuk0mCPE8zk4EebWWgmAc4Y8H56nB776XH8wf2naHUc3ts+Y6vp8PzhUsArTEaQqqrJy3j2QIFDiikEjJOJPE7FeTouFEm3DiF87JFEGvDDR2VeF5ScIwx5JRN5GbVXevAOiOrFQ7QHMv3V8IQHO/P5WIPXCOYa7r8cnqxYLpp3lLeypzCfSOLWjolzZnXwkaFojKka+q5444XE5RYQHEllNol/gxU8weRIsVGQzGMzoVW5U5CVHAtAd9MRFHqOoYYUQ1akgs7vu9avli0VO7n3phsreZ0jQw4Vd4uJvDx8ssFi4wG5cepJQkE5pY8O42XCE1wkODfl5AnHn8rPcXjKLvM8zd356fEf/m7a9T8HCS9FSh+ZEZLC5DE45xkQWSRhSkq6LAUzTKxA2MVvCgki9XB6QTpnZE3G5ktaF4tPRfAPzx7CdDGCodA0a7BWrCL+4l5s+F36cIbtncvhcsAnUPYuOoRnZxGXVST51zdFAycIdSD5W8sjtNK/2WJ+qgIXd7zj/tJwp1Ruu1132o2hMZs6Mv5quW6EVKeIdna8BkWGIw+YBvo0e0tNpTzEg6YZ5y1wx84KMEUQ5lOEsaKrYALE6bjwGcRVyMftYu3Z/L5zJXTP5kv5ZTmdGF3xgr6Z8ht+7M6nU9mVMYXp2kiJqEHTLU//CMpv6vMkfuzRfgWdGpfs5sJg0IqgxLJ13721n/Dtp7MTa4FvtquvGwnRGyQ3zQLbw+ZqVbf97Mma2BNyMLEVzuO7l6k7yYxV0uHN5DlhMj2PeVvSkAipN9vt4yP+hT9hQzrCppQ4KL348C+xX779AQZWMfT2w9UDxCY6OYs/N/4iKCddzR5eJDwS/un//cP/B+y41x4pgCFdAAAAAElFTkSuQmCC", "text/plain": [ "" ] diff --git a/examples/progressive_distillation/utils.py b/examples/progressive_distillation/utils.py index 2e9c8b8db78d..bf2f1786194f 100644 --- a/examples/progressive_distillation/utils.py +++ b/examples/progressive_distillation/utils.py @@ -13,7 +13,7 @@ import torch.nn.functional as F import torch -from diffusers import UNet2DModel, DDIMScheduler +from diffusers import UNet2DModel, DDIMScheduler, DDPMScheduler from accelerate import Accelerator from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel @@ -85,8 +85,8 @@ def distill(teacher, n, train_image, training_config, epochs=100, lr=3e-4, batch if accelerator.is_main_process: run = "distill" accelerator.init_trackers(run) - teacher_scheduler = DDIMScheduler(num_train_timesteps=n) - student_scheduler = DDIMScheduler(num_train_timesteps=n // 2) + teacher_scheduler = DDPMScheduler(num_train_timesteps=n, beta_schedule="squaredcos_cap_v2") + student_scheduler = DDPMScheduler(num_train_timesteps=n // 2, beta_schedule="squaredcos_cap_v2") student = get_unet(training_config) student.load_state_dict(teacher.state_dict()) student = accelerator.prepare(student) From a6105a72c6ade0d92da97b967d4a8481f811d0f6 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 27 Oct 2022 19:08:57 -0400 Subject: [PATCH 102/133] add distillation pipeline --- .../community/progressive_distillation.py | 196 ++++++++++++++++++ 1 file changed, 196 insertions(+) diff --git a/examples/community/progressive_distillation.py b/examples/community/progressive_distillation.py index e69de29bb2d1..20065f678231 100644 --- a/examples/community/progressive_distillation.py +++ b/examples/community/progressive_distillation.py @@ -0,0 +1,196 @@ +import d4rl # noqa +import gym +import tqdm +from diffusers import DiffusionPipeline +import torch +from torch.utils.data import Dataset, DataLoader +from PIL import Image +from diffusers import ( + AutoencoderKL, + UNet2DModel, + DDIMPipeline, + DDIMScheduler, + DDPMPipeline, + DDPMScheduler, + UNet1DModel, + DiffusionPipeline, +) +from diffusers.optimization import get_scheduler +from 
From a6105a72c6ade0d92da97b967d4a8481f811d0f6 Mon Sep 17 00:00:00 2001
From: Ben Glickenhaus
Date: Thu, 27 Oct 2022 19:08:57 -0400
Subject: [PATCH 102/133] add distillation pipeline

---
 .../community/progressive_distillation.py | 196 ++++++++++++++++++
 1 file changed, 196 insertions(+)

diff --git a/examples/community/progressive_distillation.py b/examples/community/progressive_distillation.py
index e69de29bb2d1..20065f678231 100644
--- a/examples/community/progressive_distillation.py
+++ b/examples/community/progressive_distillation.py
@@ -0,0 +1,196 @@
+import d4rl  # noqa
+import gym
+import tqdm
+from diffusers import DiffusionPipeline
+import torch
+from torch.utils.data import Dataset, DataLoader
+from PIL import Image
+from diffusers import (
+    AutoencoderKL,
+    UNet2DModel,
+    DDIMPipeline,
+    DDIMScheduler,
+    DDPMPipeline,
+    DDPMScheduler,
+    UNet1DModel,
+    DiffusionPipeline,
+)
+from diffusers.optimization import get_scheduler
+from diffusers.training_utils import EMAModel
+import math
+import requests
+from torchvision.transforms import (
+    CenterCrop,
+    Compose,
+    InterpolationMode,
+    Normalize,
+    RandomHorizontalFlip,
+    Resize,
+    ToTensor,
+    ToPILImage,
+)
+from accelerate import Accelerator
+from tqdm import tqdm
+import torch.nn.functional as F
+import copy
+from dataclasses import dataclass
+import numpy as np
+
+
+class DistillationPipeline(DiffusionPipeline):
+    def __init__(self):
+        pass
+
+    def __call__(
+        self,
+        teacher,
+        n_teacher_trainsteps,
+        train_data,
+        epochs=100,
+        lr=3e-4,
+        batch_size=64,
+        gamma=0,
+        generator=None,
+        gradient_accumulation_steps=1,
+        device="cuda",
+        mixed_precision="fp16",
+        adam_beta1=0.95,
+        adam_beta2=0.999,
+        adam_weight_decay=0.001,
+        adam_epsilon=1e-08,
+        ema_inv_gamma=0.9999,
+        ema_power=3 / 4,
+        ema_max_decay=0.9999,
+        use_ema=True,
+        **kwargs,
+    ):
+        # Initialize our accelerator for training
+        accelerator = Accelerator(
+            gradient_accumulation_steps=gradient_accumulation_steps,
+            mixed_precision=mixed_precision,
+        )
+
+        if accelerator.is_main_process:
+            run = "distill"
+            accelerator.init_trackers(run)
+
+        # Setup a dataloader with the provided train data
+        train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
+
+        # Setup the noise schedulers for the teacher and student
+        teacher_scheduler = DDPMScheduler(num_train_timesteps=n_teacher_trainsteps, beta_schedule="squaredcos_cap_v2")
+        student_scheduler = DDPMScheduler(
+            num_train_timesteps=n_teacher_trainsteps // 2, beta_schedule="squaredcos_cap_v2"
+        )
+
+        # Initialize the student model as a direct copy of the teacher
+        student = copy.deepcopy(teacher)
+        student.load_state_dict(teacher.state_dict())
+        student = accelerator.prepare(student)
+        student.train()
+
+        # Setup the optimizer for the student
+        optimizer = torch.optim.AdamW(
+            student.parameters(),
+            lr=lr,
+            betas=(adam_beta1, adam_beta2),
+            weight_decay=adam_weight_decay,
+            eps=adam_epsilon,
+        )
+        lr_scheduler = get_scheduler(
+            "linear",
+            optimizer=optimizer,
+            num_warmup_steps=0,
+            num_training_steps=np.ceil((epochs * len(train_dataloader)) // gradient_accumulation_steps),
+        )
+
+        # Let accelerate handle moving the model to the correct device
+        (
+            teacher,
+            student,
+            optimizer,
+            lr_scheduler,
+            train_dataloader,
+            teacher_scheduler,
+            student_scheduler,
+        ) = accelerator.prepare(
+            teacher, student, optimizer, lr_scheduler, train_dataloader, teacher_scheduler, student_scheduler
+        )
+        ema_model = EMAModel(
+            student,
+            inv_gamma=ema_inv_gamma,
+            power=ema_power,
+            max_value=ema_max_decay,
+        )
+        global_step = 0
+
+        # Train the student
+        for epoch in range(epochs):
+            dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
+            progress_bar = tqdm(total=len(train_data) // batch_size, disable=not accelerator.is_local_main_process)
+            progress_bar.set_description(f"Epoch {epoch}")
+            with accelerator.accumulate(student):
+                for batch in dataloader:
+                    noise = torch.randn(batch.shape).to(accelerator.device)
+                    bsz = batch.shape[0]
+                    # Sample a random timestep for each image
+                    timesteps = (
+                        torch.randint(
+                            0, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device
+                        ).long()
+                        * 2
+                    )
+                    with torch.no_grad():
+                        # Add noise to the image based on the noise scheduler at t=timesteps
+                        alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps + 1, accelerator.device)
+                        z_t = alpha_t * batch + sigma_t * noise
+
+                        # Take the first diffusion step with the teacher
+                        noise_pred_t = teacher(z_t, timesteps + 1).sample
+                        x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1)
+
+                        # Add noise to the image based on the noise scheduler at t=timesteps-1, to prepare for the next diffusion step
+                        alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma(
+                            batch, timesteps, accelerator.device
+                        )
+                        z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * (
+                            z_t - alpha_t * x_teacher_z_t
+                        )
+                        # Take the second diffusion step with the teacher
+                        noise_pred_t_prime = teacher(z_t_prime.float(), timesteps).sample
+                        rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1)
+
+                        # V prediction per Appendix D
+                        alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma(
+                            batch, timesteps // 2, accelerator.device
+                        )
+                        x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2
+                        z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime
+
+                    noise_pred = student(z_t, timesteps).sample
+                    w = torch.pow(1 + alpha_t_prime2 / sigma_t_prime2, gamma)
+                    loss = F.mse_loss(noise_pred * w, z_t_prime_2 * w)
+                    accelerator.backward(loss)
+
+                    if accelerator.sync_gradients:
+                        accelerator.clip_grad_norm_(student.parameters(), 1.0)
+                    optimizer.step()
+                    lr_scheduler.step()
+                    if use_ema:
+                        ema_model.step(student)
+                    optimizer.zero_grad()
+
+                    # Checks if the accelerator has performed an optimization step behind the scenes
+                    if accelerator.sync_gradients:
+                        progress_bar.update(1)
+                        global_step += 1
+
+                    logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
+                    if use_ema:
+                        logs["ema_decay"] = ema_model.decay
+                    progress_bar.set_postfix(**logs)
+                    accelerator.log(logs, step=global_step)
+            progress_bar.close()
+
+        accelerator.wait_for_everyone()
+        return student, ema_model, accelerator
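Note: get_alpha_sigma is called on the DDPM schedulers in the pipeline above but is not defined anywhere in this patch, so it is presumably added to the scheduler elsewhere in this PR. A minimal stand-in consistent with how it is used here, written as a free function for illustration (a sketch under that assumption, not the PR's actual implementation): it returns the signal rate sqrt(alphas_cumprod[t]) and the noise rate sqrt(1 - alphas_cumprod[t]), broadcast so they can scale a batch of samples.

    import torch

    def get_alpha_sigma(scheduler, sample, timesteps, device):
        # Gather the cumulative alpha product at each sampled timestep.
        a_bar = scheduler.alphas_cumprod.to(device)[timesteps].float()
        # Reshape (batch,) -> (batch, 1, 1, ...) so it broadcasts over the sample dims.
        while a_bar.dim() < sample.dim():
            a_bar = a_bar.unsqueeze(-1)
        return a_bar.sqrt(), (1.0 - a_bar).sqrt()

With alpha and sigma in hand, the training loop above is the usual progressive-distillation recipe: two consecutive teacher steps produce the target z_t_prime_2, and the student is trained with the gamma-weighted MSE to match that target in a single step at the halved timestep timesteps // 2.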
From 23d8c051ee2557a7337bb5f0dfb1aa40da2ac8d2 Mon Sep 17 00:00:00 2001
From: Ben Glickenhaus
Date: Sat, 29 Oct 2022 13:55:52 -0400
Subject: [PATCH 103/133] add some code to make it work with rl example

---
 .../community/progressive_distillation.py | 54 ++++++++++---------
 1 file changed, 30 insertions(+), 24 deletions(-)

diff --git a/examples/community/progressive_distillation.py b/examples/community/progressive_distillation.py
index 20065f678231..41b0a5147180 100644
--- a/examples/community/progressive_distillation.py
+++ b/examples/community/progressive_distillation.py
@@ -62,6 +62,7 @@ def __call__(
         ema_power=3 / 4,
         ema_max_decay=0.9999,
         use_ema=True,
+        permute_samples=(0, 1, 2),
         **kwargs,
     ):
         # Initialize our accelerator for training
@@ -126,11 +127,10 @@ def __call__(

         # Train the student
         for epoch in range(epochs):
-            dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
             progress_bar = tqdm(total=len(train_data) // batch_size, disable=not accelerator.is_local_main_process)
             progress_bar.set_description(f"Epoch {epoch}")
-            with accelerator.accumulate(student):
-                for batch in dataloader:
+            for batch in train_dataloader:
+                with accelerator.accumulate(student):
                     noise = torch.randn(batch.shape).to(accelerator.device)
                     bsz = batch.shape[0]
                     # Sample a random timestep for each image
@@ -146,7 +146,9 @@ def __call__(
                         z_t = alpha_t * batch + sigma_t * noise

                         # Take the first diffusion step with the teacher
-                        noise_pred_t = teacher(z_t, timesteps + 1).sample
+                        noise_pred_t = teacher(z_t.permute(*permute_samples), timesteps + 1).sample.permute(
+                            *permute_samples
+                        )
                         x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1)

                         # Add noise to the image based on the noise scheduler at t=timesteps-1, to prepare for the next diffusion step
@@ -157,7 +159,11 @@ def __call__(
                             z_t - alpha_t * x_teacher_z_t
                         )
                         # Take the second diffusion step with the teacher
-                        noise_pred_t_prime = teacher(z_t_prime.float(), timesteps).sample
+                        noise_pred_t_prime = teacher(z_t_prime.permute(*permute_samples), timesteps).sample.permute(
+                            *permute_samples
+                        )
+                        if permute_samples:
+                            noise_pred_t_prime = noise_pred_t_prime.permute(0, 2, 1)
                         rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1)

                         # V prediction per Appendix D
@@ -167,29 +173,29 @@ def __call__(
                         x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2
                         z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime

-                    noise_pred = student(z_t, timesteps).sample
+                    noise_pred = student(z_t.permute(*permute_samples), timesteps).sample.permute(*permute_samples)
                     w = torch.pow(1 + alpha_t_prime2 / sigma_t_prime2, gamma)
                     loss = F.mse_loss(noise_pred * w, z_t_prime_2 * w)
                     accelerator.backward(loss)

-                    if accelerator.sync_gradients:
-                        accelerator.clip_grad_norm_(student.parameters(), 1.0)
-                    optimizer.step()
-                    lr_scheduler.step()
-                    if use_ema:
-                        ema_model.step(student)
-                    optimizer.zero_grad()
-
-                    # Checks if the accelerator has performed an optimization step behind the scenes
-                    if accelerator.sync_gradients:
-                        progress_bar.update(1)
-                        global_step += 1
-
-                    logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
-                    if use_ema:
-                        logs["ema_decay"] = ema_model.decay
-                    progress_bar.set_postfix(**logs)
-                    accelerator.log(logs, step=global_step)
+                    if accelerator.sync_gradients:
+                        accelerator.clip_grad_norm_(student.parameters(), 1.0)
+                    optimizer.step()
+                    lr_scheduler.step()
+                    if use_ema:
+                        ema_model.step(student)
+                    optimizer.zero_grad()
+
+                    # Checks if the accelerator has performed an optimization step behind the scenes
+                    if accelerator.sync_gradients:
+                        progress_bar.update(1)
+                        global_step += 1
+
+                    logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
+                    if use_ema:
+                        logs["ema_decay"] = ema_model.decay
+                    progress_bar.set_postfix(**logs)
+                    accelerator.log(logs, step=global_step)
             progress_bar.close()

         accelerator.wait_for_everyone()
         return student, ema_model, accelerator
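Note on the permute_samples argument introduced above: the RL example trains a 1D UNet on trajectories, and this generic permute lets the same loop serve both data layouts; the default (0, 1, 2) is a no-op for 3D batches. A hypothetical call for the RL case (a sketch; teacher and trajectory_dataset are placeholder names, and the (batch, horizon, channels) dataset layout is an assumption):

    distiller = DistillationPipeline()
    student, ema_student, accelerator = distiller(
        teacher=teacher,                    # e.g. a trained UNet1DModel (assumed to exist)
        n_teacher_trainsteps=100,
        train_data=trajectory_dataset,      # assumed to yield (horizon, channels) samples
        batch_size=64,
        permute_samples=(0, 2, 1),          # swap to (batch, channels, horizon) around each model call
    )

One apparent wrinkle: the extra "if permute_samples:" branch after the second teacher step applies another (0, 2, 1) permute on top of the inverse permute, and since any non-empty tuple is truthy it also fires for 4D image batches; that looks like a leftover rather than intended behavior.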
From beeb9b10d3fec1e0120f388115c0af7379258463 Mon Sep 17 00:00:00 2001
From: Ben Glickenhaus
Date: Sat, 29 Oct 2022 14:13:33 -0400
Subject: [PATCH 104/133] code cleanup, use pipeline in example

---
 .../image_diffusion.ipynb | 715 ++----------------
 examples/progressive_distillation/train.py | 538 -------------
 examples/progressive_distillation/utils.py | 2 +-
 src/diffusers/__init__.py | 1 +
 src/diffusers/pipelines/__init__.py | 1 +
 .../progressive_distillation/__init__.py | 1 +
 .../pipeline_progressive_distillation.py | 193 +++++
 7 files changed, 251 insertions(+), 1200 deletions(-)
 delete mode 100644 examples/progressive_distillation/train.py
 create mode 100644 src/diffusers/pipelines/progressive_distillation/__init__.py
 create mode 100644 src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py

diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb
index 9e1666638a17..0ae6179c54ba 100644
--- a/examples/progressive_distillation/image_diffusion.ipynb
+++ b/examples/progressive_distillation/image_diffusion.ipynb
@@ -9,16 +9,16 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
- "/usr/lib/python3/dist-packages/requests/__init__.py:89: RequestsDependencyWarning: urllib3 (1.26.12) or chardet (3.0.4) doesn't match a supported version!\n",
- " warnings.warn(\"urllib3 ({}) or chardet ({}) doesn't match a supported \"\n",
- "WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
+ "c:\\Users\\Ben\\Anaconda3\\envs\\diffusers\\lib\\site-packages\\tqdm\\auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n",
+ "NOTE: Redirects are currently not supported in Windows or MacOs.\n"
 ]
 }
 ],
 "source": [
 "import torch\n",
 "from PIL import Image\n",
- "from diffusers import AutoencoderKL, UNet2DModel, DDIMPipeline, DDIMScheduler, DDPMPipeline, DDPMScheduler\n",
+ "from diffusers import AutoencoderKL, UNet2DModel, DDIMPipeline, DDIMScheduler, DDPMPipeline, DDPMScheduler, DistillationPipeline\n",
 "from diffusers.optimization import get_scheduler\n",
 "from diffusers.training_utils import EMAModel\n",
 "import math\n",
@@ -33,6 +33,7 @@
 " ToTensor,\n",
 " ToPILImage\n",
 ")\n",
+ "from torch.utils.data import Dataset\n",
 "from accelerate import Accelerator\n",
 "import utils\n",
 "from tqdm import tqdm\n",
@@ -47,7 +48,7 @@
 {
 "data": {
 "text/plain": [
- ""
+ ""
 ]
 },
 "execution_count": 2,
@@ -92,25 +93,54 @@
 },
 {
 "cell_type": "code",
- "execution_count": 6,
+ "execution_count": 9,
 "metadata": {},
 "outputs": [],
 "source": [
- "train_image = augmentations(image.convert(\"RGB\"))"
+ "class SingleImageDataset(Dataset):\n",
+ " def __init__(self, image, batch_size):\n",
+ " self.image = image\n",
+ " self.batch_size = batch_size\n",
+ "\n",
+ " def __len__(self):\n",
+ " return self.batch_size\n",
+ "\n",
+ " def __getitem__(self, idx):\n",
+ " return self.image\n"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 7,
+ "execution_count": 10,
 "metadata": {},
 "outputs": [],
 "source": [
- "teacher = UNet2DModel.from_pretrained(\"bglick13/minnie-diffusion\")\n"
+ "train_image = augmentations(image.convert(\"RGB\"))\n",
+ "train_dataset = SingleImageDataset(train_image, training_config.batch_size)"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 11,
 "metadata": {},
 "outputs": [
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
 "Downloading: 100%|██████████| 455M/455M [00:17<00:00, 25.7MB/s] \n",
 "Downloading: 100%|██████████| 665/665 [00:00<00:00, 332kB/s]\n"
 ]
 }
 ],
 "source": [
 "teacher = UNet2DModel.from_pretrained(\"bglick13/minnie-diffusion\")\n",
 "distiller = DistillationPipeline()"
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 8,
+ "execution_count": 12,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -120,7 +150,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 12,
+ "execution_count": 13,
 "metadata": {},
 "outputs": [
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
 "Epoch 0:   0%|          | 0/1 [00:00<?, ?it/s]"
 ]
 },
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "-> 250\n"
 ]
 },
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
 "Epoch 0: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0, loss=0.0107, lr=0.000299, step=1]\n",
 "[... per-epoch tqdm progress lines for Epochs 1-188 truncated; by Epoch 188: loss=0.0049, lr=0.000111, ema_decay=0.98, step=189 ...]\n",
 "Epoch 189: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.98, 
loss=0.00547, lr=0.00011, step=190]\n", - "Epoch 190: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.98, loss=0.00627, lr=0.000109, step=191]\n", - "Epoch 191: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.981, loss=0.00621, lr=0.000108, step=192]\n", - "Epoch 192: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.981, loss=0.00433, lr=0.000107, step=193]\n", - "Epoch 193: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.981, loss=0.00528, lr=0.000106, step=194]\n", - "Epoch 194: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00506, lr=0.000105, step=195]\n", - "Epoch 195: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.981, loss=0.00514, lr=0.000104, step=196]\n", - "Epoch 196: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.981, loss=0.00575, lr=0.000103, step=197]\n", - "Epoch 197: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.981, loss=0.00681, lr=0.000102, step=198]\n", - "Epoch 198: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.981, loss=0.00722, lr=0.000101, step=199]\n", - "Epoch 199: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.981, loss=0.00277, lr=0.0001, step=200]\n", - "Epoch 200: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.981, loss=0.00517, lr=9.9e-5, step=201]\n", - "Epoch 201: 100%|██████████| 1/1 [00:00<00:00, 1.39it/s, ema_decay=0.981, loss=0.0056, lr=9.8e-5, step=202]\n", - "Epoch 202: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.981, loss=0.00584, lr=9.7e-5, step=203]\n", - "Epoch 203: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.981, loss=0.00659, lr=9.6e-5, step=204]\n", - "Epoch 204: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.981, loss=0.00422, lr=9.5e-5, step=205]\n", - "Epoch 205: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.982, loss=0.00463, lr=9.4e-5, step=206]\n", - "Epoch 206: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.00653, lr=9.3e-5, step=207]\n", - "Epoch 207: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.0045, lr=9.2e-5, step=208]\n", - "Epoch 208: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.00267, lr=9.1e-5, step=209]\n", - "Epoch 209: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.982, loss=0.00654, lr=9e-5, step=210]\n", - "Epoch 210: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.982, loss=0.00651, lr=8.9e-5, step=211]\n", - "Epoch 211: 100%|██████████| 1/1 [00:00<00:00, 1.28it/s, ema_decay=0.982, loss=0.00358, lr=8.8e-5, step=212]\n", - "Epoch 212: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.982, loss=0.00608, lr=8.7e-5, step=213]\n", - "Epoch 213: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.982, loss=0.00317, lr=8.6e-5, step=214]\n", - "Epoch 214: 100%|██████████| 1/1 [00:00<00:00, 1.38it/s, ema_decay=0.982, loss=0.00618, lr=8.5e-5, step=215]\n", - "Epoch 215: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.982, loss=0.00661, lr=8.4e-5, step=216]\n", - "Epoch 216: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.982, loss=0.00528, lr=8.3e-5, step=217]\n", - "Epoch 217: 100%|██████████| 1/1 [00:00<00:00, 1.40it/s, ema_decay=0.982, loss=0.00469, lr=8.2e-5, step=218]\n", - "Epoch 218: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.982, loss=0.00555, lr=8.1e-5, step=219]\n", - "Epoch 219: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.982, loss=0.00442, lr=8e-5, step=220]\n", - "Epoch 220: 100%|██████████| 1/1 [00:00<00:00, 
1.45it/s, ema_decay=0.982, loss=0.00487, lr=7.9e-5, step=221]\n", - "Epoch 221: 100%|██████████| 1/1 [00:00<00:00, 1.42it/s, ema_decay=0.983, loss=0.00532, lr=7.8e-5, step=222]\n", - "Epoch 222: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.983, loss=0.00491, lr=7.7e-5, step=223]\n", - "Epoch 223: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.983, loss=0.00352, lr=7.6e-5, step=224]\n", - "Epoch 224: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.983, loss=0.00331, lr=7.5e-5, step=225]\n", - "Epoch 225: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00483, lr=7.4e-5, step=226]\n", - "Epoch 226: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00592, lr=7.3e-5, step=227]\n", - "Epoch 227: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.0065, lr=7.2e-5, step=228]\n", - "Epoch 228: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.983, loss=0.00474, lr=7.1e-5, step=229]\n", - "Epoch 229: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.983, loss=0.00374, lr=7e-5, step=230]\n", - "Epoch 230: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.983, loss=0.00537, lr=6.9e-5, step=231]\n", - "Epoch 231: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00522, lr=6.8e-5, step=232]\n", - "Epoch 232: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00589, lr=6.7e-5, step=233]\n", - "Epoch 233: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00475, lr=6.6e-5, step=234]\n", - "Epoch 234: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.0048, lr=6.5e-5, step=235]\n", - "Epoch 235: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.983, loss=0.00521, lr=6.4e-5, step=236]\n", - "Epoch 236: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00416, lr=6.3e-5, step=237]\n", - "Epoch 237: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.983, loss=0.00338, lr=6.2e-5, step=238]\n", - "Epoch 238: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.983, loss=0.00569, lr=6.1e-5, step=239]\n", - "Epoch 239: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00711, lr=6e-5, step=240]\n", - "Epoch 240: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00644, lr=5.9e-5, step=241]\n", - "Epoch 241: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00508, lr=5.8e-5, step=242]\n", - "Epoch 242: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.005, lr=5.7e-5, step=243]\n", - "Epoch 243: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.984, loss=0.00355, lr=5.6e-5, step=244]\n", - "Epoch 244: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.984, loss=0.00597, lr=5.5e-5, step=245]\n", - "Epoch 245: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.984, loss=0.00407, lr=5.4e-5, step=246]\n", - "Epoch 246: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00645, lr=5.3e-5, step=247]\n", - "Epoch 247: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00626, lr=5.2e-5, step=248]\n", - "Epoch 248: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.0052, lr=5.1e-5, step=249]\n", - "Epoch 249: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.984, loss=0.00666, lr=5e-5, step=250]\n", - "Epoch 250: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.984, loss=0.00453, lr=4.9e-5, step=251]\n", - "Epoch 251: 100%|██████████| 1/1 [00:00<00:00, 
1.51it/s, ema_decay=0.984, loss=0.0049, lr=4.8e-5, step=252]\n", - "Epoch 252: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00406, lr=4.7e-5, step=253]\n", - "Epoch 253: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.984, loss=0.00547, lr=4.6e-5, step=254]\n", - "Epoch 254: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.984, loss=0.00668, lr=4.5e-5, step=255]\n", - "Epoch 255: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.984, loss=0.00532, lr=4.4e-5, step=256]\n", - "Epoch 256: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.984, loss=0.00666, lr=4.3e-5, step=257]\n", - "Epoch 257: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.984, loss=0.00526, lr=4.2e-5, step=258]\n", - "Epoch 258: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.984, loss=0.00599, lr=4.1e-5, step=259]\n", - "Epoch 259: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.985, loss=0.0056, lr=4e-5, step=260]\n", - "Epoch 260: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.985, loss=0.00567, lr=3.9e-5, step=261]\n", - "Epoch 261: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00317, lr=3.8e-5, step=262]\n", - "Epoch 262: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.985, loss=0.00264, lr=3.7e-5, step=263]\n", - "Epoch 263: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.985, loss=0.00495, lr=3.6e-5, step=264]\n", - "Epoch 264: 100%|██████████| 1/1 [00:00<00:00, 1.44it/s, ema_decay=0.985, loss=0.0044, lr=3.5e-5, step=265]\n", - "Epoch 265: 100%|██████████| 1/1 [00:00<00:00, 1.46it/s, ema_decay=0.985, loss=0.00514, lr=3.4e-5, step=266]\n", - "Epoch 266: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00657, lr=3.3e-5, step=267]\n", - "Epoch 267: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00576, lr=3.2e-5, step=268]\n", - "Epoch 268: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00401, lr=3.1e-5, step=269]\n", - "Epoch 269: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00576, lr=3e-5, step=270]\n", - "Epoch 270: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00423, lr=2.9e-5, step=271]\n", - "Epoch 271: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00361, lr=2.8e-5, step=272]\n", - "Epoch 272: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s, ema_decay=0.985, loss=0.00579, lr=2.7e-5, step=273]\n", - "Epoch 273: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00318, lr=2.6e-5, step=274]\n", - "Epoch 274: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00549, lr=2.5e-5, step=275]\n", - "Epoch 275: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.985, loss=0.00357, lr=2.4e-5, step=276]\n", - "Epoch 276: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00473, lr=2.3e-5, step=277]\n", - "Epoch 277: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00442, lr=2.2e-5, step=278]\n", - "Epoch 278: 100%|██████████| 1/1 [00:00<00:00, 1.49it/s, ema_decay=0.985, loss=0.00499, lr=2.1e-5, step=279]\n", - "Epoch 279: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.985, loss=0.00476, lr=2e-5, step=280]\n", - "Epoch 280: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.985, loss=0.00466, lr=1.9e-5, step=281]\n", - "Epoch 281: 100%|██████████| 1/1 [00:00<00:00, 1.47it/s, ema_decay=0.985, loss=0.00361, lr=1.8e-5, step=282]\n", - "Epoch 282: 100%|██████████| 1/1 
[00:00<00:00, 1.50it/s, ema_decay=0.985, loss=0.00343, lr=1.7e-5, step=283]\n", - "Epoch 283: 100%|██████████| 1/1 [00:00<00:00, 1.41it/s, ema_decay=0.986, loss=0.00322, lr=1.6e-5, step=284]\n", - "Epoch 284: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00514, lr=1.5e-5, step=285]\n", - "Epoch 285: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00478, lr=1.4e-5, step=286]\n", - "Epoch 286: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00424, lr=1.3e-5, step=287]\n", - "Epoch 287: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00506, lr=1.2e-5, step=288]\n", - "Epoch 288: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00438, lr=1.1e-5, step=289]\n", - "Epoch 289: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.986, loss=0.00611, lr=1e-5, step=290]\n", - "Epoch 290: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00441, lr=9e-6, step=291]\n", - "Epoch 291: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00526, lr=8e-6, step=292]\n", - "Epoch 292: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00716, lr=7e-6, step=293]\n", - "Epoch 293: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00508, lr=6e-6, step=294]\n", - "Epoch 294: 100%|██████████| 1/1 [00:00<00:00, 1.48it/s, ema_decay=0.986, loss=0.00412, lr=5e-6, step=295]\n", - "Epoch 295: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00476, lr=4e-6, step=296]\n", - "Epoch 296: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00375, lr=3e-6, step=297]\n", - "Epoch 297: 100%|██████████| 1/1 [00:00<00:00, 1.45it/s, ema_decay=0.986, loss=0.00645, lr=2e-6, step=298]\n", - "Epoch 298: 100%|██████████| 1/1 [00:00<00:00, 1.50it/s, ema_decay=0.986, loss=0.00257, lr=1e-6, step=299]\n", - "Epoch 299: 100%|██████████| 1/1 [00:00<00:00, 1.51it/s, ema_decay=0.986, loss=0.00548, lr=0, step=300]\n" + "ename": "NameError", + "evalue": "name 'DDPMScheduler' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32mc:\\Users\\Ben\\Documents\\diffusers\\examples\\progressive_distillation\\image_diffusion.ipynb Cell 10\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[39mfor\u001b[39;00m distill_step \u001b[39min\u001b[39;00m \u001b[39mrange\u001b[39m(\u001b[39m2\u001b[39m):\n\u001b[0;32m 5\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mDistill step \u001b[39m\u001b[39m{\u001b[39;00mdistill_step\u001b[39m}\u001b[39;00m\u001b[39m from \u001b[39m\u001b[39m{\u001b[39;00mN\u001b[39m}\u001b[39;00m\u001b[39m -> \u001b[39m\u001b[39m{\u001b[39;00mN \u001b[39m/\u001b[39m\u001b[39m/\u001b[39m \u001b[39m2\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m)\n\u001b[1;32m----> 6\u001b[0m teacher, distilled_ema, distill_accelrator \u001b[39m=\u001b[39m distiller(teacher, N, train_dataset, epochs\u001b[39m=\u001b[39;49m\u001b[39m300\u001b[39;49m, batch_size\u001b[39m=\u001b[39;49mtraining_config\u001b[39m.\u001b[39;49mbatch_size)\n\u001b[0;32m 7\u001b[0m N \u001b[39m=\u001b[39m N \u001b[39m/\u001b[39m\u001b[39m/\u001b[39m \u001b[39m2\u001b[39m\n\u001b[0;32m 8\u001b[0m new_scheduler \u001b[39m=\u001b[39m DDPMScheduler(num_train_timesteps\u001b[39m=\u001b[39mN, 
beta_schedule\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39msquaredcos_cap_v2\u001b[39m\u001b[39m\"\u001b[39m)\n", + "File \u001b[1;32m~\\Documents\\diffusers\\src\\diffusers\\pipelines\\progressive_distillation\\pipeline_progressive_distillation.py:71\u001b[0m, in \u001b[0;36mDistillationPipeline.__call__\u001b[1;34m(self, teacher, n_teacher_trainsteps, train_data, epochs, lr, batch_size, gamma, generator, gradient_accumulation_steps, device, mixed_precision, adam_beta1, adam_beta2, adam_weight_decay, adam_epsilon, ema_inv_gamma, ema_power, ema_max_decay, use_ema, permute_samples, **kwargs)\u001b[0m\n\u001b[0;32m 68\u001b[0m train_dataloader \u001b[39m=\u001b[39m DataLoader(train_data, batch_size\u001b[39m=\u001b[39mbatch_size, shuffle\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n\u001b[0;32m 70\u001b[0m \u001b[39m# Setup the noise schedulers for the teacher and student\u001b[39;00m\n\u001b[1;32m---> 71\u001b[0m teacher_scheduler \u001b[39m=\u001b[39m DDPMScheduler(num_train_timesteps\u001b[39m=\u001b[39mn_teacher_trainsteps, beta_schedule\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39msquaredcos_cap_v2\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m 72\u001b[0m student_scheduler \u001b[39m=\u001b[39m DDPMScheduler(\n\u001b[0;32m 73\u001b[0m num_train_timesteps\u001b[39m=\u001b[39mn_teacher_trainsteps \u001b[39m/\u001b[39m\u001b[39m/\u001b[39m \u001b[39m2\u001b[39m, beta_schedule\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39msquaredcos_cap_v2\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[0;32m 74\u001b[0m )\n\u001b[0;32m 76\u001b[0m \u001b[39m# Initialize the student model as a direct copy of the teacher\u001b[39;00m\n", + "\u001b[1;31mNameError\u001b[0m: name 'DDPMScheduler' is not defined" ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "eaf8bf59d2a242a493e0cfeda6f5e816", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/50 [00:00 {N // 2}\")\n", - " teacher, distilled_ema, distill_accelrator = utils.distill(teacher, N, train_image, training_config, epochs=300, batch_size=64, gamma=0)\n", + " teacher, distilled_ema, distill_accelrator = distiller(teacher, N, train_dataset, epochs=300, batch_size=training_config.batch_size)\n", " N = N // 2\n", " new_scheduler = DDPMScheduler(num_train_timesteps=N, beta_schedule=\"squaredcos_cap_v2\")\n", " pipeline = DDPMPipeline(\n", @@ -919,7 +312,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3.8.10 64-bit", + "display_name": "Python 3.10.6 ('diffusers')", "language": "python", "name": "python3" }, @@ -933,12 +326,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.10.6" }, "orig_nbformat": 4, "vscode": { "interpreter": { - "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + "hash": "ec31fe64df66491ba3476a226e6c778cf4c96edadc68db7b2b237ac062a20c97" } } }, diff --git a/examples/progressive_distillation/train.py b/examples/progressive_distillation/train.py deleted file mode 100644 index a49e45c21a8b..000000000000 --- a/examples/progressive_distillation/train.py +++ /dev/null @@ -1,538 +0,0 @@ -import argparse -import logging -import math -import os -import random -from pathlib import Path -from re import L -from typing import Iterable, Optional - -import numpy as np -import torch -import torch.nn.functional as F -import torch.utils.checkpoint - -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import set_seed 
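# [Editor's note] The NameError traceback captured in the notebook output above is
# raised where the distillation pipeline builds its noise schedulers, so DDPMScheduler
# was evidently not in scope in that build of the module. A minimal, hedged fix,
# assuming the import path used elsewhere in this patch, is to bring it in explicitly:
#
#     from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
#     teacher_scheduler = DDPMScheduler(num_train_timesteps=N, beta_schedule="squaredcos_cap_v2")  # N from the notebook cell
#
# The pipeline file added later in this patch does carry this import, so the failure
# presumably predates that version of the file.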
-from datasets import load_dataset -from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker -from huggingface_hub import HfFolder, Repository, whoami -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer - -logger = get_logger(__name__) - - -def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): - if token is None: - token = HfFolder.get_token() - if organization is None: - username = whoami(token)["name"] - return f"{username}/{model_id}" - else: - return f"{organization}/{model_id}" - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--dataset_name", - type=str, - default=None, - help=( - "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," - " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," - " or to a folder containing files that 🤗 Datasets can understand." - ), - ) - parser.add_argument( - "--dataset_config_name", - type=str, - default=None, - help="The config of the Dataset, leave as None if there's only one config.", - ) - parser.add_argument( - "--train_data_dir", - type=str, - default=None, - help=( - "A folder containing the training data. Folder contents must follow the structure described in" - " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" - " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." - ), - ) - parser.add_argument( - "--image_column", type=str, default="image", help="The column of the dataset containing an image." - ) - parser.add_argument( - "--caption_column", - type=str, - default="text", - help="The column of the dataset containing a caption or a list of captions.", - ) - parser.add_argument( - "--max_train_samples", - type=int, - default=None, - help=( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." 
- ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="sd-model-distilled", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument( - "--cache_dir", - type=str, - default=None, - help="The directory where the downloaded models and datasets will be stored.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", - action="store_true", - help="Whether to center crop images before resizing to resolution (if not set, random crop will be used)", - ) - parser.add_argument( - "--random_flip", - action="store_true", - help="whether to randomly flip images horizontally", - ) - parser.add_argument( - "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument("--num_train_epochs", type=int, default=100) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=1e-4, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
- ) - parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - parser.add_argument( - "--report_to", - type=str, - default="tensorboard", - help=( - 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' - ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' - "Only applicable when `--with_tracking` is passed." 
- ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - # Sanity checks - if args.dataset_name is None and args.train_data_dir is None: - raise ValueError("Need either a dataset name or a training folder.") - - return args - - -repo_name = "CompVis/stable-diffusion-v1-4" - -dataset_name_mapping = { - "lambdalabs/pokemon-blip-captions": ("image", "text"), -} - - -def main(): - args = parse_args() - logging_dir = os.path.join(args.output_dir, args.logging_dir) - dataset_name_mapping = { - "lambdalabs/pokemon-blip-captions": ("image", "text"), - } - - dataset = load_dataset(args.dataset_name) - tokenizer = CLIPTokenizer.from_pretrained(repo_name, subfolder="tokenizer") - text_encoder = CLIPTextModel.from_pretrained(repo_name, subfolder="text_encoder") - vae = AutoencoderKL.from_pretrained(repo_name, subfolder="vae") - unet = UNet2DConditionModel.from_pretrained(repo_name, subfolder="unet") - student = UNet2DConditionModel.from_pretrained(repo_name, subfolder="unet") - scheduler = DDPMScheduler.from_pretrained(repo_name, subfolder="scheduler") - student.load_state_dict(unet.state_dict()) - - # Freeze vae and text_encoder - vae.requires_grad_(False) - text_encoder.requires_grad_(False) - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Initialize the optimizer - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" - ) - - optimizer_cls = bnb.optim.AdamW8bit - else: - optimizer_cls = torch.optim.AdamW - - optimizer = optimizer_cls( - student.parameters(), - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with=args.report_to, - logging_dir=logging_dir, - ) - - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - - with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: - if "step_*" not in gitignore: - gitignore.write("step_*\n") - if "epoch_*" not in gitignore: - gitignore.write("epoch_*\n") - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - if args.dataset_name is not None: - # Downloading and loading a dataset from the hub. 
- dataset = load_dataset( - args.dataset_name, - args.dataset_config_name, - cache_dir=args.cache_dir, - ) - else: - data_files = {} - if args.train_data_dir is not None: - data_files["train"] = os.path.join(args.train_data_dir, "**") - dataset = load_dataset( - "imagefolder", - data_files=data_files, - cache_dir=args.cache_dir, - ) - # See more about loading custom images at - # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder - - # Preprocessing the datasets. - # We need to tokenize inputs and targets. - column_names = dataset["train"].column_names - - # 6. Get the column names for input/target. - dataset_columns = dataset_name_mapping.get(args.dataset_name, None) - if args.image_column is None: - image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] - else: - image_column = args.image_column - if image_column not in column_names: - raise ValueError( - f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" - ) - if args.caption_column is None: - caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] - else: - caption_column = args.caption_column - if caption_column not in column_names: - raise ValueError( - f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" - ) - - # Preprocessing the datasets. - # We need to tokenize input captions and transform the images. - def tokenize_captions(examples, is_train=True): - captions = [] - for caption in examples[caption_column]: - if isinstance(caption, str): - captions.append(caption) - elif isinstance(caption, (list, np.ndarray)): - # take a random caption if there are multiple - captions.append(random.choice(caption) if is_train else caption[0]) - else: - raise ValueError( - f"Caption column `{caption_column}` should contain either strings or lists of strings." 
- ) - inputs = tokenizer(captions, max_length=tokenizer.model_max_length, padding="do_not_pad", truncation=True) - input_ids = inputs.input_ids - return input_ids - - train_transforms = transforms.Compose( - [ - transforms.Resize((args.resolution, args.resolution), interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), - transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def preprocess_train(examples): - images = [image.convert("RGB") for image in examples[image_column]] - examples["pixel_values"] = [train_transforms(image) for image in images] - examples["input_ids"] = tokenize_captions(examples) - - return examples - - with accelerator.main_process_first(): - if args.max_train_samples is not None: - dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) - # Set the training transforms - train_dataset = dataset["train"].with_transform(preprocess_train) - - def collate_fn(examples): - pixel_values = torch.stack([example["pixel_values"] for example in examples]) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - input_ids = [example["input_ids"] for example in examples] - padded_tokens = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt") - return { - "pixel_values": pixel_values, - "input_ids": padded_tokens.input_ids, - "attention_mask": padded_tokens.attention_mask, - } - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - ) - - unet, student, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, student, optimizer, train_dataloader, lr_scheduler - ) - weight_dtype = torch.float32 - if args.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif args.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move text_encode and vae to gpu. - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. - text_encoder.to(accelerator.device, dtype=weight_dtype) - vae.to(accelerator.device, dtype=weight_dtype) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. 
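# [Editor's note] A worked instance of the step bookkeeping a few lines above, with
# hypothetical values (not taken from this run): suppose len(train_dataloader) == 833,
# gradient_accumulation_steps == 4, num_train_epochs == 100, and 2 processes.
#
#     num_update_steps_per_epoch = math.ceil(833 / 4)   # = 209 optimizer steps
#     max_train_steps            = 100 * 209            # = 20900
#     total_batch_size           = 16 * 2 * 4           # = 128 samples per optimizer step
#
# i.e. each optimizer step consumes gradient_accumulation_steps micro-batches, and
# max_train_steps counts optimizer steps, not micro-batches.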
- # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("text2image-fine-tune", config=vars(args)) - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) - progress_bar.set_description("Steps") - global_step = 0 - N = 1000 - for epoch in range(args.num_train_epochs): - unet.load_state_dict(student.state_dict()) - unet.eval() - student.train() - train_loss = 0.0 - N = N // 2 - scheduler.set_timesteps(N) - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(unet): - # Convert images to latent space - latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() - latents = latents * 0.18215 - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, scheduler.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - # Get the text embedding for conditioning - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - original_noisy_latents = scheduler.add_noise(latents, noise, timesteps) - noisy_latents = original_noisy_latents.clone() - # Calculate a target for the student by running 2 steps of the diffusion process with the teacher - for target_calc_step in range(2): - # Predict the noise residual and compute loss - noise_pred = unet(noisy_latents, timesteps - target_calc_step, encoder_hidden_states).sample - noisy_latents = scheduler.step(noise_pred, timesteps - target_calc_step, noisy_latents) - - # Z_t-2 - ((\sigma_t-2 / \sigma_t-1) * Z_t) - student_target_numerator = noisy_latents - ( - (scheduler.get_variance(timesteps - 2) / scheduler.get_variance(timesteps - 1)) - * original_noisy_latents - ) - # \alpha_t-2 - ((\sigma_t-2 / \sigma_t-1) * \alpha_t) - student_target_denominator = scheduler.alphas_cumprod[timesteps - 2] - ( - (scheduler.get_variance(timesteps - 2) / scheduler.get_variance(timesteps - 1)) - * scheduler.alphas_cumprod[timesteps - 1] - ) - student_target = student_target_numerator / student_target_denominator - student_noise_pred = student(original_noisy_latents, timesteps, encoder_hidden_states).sample - loss = F.mse_loss(student_noise_pred.float(), student_target.float(), reduction="mean") - - # Gather the losses across all processes for logging (if we use distributed training). 
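# [Editor's note] The student target computed just above follows the two-step
# progressive-distillation recipe (Salimans & Ho, 2022): with Z_t = alpha_t * x +
# sigma_t * eps, two teacher steps carry Z_t to Z_t'', and solving for the clean
# sample x that would let a single student step land on Z_t'' gives
#
#     x_target = (Z_t'' - (sigma_t'' / sigma_t) * Z_t) / (alpha_t'' - (sigma_t'' / sigma_t) * alpha_t)
#
# which is the numerator/denominator pair in the code. Note that this deleted script
# appears to pass scheduler.get_variance(...) (a variance, i.e. sigma^2) where the
# formula wants sigma, and alphas_cumprod (alpha-bar) where it wants alpha; the
# replacement pipeline later in this patch works from get_alpha_sigma instead.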
- avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() - train_loss += avg_loss.item() / args.gradient_accumulation_steps - - # Backpropagate - accelerator.backward(loss) - if accelerator.sync_gradients: - accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - accelerator.log({"train_loss": train_loss}, step=global_step) - train_loss = 0.0 - - logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - - if global_step >= args.max_train_steps: - break diff --git a/examples/progressive_distillation/utils.py b/examples/progressive_distillation/utils.py index bf2f1786194f..2632abb2680a 100644 --- a/examples/progressive_distillation/utils.py +++ b/examples/progressive_distillation/utils.py @@ -35,7 +35,7 @@ class DiffusionTrainingArgs: ema_inv_gamma: float = 1.0 ema_power: float = 3 / 4 ema_max_decay: float = 0.9999 - batch_size: int = 16 + batch_size: int = 64 num_epochs: int = 500 def get_train_transforms(training_config): diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 2c531cf8cee0..e0236a110e35 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -37,6 +37,7 @@ LDMPipeline, PNDMPipeline, ScoreSdeVePipeline, + DistillationPipeline ) from .schedulers import ( DDIMScheduler, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index b3124af39077..ced2be188fcf 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -9,6 +9,7 @@ from .pndm import PNDMPipeline from .score_sde_ve import ScoreSdeVePipeline from .stochastic_karras_ve import KarrasVePipeline + from .progressive_distillation import DistillationPipeline else: from ..utils.dummy_pt_objects import * # noqa F403 diff --git a/src/diffusers/pipelines/progressive_distillation/__init__.py b/src/diffusers/pipelines/progressive_distillation/__init__.py new file mode 100644 index 000000000000..e7031b1583a7 --- /dev/null +++ b/src/diffusers/pipelines/progressive_distillation/__init__.py @@ -0,0 +1 @@ +from .pipeline_progressive_distillation import DistillationPipeline diff --git a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py new file mode 100644 index 000000000000..4716d26cff8b --- /dev/null +++ b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py @@ -0,0 +1,193 @@ +import tqdm +from diffusers import DiffusionPipeline +import torch +from torch.utils.data import Dataset, DataLoader +from PIL import Image +from diffusers.pipelines.ddpm import DDPMPipeline +from diffusers.schedulers.scheduling_ddpm import DDPMScheduler +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel +import math +import requests +from torchvision.transforms import ( + CenterCrop, + Compose, + InterpolationMode, + Normalize, + RandomHorizontalFlip, + Resize, + ToTensor, + ToPILImage, +) +from accelerate import Accelerator +from tqdm import tqdm +import torch.nn.functional as F +import copy +from dataclasses import dataclass +import numpy as np + + +class DistillationPipeline(DiffusionPipeline): + def __init__(self): + pass + + def __call__( + 
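        # [Editor's note] Hypothetical usage of this pipeline, mirroring the notebook
        # cell earlier in this patch — each call halves the teacher's effective
        # number of train timesteps:
        #
        #     distiller = DistillationPipeline()
        #     student, student_ema, accel = distiller(
        #         teacher, n_teacher_trainsteps=N, train_data=train_dataset,
        #         epochs=300, batch_size=64,
        #     )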
self, + teacher, + n_teacher_trainsteps, + train_data, + epochs=100, + lr=3e-4, + batch_size=64, + gamma=0, + generator=None, + gradient_accumulation_steps=1, + device="cuda", + mixed_precision="fp16", + adam_beta1=0.95, + adam_beta2=0.999, + adam_weight_decay=0.001, + adam_epsilon=1e-08, + ema_inv_gamma=0.9999, + ema_power=3 / 4, + ema_max_decay=0.9999, + use_ema=True, + permute_samples=(0, 1, 2), + **kwargs, + ): + # Initialize our accelerator for training + accelerator = Accelerator( + gradient_accumulation_steps=gradient_accumulation_steps, + mixed_precision=mixed_precision, + ) + + if accelerator.is_main_process: + run = "distill" + accelerator.init_trackers(run) + + # Setup a dataloader with the provided train data + train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True) + + # Setup the noise schedulers for the teacher and student + teacher_scheduler = DDPMScheduler(num_train_timesteps=n_teacher_trainsteps, beta_schedule="squaredcos_cap_v2") + student_scheduler = DDPMScheduler( + num_train_timesteps=n_teacher_trainsteps // 2, beta_schedule="squaredcos_cap_v2" + ) + + # Initialize the student model as a direct copy of the teacher + student = copy.deepcopy(teacher) + student.load_state_dict(teacher.state_dict()) + student = accelerator.prepare(student) + student.train() + + # Setup the optimizer for the student + optimizer = torch.optim.AdamW( + student.parameters(), + lr=lr, + betas=(adam_beta1, adam_beta2), + weight_decay=adam_weight_decay, + eps=adam_epsilon, + ) + lr_scheduler = get_scheduler( + "linear", + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=np.ceil((epochs * len(train_dataloader)) // gradient_accumulation_steps), + ) + + # Let accelerate handle moving the model to the correct device + ( + teacher, + student, + optimizer, + lr_scheduler, + train_data, + teacher_scheduler, + student_scheduler, + ) = accelerator.prepare( + teacher, student, optimizer, lr_scheduler, train_data, teacher_scheduler, student_scheduler + ) + ema_model = EMAModel( + student, + inv_gamma=ema_inv_gamma, + power=ema_power, + max_value=ema_max_decay, + ) + global_step = 0 + + # Train the student + for epoch in range(epochs): + progress_bar = tqdm(total=len(train_data) // batch_size, disable=not accelerator.is_local_main_process) + progress_bar.set_description(f"Epoch {epoch}") + for batch in train_dataloader: + with accelerator.accumulate(student): + batch = batch.to(accelerator.device) + noise = torch.randn(batch.shape).to(accelerator.device) + bsz = batch.shape[0] + # Sample a random timestep for each image + timesteps = ( + torch.randint( + 0, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device + ).long() + * 2 + ) + with torch.no_grad(): + # Add noise to the image based on noise scheduler a t=timesteps + alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps + 1, accelerator.device) + z_t = alpha_t * batch + sigma_t * noise + + # Take the first diffusion step with the teacher + noise_pred_t = teacher(z_t.permute(*permute_samples), timesteps + 1).sample.permute( + *permute_samples + ) + x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1) + + # Add noise to the image based on noise scheduler a t=timesteps-1, to prepare for the next diffusion step + alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma( + batch, timesteps, accelerator.device + ) + z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * ( + z_t - alpha_t * x_teacher_z_t + ) + # Take the second diffusion step with 
the teacher + noise_pred_t_prime = teacher(z_t_prime.permute(*permute_samples), timesteps).sample.permute( + *permute_samples + ) + if permute_samples: + noise_pred_t_prime = noise_pred_t_prime.permute(*permute_samples) + rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1) + + # V prediction per Appendix D + alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma( + batch, timesteps // 2, accelerator.device + ) + x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2 + z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime + + noise_pred = student(z_t.permute(*permute_samples), timesteps).sample.permute(*permute_samples) + w = torch.pow(1 + alpha_t_prime2 / sigma_t_prime2, gamma) + loss = F.mse_loss(noise_pred * w, z_t_prime_2 * w) + accelerator.backward(loss) + + if accelerator.sync_gradients: + accelerator.clip_grad_norm_(student.parameters(), 1.0) + optimizer.step() + lr_scheduler.step() + if use_ema: + ema_model.step(student) + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + global_step += 1 + + logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} + if use_ema: + logs["ema_decay"] = ema_model.decay + progress_bar.set_postfix(**logs) + accelerator.log(logs, step=global_step) + progress_bar.close() + + accelerator.wait_for_everyone() + return student, ema_model, accelerator From e6a24251b99caad89451a7dd2392069f0643ffe5 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 31 Oct 2022 09:04:20 -0400 Subject: [PATCH 105/133] code cleanup --- .../community/progressive_distillation.py | 202 ------------------ examples/progressive_distillation/utils.py | 128 ++--------- src/diffusers/__init__.py | 2 +- src/diffusers/pipelines/__init__.py | 2 +- .../pipeline_progressive_distillation.py | 39 ++-- src/diffusers/schedulers/scheduling_ddim.py | 3 +- src/diffusers/schedulers/scheduling_ddpm.py | 2 +- 7 files changed, 31 insertions(+), 347 deletions(-) delete mode 100644 examples/community/progressive_distillation.py diff --git a/examples/community/progressive_distillation.py b/examples/community/progressive_distillation.py deleted file mode 100644 index 41b0a5147180..000000000000 --- a/examples/community/progressive_distillation.py +++ /dev/null @@ -1,202 +0,0 @@ -import d4rl # noqa -import gym -import tqdm -from diffusers import DiffusionPipeline -import torch -from torch.utils.data import Dataset, DataLoader -from PIL import Image -from diffusers import ( - AutoencoderKL, - UNet2DModel, - DDIMPipeline, - DDIMScheduler, - DDPMPipeline, - DDPMScheduler, - UNet1DModel, - DiffusionPipeline, -) -from diffusers.optimization import get_scheduler -from diffusers.training_utils import EMAModel -import math -import requests -from torchvision.transforms import ( - CenterCrop, - Compose, - InterpolationMode, - Normalize, - RandomHorizontalFlip, - Resize, - ToTensor, - ToPILImage, -) -from accelerate import Accelerator -from tqdm import tqdm -import torch.nn.functional as F -import copy -from dataclasses import dataclass -import numpy as np - - -class DistillationPipeline(DiffusionPipeline): - def __init__(self): - pass - - def __call__( - self, - teacher, - n_teacher_trainsteps, - train_data, - epochs=100, - lr=3e-4, - batch_size=64, - gamma=0, - generator=None, - gradient_accumulation_steps=1, - device="cuda", - mixed_precision="fp16", - 
adam_beta1=0.95, - adam_beta2=0.999, - adam_weight_decay=0.001, - adam_epsilon=1e-08, - ema_inv_gamma=0.9999, - ema_power=3 / 4, - ema_max_decay=0.9999, - use_ema=True, - permute_samples=(0, 1, 2), - **kwargs, - ): - # Initialize our accelerator for training - accelerator = Accelerator( - gradient_accumulation_steps=gradient_accumulation_steps, - mixed_precision=mixed_precision, - ) - - if accelerator.is_main_process: - run = "distill" - accelerator.init_trackers(run) - - # Setup a dataloader with the provided train data - train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True) - - # Setup the noise schedulers for the teacher and student - teacher_scheduler = DDPMScheduler(num_train_timesteps=n_teacher_trainsteps, beta_schedule="squaredcos_cap_v2") - student_scheduler = DDPMScheduler( - num_train_timesteps=n_teacher_trainsteps // 2, beta_schedule="squaredcos_cap_v2" - ) - - # Initialize the student model as a direct copy of the teacher - student = copy.deepcopy(teacher) - student.load_state_dict(teacher.state_dict()) - student = accelerator.prepare(student) - student.train() - - # Setup the optimizer for the student - optimizer = torch.optim.AdamW( - student.parameters(), - lr=lr, - betas=(adam_beta1, adam_beta2), - weight_decay=adam_weight_decay, - eps=adam_epsilon, - ) - lr_scheduler = get_scheduler( - "linear", - optimizer=optimizer, - num_warmup_steps=0, - num_training_steps=np.ceil((epochs * len(train_dataloader)) // gradient_accumulation_steps), - ) - - # Let accelerate handle moving the model to the correct device - ( - teacher, - student, - optimizer, - lr_scheduler, - train_image, - teacher_scheduler, - student_scheduler, - ) = accelerator.prepare( - teacher, student, optimizer, lr_scheduler, train_image, teacher_scheduler, student_scheduler - ) - ema_model = EMAModel( - student, - inv_gamma=ema_inv_gamma, - power=ema_power, - max_value=ema_max_decay, - ) - global_step = 0 - - # Train the student - for epoch in range(epochs): - progress_bar = tqdm(total=len(train_data) // batch_size, disable=not accelerator.is_local_main_process) - progress_bar.set_description(f"Epoch {epoch}") - for batch in train_dataloader: - with accelerator.accumulate(student): - noise = torch.randn(batch.shape).to(accelerator.device) - bsz = batch.shape[0] - # Sample a random timestep for each image - timesteps = ( - torch.randint( - 0, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device - ).long() - * 2 - ) - with torch.no_grad(): - # Add noise to the image based on noise scheduler a t=timesteps - alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps + 1, accelerator.device) - z_t = alpha_t * batch + sigma_t * noise - - # Take the first diffusion step with the teacher - noise_pred_t = teacher(z_t.permute(*permute_samples), timesteps + 1).sample.permute( - *permute_samples - ) - x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1) - - # Add noise to the image based on noise scheduler a t=timesteps-1, to prepare for the next diffusion step - alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma( - batch, timesteps, accelerator.device - ) - z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * ( - z_t - alpha_t * x_teacher_z_t - ) - # Take the second diffusion step with the teacher - noise_pred_t_prime = teacher(z_t_prime.permute(*permute_samples), timesteps).sample.permute( - *permute_samples - ) - if permute_samples: - noise_pred_t_prime = noise_pred_t_prime.permute(0, 2, 1) - rec_t_prime = 
(alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1) - - # V prediction per Appendix D - alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma( - batch, timesteps // 2, accelerator.device - ) - x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2 - z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime - - noise_pred = student(z_t.permute(*permute_samples), timesteps).sample.permute(*permute_samples) - w = torch.pow(1 + alpha_t_prime2 / sigma_t_prime2, gamma) - loss = F.mse_loss(noise_pred * w, z_t_prime_2 * w) - accelerator.backward(loss) - - if accelerator.sync_gradients: - accelerator.clip_grad_norm_(student.parameters(), 1.0) - optimizer.step() - lr_scheduler.step() - if use_ema: - ema_model.step(student) - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} - if use_ema: - logs["ema_decay"] = ema_model.decay - progress_bar.set_postfix(**logs) - accelerator.log(logs, step=global_step) - progress_bar.close() - - accelerator.wait_for_everyone() - return student, ema_model, accelerator diff --git a/examples/progressive_distillation/utils.py b/examples/progressive_distillation/utils.py index 2632abb2680a..d8cc92356f25 100644 --- a/examples/progressive_distillation/utils.py +++ b/examples/progressive_distillation/utils.py @@ -1,6 +1,6 @@ - - from dataclasses import dataclass + +from diffusers import UNet2DModel from torchvision.transforms import ( CenterCrop, Compose, @@ -10,14 +10,7 @@ Resize, ToTensor, ) -import torch.nn.functional as F -import torch -from diffusers import UNet2DModel, DDIMScheduler, DDPMScheduler -from accelerate import Accelerator -from diffusers.optimization import get_scheduler -from diffusers.training_utils import EMAModel -from tqdm import tqdm @dataclass class DiffusionTrainingArgs: @@ -38,18 +31,22 @@ class DiffusionTrainingArgs: batch_size: int = 64 num_epochs: int = 500 + def get_train_transforms(training_config): + # Get standard image transforms return Compose( - [ - Resize(training_config.resolution, interpolation=InterpolationMode.BILINEAR), - CenterCrop(training_config.resolution), - RandomHorizontalFlip(), - ToTensor(), - Normalize([0.5], [0.5]), - ] -) + [ + Resize(training_config.resolution, interpolation=InterpolationMode.BILINEAR), + CenterCrop(training_config.resolution), + RandomHorizontalFlip(), + ToTensor(), + Normalize([0.5], [0.5]), + ] + ) + def get_unet(training_config): + # Initialize a generic UNet model to use in our example return UNet2DModel( sample_size=training_config.resolution, in_channels=3, @@ -73,100 +70,3 @@ def get_unet(training_config): "UpBlock2D", ), ) - - -def distill(teacher, n, train_image, training_config, epochs=100, lr=3e-4, batch_size=16, gamma=0, generator=None): - if generator is None: - generator = torch.manual_seed(0) - accelerator = Accelerator( - gradient_accumulation_steps=training_config.gradient_accumulation_steps, - mixed_precision=training_config.mixed_precision, -) - if accelerator.is_main_process: - run = "distill" - accelerator.init_trackers(run) - teacher_scheduler = DDPMScheduler(num_train_timesteps=n, beta_schedule="squaredcos_cap_v2") - student_scheduler = DDPMScheduler(num_train_timesteps=n // 2, beta_schedule="squaredcos_cap_v2") - student = get_unet(training_config) - 
student.load_state_dict(teacher.state_dict()) - student = accelerator.prepare(student) - student.train() - optimizer = torch.optim.AdamW( - student.parameters(), - lr=lr, - betas=(training_config.adam_beta1, training_config.adam_beta2), - weight_decay=0.001, - eps=training_config.adam_epsilon, - ) - lr_scheduler = get_scheduler( - "linear", - optimizer=optimizer, - num_warmup_steps=0, - num_training_steps=(epochs) // training_config.gradient_accumulation_steps, -) - teacher, student, optimizer, lr_scheduler, train_image, teacher_scheduler, student_scheduler = accelerator.prepare( - teacher, student, optimizer, lr_scheduler, train_image,teacher_scheduler, student_scheduler -) - ema_model = EMAModel(student, inv_gamma=training_config.ema_inv_gamma, power=training_config.ema_power, max_value=training_config.ema_max_decay) - global_step = 0 - for epoch in range(epochs): - progress_bar = tqdm(total=1, disable=not accelerator.is_local_main_process) - progress_bar.set_description(f"Epoch {epoch}") - batch = train_image.unsqueeze(0).repeat( - batch_size, 1, 1, 1 - ).to(accelerator.device) - with accelerator.accumulate(student): - noise = torch.randn(batch.shape).to(accelerator.device) - bsz = batch.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint( - 0, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device - ).long() * 2 - with torch.no_grad(): - # Add noise to the image based on noise scheduler a t=timesteps - alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps + 1, accelerator.device) - z_t = alpha_t * batch + sigma_t * noise - - # Take the first diffusion step with the teacher - noise_pred_t = teacher(z_t, timesteps + 1).sample - x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1) - - # Add noise to the image based on noise scheduler a t=timesteps-1, to prepare for the next diffusion step - alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma(batch, timesteps, accelerator.device) - z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * (z_t - alpha_t * x_teacher_z_t) - # Take the second diffusion step with the teacher - noise_pred_t_prime = teacher(z_t_prime.float(), timesteps).sample - rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1) - - # V prediction per Appendix D - alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma(batch, timesteps // 2, accelerator.device) - x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2 - z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime - - noise_pred = student(z_t, timesteps).sample - w = torch.pow(1 + alpha_t_prime2 / sigma_t_prime2, gamma) - loss = F.mse_loss(noise_pred * w, z_t_prime_2 * w) - accelerator.backward(loss) - - if accelerator.sync_gradients: - accelerator.clip_grad_norm_(student.parameters(), 1.0) - optimizer.step() - lr_scheduler.step() - if training_config.use_ema: - ema_model.step(student) - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} - if training_config.use_ema: - logs["ema_decay"] = ema_model.decay - progress_bar.set_postfix(**logs) - accelerator.log(logs, step=global_step) - progress_bar.close() - - accelerator.wait_for_everyone() - return student, ema_model, accelerator diff --git 
a/src/diffusers/__init__.py b/src/diffusers/__init__.py index e0236a110e35..e5a88975dce9 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -33,11 +33,11 @@ DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, + DistillationPipeline, KarrasVePipeline, LDMPipeline, PNDMPipeline, ScoreSdeVePipeline, - DistillationPipeline ) from .schedulers import ( DDIMScheduler, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index ced2be188fcf..0900658cdb83 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -7,9 +7,9 @@ from .ddpm import DDPMPipeline from .latent_diffusion_uncond import LDMPipeline from .pndm import PNDMPipeline + from .progressive_distillation import DistillationPipeline from .score_sde_ve import ScoreSdeVePipeline from .stochastic_karras_ve import KarrasVePipeline - from .progressive_distillation import DistillationPipeline else: from ..utils.dummy_pt_objects import * # noqa F403 diff --git a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py index 4716d26cff8b..da2047d4f29c 100644 --- a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py +++ b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py @@ -1,30 +1,16 @@ +import copy + +import numpy as np +import torch +import torch.nn.functional as F +from torch.utils.data import DataLoader + import tqdm +from accelerate import Accelerator from diffusers import DiffusionPipeline -import torch -from torch.utils.data import Dataset, DataLoader -from PIL import Image -from diffusers.pipelines.ddpm import DDPMPipeline -from diffusers.schedulers.scheduling_ddpm import DDPMScheduler from diffusers.optimization import get_scheduler +from diffusers.schedulers.scheduling_ddpm import DDPMScheduler from diffusers.training_utils import EMAModel -import math -import requests -from torchvision.transforms import ( - CenterCrop, - Compose, - InterpolationMode, - Normalize, - RandomHorizontalFlip, - Resize, - ToTensor, - ToPILImage, -) -from accelerate import Accelerator -from tqdm import tqdm -import torch.nn.functional as F -import copy -from dataclasses import dataclass -import numpy as np class DistillationPipeline(DiffusionPipeline): @@ -40,9 +26,7 @@ def __call__( lr=3e-4, batch_size=64, gamma=0, - generator=None, gradient_accumulation_steps=1, - device="cuda", mixed_precision="fp16", adam_beta1=0.95, adam_beta2=0.999, @@ -53,7 +37,6 @@ def __call__( ema_max_decay=0.9999, use_ema=True, permute_samples=(0, 1, 2), - **kwargs, ): # Initialize our accelerator for training accelerator = Accelerator( @@ -117,7 +100,9 @@ def __call__( # Train the student for epoch in range(epochs): - progress_bar = tqdm(total=len(train_data) // batch_size, disable=not accelerator.is_local_main_process) + progress_bar = tqdm.tqdm( + total=len(train_data) // batch_size, disable=not accelerator.is_local_main_process + ) progress_bar.set_description(f"Epoch {epoch}") for batch in train_dataloader: with accelerator.accumulate(student): diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py index 150d724eb334..f8807c07a3ce 100644 --- a/src/diffusers/schedulers/scheduling_ddim.py +++ b/src/diffusers/schedulers/scheduling_ddim.py @@ -26,6 +26,7 @@ from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin + def E_(input, t, shape, 
device):
     out = torch.gather(input.to(device), 0, t.to(device))
     reshape = [shape[0]] + [1] * (len(shape) - 1)
@@ -321,4 +322,4 @@ def __len__(self):
     def get_alpha_sigma(self, x, t, device):
         alpha = E_(self.sqrt_alphas_cumprod, t, x.shape, device)
         sigma = E_(self.sqrt_one_minus_alphas_cumprod, t, x.shape, device)
-        return alpha, sigma
\ No newline at end of file
+        return alpha, sigma
diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py
index 165d5182889f..ada562f181b5 100644
--- a/src/diffusers/schedulers/scheduling_ddpm.py
+++ b/src/diffusers/schedulers/scheduling_ddpm.py
@@ -326,4 +326,4 @@ def __len__(self):
     def get_alpha_sigma(self, x, t, device):
         alpha = E_(self.sqrt_alphas_cumprod, t, x.shape, device)
         sigma = E_(self.sqrt_one_minus_alphas_cumprod, t, x.shape, device)
-        return alpha, sigma
\ No newline at end of file
+        return alpha, sigma

From c0cfe79ff4b0e2d8f1839173f7d894f32ed7b55f Mon Sep 17 00:00:00 2001
From: Ben Glickenhaus
Date: Mon, 31 Oct 2022 09:07:04 -0400
Subject: [PATCH 106/133] remove diffuser stuff from this pr

---
 .gitignore                                    |   2 -
 examples/diffuser/README.md                   |  16 -
 examples/diffuser/run_diffuser.py             | 122 -------
 .../diffuser/run_diffuser_gen_trajectories.py |  66 ----
 examples/diffuser/run_diffuser_locomotion.py  |  66 ----
 .../diffuser/run_diffuser_value_guided.py     |  69 ----
 examples/diffuser/train_diffuser.py           | 312 ------------------
 .../convert_models_diffuser_to_diffusers.py   |  82 -----
 8 files changed, 735 deletions(-)
 delete mode 100644 examples/diffuser/README.md
 delete mode 100644 examples/diffuser/run_diffuser.py
 delete mode 100644 examples/diffuser/run_diffuser_gen_trajectories.py
 delete mode 100644 examples/diffuser/run_diffuser_locomotion.py
 delete mode 100644 examples/diffuser/run_diffuser_value_guided.py
 delete mode 100644 examples/diffuser/train_diffuser.py
 delete mode 100644 scripts/convert_models_diffuser_to_diffusers.py

diff --git a/.gitignore b/.gitignore
index f018a111ea33..b5a276da1d4b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -164,5 +164,3 @@ tags
 # DS_Store (MacOS)
 .DS_Store
-# RL pipelines may produce mp4 outputs
-*.mp4
\ No newline at end of file
diff --git a/examples/diffuser/README.md b/examples/diffuser/README.md
deleted file mode 100644
index 464ccd57af85..000000000000
--- a/examples/diffuser/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Overview
-
-These examples show how to run [Diffuser](https://arxiv.org/pdf/2205.09991.pdf) in Diffusers. There are two scripts, `run_diffuser_value_guided.py` and `run_diffuser.py`.
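In practice, `run_diffuser_value_guided.py` reduces to the loop sketched below, condensed from the script as it appears later in this patch. The checkpoint id and the `pipeline(obs, planning_horizon=32)` call are taken verbatim from the example; the `custom_pipeline` path is shortened here from the author's absolute path to the repo-relative `examples/community`:

```python
import d4rl  # noqa: F401 (importing d4rl registers the hopper envs with gym)
import gym

from diffusers import DiffusionPipeline

env = gym.make("hopper-medium-v2")
pipeline = DiffusionPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32",
    env=env,
    custom_pipeline="examples/community",
)

obs = env.reset()
total_reward = 0
for t in range(1000):
    # plan a 32-step trajectory from the current observation and
    # execute only its first action in the environment
    denorm_actions = pipeline(obs, planning_horizon=32)
    obs, reward, terminal, _ = env.step(denorm_actions)
    total_reward += reward
print(f"Total reward: {total_reward}")
```

Only the first action of each planned trajectory is executed, so planning happens anew at every environment step.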
- -You will need some RL specific requirements to run the examples: - -``` -pip install -f https://download.pytorch.org/whl/torch_stable.html \ - free-mujoco-py \ - einops \ - gym \ - protobuf==3.20.1 \ - git+https://github.com/rail-berkeley/d4rl.git \ - mediapy \ - Pillow==9.0.0 -``` diff --git a/examples/diffuser/run_diffuser.py b/examples/diffuser/run_diffuser.py deleted file mode 100644 index b29d89992dfc..000000000000 --- a/examples/diffuser/run_diffuser.py +++ /dev/null @@ -1,122 +0,0 @@ -import numpy as np -import torch - -import d4rl # noqa -import gym -import tqdm -import train_diffuser -from diffusers import DDPMScheduler, UNet1DModel - - -env_name = "hopper-medium-expert-v2" -env = gym.make(env_name) -data = env.get_dataset() # dataset is only used for normalization in this colab - -DEVICE = "cpu" -DTYPE = torch.float - -# diffusion model settings -n_samples = 4 # number of trajectories planned via diffusion -horizon = 128 # length of sampled trajectories -state_dim = env.observation_space.shape[0] -action_dim = env.action_space.shape[0] -num_inference_steps = 100 # number of difusion steps - - -# Two generators for different parts of the diffusion loop to work in colab -generator_cpu = torch.Generator(device="cpu") - -scheduler = DDPMScheduler(num_train_timesteps=100, beta_schedule="squaredcos_cap_v2") - -# 3 different pretrained models are available for this task. -# The horizion represents the length of trajectories used in training. -network = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) -# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor256").to(device=DEVICE) -# network = TemporalUNet.from_pretrained("fusing/ddpm-unet-rl-hopper-hor512").to(device=DEVICE) - - -# network specific constants for inference -clip_denoised = network.clip_denoised -predict_epsilon = network.predict_epsilon - -# [ observation_dim ] --> [ n_samples x observation_dim ] -obs = env.reset() -total_reward = 0 -done = False -T = 300 -rollout = [obs.copy()] - -try: - for t in tqdm.tqdm(range(T)): - obs_raw = obs - - # normalize observations for forward passes - obs = train_diffuser.normalize(obs, data, "observations") - obs = obs[None].repeat(n_samples, axis=0) - conditions = {0: train_diffuser.to_torch(obs, device=DEVICE)} - - # constants for inference - batch_size = len(conditions[0]) - shape = (batch_size, horizon, state_dim + action_dim) - - # sample random initial noise vector - x1 = torch.randn(shape, device=DEVICE, generator=generator_cpu) - - # this model is conditioned from an initial state, so you will see this function - # multiple times to change the initial state of generated data to the state - # generated via env.reset() above or env.step() below - x = train_diffuser.reset_x0(x1, conditions, action_dim) - - # convert a np observation to torch for model forward pass - x = train_diffuser.to_torch(x) - - eta = 1.0 # noise factor for sampling reconstructed state - - # run the diffusion process - # for i in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps): - for i in tqdm.tqdm(scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = torch.full((batch_size,), i, device=DEVICE, dtype=torch.long) - - # 1. generate prediction from model - with torch.no_grad(): - residual = network(x, timesteps).sample - - # 2. use the model prediction to reconstruct an observation (de-noise) - obs_reconstruct = scheduler.step(residual, i, x, predict_epsilon=predict_epsilon)["prev_sample"] - - # 3. 
[optional] add posterior noise to the sample - if eta > 0: - noise = torch.randn(obs_reconstruct.shape, generator=generator_cpu).to(obs_reconstruct.device) - posterior_variance = scheduler._get_variance(i) # * noise - # no noise when t == 0 - # NOTE: original implementation missing sqrt on posterior_variance - obs_reconstruct = ( - obs_reconstruct + int(i > 0) * (0.5 * posterior_variance) * eta * noise - ) # MJ had as log var, exponentiated - - # 4. apply conditions to the trajectory - obs_reconstruct_postcond = train_diffuser.reset_x0(obs_reconstruct, conditions, action_dim) - x = train_diffuser.to_torch(obs_reconstruct_postcond) - plans = train_diffuser.helpers.to_np(x[:, :, :action_dim]) - # select random plan - idx = np.random.randint(plans.shape[0]) - # select action at correct time - action = plans[idx, 0, :] - actions = train_diffuser.de_normalize(action, data, "actions") - # execute action in environment - next_observation, reward, terminal, _ = env.step(action) - - # update return - total_reward += reward - print(f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}") - - # save observations for rendering - rollout.append(next_observation.copy()) - obs = next_observation -except KeyboardInterrupt: - pass - -print(f"Total reward: {total_reward}") -render = train_diffuser.MuJoCoRenderer(env) -train_diffuser.show_sample(render, np.expand_dims(np.stack(rollout), axis=0)) diff --git a/examples/diffuser/run_diffuser_gen_trajectories.py b/examples/diffuser/run_diffuser_gen_trajectories.py deleted file mode 100644 index 3de8521343e3..000000000000 --- a/examples/diffuser/run_diffuser_gen_trajectories.py +++ /dev/null @@ -1,66 +0,0 @@ -import d4rl # noqa -import gym -import tqdm -from diffusers import DiffusionPipeline - - -config = dict( - n_samples=64, - horizon=32, - num_inference_steps=20, - n_guide_steps=0, - scale_grad_by_std=True, - scale=0.1, - eta=0.0, - t_grad_cutoff=2, - device="cpu", -) - - -def _run(): - env_name = "hopper-medium-v2" - env = gym.make(env_name) - - pipeline = DiffusionPipeline.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", - env=env, - custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", - ) - - env.seed(0) - obs = env.reset() - total_reward = 0 - total_score = 0 - T = 1000 - rollout = [obs.copy()] - try: - for t in tqdm.tqdm(range(T)): - # Call the policy - denorm_actions = pipeline(obs, planning_horizon=32) - - # execute action in environment - next_observation, reward, terminal, _ = env.step(denorm_actions) - score = env.get_normalized_score(total_reward) - # update return - total_reward += reward - total_score += score - print( - f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" - f" {total_score}" - ) - # save observations for rendering - rollout.append(next_observation.copy()) - - obs = next_observation - except KeyboardInterrupt: - pass - - print(f"Total reward: {total_reward}") - - -def run(): - _run() - - -if __name__ == "__main__": - run() diff --git a/examples/diffuser/run_diffuser_locomotion.py b/examples/diffuser/run_diffuser_locomotion.py deleted file mode 100644 index 9ac9df28db81..000000000000 --- a/examples/diffuser/run_diffuser_locomotion.py +++ /dev/null @@ -1,66 +0,0 @@ -import d4rl # noqa -import gym -import tqdm -from diffusers import DiffusionPipeline - - -config = dict( - n_samples=64, - horizon=32, - num_inference_steps=20, - n_guide_steps=2, - scale_grad_by_std=True, - scale=0.1, - eta=0.0, - t_grad_cutoff=2, - device="cpu", -) - - -def _run(): 
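-    # Aside from the config dict above (n_guide_steps=2 here versus
-    # n_guide_steps=0 in run_diffuser_gen_trajectories.py), this script is
-    # identical to the previous one. Note that config is defined but never
-    # passed to the pipeline call below, which relies on its own defaults.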
- env_name = "hopper-medium-v2" - env = gym.make(env_name) - - pipeline = DiffusionPipeline.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", - env=env, - custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", - ) - - env.seed(0) - obs = env.reset() - total_reward = 0 - total_score = 0 - T = 1000 - rollout = [obs.copy()] - try: - for t in tqdm.tqdm(range(T)): - # call the policy - denorm_actions = pipeline(obs, planning_horizon=32) - - # execute action in environment - next_observation, reward, terminal, _ = env.step(denorm_actions) - score = env.get_normalized_score(total_reward) - # update return - total_reward += reward - total_score += score - print( - f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" - f" {total_score}" - ) - # save observations for rendering - rollout.append(next_observation.copy()) - - obs = next_observation - except KeyboardInterrupt: - pass - - print(f"Total reward: {total_reward}") - - -def run(): - _run() - - -if __name__ == "__main__": - run() diff --git a/examples/diffuser/run_diffuser_value_guided.py b/examples/diffuser/run_diffuser_value_guided.py deleted file mode 100644 index 707663abb3bf..000000000000 --- a/examples/diffuser/run_diffuser_value_guided.py +++ /dev/null @@ -1,69 +0,0 @@ -import d4rl # noqa -import gym -import tqdm -from diffusers import DiffusionPipeline - - -config = dict( - n_samples=64, - horizon=32, - num_inference_steps=20, - n_guide_steps=2, - scale_grad_by_std=True, - scale=0.1, - eta=0.0, - t_grad_cutoff=2, - device="cpu", -) - - -def _run(): - env_name = "hopper-medium-v2" - env = gym.make(env_name) - - pipeline = DiffusionPipeline.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", - env=env, - custom_pipeline="/Users/bglickenhaus/Documents/diffusers/examples/community", - ) - - # add a batch dimension and repeat for multiple samples - # [ observation_dim ] --> [ n_samples x observation_dim ] - env.seed(0) - obs = env.reset() - total_reward = 0 - total_score = 0 - T = 1000 - rollout = [obs.copy()] - try: - for t in tqdm.tqdm(range(T)): - # 1. 
Call the policy - # normalize observations for forward passes - denorm_actions = pipeline(obs, planning_horizon=32) - - # execute action in environment - next_observation, reward, terminal, _ = env.step(denorm_actions) - score = env.get_normalized_score(total_reward) - # update return - total_reward += reward - total_score += score - print( - f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" - f" {total_score}" - ) - # save observations for rendering - rollout.append(next_observation.copy()) - - obs = next_observation - except KeyboardInterrupt: - pass - - print(f"Total reward: {total_reward}") - - -def run(): - _run() - - -if __name__ == "__main__": - run() diff --git a/examples/diffuser/train_diffuser.py b/examples/diffuser/train_diffuser.py deleted file mode 100644 index b063a0456d97..000000000000 --- a/examples/diffuser/train_diffuser.py +++ /dev/null @@ -1,312 +0,0 @@ -import os -import warnings - -import numpy as np -import torch - -import d4rl # noqa -import gym -import mediapy as media -import mujoco_py as mjc -import tqdm -from diffusers import DDPMScheduler, UNet1DModel - - -# Define some helper functions - - -DTYPE = torch.float - - -def normalize(x_in, data, key): - means = data[key].mean(axis=0) - stds = data[key].std(axis=0) - return (x_in - means) / stds - - -def de_normalize(x_in, data, key): - means = data[key].mean(axis=0) - stds = data[key].std(axis=0) - return x_in * stds + means - - -def to_torch(x_in, dtype=None, device="cuda"): - dtype = dtype or DTYPE - device = device - if type(x_in) is dict: - return {k: to_torch(v, dtype, device) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(device).type(dtype) - return torch.tensor(x_in, dtype=dtype, device=device) - - -def reset_x0(x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in - - -def run_diffusion(x, scheduler, network, unet, conditions, action_dim, config): - y = None - for i in tqdm.tqdm(scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = torch.full((config["n_samples"],), i, device=config["device"], dtype=torch.long) - # 3. call the sample function - for _ in range(config["n_guide_steps"]): - with torch.enable_grad(): - x.requires_grad_() - y = network(x, timesteps).sample - grad = torch.autograd.grad([y.sum()], [x])[0] - if config["scale_grad_by_std"]: - posterior_variance = scheduler._get_variance(i) - model_std = torch.exp(0.5 * posterior_variance) - grad = model_std * grad - grad[timesteps < config["t_grad_cutoff"]] = 0 - x = x.detach() - x = x + config["scale"] * grad - x = reset_x0(x, conditions, action_dim) - # with torch.no_grad(): - prev_x = unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) - x = scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - - # 3. [optional] add posterior noise to the sample - if config["eta"] > 0: - noise = torch.randn(x.shape).to(x.device) - posterior_variance = scheduler._get_variance(i) # * noise - # no noise when t == 0 - # NOTE: original implementation missing sqrt on posterior_variance - x = x + int(i > 0) * (0.5 * posterior_variance) * config["eta"] * noise # MJ had as log var, exponentiated - - # 4. 
apply conditions to the trajectory - x = reset_x0(x, conditions, action_dim) - x = to_torch(x, device=config["device"]) - # y = network(x, timesteps).sample - return x, y - - -def to_np(x_in): - if torch.is_tensor(x_in): - x_in = x_in.detach().cpu().numpy() - return x_in - - -# from MJ's Diffuser code -# https://github.com/jannerm/diffuser/blob/76ae49ae85ba1c833bf78438faffdc63b8b4d55d/diffuser/utils/colab.py#L79 -def mkdir(savepath): - """ - returns `True` iff `savepath` is created - """ - if not os.path.exists(savepath): - os.makedirs(savepath) - return True - else: - return False - - -def show_sample(renderer, observations, filename="sample.mp4", savebase="videos"): - """ - observations : [ batch_size x horizon x observation_dim ] - """ - - mkdir(savebase) - savepath = os.path.join(savebase, filename) - - images = [] - for rollout in observations: - # [ horizon x height x width x channels ] - img = renderer._renders(rollout, partial=True) - images.append(img) - - # [ horizon x height x (batch_size * width) x channels ] - images = np.concatenate(images, axis=2) - media.write_video(savepath, images, fps=60) - media.show_video(images, codec="h264", fps=60) - return images - - -# Code adapted from Michael Janner -# source: https://github.com/jannerm/diffuser/blob/main/diffuser/utils/rendering.py - - -def env_map(env_name): - """ - map D4RL dataset names to custom fully-observed - variants for rendering - """ - if "halfcheetah" in env_name: - return "HalfCheetahFullObs-v2" - elif "hopper" in env_name: - return "HopperFullObs-v2" - elif "walker2d" in env_name: - return "Walker2dFullObs-v2" - else: - return env_name - - -def get_image_mask(img): - background = (img == 255).all(axis=-1, keepdims=True) - mask = ~background.repeat(3, axis=-1) - return mask - - -def atmost_2d(x): - while x.ndim > 2: - x = x.squeeze(0) - return x - - -def set_state(env, state): - qpos_dim = env.sim.data.qpos.size - qvel_dim = env.sim.data.qvel.size - if not state.size == qpos_dim + qvel_dim: - warnings.warn( - f"[ utils/rendering ] Expected state of size {qpos_dim + qvel_dim}, but got state of size {state.size}" - ) - state = state[: qpos_dim + qvel_dim] - - env.set_state(state[:qpos_dim], state[qpos_dim:]) - - -class MuJoCoRenderer: - """ - default mujoco renderer - """ - - def __init__(self, env): - if type(env) is str: - env = env_map(env) - self.env = gym.make(env) - else: - self.env = env - # - 1 because the envs in renderer are fully-observed - # @TODO : clean up - self.observation_dim = np.prod(self.env.observation_space.shape) - 1 - self.action_dim = np.prod(self.env.action_space.shape) - try: - self.viewer = mjc.MjRenderContextOffscreen(self.env.sim) - except: - print("[ utils/rendering ] Warning: could not initialize offscreen renderer") - self.viewer = None - - def pad_observation(self, observation): - state = np.concatenate( - [ - np.zeros(1), - observation, - ] - ) - return state - - def pad_observations(self, observations): - qpos_dim = self.env.sim.data.qpos.size - # xpos is hidden - xvel_dim = qpos_dim - 1 - xvel = observations[:, xvel_dim] - xpos = np.cumsum(xvel) * self.env.dt - states = np.concatenate( - [ - xpos[:, None], - observations, - ], - axis=-1, - ) - return states - - def render(self, observation, dim=256, partial=False, qvel=True, render_kwargs=None, conditions=None): - if type(dim) == int: - dim = (dim, dim) - - if self.viewer is None: - return np.zeros((*dim, 3), np.uint8) - - if render_kwargs is None: - xpos = observation[0] if not partial else 0 - render_kwargs = {"trackbodyid": 2, 
"distance": 3, "lookat": [xpos, -0.5, 1], "elevation": -20} - - for key, val in render_kwargs.items(): - if key == "lookat": - self.viewer.cam.lookat[:] = val[:] - else: - setattr(self.viewer.cam, key, val) - - if partial: - state = self.pad_observation(observation) - else: - state = observation - - qpos_dim = self.env.sim.data.qpos.size - if not qvel or state.shape[-1] == qpos_dim: - qvel_dim = self.env.sim.data.qvel.size - state = np.concatenate([state, np.zeros(qvel_dim)]) - - set_state(self.env, state) - - self.viewer.render(*dim) - data = self.viewer.read_pixels(*dim, depth=False) - data = data[::-1, :, :] - return data - - def _renders(self, observations, **kwargs): - images = [] - for observation in observations: - img = self.render(observation, **kwargs) - images.append(img) - return np.stack(images, axis=0) - - def renders(self, samples, partial=False, **kwargs): - if partial: - samples = self.pad_observations(samples) - partial = False - - sample_images = self._renders(samples, partial=partial, **kwargs) - - composite = np.ones_like(sample_images[0]) * 255 - - for img in sample_images: - mask = get_image_mask(img) - composite[mask] = img[mask] - - return composite - - def __call__(self, *args, **kwargs): - return self.renders(*args, **kwargs) - - -env_name = "hopper-medium-expert-v2" -env = gym.make(env_name) -data = env.get_dataset() # dataset is only used for normalization in this colab - -# Cuda settings for colab -# torch.cuda.get_device_name(0) -DEVICE = "cpu" -DTYPE = torch.float - -# diffusion model settings -n_samples = 4 # number of trajectories planned via diffusion -horizon = 128 # length of sampled trajectories -state_dim = env.observation_space.shape[0] -action_dim = env.action_space.shape[0] -num_inference_steps = 100 # number of difusion steps - -obs = env.reset() -obs_raw = obs - -# normalize observations for forward passes -obs = normalize(obs, data, "observations") - - -# Two generators for different parts of the diffusion loop to work in colab -generator = torch.Generator(device="cuda") -generator_cpu = torch.Generator(device="cpu") -network = UNet1DModel.from_pretrained("fusing/ddpm-unet-rl-hopper-hor128").to(device=DEVICE) - -scheduler = DDPMScheduler(num_train_timesteps=100, beta_schedule="squaredcos_cap_v2") -optimizer = torch.optim.AdamW( - network.parameters(), - lr=0.001, - betas=(0.95, 0.99), - weight_decay=1e-6, - eps=1e-8, -) - -# TODO: Flesh this out using accelerate library (a la other examples) diff --git a/scripts/convert_models_diffuser_to_diffusers.py b/scripts/convert_models_diffuser_to_diffusers.py deleted file mode 100644 index dda41d4c5d35..000000000000 --- a/scripts/convert_models_diffuser_to_diffusers.py +++ /dev/null @@ -1,82 +0,0 @@ -import json -import os - -import torch - -from diffusers import UNet1DModel - - -os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True) -os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True) - -os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) - - -def unet(hor): - if hor == 128: - down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") - block_out_channels = (32, 128, 256) - up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D") - - elif hor == 32: - down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D") - block_out_channels = (32, 64, 128, 256) - up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D") - model = 
torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch") - state_dict = model.state_dict() - config = dict( - down_block_types=down_block_types, - block_out_channels=block_out_channels, - up_block_types=up_block_types, - layers_per_block=1, - in_channels=14, - out_channels=14, - use_timestep_embedding=True, - out_block_type="OutConv1DBlock", - ) - hf_value_function = UNet1DModel(**config) - print(f"length of state dict: {len(state_dict.keys())}") - print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") - mapping = dict((k, hfk) for k, hfk in zip(model.state_dict().keys(), hf_value_function.state_dict().keys())) - for k, v in mapping.items(): - state_dict[v] = state_dict.pop(k) - hf_value_function.load_state_dict(state_dict) - - torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin") - with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f: - json.dump(config, f) - - -def value_function(): - config = dict( - in_channels=14, - down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), - up_block_types=(), - out_block_type="ValueFunction", - mid_block_type="ValueFunctionMidBlock1D", - block_out_channels=(32, 64, 128, 256), - layers_per_block=1, - always_downsample=True, - ) - - model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch") - state_dict = model - hf_value_function = UNet1DModel(**config) - print(f"length of state dict: {len(state_dict.keys())}") - print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}") - - mapping = dict((k, hfk) for k, hfk in zip(state_dict.keys(), hf_value_function.state_dict().keys())) - for k, v in mapping.items(): - state_dict[v] = state_dict.pop(k) - - hf_value_function.load_state_dict(state_dict) - - torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin") - with open("hub/hopper-medium-v2/value_function/config.json", "w") as f: - json.dump(config, f) - - -if __name__ == "__main__": - unet(32) - # unet(128) - value_function() From 79021e102ed790baee100cbe95fc66c65b850904 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 31 Oct 2022 09:10:04 -0400 Subject: [PATCH 107/133] remove more diffusers stuff --- examples/community/pipeline.py | 114 -------------------- examples/community/value_guided_diffuser.py | 108 ------------------- 2 files changed, 222 deletions(-) delete mode 100644 examples/community/pipeline.py delete mode 100644 examples/community/value_guided_diffuser.py diff --git a/examples/community/pipeline.py b/examples/community/pipeline.py deleted file mode 100644 index 85e359c5c4c9..000000000000 --- a/examples/community/pipeline.py +++ /dev/null @@ -1,114 +0,0 @@ -import numpy as np -import torch - -import tqdm -from diffusers import DiffusionPipeline -from diffusers.models.unet_1d import UNet1DModel -from diffusers.utils.dummy_pt_objects import DDPMScheduler - - -class ValueGuidedDiffuserPipeline(DiffusionPipeline): - def __init__( - self, - value_function: UNet1DModel, - unet: UNet1DModel, - scheduler: DDPMScheduler, - env, - ): - super().__init__() - self.value_function = value_function - self.unet = unet - self.scheduler = scheduler - self.env = env - self.data = env.get_dataset() - self.means = dict() - for key in self.data.keys(): - try: - self.means[key] = self.data[key].mean() - except: - pass - self.stds = dict() - for key 
in self.data.keys(): - try: - self.stds[key] = self.data[key].std() - except: - pass - self.state_dim = env.observation_space.shape[0] - self.action_dim = env.action_space.shape[0] - - def normalize(self, x_in, key): - return (x_in - self.means[key]) / self.stds[key] - - def de_normalize(self, x_in, key): - return x_in * self.stds[key] + self.means[key] - - def to_torch(self, x_in): - if type(x_in) is dict: - return {k: self.to_torch(v) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(self.unet.device) - return torch.tensor(x_in, device=self.unet.device) - - def reset_x0(self, x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in - - def run_diffusion(self, x, conditions, n_guide_steps, scale): - batch_size = x.shape[0] - y = None - for i in tqdm.tqdm(self.scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) - for _ in range(n_guide_steps): - with torch.enable_grad(): - x.requires_grad_() - y = self.value_function(x.permute(0, 2, 1), timesteps).sample - grad = torch.autograd.grad([y.sum()], [x])[0] - - posterior_variance = self.scheduler._get_variance(i) - model_std = torch.exp(0.5 * posterior_variance) - grad = model_std * grad - grad[timesteps < 2] = 0 - x = x.detach() - x = x + scale * grad - x = self.reset_x0(x, conditions, self.action_dim) - prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) - x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - - # apply conditions to the trajectory - x = self.reset_x0(x, conditions, self.action_dim) - x = self.to_torch(x) - return x, y - - def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): - # normalize the observations and create batch dimension - obs = self.normalize(obs, "observations") - obs = obs[None].repeat(batch_size, axis=0) - - conditions = {0: self.to_torch(obs)} - shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) - - # generate initial noise and apply our conditions (to make the trajectories start at current state) - x1 = torch.randn(shape, device=self.unet.device) - x = self.reset_x0(x1, conditions, self.action_dim) - x = self.to_torch(x) - - # run the diffusion process - x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) - - # sort output trajectories by value - sorted_idx = y.argsort(0, descending=True).squeeze() - sorted_values = x[sorted_idx] - actions = sorted_values[:, :, : self.action_dim] - actions = actions.detach().cpu().numpy() - denorm_actions = self.de_normalize(actions, key="actions") - - # select the action with the highest value - if y is not None: - selected_index = 0 - else: - # if we didn't run value guiding, select a random action - selected_index = np.random.randint(0, batch_size) - denorm_actions = denorm_actions[selected_index, 0] - return denorm_actions diff --git a/examples/community/value_guided_diffuser.py b/examples/community/value_guided_diffuser.py deleted file mode 100644 index 6b28e868eddd..000000000000 --- a/examples/community/value_guided_diffuser.py +++ /dev/null @@ -1,108 +0,0 @@ -import torch - -import tqdm -from diffusers import DiffusionPipeline -from diffusers.models.unet_1d import UNet1DModel -from diffusers.utils.dummy_pt_objects import DDPMScheduler - - -class ValueGuidedDiffuserPipeline(DiffusionPipeline): - def __init__( - self, - value_function: UNet1DModel, - unet: UNet1DModel, - scheduler: 
DDPMScheduler, - env, - ): - super().__init__() - self.value_function = value_function - self.unet = unet - self.scheduler = scheduler - self.env = env - self.data = env.get_dataset() - self.means = dict() - for key in self.data.keys(): - try: - self.means[key] = self.data[key].mean() - except: - pass - self.stds = dict() - for key in self.data.keys(): - try: - self.stds[key] = self.data[key].std() - except: - pass - self.state_dim = env.observation_space.shape[0] - self.action_dim = env.action_space.shape[0] - - def normalize(self, x_in, key): - return (x_in - self.means[key]) / self.stds[key] - - def de_normalize(self, x_in, key): - return x_in * self.stds[key] + self.means[key] - - def to_torch(self, x_in): - if type(x_in) is dict: - return {k: self.to_torch(v) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(self.unet.device) - return torch.tensor(x_in, device=self.unet.device) - - def reset_x0(self, x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in - - def run_diffusion(self, x, conditions, n_guide_steps, scale): - batch_size = x.shape[0] - y = None - for i in tqdm.tqdm(self.scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) - for _ in range(n_guide_steps): - with torch.enable_grad(): - x.requires_grad_() - y = self.value_function(x.permute(0, 2, 1), timesteps).sample - grad = torch.autograd.grad([y.sum()], [x])[0] - - posterior_variance = self.scheduler._get_variance(i) - model_std = torch.exp(0.5 * posterior_variance) - grad = model_std * grad - grad[timesteps < 2] = 0 - x = x.detach() - x = x + scale * grad - x = self.reset_x0(x, conditions, self.action_dim) - prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) - x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - - # apply conditions to the trajectory - x = self.reset_x0(x, conditions, self.action_dim) - x = self.to_torch(x) - return x, y - - def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): - # normalize the observations and create batch dimension - obs = self.normalize(obs, "observations") - obs = obs[None].repeat(batch_size, axis=0) - - conditions = {0: self.to_torch(obs)} - shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) - - # generate initial noise and apply our conditions (to make the trajectories start at current state) - x1 = torch.randn(shape, device=self.unet.device) - x = self.reset_x0(x1, conditions, self.action_dim) - x = self.to_torch(x) - - # run the diffusion process - x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) - - # sort output trajectories by value - sorted_idx = y.argsort(0, descending=True).squeeze() - sorted_values = x[sorted_idx] - actions = sorted_values[:, :, : self.action_dim] - actions = actions.detach().cpu().numpy() - denorm_actions = self.de_normalize(actions, key="actions") - - # select the action with the highest value - denorm_actions = denorm_actions[0, 0] - return denorm_actions From fcd5dee4935e5e00a65ae941569c9b42fdfb2133 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 31 Oct 2022 09:11:54 -0400 Subject: [PATCH 108/133] rebase main onto branch --- src/diffusers/models/embeddings.py | 13 +- src/diffusers/models/resnet.py | 142 +------ src/diffusers/models/unet_1d.py | 125 ++----- src/diffusers/models/unet_1d_blocks.py | 346 ++---------------- 
tests/models/test_models_unet_1d.py | 184 +--------- .../dance_diffusion/test_dance_diffusion.py | 4 - 6 files changed, 64 insertions(+), 750 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index 7d3897d97174..35715e17fc47 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -62,21 +62,14 @@ def get_timestep_embedding( class TimestepEmbedding(nn.Module): - def __init__(self, in_channels: int, time_embed_dim: int, act_fn: str = "silu", out_dim: int = None): + def __init__(self, channel: int, time_embed_dim: int, act_fn: str = "silu"): super().__init__() - self.linear_1 = nn.Linear(in_channels, time_embed_dim) + self.linear_1 = nn.Linear(channel, time_embed_dim) self.act = None if act_fn == "silu": self.act = nn.SiLU() - if act_fn == "mish": - self.act = nn.Mish() - - if out_dim is not None: - time_embed_dim_out = out_dim - else: - time_embed_dim_out = time_embed_dim - self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out) + self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim) def forward(self, sample): sample = self.linear_1(sample) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 99b6092aed06..7bb5416adf24 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -5,75 +5,6 @@ import torch.nn.functional as F -class Upsample1D(nn.Module): - """ - An upsampling layer with an optional convolution. - - Parameters: - channels: channels in the inputs and outputs. - use_conv: a bool determining if a convolution is applied. - use_conv_transpose: - out_channels: - """ - - def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_conv_transpose = use_conv_transpose - self.name = name - - self.conv = None - if use_conv_transpose: - self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) - elif use_conv: - self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.use_conv_transpose: - return self.conv(x) - - x = F.interpolate(x, scale_factor=2.0, mode="nearest") - - if self.use_conv: - x = self.conv(x) - - return x - - -class Downsample1D(nn.Module): - """ - A downsampling layer with an optional convolution. - - Parameters: - channels: channels in the inputs and outputs. - use_conv: a bool determining if a convolution is applied. - out_channels: - padding: - """ - - def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.padding = padding - stride = 2 - self.name = name - - if use_conv: - self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=stride, padding=padding) - else: - assert self.channels == self.out_channels - self.conv = nn.AvgPool1d(kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.conv(x) - - class Upsample2D(nn.Module): """ An upsampling layer with an optional convolution. @@ -81,8 +12,7 @@ class Upsample2D(nn.Module): Parameters: channels: channels in the inputs and outputs. use_conv: a bool determining if a convolution is applied. - use_conv_transpose: - out_channels: + dims: determines if the signal is 1D, 2D, or 3D. 
If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"): @@ -150,8 +80,7 @@ class Downsample2D(nn.Module): Parameters: channels: channels in the inputs and outputs. use_conv: a bool determining if a convolution is applied. - out_channels: - padding: + dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"): @@ -486,73 +415,6 @@ def forward(self, hidden_states): return hidden_states * torch.tanh(torch.nn.functional.softplus(hidden_states)) -# unet_rl.py -def rearrange_dims(tensor): - if len(tensor.shape) == 2: - return tensor[:, :, None] - if len(tensor.shape) == 3: - return tensor[:, :, None, :] - elif len(tensor.shape) == 4: - return tensor[:, :, 0, :] - else: - raise ValueError(f"`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.") - - -class Conv1dBlock(nn.Module): - """ - Conv1d --> GroupNorm --> Mish - """ - - def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): - super().__init__() - - self.conv1d = nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2) - self.group_norm = nn.GroupNorm(n_groups, out_channels) - self.mish = nn.Mish() - - def forward(self, x): - x = self.conv1d(x) - x = rearrange_dims(x) - x = self.group_norm(x) - x = rearrange_dims(x) - x = self.mish(x) - return x - - -# unet_rl.py -class ResidualTemporalBlock1D(nn.Module): - def __init__(self, inp_channels, out_channels, embed_dim, kernel_size=5): - super().__init__() - - self.blocks = nn.ModuleList( - [ - Conv1dBlock(inp_channels, out_channels, kernel_size), - Conv1dBlock(out_channels, out_channels, kernel_size), - ] - ) - self.time_emb_act = nn.Mish() - self.time_emb = nn.Linear(embed_dim, out_channels) - - self.residual_conv = ( - nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() - ) - - def forward(self, x, t): - """ - Args: - x : [ batch_size x inp_channels x horizon ] - t : [ batch_size x embed_dim ] - - returns: - out : [ batch_size x out_channels x horizon ] - """ - t = self.time_emb_act(t) - t = self.time_emb(t) - out = self.blocks[0](x) + rearrange_dims(t) - out = self.blocks[1](out) - return out + self.residual_conv(x) - - def upsample_2d(hidden_states, kernel=None, factor=2, gain=1): r"""Upsample2D a batch of 2D images with the given filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given diff --git a/src/diffusers/models/unet_1d.py b/src/diffusers/models/unet_1d.py index 7fdee9ea84ba..cc0685deb933 100644 --- a/src/diffusers/models/unet_1d.py +++ b/src/diffusers/models/unet_1d.py @@ -1,17 +1,3 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
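The hunks in this file revert `UNet1DModel` from the RL (Diffuser) configuration back to the audio defaults used for Dance Diffusion. For orientation, a model carrying the restored defaults would be constructed roughly as follows; this is a sketch assembled from the `+` lines below, not code from the patch itself:

```python
from diffusers import UNet1DModel

# audio-style defaults restored by this commit
model = UNet1DModel(
    sample_size=65536,
    in_channels=2,
    out_channels=2,
    time_embedding_type="fourier",
    use_timestep_embedding=False,
    down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
    mid_block_type="UNetMidBlock1D",
    up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
    block_out_channels=(32, 32, 64),
)
```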
- from dataclasses import dataclass from typing import Optional, Tuple, Union @@ -22,7 +8,7 @@ from ..modeling_utils import ModelMixin from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps -from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block +from .unet_1d_blocks import get_down_block, get_mid_block, get_up_block @dataclass @@ -44,7 +30,7 @@ class UNet1DModel(ModelMixin, ConfigMixin): implements for all the model (such as downloading or saving, etc.) Parameters: - sample_size (`int`, *optional*): Default length of sample. Should be adaptable at runtime. + sample_size (`int`, *optionl*): Default length of sample. Should be adaptable at runtime. in_channels (`int`, *optional*, defaults to 2): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 2): Number of channels in the output. time_embedding_type (`str`, *optional*, defaults to `"fourier"`): Type of time embedding to use. @@ -57,10 +43,6 @@ class UNet1DModel(ModelMixin, ConfigMixin): obj:`("UpBlock1D", "UpBlock1DNoSkip", "AttnUpBlock1D")`): Tuple of upsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to : obj:`(32, 32, 64)`): Tuple of block output channels. - mid_block_type: - out_block_type: - act_fn: - norm_num_groups: """ @register_to_config @@ -68,22 +50,17 @@ def __init__( self, sample_size: int = 65536, sample_rate: Optional[int] = None, - in_channels: int = 14, - out_channels: int = 14, + in_channels: int = 2, + out_channels: int = 2, extra_in_channels: int = 0, - time_embedding_type: str = "positional", - flip_sin_to_cos: bool = False, - use_timestep_embedding: bool = True, - downscale_freq_shift: float = 1.0, - down_block_types: Tuple[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), - up_block_types: Tuple[str] = ("UpResnetBlock1D", "UpResnetBlock1D"), - mid_block_type: Tuple[str] = "MidResTemporalBlock1D", - out_block_type: str = None, - block_out_channels: Tuple[int] = (32, 128, 256), - act_fn: str = "mish", - norm_num_groups: int = 8, - layers_per_block: int = 1, - always_downsample: bool = False, + time_embedding_type: str = "fourier", + freq_shift: int = 0, + flip_sin_to_cos: bool = True, + use_timestep_embedding: bool = False, + down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), + mid_block_type: str = "UNetMidBlock1D", + up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), + block_out_channels: Tuple[int] = (32, 32, 64), ): super().__init__() @@ -96,19 +73,12 @@ def __init__( ) timestep_input_dim = 2 * block_out_channels[0] elif time_embedding_type == "positional": - self.time_proj = Timesteps( - block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=downscale_freq_shift - ) + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] if use_timestep_embedding: time_embed_dim = block_out_channels[0] * 4 - self.time_mlp = TimestepEmbedding( - in_channels=timestep_input_dim, - time_embed_dim=time_embed_dim, - act_fn=act_fn, - out_dim=block_out_channels[0], - ) + self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) self.down_blocks = nn.ModuleList([]) self.mid_block = None @@ -124,66 +94,38 @@ def __init__( if i == 0: input_channel += extra_in_channels - is_final_block = i == len(block_out_channels) - 1 - down_block = get_down_block( down_block_type, - num_layers=layers_per_block, 
in_channels=input_channel, out_channels=output_channel, - temb_channels=block_out_channels[0], - add_downsample=not is_final_block or always_downsample, ) self.down_blocks.append(down_block) # mid self.mid_block = get_mid_block( - mid_block_type, - in_channels=block_out_channels[-1], + mid_block_type=mid_block_type, mid_channels=block_out_channels[-1], - out_channels=block_out_channels[-1], - embed_dim=block_out_channels[0], - num_layers=layers_per_block, - add_downsample=always_downsample, + in_channels=block_out_channels[-1], + out_channels=None, ) # up reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] - if out_block_type is None: - final_upsample_channels = out_channels - else: - final_upsample_channels = block_out_channels[0] - for i, up_block_type in enumerate(up_block_types): prev_output_channel = output_channel - output_channel = ( - reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels - ) - - is_final_block = i == len(block_out_channels) - 1 + output_channel = reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else out_channels up_block = get_up_block( up_block_type, - num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, - temb_channels=block_out_channels[0], - add_upsample=not is_final_block, ) self.up_blocks.append(up_block) prev_output_channel = output_channel - # out - num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) - self.out_block = get_out_block( - out_block_type=out_block_type, - num_groups_out=num_groups_out, - embed_dim=block_out_channels[0], - out_channels=out_channels, - act_fn=act_fn, - fc_dim=block_out_channels[-1] // 4, - ) + # TODO(PVP, Nathan) placeholder for RL application to be merged shortly + # Totally fine to add another layer with a if statement - no need for nn.Identity here def forward( self, @@ -202,20 +144,12 @@ def forward( [`~models.unet_1d.UNet1DOutput`] or `tuple`: [`~models.unet_1d.UNet1DOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ - # 1. time - timesteps = timestep - if not torch.is_tensor(timesteps): - timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) - elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: - timesteps = timesteps[None].to(sample.device) - - timestep_embed = self.time_proj(timesteps) - if self.config.use_timestep_embedding: - timestep_embed = self.time_mlp(timestep_embed) - else: - timestep_embed = timestep_embed[..., None] - timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) + if len(timestep.shape) == 0: + timestep = timestep[None] + + timestep_embed = self.time_proj(timestep)[..., None] + timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) # 2. down down_block_res_samples = () @@ -224,18 +158,13 @@ def forward( down_block_res_samples += res_samples # 3. mid - if self.mid_block: - sample = self.mid_block(sample, timestep_embed) + sample = self.mid_block(sample) # 4. up for i, upsample_block in enumerate(self.up_blocks): res_samples = down_block_res_samples[-1:] down_block_res_samples = down_block_res_samples[:-1] - sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed) - - # 5. 
post-process - if self.out_block: - sample = self.out_block(sample, timestep_embed) + sample = upsample_block(sample, res_samples) if not return_dict: return (sample,) diff --git a/src/diffusers/models/unet_1d_blocks.py b/src/diffusers/models/unet_1d_blocks.py index fc758ebbb044..9009071d1e78 100644 --- a/src/diffusers/models/unet_1d_blocks.py +++ b/src/diffusers/models/unet_1d_blocks.py @@ -17,256 +17,6 @@ import torch.nn.functional as F from torch import nn -from .resnet import Downsample1D, ResidualTemporalBlock1D, Upsample1D, rearrange_dims - - -class DownResnetBlock1D(nn.Module): - def __init__( - self, - in_channels, - out_channels=None, - num_layers=1, - conv_shortcut=False, - temb_channels=32, - groups=32, - groups_out=None, - non_linearity=None, - time_embedding_norm="default", - output_scale_factor=1.0, - add_downsample=True, - ): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - self.time_embedding_norm = time_embedding_norm - self.add_downsample = add_downsample - self.output_scale_factor = output_scale_factor - - if groups_out is None: - groups_out = groups - - # there will always be at least one resnet - resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=temb_channels)] - - for _ in range(num_layers): - resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)) - - self.resnets = nn.ModuleList(resnets) - - if non_linearity == "swish": - self.nonlinearity = lambda x: F.silu(x) - elif non_linearity == "mish": - self.nonlinearity = nn.Mish() - elif non_linearity == "silu": - self.nonlinearity = nn.SiLU() - else: - self.nonlinearity = None - - self.downsample = None - if add_downsample: - self.downsample = Downsample1D(out_channels, use_conv=True, padding=1) - - def forward(self, hidden_states, temb=None): - output_states = () - - hidden_states = self.resnets[0](hidden_states, temb) - for resnet in self.resnets[1:]: - hidden_states = resnet(hidden_states, temb) - - output_states += (hidden_states,) - - if self.nonlinearity is not None: - hidden_states = self.nonlinearity(hidden_states) - - if self.downsample is not None: - hidden_states = self.downsample(hidden_states) - - return hidden_states, output_states - - -class UpResnetBlock1D(nn.Module): - def __init__( - self, - in_channels, - out_channels=None, - num_layers=1, - temb_channels=32, - groups=32, - groups_out=None, - non_linearity=None, - time_embedding_norm="default", - output_scale_factor=1.0, - add_upsample=True, - ): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.time_embedding_norm = time_embedding_norm - self.add_upsample = add_upsample - self.output_scale_factor = output_scale_factor - - if groups_out is None: - groups_out = groups - - # there will always be at least one resnet - resnets = [ResidualTemporalBlock1D(2 * in_channels, out_channels, embed_dim=temb_channels)] - - for _ in range(num_layers): - resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)) - - self.resnets = nn.ModuleList(resnets) - - if non_linearity == "swish": - self.nonlinearity = lambda x: F.silu(x) - elif non_linearity == "mish": - self.nonlinearity = nn.Mish() - elif non_linearity == "silu": - self.nonlinearity = nn.SiLU() - else: - self.nonlinearity = None - - self.upsample = None 
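-        # (context for this removed block: UpResnetBlock1D mirrors
-        # DownResnetBlock1D. Its forward() concatenates the last skip tensor
-        # onto the input, runs the temporal resnets, applies the optional
-        # nonlinearity, and upsamples via a transposed convolution when
-        # add_upsample is set.)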
- if add_upsample: - self.upsample = Upsample1D(out_channels, use_conv_transpose=True) - - def forward(self, hidden_states, res_hidden_states_tuple=None, temb=None): - if res_hidden_states_tuple is not None: - res_hidden_states = res_hidden_states_tuple[-1] - hidden_states = torch.cat((hidden_states, res_hidden_states), dim=1) - - hidden_states = self.resnets[0](hidden_states, temb) - for resnet in self.resnets[1:]: - hidden_states = resnet(hidden_states, temb) - - if self.nonlinearity is not None: - hidden_states = self.nonlinearity(hidden_states) - - if self.upsample is not None: - hidden_states = self.upsample(hidden_states) - - return hidden_states - - -class ValueFunctionMidBlock1D(nn.Module): - def __init__(self, in_channels, out_channels, embed_dim): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.embed_dim = embed_dim - - self.res1 = ResidualTemporalBlock1D(in_channels, in_channels // 2, embed_dim=embed_dim) - self.down1 = Downsample1D(out_channels // 2, use_conv=True) - self.res2 = ResidualTemporalBlock1D(in_channels // 2, in_channels // 4, embed_dim=embed_dim) - self.down2 = Downsample1D(out_channels // 4, use_conv=True) - - def forward(self, x, temb=None): - x = self.res1(x, temb) - x = self.down1(x) - x = self.res2(x, temb) - x = self.down2(x) - return x - - -class MidResTemporalBlock1D(nn.Module): - def __init__( - self, - in_channels, - out_channels, - embed_dim, - num_layers: int = 1, - add_downsample: bool = False, - add_upsample: bool = False, - non_linearity=None, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.add_downsample = add_downsample - - # there will always be at least one resnet - resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim)] - - for _ in range(num_layers): - resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=embed_dim)) - - self.resnets = nn.ModuleList(resnets) - - if non_linearity == "swish": - self.nonlinearity = lambda x: F.silu(x) - elif non_linearity == "mish": - self.nonlinearity = nn.Mish() - elif non_linearity == "silu": - self.nonlinearity = nn.SiLU() - else: - self.nonlinearity = None - - self.upsample = None - if add_upsample: - self.upsample = Downsample1D(out_channels, use_conv=True) - - self.downsample = None - if add_downsample: - self.downsample = Downsample1D(out_channels, use_conv=True) - - if self.upsample and self.downsample: - raise ValueError("Block cannot downsample and upsample") - - def forward(self, hidden_states, temb): - hidden_states = self.resnets[0](hidden_states, temb) - for resnet in self.resnets[1:]: - hidden_states = resnet(hidden_states, temb) - - if self.upsample: - hidden_states = self.upsample(hidden_states) - if self.downsample: - self.downsample = self.downsample(hidden_states) - - return hidden_states - - -class OutConv1DBlock(nn.Module): - def __init__(self, num_groups_out, out_channels, embed_dim, act_fn): - super().__init__() - self.final_conv1d_1 = nn.Conv1d(embed_dim, embed_dim, 5, padding=2) - self.final_conv1d_gn = nn.GroupNorm(num_groups_out, embed_dim) - if act_fn == "silu": - self.final_conv1d_act = nn.SiLU() - if act_fn == "mish": - self.final_conv1d_act = nn.Mish() - self.final_conv1d_2 = nn.Conv1d(embed_dim, out_channels, 1) - - def forward(self, hidden_states, temb=None): - hidden_states = self.final_conv1d_1(hidden_states) - hidden_states = rearrange_dims(hidden_states) - hidden_states = self.final_conv1d_gn(hidden_states) - hidden_states = 
rearrange_dims(hidden_states) - hidden_states = self.final_conv1d_act(hidden_states) - hidden_states = self.final_conv1d_2(hidden_states) - return hidden_states - - -class OutValueFunctionBlock(nn.Module): - def __init__(self, fc_dim, embed_dim): - super().__init__() - self.final_block = nn.ModuleList( - [ - nn.Linear(fc_dim + embed_dim, fc_dim // 2), - nn.Mish(), - nn.Linear(fc_dim // 2, 1), - ] - ) - - def forward(self, hidden_states, temb): - hidden_states = hidden_states.view(hidden_states.shape[0], -1) - hidden_states = torch.cat((hidden_states, temb), dim=-1) - for layer in self.final_block: - hidden_states = layer(hidden_states) - - return hidden_states - _kernels = { "linear": [1 / 8, 3 / 8, 3 / 8, 1 / 8], @@ -312,7 +62,7 @@ def __init__(self, kernel="linear", pad_mode="reflect"): self.pad = kernel_1d.shape[0] // 2 - 1 self.register_buffer("kernel", kernel_1d) - def forward(self, hidden_states, temb=None): + def forward(self, hidden_states): hidden_states = F.pad(hidden_states, ((self.pad + 1) // 2,) * 2, self.pad_mode) weight = hidden_states.new_zeros([hidden_states.shape[1], hidden_states.shape[1], self.kernel.shape[0]]) indices = torch.arange(hidden_states.shape[1], device=hidden_states.device) @@ -412,6 +162,32 @@ def forward(self, hidden_states): return output +def get_down_block(down_block_type, out_channels, in_channels): + if down_block_type == "DownBlock1D": + return DownBlock1D(out_channels=out_channels, in_channels=in_channels) + elif down_block_type == "AttnDownBlock1D": + return AttnDownBlock1D(out_channels=out_channels, in_channels=in_channels) + elif down_block_type == "DownBlock1DNoSkip": + return DownBlock1DNoSkip(out_channels=out_channels, in_channels=in_channels) + raise ValueError(f"{down_block_type} does not exist.") + + +def get_up_block(up_block_type, in_channels, out_channels): + if up_block_type == "UpBlock1D": + return UpBlock1D(in_channels=in_channels, out_channels=out_channels) + elif up_block_type == "AttnUpBlock1D": + return AttnUpBlock1D(in_channels=in_channels, out_channels=out_channels) + elif up_block_type == "UpBlock1DNoSkip": + return UpBlock1DNoSkip(in_channels=in_channels, out_channels=out_channels) + raise ValueError(f"{up_block_type} does not exist.") + + +def get_mid_block(mid_block_type, in_channels, mid_channels, out_channels): + if mid_block_type == "UNetMidBlock1D": + return UNetMidBlock1D(in_channels=in_channels, mid_channels=mid_channels, out_channels=out_channels) + raise ValueError(f"{mid_block_type} does not exist.") + + class UNetMidBlock1D(nn.Module): def __init__(self, mid_channels, in_channels, out_channels=None): super().__init__() @@ -441,7 +217,7 @@ def __init__(self, mid_channels, in_channels, out_channels=None): self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) - def forward(self, hidden_states, temb=None): + def forward(self, hidden_states): hidden_states = self.down(hidden_states) for attn, resnet in zip(self.attentions, self.resnets): hidden_states = resnet(hidden_states) @@ -546,7 +322,7 @@ def __init__(self, in_channels, out_channels, mid_channels=None): self.resnets = nn.ModuleList(resnets) self.up = Upsample1d(kernel="cubic") - def forward(self, hidden_states, res_hidden_states_tuple, temb=None): + def forward(self, hidden_states, res_hidden_states_tuple): res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) @@ -573,7 +349,7 @@ def __init__(self, in_channels, out_channels, mid_channels=None): self.resnets = 
nn.ModuleList(resnets) self.up = Upsample1d(kernel="cubic") - def forward(self, hidden_states, res_hidden_states_tuple, temb=None): + def forward(self, hidden_states, res_hidden_states_tuple): res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) @@ -598,7 +374,7 @@ def __init__(self, in_channels, out_channels, mid_channels=None): self.resnets = nn.ModuleList(resnets) - def forward(self, hidden_states, res_hidden_states_tuple, temb=None): + def forward(self, hidden_states, res_hidden_states_tuple): res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) @@ -606,63 +382,3 @@ def forward(self, hidden_states, res_hidden_states_tuple, temb=None): hidden_states = resnet(hidden_states) return hidden_states - - -def get_down_block(down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample): - if down_block_type == "DownResnetBlock1D": - return DownResnetBlock1D( - in_channels=in_channels, - num_layers=num_layers, - out_channels=out_channels, - temb_channels=temb_channels, - add_downsample=add_downsample, - ) - elif down_block_type == "DownBlock1D": - return DownBlock1D(out_channels=out_channels, in_channels=in_channels) - elif down_block_type == "AttnDownBlock1D": - return AttnDownBlock1D(out_channels=out_channels, in_channels=in_channels) - elif down_block_type == "DownBlock1DNoSkip": - return DownBlock1DNoSkip(out_channels=out_channels, in_channels=in_channels) - raise ValueError(f"{down_block_type} does not exist.") - - -def get_up_block(up_block_type, num_layers, in_channels, out_channels, temb_channels, add_upsample): - if up_block_type == "UpResnetBlock1D": - return UpResnetBlock1D( - in_channels=in_channels, - num_layers=num_layers, - out_channels=out_channels, - temb_channels=temb_channels, - add_upsample=add_upsample, - ) - elif up_block_type == "UpBlock1D": - return UpBlock1D(in_channels=in_channels, out_channels=out_channels) - elif up_block_type == "AttnUpBlock1D": - return AttnUpBlock1D(in_channels=in_channels, out_channels=out_channels) - elif up_block_type == "UpBlock1DNoSkip": - return UpBlock1DNoSkip(in_channels=in_channels, out_channels=out_channels) - raise ValueError(f"{up_block_type} does not exist.") - - -def get_mid_block(mid_block_type, num_layers, in_channels, mid_channels, out_channels, embed_dim, add_downsample): - if mid_block_type == "MidResTemporalBlock1D": - return MidResTemporalBlock1D( - num_layers=num_layers, - in_channels=in_channels, - out_channels=out_channels, - embed_dim=embed_dim, - add_downsample=add_downsample, - ) - elif mid_block_type == "ValueFunctionMidBlock1D": - return ValueFunctionMidBlock1D(in_channels=in_channels, out_channels=out_channels, embed_dim=embed_dim) - elif mid_block_type == "UNetMidBlock1D": - return UNetMidBlock1D(in_channels=in_channels, mid_channels=mid_channels, out_channels=out_channels) - raise ValueError(f"{mid_block_type} does not exist.") - - -def get_out_block(*, out_block_type, num_groups_out, embed_dim, out_channels, act_fn, fc_dim): - if out_block_type == "OutConv1DBlock": - return OutConv1DBlock(num_groups_out, out_channels, embed_dim, act_fn) - elif out_block_type == "ValueFunction": - return OutValueFunctionBlock(fc_dim, embed_dim) - return None diff --git a/tests/models/test_models_unet_1d.py b/tests/models/test_models_unet_1d.py index bf035d8ad96a..286c7525e2ca 100644 --- a/tests/models/test_models_unet_1d.py +++ b/tests/models/test_models_unet_1d.py @@ -18,194 +18,12 
@@ import torch from diffusers import UNet1DModel -from diffusers.utils import floats_tensor, slow, torch_device - -from .test_modeling_common import ModelTesterMixin +from diffusers.utils import slow, torch_device torch.backends.cuda.matmul.allow_tf32 = False -class UNet1DModelTests(ModelTesterMixin, unittest.TestCase): - model_class = UNet1DModel - - @property - def dummy_input(self): - batch_size = 4 - num_features = 14 - seq_len = 16 - - noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) - time_step = torch.tensor([10] * batch_size).to(torch_device) - - return {"sample": noise, "timestep": time_step} - - @property - def input_shape(self): - return (4, 14, 16) - - @property - def output_shape(self): - return (4, 14, 16) - - def test_ema_training(self): - pass - - def test_training(self): - pass - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "block_out_channels": (32, 128, 256), - "in_channels": 14, - "out_channels": 14, - "time_embedding_type": "positional", - "use_timestep_embedding": True, - "out_block_type": "OutConv1DBlock", - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - model, loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" - ) - self.assertIsNotNone(model) - self.assertEqual(len(loading_info["missing_keys"]), 0) - - model.to(torch_device) - image = model(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - model = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet") - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - num_features = model.in_channels - seq_len = 16 - noise = torch.randn((1, seq_len, num_features)).permute( - 0, 2, 1 - ) # match original, we can update values and remove - time_step = torch.full((num_features,), 0) - - with torch.no_grad(): - output = model(noise, time_step).sample.permute(0, 2, 1) - - output_slice = output[0, -3:, -3:].flatten() - # fmt: off - expected_output_slice = torch.tensor([-2.137172, 1.1426016, 0.3688687, -0.766922, 0.7303146, 0.11038864, -0.4760633, 0.13270172, 0.02591348]) - # fmt: on - self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) - - def test_forward_with_norm_groups(self): - # Not implemented yet for this UNet - pass - - -class UNetRLModelTests(ModelTesterMixin, unittest.TestCase): - model_class = UNet1DModel - - @property - def dummy_input(self): - batch_size = 4 - num_features = 14 - seq_len = 16 - - noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) - time_step = torch.tensor([10] * batch_size).to(torch_device) - - return {"sample": noise, "timestep": time_step} - - @property - def input_shape(self): - return (4, 14, 16) - - @property - def output_shape(self): - return (4, 14, 1) - - def test_output(self): - # UNetRL is a value-function is different output shape - init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() - model = self.model_class(**init_dict) - model.to(torch_device) - model.eval() - - with torch.no_grad(): - output = model(**inputs_dict) - - if isinstance(output, dict): - output = output.sample - - self.assertIsNotNone(output) - expected_shape = torch.Size((inputs_dict["sample"].shape[0], 1)) - self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") - - def 
test_ema_training(self): - pass - - def test_training(self): - pass - - def prepare_init_args_and_inputs_for_common(self): - init_dict = { - "in_channels": 14, - "out_channels": 1, - "down_block_types": ["DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"], - "up_block_types": [], - "out_block_type": "ValueFunction", - "mid_block_type": "ValueFunctionMidBlock1D", - "block_out_channels": [32, 64, 128, 256], - "layers_per_block": 1, - "always_downsample": True, - "use_timestep_embedding": True, - } - inputs_dict = self.dummy_input - return init_dict, inputs_dict - - def test_from_pretrained_hub(self): - value_function, vf_loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" - ) - self.assertIsNotNone(value_function) - self.assertEqual(len(vf_loading_info["missing_keys"]), 0) - - value_function.to(torch_device) - image = value_function(**self.dummy_input) - - assert image is not None, "Make sure output is not None" - - def test_output_pretrained(self): - value_function, vf_loading_info = UNet1DModel.from_pretrained( - "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" - ) - torch.manual_seed(0) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(0) - - num_features = value_function.in_channels - seq_len = 14 - noise = torch.randn((1, seq_len, num_features)).permute( - 0, 2, 1 - ) # match original, we can update values and remove - time_step = torch.full((num_features,), 0) - - with torch.no_grad(): - output = value_function(noise, time_step).sample - - # fmt: off - expected_output_slice = torch.tensor([165.25] * seq_len) - # fmt: on - self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) - - def test_forward_with_norm_groups(self): - # Not implemented yet for this UNet - pass - - class UnetModel1DTests(unittest.TestCase): @slow def test_unet_1d_maestro(self): diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py index 76349f4c8a33..737d1c57d154 100644 --- a/tests/pipelines/dance_diffusion/test_dance_diffusion.py +++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py @@ -44,10 +44,6 @@ def dummy_unet(self): sample_rate=16_000, in_channels=2, out_channels=2, - flip_sin_to_cos=True, - use_timestep_embedding=False, - time_embedding_type="fourier", - mid_block_type="UNetMidBlock1D", down_block_types=["DownBlock1DNoSkip"] + ["DownBlock1D"] + ["AttnDownBlock1D"], up_block_types=["AttnUpBlock1D"] + ["UpBlock1D"] + ["UpBlock1DNoSkip"], ) From c6ac28436b8bab624c800394483aa115dcb56024 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 31 Oct 2022 12:39:18 -0400 Subject: [PATCH 109/133] make fix copies --- .../image_diffusion.ipynb | 154 +++--------------- src/diffusers/utils/dummy_pt_objects.py | 15 ++ 2 files changed, 34 insertions(+), 135 deletions(-) diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb index 0ae6179c54ba..ba086cc86701 100644 --- a/examples/progressive_distillation/image_diffusion.ipynb +++ b/examples/progressive_distillation/image_diffusion.ipynb @@ -2,19 +2,9 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\Users\\Ben\\Anaconda3\\envs\\diffusers\\lib\\site-packages\\tqdm\\auto.py:22: 
TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n", - "NOTE: Redirects are currently not supported in Windows or MacOs.\n" - ] - } - ], + "outputs": [], "source": [ "import torch\n", "from PIL import Image\n", @@ -42,27 +32,16 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "torch.manual_seed(0)" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -71,7 +50,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -83,7 +62,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -93,7 +72,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -111,7 +90,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -121,18 +100,9 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Downloading: 100%|██████████| 455M/455M [00:17<00:00, 25.7MB/s] \n", - "Downloading: 100%|██████████| 665/665 [00:00<00:00, 332kB/s]\n" - ] - } - ], + "outputs": [], "source": [ "teacher = UNet2DModel.from_pretrained(\"bglick13/minnie-diffusion\")\n", "distiller = DistillationPipeline()" @@ -140,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -150,29 +120,9 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Distill step 0 from 1000 -> 500\n" - ] - }, - { - "ename": "NameError", - "evalue": "name 'DDPMScheduler' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", - "\u001b[1;32mc:\\Users\\Ben\\Documents\\diffusers\\examples\\progressive_distillation\\image_diffusion.ipynb Cell 10\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[39mfor\u001b[39;00m distill_step \u001b[39min\u001b[39;00m \u001b[39mrange\u001b[39m(\u001b[39m2\u001b[39m):\n\u001b[0;32m 5\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mDistill step \u001b[39m\u001b[39m{\u001b[39;00mdistill_step\u001b[39m}\u001b[39;00m\u001b[39m from \u001b[39m\u001b[39m{\u001b[39;00mN\u001b[39m}\u001b[39;00m\u001b[39m -> \u001b[39m\u001b[39m{\u001b[39;00mN \u001b[39m/\u001b[39m\u001b[39m/\u001b[39m \u001b[39m2\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m)\n\u001b[1;32m----> 6\u001b[0m teacher, distilled_ema, distill_accelrator \u001b[39m=\u001b[39m distiller(teacher, N, train_dataset, epochs\u001b[39m=\u001b[39;49m\u001b[39m300\u001b[39;49m, batch_size\u001b[39m=\u001b[39;49mtraining_config\u001b[39m.\u001b[39;49mbatch_size)\n\u001b[0;32m 7\u001b[0m N 
\u001b[39m=\u001b[39m N \u001b[39m/\u001b[39m\u001b[39m/\u001b[39m \u001b[39m2\u001b[39m\n\u001b[0;32m 8\u001b[0m new_scheduler \u001b[39m=\u001b[39m DDPMScheduler(num_train_timesteps\u001b[39m=\u001b[39mN, beta_schedule\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39msquaredcos_cap_v2\u001b[39m\u001b[39m\"\u001b[39m)\n", - "File \u001b[1;32m~\\Documents\\diffusers\\src\\diffusers\\pipelines\\progressive_distillation\\pipeline_progressive_distillation.py:71\u001b[0m, in \u001b[0;36mDistillationPipeline.__call__\u001b[1;34m(self, teacher, n_teacher_trainsteps, train_data, epochs, lr, batch_size, gamma, generator, gradient_accumulation_steps, device, mixed_precision, adam_beta1, adam_beta2, adam_weight_decay, adam_epsilon, ema_inv_gamma, ema_power, ema_max_decay, use_ema, permute_samples, **kwargs)\u001b[0m\n\u001b[0;32m 68\u001b[0m train_dataloader \u001b[39m=\u001b[39m DataLoader(train_data, batch_size\u001b[39m=\u001b[39mbatch_size, shuffle\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n\u001b[0;32m 70\u001b[0m \u001b[39m# Setup the noise schedulers for the teacher and student\u001b[39;00m\n\u001b[1;32m---> 71\u001b[0m teacher_scheduler \u001b[39m=\u001b[39m DDPMScheduler(num_train_timesteps\u001b[39m=\u001b[39mn_teacher_trainsteps, beta_schedule\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39msquaredcos_cap_v2\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m 72\u001b[0m student_scheduler \u001b[39m=\u001b[39m DDPMScheduler(\n\u001b[0;32m 73\u001b[0m num_train_timesteps\u001b[39m=\u001b[39mn_teacher_trainsteps \u001b[39m/\u001b[39m\u001b[39m/\u001b[39m \u001b[39m2\u001b[39m, beta_schedule\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39msquaredcos_cap_v2\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[0;32m 74\u001b[0m )\n\u001b[0;32m 76\u001b[0m \u001b[39m# Initialize the student model as a direct copy of the teacher\u001b[39;00m\n", - "\u001b[1;31mNameError\u001b[0m: name 'DDPMScheduler' is not defined" - ] - } - ], + "outputs": [], "source": [ "teacher = UNet2DModel.from_pretrained(\"bglick13/minnie-diffusion\")\n", "N = 1000\n", @@ -204,54 +154,9 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAZwElEQVR4nHV6V7Mk15FeZp5zyra9bubOwA5ADgHCkFySIBeMDZERFHdDetCD/oekn6RHhaQIcUlQjH0Ul9KCFksDEoYzAzP2unZljsvUw+nu2zMAK2LmVlfXqUrz5ZfmNH7v26/lpbl65ajvPRHZrmUJSqvZxXI8GQHp4aAmQonQNF1mzOHRwcnp+b/99//x6vXnyrI4Oz/d3z8KPrR9O7s4+/Pvf/POL/5vAPX8c8/e+NKrg9FkMByfnpzcfu/33/93/2G+mI/G0+efe34xm83n86ZZHV+7rog+/vhOVdanp6fL5Wy1WnzjW9/5yte+qsicPHr0wXvvrdo2z4u2axFgUNe274uiqOq6yIv3339XC0BdV1VZnp8vhsNhiFzXA5YYIkeORV7keRY5zuaLGDl417Z1nudI5L01Rud5EUK8e/fj4XhiTNZ1XYjRBZnN5l3XFFXdtqvfvfMbxfbo8BABY4zW2hjZOR+Zl4vFcDjM8xJIOe+bpvHen52ePHp4kmdZkrV3vu/7+cVZkRdGKwAgotVySYh5lhMpnIzHIUYE0EoprRExM7lWejgYDOrKh2B7q7V2ziGpzBgiVdeDPC+01oN6WOS5yUzXNdb29+7dD56tc4vlcjm70Fqfnpz84fd/0EqVZTmd7lnbzy8uQvCAorVCBJNlWZaNxqM8z4wxHGPTNCJsskwbU1SV1gYE2qaZjMdVWWdZXtU1ADRNa60jRVDk2Wq1AhTvXQgBCX0MiFgWVW7M+dk5IopEEciLDAhZBFE554zJtDZNs8rz0lnXtf38/EwElsvVxcX8/ORktVy+/S9vz2azLDfe+7KqMpNZ21trQ3AHBwdFWSERKdWsVtb2VVUPhqO6rrU21rqmbQQAQJx3RDSeTibTqTbGWhtCQCKlFB0fX80LE6LPsmzVNISUZ6bvW0TM8kwbo3WWZxlztH2vjYoxMkcWJsSqqgBltpgNhqNr156+f//B4uJsUOYa4OJi/tFHH/3p3T/98Y9/jNFrk81mcxYuq6q3fYwBEfu+X62Wfd8J8OnJo9n5eQh+PNkDwEePHt27e3exWCwWc++9946ZBSArciTwwedlySBlVWqlFDAGz0WROeuyLBuOhqu2XUnnrMvyvCoLRcpaz8wiuGoakxVGG6U0IJosH48nJstF+L333uPIwYYiyxrnH56ce36/bTujYLnqTk9PtDZFURChItX1XdOssiy/OD9TpJyzMYYYeTgcE+L5yUnkeHT12DkvIIhIhMwiIpGZiOrB0PZdy0Ln5xeL1dIHDwCROUTnvXfOZ7lBxFWzKsucRUREay0CMcTJdD/LC2VMb511riqrrmvu3/v09u1b1vuu6733McbO+Yv5oizKzoa/3Pnk/OJ8uVrU9SAzmTFZCqr5fL5arUIIMUYkAoDz89PVauW8z/JCBETEGFOUZVFWi8XcOUuk8iIviiLPcwDR1566Pp9fiEgC7o3nnxERRbosqKxKtwhEyjobOYYYCQkUHR4eKW0QiZmD9867rusePHjwyad3S4q5zpfLpu37AHj/4TkRTqfTe/cfvv/+B0eHR48ePbB9n2VFDNFaC4Axhr7vmLmqBlmW+eAiR0Q0xiBiYsPgPSG2TbtcrhCxqgbC7J0TFgrOeufrwQAR66oEhBjZexuZhYFZYoyIKAJFWSilOEZC9M4igPcuBN+2DSLev/9gPpsrpRABUaoq9961bdP3/XKxzPPyj3949+zsIoZIpLxzzBEAYwgxhLZtAMEH13Vt2zZ5ltdVjUjeexEAQGddCCHP8gS/uq4Rse/6oiyIEIlMnhUxCgAysyJSSsUYAbEqK+c8iihSdVVpbSLHoqwSLq21bducn532Xffuu+/2fTco1CCHrMxCZOs8IsYYZ/PZ7du3z87O33rrrYcPH1VVrbQCwCLPrbOkVJblHKNSJMKEChFFZH5xsVqumDmE0DYr5khEtus5Ru9c27TMrHVGAqiU0kqPx2NtNDNroxGJSGVZxizMzCyIlGWZgABhPRgNh6NVM2cO3tkYws9/9s+//fVvmOOtTx7eeXC6ajtAZAYiDYCIZPv+1q1bt/7yl5/97P+E4JeL+XK1CDEUeZHnBQL0XXd68pBFJtM9IvLeL5cLa3siUkqJCAgAQPABRFaLZdc2AGCt1b31hNS1XVKjbTthqaqqbftV08QYtdaRAyKUZWmtjTEm8ztni6KKkX/9m9/+6B/fmp2fg8jFslu0riyyLM+RFAEnczofwmKxHOQPP7n121/+v6Nr11mkLCZGZ7PZzBhT1fXs4gRERGQ+uzAmQ4TgQ54DAIhEk+VE5JzlREYCANi1DTnrQowhBlKotHIueB8Q0HvfrFZEZK1rlp0xeVWWwkwIIYSu64qiEsRbt+/89H//0/nZOXMsjCICRABUIfBwOCRUpDQqMkbneTabLftm+dtfva2UUUhIWBRFCJ6UQsLBaFKWNZFijsKSZTkgIGIIXphNlnVd65ztbb9YzPu+M8aMxhO9XK2m07FzERFsb5VSzAwIIYSiyEUYWPq+39vba5oGIZZFwQDW9QB456P3/sd//58nj06stZmmQV1wY6Og814pLV2vtGZhZDAKh1UxX7bv3bnvsMjyPHgcDkYiHIJbLuZZnh8cXNE6izFqYwChLKsEbmG2thMZhRCU0sYYEOHIWmtjNHnvrbVaae+8d94YzSx1XUURFszzDFC0puGgXq3aoigGg4HWZrlcPLh/70c//NEH73+4pinEpg8+AjMAkAh47xWhQsw0TQalMGR50fb+/sNHb//L223ThBCstYQUgi/yIssKazsiquuBUkoQWQRElFJaa2OMNoYU9V0bQojMs9ns/PyclCIELIrMxyAIzaq9mF1Y67RWbdMCiIiUVWWMXi6XITAIENF8Pv/pT//pnX/9nYiwCAB45wcZaQJEREQAUEoZowdVfuPK8Ob1cV2bEIJz7vz84q0fv/WTn/z0ww8+7Lo+cPTeAyAiLBezGEOel4vF/NOP79z95M5qtVRKlWVNRIpIERERIiBCjNE5p9umL/JiPB51vS2KzPZOhJumvXJw2Pe9rCkI+67zPjAgII3He/NVe/fuveWqYWYABCTAuDcqQedn81YpVebmcDq8sj84GJhpgXfuneeEg9KEqF3k+WL5i1/++tatO9/+2289//yzzrZ2slfXg8XsPDMFx8As52cnV65e77v24PDKZO+AEJRSIQQAAMAsywEACHRk6HsbfAg+5Flme8csIqAUBmZjtHVOESLhYFgJc5YXs9nik3v3l6smhpgMzswKUGsdQqe1ZuZRlX/95pWjvVGpODoXbN+7ZZXXj+a9a12MHEK4e+/+j//xx698+aVXXn15NLzI8/zK8dNlWTlv87x8/oUvXrlybbG4QKLpdMrM3lmttTZGKV0UpYjEGHSZZ6PxxJiMWYajQdN2XefKotZaE2EIkZn3p3s+RBbp+/7T+6cffPKTd975/cXFBSKSohgDoESW3gYCUEoRqVVnm85pBB
CIQFVdPXOEjxbuk7MWkWKMMcZKm+8898Xr5cR9/PChMkVZHV97BhDOTh8cX3+qrGqldNPMXd+PxuPRaNR1rTZGK+OcFUkVntIhhizXbdfFGK31ZVmuFk1VVYDIMYpwys29j52Njy5m8+WDZdPOLmbWWqNyYWBmBBSAi1UvqJ13mUEWtWz9w/N5XeTz1p7Puqax92atj6K18kEgwtXJ5NUbNyql+hCWy/74qWezLHPOGmMGwyEg9V1n+76o6siRhQWAmc9nJ8vFHJVKca+VUs7aGBgQAFFrs3dwOJmMZ7NZikUf+HzRPThbPjxbhBDbruu6LgorrWMMhaoRsZEli5wuemMMC1jntYJV210scTgaR4t//vRu2zsWHI8nk+nk9u07iBg4BmanFWj1xptvHl+7LiLL+axZzrI8d9YhYlmWhJgZo5SeTKaKKDPZZLJHSgECIemyrmOMiKSVJqTp4dHzL9w8uXerc/Fi2a/s4tEi3rp/14cYmbu26/veWosIMTKKdGE1zvdQaOVmLGydK/Iiy/Pe+aXlcYBF5+89nM0b50Ooqso6e//+g+l0cv/eg/3BmIwOSl177tnnX38tr0oipRW1zbIqq+FgBABFnvXWTqZTo40xmQgPR8PgIxExMwDo73z37//0u18s5wsk0/ROlv07v3/33qefLts+RCblVm3vfQgh2N72vfXec2REABEB8BIat5rmB8PB4Gx10tvOB6+NMcasOg+k758sTuZtlmWDehCZm6bJsqwsyi8899zrN14QpUZXDr/83b8r60obo7UOvqrqgYggYYwhL0vnHSIqpUU4lRCpOkrkoZG0C3A675vOd72z7gEzF0Xhve9760JIZVQIMfgQYwQRk2XBexFOLUHLTaby4/E1FHrg70aObdsprX1d1IOBznPM8snEPzqds/NVWebG1Ep/9cUXrhwcXn/p5ktvfOPw+nUffIxRKSUAWpvEDQlC3nmtdPK5AKc8AOt8g/p//fCt2WzmnA8hpHIaANq2tdZZ53yIiKCUJkWAkBpTZtbGAJgYAofAILP+DM7hoDx8ZnLjYffQhl4EeheWq+54MLj5/PWbr3zV0/Cff/7zX779S+eDZf7g9NHXvv+97/z9D9ZzEMoAIMbYNCsiZYwBgHQFCVPDCIgiwpEB1hoAgL51+yNFlAIiFUJE5Hxou45IIWGqY4VFkVJKee/TPQAgAEgEzFFkFZfKqmvDpw+nV1o1++Th3dGgrofjvYMr3/z2m69+/W+rwegH//APv/7Vr/71nXf29vZvvPjCzS99SWB9iIiIxBj7ro0xbhIWiMigHqRkj4gIJHK5BADw6vHTiCkHBQBAQhGwzllrkyWST5RSABBj8M6HGEUk5cX0bWJSpfTR4MrTh08fXB2dtKdffuWlb37zb75486Xj608NhsMEWWdt13daayK1XoiYZVmqulN+SK9LCEmVM8K6PElCCwgAJqV15KiU7vo+CcHCgOi9X/uLOb0jVWyIZLIMQ0jTtWQCAElfCcBZd+rP3VkYrdrVS6+8/ObffXc63QsxeO9Tj6uNrqhGROYYQrTWKqWIKM/zNHJL4JFNyS8iiICA8NiBAuvrGgAlSRqZ1xoCpmDABLxkBkkASyYn0iKcXkCkNjogCyyapu37Is9u3bp9cXExGo+zLFsXXlorSgRCSimtBRFTd6G1JiLZ4GOrAKyRCrLtygDTSbKfjjEG75OUAEJEMXIiqRRD6/4YgIiIKClsDGqt+94qRSJirdNap9sAAJCUUl//+t8cXbmSGqiECu89gAizyXJEJKKyLJ1zhASC2ztli/gEG4BNyCKACAABRuZ0kXATzkSEiD6Ez5ohPSvLDAAgglJkjEYkYzQAeh/WdIGISIjELCbPXvvK61VVMnOMMeFQKcUsyUApfkREKy0gkSPH5OBLNZ6QZPtZNtIiol4rhyiRmYWQdsCHSqnMmC5GZrbW0eYoinK5XCESQMTNQaTWCESsqnpvutd3fQghGTspyXwpaQghSZWSozYGERAIEVLbvRZYIE241uS5gVDiAL0NliicblNaxRBT4CpFLI/pn4LBe4cIROsRCG8dSkSkAPDw8HC6NwUApfV27Zp8BWKMCfExRu+9iBARCMCaRYBow/Qbi6xDABMFrVGDiFpALtt8QEQBESJK5okx9n2/Df7klsRRiGv+SZQHgABYlcVgMLyYzV999eXJZGKy7AnlE2UnCKW1iaAJ1fYVLMyRE3eKCAIlcdPXkBy0cYIGAZati9IAlRMfp3kqQNxSxDYkjNbOpbEZpByiFRHi3jB75towHo++92/ezPM8xphWpdyX5NiiPM18tgQv/NnYRUIUWS9Zf7V2RqJw1AKACMyy1kRka9ekFyAmhySEJIGcczvMAMO6uPn0/t64mNbZ8V69f/2FF198MT1h1/xJE2utiBRFsUGUgAAns2+kxA1dpnu2zoGtZoggAiA6fSIi4XV9t419ZmYQIpXgxMzOuZR3Nj4VRMiNvnF975UbB0eTam9cRFEvffO7g/HeLr63yWR7JeFzA4rkhJRjBQFhg5kUXbt0tPbSRivNIABAiKKQo2wdmm5KE94QQjLnJiGoVI4nWx0fjL7yhSvX94q9Sd30oXzqtcNnbm7BkzTf1gW4GTtvhQMAEdyadu2RnSM9ZEN0BPAYvxOwMLNwKrZFQChNRpOl15bZhNdG6PREESHE44PB03vF4bQCxHtdffMrb2Z5vvuaXSG01lvzXcJ9/e4Ue7zltK0O2/PH2UxERHvnlFJCO9ADTMSLRKR1mqcbrYmo3ZRG20dnRl3bq6bD3OTF+580L73x/YOjo21mXONwXX3QXxNo8xFTdkr/J4cnSCeyemJVOqH0AkUqMxkBkADIWh0iyowxxiTjVEVGOyUhACDAsMrHgxzz+tOLePDC115+9bXdamyXPbertlptxrSyce8WGLRNjrBJAtub48ZN6xgAgHSFiDfIJoD18FEplfQ53BtdORxZa1vrmVkpBSAsohDagEupD164+fJX35hMJlsNtxJvg35Xq91g29HhkmG3tt8uTNXKFnvpXKekGGNIVd+WgolIK5UK0v3p6KUbRwjxXpl11qc3cYwiMh7VV4+vvvzGDw6On62qaicuLwWVTaqWTY24C8L1bXBJO1vpP0s+j0eOAIAWYa0zY8y2PNqoTkqrLNPHR3vXjibtcnG6WK1amzh3rb1SL1zf3x+Pr15/tqgGW7g/4YRd8OyeP3asM8paB3wcq4/d+HgkaK11VVVXj6+++OKLw+Gw69oH9x92baO1qqri6tH+eDioy/zj23/58O4ZEmkNictFZDodPnNYD4YjbfLPVpFPSP8EgXxGmjSXws/F2BNy7zpQv/b6699641vfeOONa8fXmKO1drVYzGcX2ujhcDgYDIqyRMQf/rf/OvnLx2Hedn2LADEEUvTC9f2jcT4YTZBwC5JdNXZR9FlvPG7Uz6p0mRf+GvAAQP+n//xfrl65qo2JMZZFMd3bx2vXCREV6URehDGEwXAUQyAQrchoFbUqiuyLz+wTQjWYgOB2aLONy8/Ku
g2Gz0qDiH8FXE9iadd7iKiffe65tKEEm85da52qNwAQEI7cLGbSzQaFbjpbGKyrTAQO98fX9isA0nnFchlzu8XINgHvfvu5eBBZZ0xE2rXC51r90qsgOvggIllmtrljd+4VYxRgb7tSy7WDUZblzjaDqvCCz1yZlAaZhZROQj9BHdse94nY+HwwbOpkWP8TFAQEkCeD4XFlNh2ZSOoh1iVKEj3GGGMQEVRmOhm9+Mzx4Hy+uOCizLKifGp/AMwCilM7m1btdDZPksnGfLjTnayv4zqIQXZ8AoKQet7HQvlxOIHezRRbFbcWTX+JVFXXx4fjKpNzY3WWV4NRpcR7j1o4CrBIZBGWGEmp7eT1s8a+lOAybgEEHxN9W1TvrN0Fz1pgEBbRAEKkkAARlVJESDtKE5EAKpMPDp6endzdP5iOR1UEQsDV6QNhAVgyx65ZOWeds7bv6uFoOJ7iZpS0K9YTCN68hXCjnsiTDcAu/1xyGqQxIwCwJqVwMzJRijbVYho/p4eyKD3YO1ZZpQ0BkSHFrJgKgYikTx98Mls2zqUfSxEA5kWltJbHGWm3CN3iB4F2+XKr8OfG7lp0EUDagBIJMUmvtuSDG+FZmCWGEEJwPoQYozCjiFbk2gZQZ/UeAJ7eebddzrq2iTEOxtPBaCyb2cnWrtuTDR9tA0JgDZ9LptqBy2YYgZC6HRZmkE31DwCiU3eiFG37pi2ZMHOMHINPZY/JB9EtQRhJ53tPny0+RKHMFISoJIyOnqoGo6KqiUiECSgR4hOGvDTtmkXWJ0+IvvaYgKxnKCibB4hsFwuAaK0UKdol7C0LrSteZu9sOz/rVgv0yyzXeTHNx09fK/fLshyOhqPJvs7yvCi38EUik2XbWccToiMirusGWLfnn5fC5FLFtcSPR1H6mDyAlHJ5cq7wZc0tkYXZtovVyUcCRKiide1ycfjUaLR/lOeFMSbPCyLK8jxFpIjgJgB2U8FufY9IKQzWfAmPO2dNMoCbYF/rurF68kN6mN5otk6HMXKIIcaQbC8sHKMIkDIcAsYY+pUeCK5DRiuliSjtDjELKoVIsplrPIbnnWOD8LS7eclUlxl6LeVazMcAJmtAARIA6F2GSjsLa9szC3P6GCPHyNVo6ue9j2G9Z0MqTRxFxPuQts4JURm9rRe2J0+i6LG0tu1CJCWvtZE3C3frDoINEa2T32UmlhhjCD4hfyeIIzMH71bLGbIwiHOWtMnyPMty56wwiBallDJGrb2OabMe1tUEgAjDpgRaS5fkBMT0K1TcoEB2wbYN5eSSBPNUNySlRERva541bICTbiHGGMKmdYWsHDWPPgq2R4lueeK7lVIqhiAixmRFWQIg6UsySOlJEITXk44EYtkYP2267M5TNr7Z5IlNjD/hs0s4CWBqKWMMkeMOPUuMITFuYEYAMnlkAcC8GoX2vJs/vP/nX1SHN0xRl/WQiFICUUqtbbPhQALgzYgzkTpu6WW78YIbrO+STGJ+2KZUhsePLSzJB5+kh00RtsGPcGQW1pkxWT65dqOaHkVvlSlBpH34/umHvyKQ8XRvPN0zWU6KUgxsNmuQkNbj6vVvZBA37SiuqRTT71xS7iRAgk1PLsnAm1HoLjvJY3G/3inawi7lLFkXl2y0KYuiYykH01m1D4tzXU0wLIXl4Nrz1298oR6MTWaACBFpneovLbRbQm4T7e5W0vq2TV0B26YghchnpN8wwaUO/x8XjiuJ4A0S4QAAAABJRU5ErkJggg==", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Distilled image 0\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAo5UlEQVR4nAXBV5NlB4IQ6OO9ved6b9JnlveSSq0WNEzPDBMzmIAAAiIg4IWN/Rv8ATY2dl9YHpZgIgYGGqa9Wi2ppSqVymdmpb2Z15tz7/He833gP/t//5+62V8I1Y998HsODufDPb/a979S7ftkpXmL1V+jwUMYdZUqpHyjUhtt9vWV2q3VpXMWacQss1iE4UWnUVNaDxZfaCTxZpTeA2+qn1Ct89NZBJRIsl9XmZWDWdGwXMhPlfSKi7b9wE46YinG0OuD68aHx3CLp759x6Woc8M8/mv04G5zueVQYYPSEHCl4D8pmN/MG7QbDazTP+uEBiDYxD5MHbmDHHRPSMTh8N4S8g1l8W6wHcYWwIOvN6L8m7vCcTjldlUdpyLKmF2v3FYFiLQqI5rISNhzLTqEISjnlx6uYrbz+0TB1kknjx2elPtB+J9HHMtA7pU3vzlgCVw6TzviBW3P0qgJoeVycQKdhINTqVp8tz+Rr6yV+oGtyw1eNcTsY+Z9OhFdakpP5FwfLlD0f3t7JZi6QParLHlhZtMZbc30ydJDzj5A2PDbMYbjdf3CyUrvtJWSaKLVu/0JDiNd1QmraOBlulI799xWCZ3510rEiV2EaL2HQRsFEL2CzK9fmA51ev2sqYuUEaLFmKeo15+mr9kLtoWop0NqbAGDg6H9GH2ugZU0A4ovI5iXCwS94Z0DwqRhxqbxtr6D24u/Cjz4MWFtR9jzvP6P9FIhoNxQj4jC7SPtkDNjVVTOZawLsUgbhZTsB/4pdHjIscv64X/bwHJNNB8AUFxdM+vs2V0N//9HAmzZ9VY1yK5YBj8vNPaHt5NVCTiuMOn9E3pvjJj8ibLPfbzo5i4PeNl+D5xX8Pubcn5089tX+O9yfaWS/LTk5cFvceiPSi4vBhu2RCfvtgj9TtkTdlDREY63r2+foovtkvLlPfJGB1okGH9ZDrdV4oU3X2csEegvN7+YSyt7CgOfG73bjfADZVz8fqzJwed3L6CUwtx/nMO7fU97vUS3MIT1WReBuEmnWtnnNzjw17a4lQLOLiEW2a+oF/v7zxkH9zfR9NWiBZUtHF+DVvPYLYCb+k511nLgi3V6ijwTe8lH89tF+EYwx7urW4+Xp2cnrt50KLAobPkzVvF9UXPtWhhN9pd/VmKs+du/+KHrqmDorajEFX04JA2x7WP2gXrj6smFq8kk1FZj2gF2YEem7roE1Xtz1oRECMlmr7oFf5vLtXp0xMNlIvZzED5gBUHULoOGvfz5yihe2NqhdpP5Z7/UsuP8ofqHrCysI2+6alDl+m9n14ckGFQNH9BECrSXXnP7rrX/ClAWw2sQnLzL/OORz5jWZlwm5/p8ZnkrdFP/XTVBYoHQlZYyrHDQwZl73X1JHsyvsVrTBRDK3CRPeMsc9mb9BVfeemr0V0eGKqwDmsp3XI6zp71NDcpKovW1cO2elYaYtViFA5vLzgHXHaF+YT45zzJlfPkEpnT165t1VCfetK1KSnOhYfQxGIacTknV5J8Sf+9GZE8GKIhEltURQJA+f1b+HyxnPaMwFUOwx1GyEdsOfRb8cGbnHoPlxhJxa4XnQMBDW5AwCVohCmc2WBPvzU+6xFS4EGBlJYLsZmgKxCIv5lEwjzUr1d6WKYLuIuVW53juBrfo+1D1jAVZ2A9z7+lhnoATpfyfVHut5wW5/P0Kiqhkx2eVfFcm9qMZ7ACS470vPG/pD3OPzPLCQU4v54yjhvpFijqjQwJGR0EbvcEvGsi8x4DwIxYwtdzohyq2gJ0cJLFQG4Uv6Zi+/U7CWtnx6NurtKaKhiDPXThFptdbWFHZK4+TMKjfybTj4KoQO8ByC0GJReJ3Bi+vfTPS5Vyyd3tNur18EEFfEwnEhRWVhQw8LdJSU9lNpEJJJnLfNTtelzOTjYP58/k9UliI0WR45O3+Lbu3qE/n3u0yKec2CESrtNlR6xqhHpWspJOjfsYOYhVeQruLc5s942q2S0aLMuXnkxQ2pVT6FR/jGVw1pWm4VW/DfWQrJyRcjvGbIMMeihBizyRiLkzWvHDC6CiZVp0ZDVLupP8O3SHbmsNtHLt9kHATWWVykFDS4CxeYLAFA5ZpjhNcmLazYyKV9XZWkflE9b9MK/NzP82bNJEn6HQlZuaclIP0nOOUq0ExgkPWXEOEGNLa+PUoLQS0eauQLxu7n9fOqXUUpqKLsxJR4Cy+xorFB4Pi4fGy463ZR1Qq4CIF+nkEO/ZILxfFuF5Y8Q3XkWBvOon+Ue7zaYzRtYcx1Z86+cedEhBfUwGXlR039eZmnWpChXUsG/tbRgGNUSswqKge5QJS0KKkSdjLKzNO1Y6UiaKcGLMZo+WDMTPx+M4lQ7mFSoGRyA3FPv2AshUn7di1W1xd1qiJbX1zY4j6bxAxjC7z5dJPvMm8GfE1YVFSfZCdUJUiJpnaWRS7eXKpv7xCkxuB52hkSUfAliV7rW00pYHwG21R1JJBeOE4uU932dU3CgDktgzuqZJnCKFkzikNmtbec6l6XJgtUDbntXRh6DhxKO5blSCRcY7z8+l0VRPdmTWWQhLNmojFMegyj4NEMDQu3OyoNA2lBFjC2fni+MR6DdeH+q68/1dIuPwxxn0MNi0feJb38lg6AYAnizCeAXwxAR2LhQkJRgOlcnrAWbUZteHeDgo8sfjGwK7TC3AyFvqIMtcYBEE5OgycxhJ9QFfww/+KrcEcpuLLfscFfgV1Rz3TcTxPvyMVfDFRbJnpaJ7jdt3aLxxPO6GWKMZcDZZ7LFrM9TLf51VXlvUEZvvZB48lfGqFoy70BjV/zz7+iK2VCCXe7+AXYrTQTjXlSxFYI2E9Lp54EPBq/DcQMRVdCGYNLtDtjch1+u9frzHrbFH78Nv8M/00gqU9ibkpeXR5/5UEepnGQ6TkUGzGTtG1f3KJF596+IUuVj4EbXP766fQiZAQdSwhK69XI/hKyyeYaTJtIvzwIOFz6QapkXjOoNbt4quHo8qb2itrdqAVWxW2fEgDW0COUweLtzhu1Vk1px5+TcwLC3aLOsu4nTaTriFDYJyurQSTxcbU5narf57gEGTOuFSYJQI1m+YKG0/Qwuvs+K1L3E84KCHZiMATPq56dTuPVPw8pGq+z6Q3MN9O6o2iNChf09eg9f+9zdUoifqJC4FBvbxOfOocE0GTx2oQubH5uYLo4K/6dYgUOxXLWDKrXoQ+fMP9Xos+296n7vTdYXDZlCSqN6vkqlv1jWkNY1moTt4VG9UWdzR5bSALzcpBgW2oKQdRJG/NublUjoeZbi97IFAerDWv1Sq5uAJEXAv+N3vufYzTKYFSfY9E1nqayH7FdUocQ+cJmCMUuw7kfLkcd9CC17WJOzZoXH39m1/MEcZOZw29qD1B02WQLdh677vpGpWRzaoUwOnkxCDyfOLik5dU48ePe+/en5WTSVP8lPoUPZ6n5ugiFnLRK0S9Y3Ik6key5NprvXnQceRpl9hD79uOHHiAoZcYz7qUKCrm3G
p427qYlLhboPo7bU5LvRsiEmjOpX4lcVgchm23+Ac19D/jmQQSjgIXO3k92EZbXt10o0G6aJfSiW7nRcwu7N8IRWiFHbZ9w6uvXRik+fZoHnrTayrSUbMTxY6EUCmUy2IiqWGld973Pckc5mnbOFv2v7f1D7mNmRDOgM98L6kiT0s3KqvBsswdbA1vwBlWLrucAfMgNKtRXEDf69VewTr3y8d8ZoIpEVOKv2Y4sKDRMzV2HaZqcL4CBmv+GFsXMhv/ntH/RnW5QRptf1TJ7dnqSE2P7+LkYVjdD8AgVvJhUds6gj6t/+nKzotDQ7EGAqkSoFQor+p327MaY1AFG1gFgEfQl9u9o3FyvrsUGQKMzXyy/KZQWxkpzicsy7uc92BCTE/iH24gpK+ol4X/YcDhIDbofKPRiEsTplQnPJSshz3yyRdL8wRFEMeTg+QBQF+IE9JMteIC32DhOqHDI9L3s45VjPqMFPjoXpWKgvAy1PO7zQJnQdlf0IzH0IVyNQlYMiGhd2WPyNMcDfVwev1SZTMDbR0Io+Ft3BZ5bp8PovORV95yZFqo5cc5hpKQRqdAlH4MM4uczmc8mYnzaGO4jlxRbTshTtDA0mS1gOeBzWNenc6dnSJXHFQ1P6tZs+/5rgTnghxA0d5x+pXF7ZgYKmAdJbmQT1ATjgLoimAZhRkVIZ/woYv4NE77HlynbmB2f4uGOVXMit0QAAWqdx0ZOPhP/+X/zSQvVySOMfDHgHi97+39vv4tftFB8EGhKAJzmvQ15T4Y2RYyAmOyvgWk60qC5IBsnAaZZa86ZA0pANoUATECS2MccYc5rkpSY3TyyagY7iDD4Wmx1A7PIAbrG/huwiE1Mr0+XhJ3IAQixw5VT2UabCqQbpkq5XvNWjFfo15YWuXkllMzQgYpBsXLS7tcWRVpcEmO4+pDcJ2Hzek4DqHHO6msWptQKmqlL7CRPUwuP//gYS6ca3lkOlzSirJXTmcZpaBxBNsl60OoxWuKeEcuFyiKenVJTof2YKFQcQZM490r1AlyFwkkkDeDxjHiTEaLkMyP0+ScsAb8poKZ81i2l4OwCtER0RDaRIZEqWThIzRrc1BWR8sfnM3v36a2jGj3LAh18XA45Y5z96fjuDCz7Y73KFp/mz67yuMv45MX0Ij/tXev63DJVsMQT4R0tX/2P2+xcWl1+K66CDk018T8BZoEQChRkkkpYE1o0qiRsGOAQhmotpaF4o/5WoNJw7CQsq8KQ9t//DTh0wkQG9UyR9Ol4r0cI59sA2pdiBiUZuD8kKChIaOEhXerD5COXzcoOm1C/BccdMvv1ijFHDB0rSTwYwqNMNhFDuwKqud3cWP4zHes55P+7WCPD/nWk60/gfL+n1MXhp+U7QxANxgtl8AhA69TmSbH2bqJE9fECjAgnBFVbHzPB+km9+I1Cq/tplDMxXkYaq73rg1so0dvlKhHXI+HeswvLujhCDMJ6zBV3EZhOQZQ67GJJaCNLzuqPQbjbGfvIBVRqpKJNXHxWFs7Nqsb9+KnAYmwZO3sRuq915RMBI3EwRnsEOHJCP7WD6VW7oT0H5/OE8YF4/Kk+lvodPg7Eh4uff+K0QnU82RZ7PXZVgozPBYXXNgEVAkD+NRZsWhrfjtsXVbzRSURcnbBmvjrCZTS3w/4W6+5JvNSPyvO336aNiTEgXiayWUt44/WNrwnsyGgsERNI+ibSL7uowXym7lGuoiLMBgEidC66gukRNQA1UjhLxlkhk1DxiOu7OvaqW3ngt5lrALTjxlWTDowVBN/Uq6iBqhS7LACHa8qZJluY734AjMDMWvOIVgicV7SU4jJzW0aIR1KmCc2E+tp+OXsF/L/tbtZc4LoWp/Fd8kCj2zkesyXm0fE+56mHdEP1lxU7j7WTQwBKejOKWusltbEB4zu4U67whdMbF6aoW7DrDHBhHFDsULYg25qGHMsVGm16A92PPcn2B5/8yqJPQP1w8nPKkxP2fHzFygP9LA4bU3SPqAgWmaGCAgVqgiUbM7PXjRghAKszpBmE+R9C/S2aniolmtp3iNWQIDQaVPc3nn4o6fkv3Zgd3T97umw4p84NR04J+Hn+4w0QOqlClIMfBDD3p61IM8eNzPkRdODLdoBO/WvP/vmZKVcjex0D8fabZpyAeadA79exead1G/DZEAmij0JAm/mxQQleqW7t55BwY1GrW5/B6eDAKSlSCaOP70VMPAjWrz2YDTXPoDWCyleJizd8DYo30XduhDE39enQTS7gj2SmALTqY0Uqvk28zZbvDL4Qe1XZ4fjarih8NajdT6FNNS/kI6eq31lFhY8JAfSsSclXn4+UzzxqhuQEC19BINyJ+gKcxclUO5NJgoAk9YBv5yLNmTw3i/jEraLiJLFYEJKhzilqfDScYPLuxucxEY0W5I/MLoP6d+2FoPXR5x1ijH9ra6WrKwIEqnzSJLxvGO/BkkwEAcWf6sODTUEd6k8wVBr7i4qWQCxyHYg8Lb8bnaZp4GO1uFoURpjs0O7VX+DrdcQWKto+Dkhwy4EmivMnEs1aQbG2xbnAfLPmiBAp2a/tLCzEBNLo4vLJHqV44xzyKbBvkgnwdAgdCE/CR2qtE12P4zBo9zzImOTz+SIOkdqd71tJo13fbokXUfm/aOBsFRu1lQ6gnregUuSKrwWaxRc4IBb4vAw9LAuASI2AQxw19WAKCDUFTmLrt+7Gb2mLMKWUC0GyBNsvrM9WaDibu1+dVvoDcpsbag5c7o3J+JIFBpaJeDglN393ES2BlbEOLuRa+PsEvEQ2dg0SKsq7uWkJjq7pijkSBo6xUXelg1Q5YvU/fyPLuXVOL/Du3fp2WyMSnnTN3BIf3hZudit8LnS1ZixTPCP/o9/UYHDoNjcx1jewuamO/ddpoaBcbxBNwbqKkQJL1VrJLwMYG+OpdCKDqJijUKYbRIMwWiTiOf8bXV6kiYYBSRjtdaTFokUl4EcYCBZkqw6eifowl4MmpQsrTd9yQ1GyyBCxLJZDbEXp1Dthr0exXEFlzwFwgxP56Fr8su9rT+tfOucBAuk5OFoN2vO2/O7Vs/DkIQyFitrVyxEaAbdtZ04pYTJNPGVc2s4Sc7ylJnOSrM2ZPkLmMN4dSWo8eWKzGyhGRlUkkvI/AogwAGkYTgCOKt2NhwVQwGN8pLwwcmu7SDi54g6WUwQNMNg3MorRvitffoacgi4onJr2YZkDP751FH7AAas0sGXS/HOkA8LxQ58rLIAHsN+eOODPjZ6s5KwsbPehh9apZM7mqgJw8LEW9ua39ZPFY9dXEJu/elesk/CXRFtkDEzi/GFJXiojr62BrIygR5pd6qtHFYxMBG0YgIDJsl6IbAPmI1NKIWJZ/b3yHVMBZmPITpHcn/6ZzdzMCG6DSIiau0kGbIpEcHHupK1vFJiJbJxrauSOUdldY873WrJYG1/xD7iLr9q9qjp3/TH94gahlq//IeelX/rBpCT7iPz7WDHOBhKfxMn9rU4vBME+Sb/tlvPGo4Igf/4X/3zYqmG86R/ElhEP7alPbGSwuCaTxVQa
YE3dVpnvHN4GwffdSrtUryyzyA5D+fzVVl+hYXiNI88bNGHE0EyLqlLP+j6YmlbAAs/hOSPwvMTuFslXtmFj3oK+Idt+eb3T6341zE345D6sRk8TKsePH0jQg8D6njhQjLQ7pb13jk06RE+l95V/Z8Dq3rYqxqrcchG2Nnt3M3LECVQygcuJKBzlIQQDOxVIBzwAiYvc1Bzc6NtkLLOMlwKtNQ/yRfU4ss5dfjJOqsR+PpVesYZyxZTSde52TkaSFTHZi6k2Fi3PC/M5XJdAYhuhNkMGwwEfz2fLZjG5QGRx/7wZsl6nWPfMf97OTBu5MTZ/Dra1fDNHxxbLqxbDgzeA9kKltKPxvzo8eb9UOpFlf4s38nvbSP+UBAl+CdGc++crRa2ckN1RAXdtIXtpw0o1u2JOo/Xc6TG87TkmpmfbNAEkBX8gyduVHhIPJSuycXfen5W68L8c+QbbXihJUjHsK0PoGwOwbygFMAqUU8/pX1iHUDcnDCYH3JoZTYDYYb7/vHlbAvr5PxMhSCYKHJjvaEEOZ4Oqv3m6g/1I2iTbmUanx+2RuwWHn63QKRnFyP08PAPEyMYQa9TbJIudbMKfY2l78VWUH33uiSK5akyea0ryHuoIq58mp4562hVmLEh6uSFahnBECDNfX96tvJ/0M3uzs3OD9Pbi5FEUtPmQa/HrTztVCluMWUGlKousVZx8o3ycpjJOZqhxHHa2UJ5YNPlJTHe/O8vJlfUYo2M2ZLMYrGRbK4CrSyIdwAcUT5rduJ+pZ1U5gYDcnrM6bCkxLr9bj2ht2sGPbZ3xr/Z1GvRxuXW9Xb8Eao3fnnOZah5KfGlVkRQRSht/zGnBIQgMTxKMMXdjaF8Mpr5Oi4HQuWC/Tkyt/5wtrxmtmQ5WKGdrj/Br00AGuCioLrkqrAMm3VLFb8jM0r3Z5BCrGEcS0CModddA5idqw/+qDGJ6RLnjpeImsUiBhQBTJs5MxPVmmdL4HaVsAeCRmST0OHmRJyeXcBkK972u9TjSoMMth6FG/kowpOWxpx4s2+EmyViVk+y5fJrRqycQ3Pl9zqmYkgZYneBKfVyHBmFCUZJ9Srw4i8Z7RMcyVVuRoWRawHIMfCaQFBgi4wpUaDOWAT+xK0i698A+pt2Aa+UjG2cSACAqYfVIz1jlpAG1ergVNzmNkA/yV+JxhWdZhq2KMQ3YgFfTV/CWHO6dfm/NuY5Bcx1BIC1IahLYFEgp5vXP9NiGB2/O8VUuLCsvMGQfdKKCQ6kTQjoG9uVvw0l5j74b//dvw9zYIG/gg55m6tk6UooiARaV4NvuUyEarQ085IaNQvn/ELQM9xAKG5xVSqxSGU/l8nqrO9xT2vBeDIgRqXVjU4hs+Yg2VbscLeHTRhBPVynDIXiC38t5owmsTuTZ+FHbG4kxsjAu8aScinNvBgUQ2FQlJGsi1jHcgw1e3VRJhbBLIQh2mU8diHLWMhRd1eBYr8OanutJzecF0bzAYRurEQuIew9g2GRyZUO5qfCWJ1/zViU7vvy5c4lZzfc+lrmQUCYHK0C77pEtQEvuiK8qyPl181tnrfjcmS0XRzlZW0xxFkCSBUR+E3foNQP6WP8LoEXk3av3IgKxHyoLVu7qnuFPl9H4Ax8V0B8ZjER3FnVNgEvvjjZXFfojj8xLl4MZkjKM0KEcUDqQHyBr27ONb5wXPr8gBMHk/VkD/FPkcBhkFHfwEspDQUFEa29/Uj9/L8Uqtv2f8yud6X6sRMUvs/ezpZJSFJUvloiSYRC5mqTGw1yTLZ57i4/Ksbj1wi3VQi5MIDrCTR0uBaBpzuMa77Snu/ROy/7r07IKRrdDWqNDRb+ZSrcPWptVcNLYHKuX2JwCShH6SuboKTt+K80gNK3m8mc3Ip8IGN4HgnlGEZI0IrGUEQbH91glb9UnMyEfyQdiZB73nE7xchftAhUKJHsB/aKG7dLx2xpZ3+Tkupwho/Hp+hnZaxGKyJmgiN9nXhoV0wF6EurEgrd5CWVXjxEo6YM+C0KEBy8Qs+GDR1W3gHuCj7+8psjkHZ2joQa+lZwDVRVWC/t+1f/1TI0BRTsJu9r+MjXa0UG0byNzYHetFR/PEEAFEuCJeJaAFDBcCpw4+x2rpohJnHx91PsH+x2YvQZAAXUK/w9AHFVE0YCwAxqsXXu92YWf6kfU+A72Uw89l6x3k841xUyX/Nbk7OYv56t4iv6QauBYmG7KZu3odpKyKrDSJRel8X5ZNRakRGUG7G3Dwq27tPT9aqWw3KYa5kZfNTNnNZDyr9/GKHVV/UhEpdkq3P3kpV2tnzo6Alf3XD4re6K58oaUF6YCM7L0BIrdurMWNHnM2J206DTMLhQLn6KQXKzFPJI4IDK2PPUJWnyjF+I3S0zFmjmsrxk+YCdMFoFspm8fy0w42uuFiMIP3HZlZccU/P/eBUB6cib7H7YW+deBGN48iJAN3F+rmtgSyO0H/24hFxSAstBMUoReRLeePIu8FUS6E2LLwEFw8QGBG0YAbqXGb5dAT/30JMpjmsjcWlmT+gsK/B2BKO1eHPgs7OGA4lmeGEPS7NVC3zc5KDPUDYq2NHeiOEvAS5BLEB49CFonJo8wQ+33Mc+FQwyWI+8vHqJwRfgxnbaFJ8l6gYRFK4tB6AOfHIG0UH2rltiou1ZMY9tVO5zWUQvcPsMluHrGRMXujt4jpDS3d4khGfvG0Zy8APqamanI+xlpI9aKp2uEl65WgJ+oETCeqape3D9Lhodr4OFOS/BsHh+AQXjj9+yIJxGzJoh33ewXv/dBAoFj8VTblDBnU8ydMtKrLXSBNdZaM5TyYfeZjOqk4e2A3Zsh0GrlQrKCphs0ssL3J4QVkyGOUgOd6oC0Zk9t7aQjkNtJ06Mz2OrR2ddSkjSYIirpk0xGOC4aTX0QUEA2LPKWaEQ9LRsep1+aBTd/UsmjhsfTRdLGVXUSqVaL4Lum9EVm1vs+MW1gFyKM9rC1oz1KQnaF2/pxQFH+wT7FoqmnQJbZxaRX6OBdMWwsDnAICxsfdrhchaFBntZV/R0P2G48PxiBOTp7iFRhm5v/bDCU2QR0CuCoWbzjKB5Rz6floHMC5faG2D5GZgWGM8FXbjW2s1DUeD6iI8lMABxGalRjWBlbnyhILSU9eQBCbUU+IWq9ZpFBM1JRqzC8+4oTGMGvDFl1uE0K3PbGFwcVEdz+z0W/EUCei/GE3/0CTRcH6nDbLwtqmchCvoxEzdFKGGTxZEdg1X2k1t9PjyruIHFg83tThs7lDN4p+/YQgcFc24O8WnE42+CiGySUK8uPNMvXN73yOjT8+ZIHDdTj+q0/HTpT3VnlgQaRDfyEIqjkFfe/XD4J02dDVyT3ZbcuCA3mPf1fuoBAiCYpuYaTQTkzXAdp7xbDNbRlVXZpeCbpHTLuqHC7BZjRtLHAVTJHj36F9xBHDc6m6xLewDp5RLQEEp5ZjJaXV/oFrcqKGQORlMw0tH6yZ+s2CPIrz0rgO46tK9Yz86s71jnla6zR33JqUhDE9YV6aU1ePxdAJS5KHs9Ol8iLpbbELGiO/XM
cK4gqyW2fvAJQeezan1a1HkNsvdx4+G5c0xbQ2xVQ5jpd7qaVtwEdQKU3QAtCw+Xv4+n4HcrPdQoIv1AdYUkfQm1Nmdf/Hb9A/oxlh5RGIGkOGJT68CBE7tp4qHZLh/ls2KY8xECbbS15cGLrZODfWjVZksoV2/nZKuiBCWz8efeIVr89Fgvso1MnG15u8DglWi+e4XS3/J86x78CI+za87ugnBAkkxAoPSIhM7gmJER8atQQeOJGlhHxLKTP4WtBKvrYhkFiMnfoxuNPDR7BTE5A+cyQspaEV1kEinsgGCy+hBDTBA9wUk4/k7N1lcECYopR6ZNoWklsbWZ1rOJ49rBLBpjmRLD4/DmvIbdf7MWiOkFXITCWS7fhKhgNfjFITjkwC9W+esXVsdtQNqMQERkv9C8ojbCWAXXS9/VC89THZusAAxBceHs0QfVfVVOkwl24MT2jCFq3Yx+Yq7a21g3NPf8MK4TwtcpeJoUdySxBjKQJ4KBJ+JAmYmGBaJKxTthD7qM0ecKx8+Ygv1IvFx2fdDCYzWAcNsp6D2FZ3A1WS6ejdlosc76QsJ+q1/lZtJlRcpbmeXWl/HLd7Ef1AYU83LiNG4U24UTkUYx1xc91g/T9gpMCNZoEzLG85/FixVDGHZxWz8VZUl+dE8rAetJGMfoVgs2Vu3cb7tRK2ZLPhXlGkC/f2Klf91A2bkYjR0dug/3CwJdqMyGCIRTMdCYVfYgYfuCfrNYQcPX5OsUra6SEggwteh4lnbN0jGhHV2Uka0ffdxDKb/+AVR/iLH5YzY04YU3HPj37r4rz6ObI7GIPu3jf+fcnL8c64BeLC8e9FM6WmN0uZIHCkCyDs2tBJ+dA1WpxtB3Ly4v+OpXO9zczg+hzY7Bek58moj4mvnX+qblQJNaAsWpCiEmRH9CoYq1WBAIKMhyCayBzLODygj/whjNL3cbX0K577I7hTXAM21lCm1pQaKpkyDWqinygz+uz9OTeieW5EnROW5rabOdgE1U0yJaiJJ1Jsrn8pHYWYYBnaX1RfbT7uMp5iq8Qz4dP3JjK0quJs91fZFza/5tO/asqksJhnRpL6G/YAr3wGI79bg0RUq+11SqlpNIR2tR8uJASb1VihTx3l7hjVqdtTdWty/xIHhJU6tfOc/Cr7iNMeRjZG6whKAVcfurTqFqbayTtAJ01oEv7aNLlsbNe/bwdxvoYxm5+uIVfXLCJ0CdCSAxxCFPwGA8EmmjVG9XKYvkssnKKvzxtYoBm10/3YV4lqMr53Ej4UyMYhiV9XNv5pK6IaWn4g+z6CwvH60T+zWlXE1MfG2EDLMkZR6CkJ3J6ATIhfHctNHIvRkU2ebNKTy6Hv2nGCTNj6m28NjYaVfR6tYNKnPSU2kT+v3UFLncTi3FHxjv9GfSzbIIMQvvzNeu4c2nhItCz5uzveLm32doz6yn3jI5C+AAuinCld4aKlSsAMDyVonbUJYBDpz1vIr8GsJfmvOXtfw7kqHacOo7RHKa/hPKApyIdW6KD9TivWg73KumzVuPE5UFRA5gWc79oL1etqU1D0IOB+JoifDUIy//15Y/QVpbt8QaIbuzBAWfE+XCGbgwghq1NbGhe393qC2mi8UZ7vyft2wWOgnms4yuAFsNDs+tnI3cowO3ByILlMd3nKnNgGE5rEHIb4PSlVnx+lojiHGm4l/oBUhoMPlrY68HHl1jCXRvo3RzkuFzs5S4K6ZV/hmAXQxj9FuqN62DWePYOlsaX/rjO7g+vQDQXaakkIKFzCBlYjvFklS2D4sE9/C9cwfG71EVffbejRAkVEDsZpxajHbXDRczUqhC70/LiKralPgAdNgEMcBVRdLrLj/zQPQyCEfZz4cQAqj075TwumhmMU1Z8Zq0NtlFwQeuPkxdtqVxVCLiICWHYO3G7Iyki+sEMJ3lIkFdXAzX+1sVGVOoZ4VcKq2eIEtMU5w1G0l4HNtXgC6RnYwL9P4NWd84c/TbbFqW30FLaXupTi56GgoMl0Prbr7FR6tgkptihLoxEwQCQpG6HTlQJUpCHLkv7V1u/+Kcomv3mqmJhQBnllA0JcWb9g61KqB7QE27sNSHJMLjct62cFmOQrW/9qgEcoq6nA+u1l71Mj3SO8hFM++PbFGpYZGQ6GtofWRqIYByT1kbcEdXx9FbjzFzRYKi0Bxbf99TNvw4UlLOrvp6VkSCmrxOOg6AxCH1wGNz6OKW13qEFqorNB/ta64MUNfbRjCl5sB5pYdBhjiVkOxDeZ78UkxylPr1ok9YehLMLaQhHWQaK6Y68Y0/Abk9/qcr6NXV8ydfmobLILSQ+2jraSmjWBVB1EIpcoly2FEg9FN/0Yx1UDglEsZHqJrAU90Dtrc892kyqy97148R2Zg78clVw+wh+LBYwObrQHOZO7XtjctoqdWL2yOCdvEc3vdDf9ZdUoW/Go1nGBsiCFXOYFj9eg2tvZ3sfl97A/2583ehrLSlufVSjSYKELuuTwOwiH3Cw88zdzV8s8DaTt0TnhUC6vIbf5906B893geDjHWFYWD5rfBc1ZjzPkGDfM5cFVbJH77fsHrFl8wsgSChyEX5ovMqItW9z5CAgMpPImSAsHWTLHDbM36VBqk8y7o0EbSYNas2HqSar53jRLREssyU8BxZnrgnl6u0PsxWCQuodnvR9S6a74rVgB3QoxYUAyzMOnMouZok2cncjMDj6H36MpnINiddMQXKoWCbO40PrGA4YGhQfnwOv50E+dUMMhgr79JqKGlO50FuXnu+GvVPOam+/1U4aIJv2wkdIsr49HzE3D//tmi8DNtx7XJ3zmB5+JIx19xxk+5D+Tq2CFRULlu/6V8uFj5d3ZzuETkScu3FYihcQZpTpcl/2vjOpa+J1SRio8Kb7E9HB8GvebnuJir4H/7zX0LD+QicnSjQvgq9hcTHdcKAPrgzAIdphW8U+DgyLSgB0jlG4YvI82cwFmptcV+tgglKiEqol7mikIQpVXVkNTHgcIdl+1OUKS6yAZhvYO5jgZPziLUcgeFGFprHMC3ghG8v8QQ/QCsqEcIpYlJqQ4efE2sRhj/OF5Vp4mB0gAGkn7hd+PLKhmk+nr0n7JJx83wrerpwRjGB3IUCCAnL3OH89m31YPzH+yWndp3ycRUq7t8SBZUl8CRVwEAFFGt5B3nfpqLmzb+9wiNyy7mokhAZJT7l5L8eUTE1FjU85rtU/Wb/BBoXb14uwYnno1fn5sEXL661s3CmNrGrSaw27mKmNDKKDVtK4ak9Uq6LiKgR55abm211sgP+6n8m6LTKDniYcGdexJ4jwaPZ20/ffFX4CH1kzkDjkwh5Tb0l2RdD+X8D4WBmyqLGFy4AAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Distilled image 1\n" - ] - 
}, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAsUElEQVR4nAXBB3gjh2Eg6qmYGZRB740AQRBg78vlcpdbteqSJcuSbEtyU+LzXe4u3zlxcvmSvHu+8/cuySWXZ+de3GJbsi1ZxfI2Sbur3dVWLpedIAkQhei9AwNgBpjy/h/8xs/+2S069Jknvl5kf2Iw4YcrR5rDPupir7PIGwamFdGPZdA3MTQcGFF0PsxL5gfJD9eyR+wa5rZU5KW1ynS80V09MTySH/rixo8P+1z34/zL3Nn954WB6/d8IO+B8EdjWWWyIK4Lew6DyV8C1sn2sS5XZEesziYMby9uDD14QZgzKH/90A5A+acrD/+yPrXg3p+oSntjZA3HykXpV/qrH8VGVYXaem3tO8M9GtLFgHlOf6N5MAydccuUW/7TW3A+G0tduTVboxqMTLhszDmvHjfdafu051J5Tkkbar5AnHJZqPahR2s4FPvMi1RK3QZRnmT1Txx2ut4f5fMiHzNuZT69bHtEdf7Eb5KZW/X7jdD5bZNKOrhKH8dvGHN7aGsYBxwuzQZxo7K1bPXabp5bi++XA4VPtGNhj6lyqOu9OvSJEFMJqn3dXty6A5mV6h/d3bVUKa0+4NFpfNXuWlxM5zKVNCpZ/RyS+a/tYlrzWGm9A4/camSroqy+OPbkV5SMfChaowdAmoGzAedGuzHggPYrD5ICYZ0EIO+nEMchIjDrwmOhK7mc8YbvneG6xxinDYOI0ay/8p+ZK8abljG0tLys3UuhD6dXsy8p3k0CegpgbRdagiNslSmG2yu8yT9RbxYyl7xHOvH096ss+CKfOApiV4yl/5KzmylZhSpWSPXxG5mrpkwrqt3eDEmnBR00jLEF9hPRi+Cff/m7ctBRzo7IXqqWH3wkcxjM8uPV7p6JDV7M9M277f2DULJUbGdlCQ31os+xnqfNR5oawvg7sq1oVxy7FVxh2Dthat5cVh/WZrSPZx/HANmj2Z/u/Io/DR0d4Y3qyfj9mzHjm0dNe5//b86y1Oo9Ulos0hDNTfRbdyRXlh5Of19854/mjl4bTOqzchwkTFfaBY3SrGnxOcHeXw4t2y96Y/07hjHnpNiR0Gwm2t7kNsvTsce+boNaRlHzb1SiY1ud6tUYOisVyeryKsQRoaF+x4R4Rg7+pOSZ5IH8UULVZ3hPcX3x5DUyKSnPQtytQy/oKsBYlm+47hTN0ERloT84VQWXo+AD9uf6kfabgbMe7nj7Hj+aWHghsvLoEZWapnHMppprBFSZZMscqpX7mrW1o7HvGXXV2LU/vzJST0N0PUvQTUtL1JMWDRMAUTuVOh365t1a6QDHxjMNssGfAqoxdKkkUU9/dtMDmRCIS188YgZntI7xWU1bQ7qUAm3CkW0labHm1unJVvhfkwXvo1Z1Obsk/7Mfl6D7jkuxt9h+earXCKYH5HbH2/GNWyROu0u8kNYQQnGrNXbiJHPmXTwX2PIj8v3PAeqmj9LkitOdPjKSjwby5RgyH//pKI3QOqyQGU/HBszY2YfNgPcieSK8KXF7yqBYQh3VPVQ0SlvTkduBPsfEa8WV8I1ySh+mZbhlomxTJdaWzqYhkdvSfsvho9YGD8WtVI7aK9q4271ucxuhbMGDh4KQ2bnxikieK79/qg8rSe+O0q6WWEensqtyBEFr055MrfAd6DsnhFokTErEvWbNo8IB6dY12490usKHKjDJE+RLPXCmSzXJ++0P7qYNT/NDEwcSaqTvMt9TI0dxQ5Sd46QKoQ17bU/t3JxBg/oNnZDKSSHlEpAbIA6tNq3AmpABt2tmumAA04Gu/PABYHm8f+VaDXJfVLI6rtfVfU4EdUSP2zN9O04lUnpt0nYrg7FydrKiTLjGUuT5Vgyl0b5G/qr90kjlRdszhb5wE3+0npfUknx9A1ECO3dJEbxBj4lPO+IeJDpnkpEvK/FqybzzyQieE7VcIq+JmJRjW6qW+slPvbJpcMf/UQh3Z/RVTdJXEnVFu+tnZX3J0/3bbI+ZfMaYu9V64ERKYHwOFGEBjh7dubLJJelKop87cT4s64y7IBY88fLPxid2ZjO6IlztjE0K4b2qoouQOjazTTrdyhIsIKfCD30Lx5EgxgcilN7jwDObJkDf8Thb16rSiRtt7FU0WQ5hdU+5By0SwifO+sB9yx0WH4htT3ptHYjaZsRnDdkwQuaLNQt5T5b7Ct+f7WDYcnrQi6y4Tn2hll2VFxFpFQiKBZCgSg261TIuPOb9Vf3v/6Z3LHY5AiuWbrEBmdVgNSOxUhXpYfOMvmhJszDkmM5w7dqOiCnAUK1cjgFyTWQWfiABEzkPbM/phWjnR11XaL3N6Vuk2obJwKSaKwalCVp4pFOltw+Ge0RH3Yzxak1HlQze8nOmoqT6/LDVnDn65tA2WeBZ1lQD1RrSLq0oXRqz5fVd75278ZF2VvOCqm2UaRV0UYUTF2pkQ9vqEClbnBxrl8yS8s5y/h8t/2knIpKOPkdpNjfz1men3T1gV17Vst56A2Z2yzOkBzKGO9HUySNRG9YFK+00yQzyuqbcUGhSo5pKZK/GYaljNs5qOGSqoYSq3McGycNG38yqTtHsG+lTOmXzjfajNcIwQsGz5fHH9QP5hiXTKPzmTJrsXJPYKH7HYbf+O/ogOtLVezSRgSzDGYJaz7DSVM7ep9mqSeZPX92CW093mlScHEqQ4uOteGP2LEprRa1/Te7q0sI+/6heM33thDL18xDWtJwqmN8I9xkxw0hs2VyBooM3tVhldSgWE6tMrdGSPlBu9DqapepYE87KtBbaiB6k3br6ViNk6JEY68HKNi0R7ydBkvXltyjR3YEo0AeJc7jkYfDWvcot8bg/cTR+6gcAE3oVJb8MDpfaovf7KBvR3YOxV5IMn+B0Jpqtl+UQYejBzUP3xildZiZgONl4mRmwGRL/VhKvdpfR8LZmRxz2l/SQCDEpi436pB99jrRLL/wZmkA94oI1vHWiw/0DNL1/jOpUG1ThKVN/y8rGyjF8pNhsVBYbw/9cZzJ3JDGY0O7tHj5OcmbpUJdmNLl6LJzjQb2v94DVkR1VRibuSj7HO28pX31F4e7DU7WTo8IdbeVhba1YfFcPRtHWYNdyqywgH+7/EJKFtBQk0mWU7UL+SLfV2rh9NcQX14Peqz+0/lvhGgzaz/UZX7ZUyb5zH5lgCkmoENRUk+p5lQ+Ltu6uk4PfokQ3o3rdH7re3PGfvQ4u/fdvOXs2CShv1StEDWoackq5ewJRXF4s6j9v6XolGBjMg3s2Y9AW+Yu9yd9JV5/fMeXmucEg4h9qoWhy90bGPjkMYHQuWc2qDOMHlrkZcaiQR+hk1okZCD2cB9GcCJCAqASfA9C3oM2C0Xu83EgW8YpdMZjZ2QiJ72ccs3+CDK1XTOMWpAagcrDSziXKJEAniCpmtbq8lehVzPscl7wEbrHZMfRhrvdfBmattiLkYo+ZG2xIdl+shNsKyQioPLr4Up6vAf9nYxBQWKdH
i4WUPD5chV68JvlxjHl1dFR2br3h45eHzCZiJjNo9s47j/kGpRIFYCaWlAbzoP5W4uMyGs3WDUinnE0xRlSl1BUCWr/JzGwBlVJkRgQO+CLJmnvYQSFJrqPwAj88lT0DaxKoUpqm6gR3WOiyOXaQaU0oNXqXVmqQZWqzoLIT8wizqK0xUpEfT/Wyt/7nD/56E/yLr/8wNxs0phZhIZigU/PjM1eFRanv+hAZRhSWVI6WaFC6TEZ3ZZ5vAkduPNh2IQeU5mnlK0g0KC2+sy8asHKhtHGJkh8Kh9bG00j+8NEYMFWOHZqsZ6C+SiMq7yCZBsbmasKzenIfvq1jvwJn1iDgGb3xHzb3pDr7abWYKDXvSwpmVmKhOSc2/y+Jpvb1rroLmW/VqrIbHwfPSVzNiWp2Ig4nJvr8Bzm7DeYzDp31sgu6rb07305mvUkahlRary9CVwNrZDOJ5EZaVNMAYXzPAHXkwDBuv1r+w7g1s6mXVrKP0rvXa8X7jqmQjvLxL9VLLQfy5f6j1tBawq1+fPrWEs6QLmNRlWJIng8MKuRN5OlB+xUwZvjxK0Y6i8AGQZZrlYwazJPXR/K9els9kDHUY6J2wnwVS43QNfJDbfb/pJuGAEA98a1+45P51EZcuP6sgrjOzp2mEZ6rOHvO5Lkr4Af/un4z9lMTp/B3M2NWW7U6XVHvug2WvZwKI7JYYoslh8nWvmwKal5ruzWL/loy22Jl9S3T4mK4RerogqFtZXMna4srWcnyRMUQ5zBVJ5BqKLXDyin8qAxLXD6QD3iqFG5pJbvGwj8VgEELdjTea2iZM5zy/8Y6p1NKeihn0w7uxnOgP6C3edvqjiXcTcCwaGR4oJLqNKhMHIM8khZe0Ezb5OvZJqKnq42jDfgidG0yIzFYFWJkFCETN7MqKAPMnzRs+5+Q5XRK7VlNlVrda3qWymGVdrhv22LTmCTu4X5y6NuEMWTIm3iFhDEeMHNrIbZiTA9mSyJMxqfT6kxbq+amrmsz+5HKEYvKsOHK1gFX0fee/YQJ15TMHaWZugH/qKQ8kcRAk2wu3Phk9w+iAtCtQ5+JJXhKtW4BKkhDWOk9YIV7NdZNnkZTd2cJ2JA08I5JCkKtxPRKuyAFX/r3H/S3rsV1cgyDnhF0WwvN0+85fy1sTaCQzzmg6gbNxl449STaKjbl4R5FjB7tMSFbB3GgiL9H0dVywUP0i6zddABlEUQJcBjW3DMbXQS+Du08v2uDZsGd/WWrbayzTlrE9+L4EqOHJ5ToxudR2TmGZqGNkmqoG1Ayo1HRYTWfV3W6k/16y6jyAtUdvnOmNJQVJBITq79/s2QfDA8o0KBqjR38qrho41KBDaoDvTnX82eLk1xP03S+TeyUdsq3X79ek1b4/rG6jgkklMHE8SHuANGVCJYWV9zNm90SVNaobsgPD2ARXhhURdn1ii9SkHcQKNQ6sSNv0n2bDKSUnKwPrPClLV+qKrPus/COMrdmOJkiSykmXQqs9wYFGSMbMcyqAInA2/PkKspOygXWAVg/bx+7eFWoRLnMkxmEaAK9db/ic9Mz20HWsdcozNW+KAR/UXl/bVD2Nvv5z8G/vnj0Yea1uVpgXKq9dr2B9C0GHrXdEwfaYAQ/Mp2g9Md19L7Q6GG0CcBWq+CIQ6QXulkeyITqFqeGjm3K7a8pGP9htQDom317+hBVfu1V6xbDsnG4KxHTHRB6mm2+fVlB22SeyUQV6QFoq53EHsnxM5oSug4mxkILjdP7QIl8Rxb+JuLq5H3dZRP3lJtV57wcVua6pX7SsdfiNXz9+i/jzz2ff489M2Cwuh33LIdLkL32V5IH/m63v9LjpVOqlA6UolZJjEmoVH4mPSoXbcijvQaEkuoMtvdkkSJGxRcuSsF0wSU26TsGETCUWdqsEh6PZEiFn9fMqpEp+U+2tAcRSV5SuCHEq4OmvUccXHqtQvBcHcqMpwrBXoudWzzZlvEiR9tgl+994TBdbyiKuRe6X2opcDVhXj3F0J8lioxaqLItQiW+T5hkPPJ2k7cvqB4pes/dDTCKQ5533Rv+n9DK7n9XiXaCHXpXX1GgNJPPuI7fVx+BcKtJxJqrQkbIOaSUhm2UpfDEwVlu8pbXaY+01MaqqRBtZ/w4Kv1gTXH6us6jvp5fcwYvv9rzaLgyIJfoNexw6o3DJHgqpWh3Q0rRYA4mlxCDrQ44Jb/eS+NlJANpIBSxQAlHW62wyIaEQhGQvqOSRLFgT9NAt+v7tnuFhqE99aCTg6NfU8g1zAkQdSpfsw7wJSBB6u+MQNcTZ5Xj8nnlPLhOVAATPnIIIy4lYrDkuyKVNUTpMbIiNgRbGYHLttjfhn6c/qO5hYFqp7NTPWRelFsU3eOWWfW7S3eIm0fK2ZviF0K61sSxL0TSOIpK4Ke2jO1MohTtYuWpa/MjVlKfIpKmQ1HDW3aTvQ1Dq60fJgq+2V62eIB3ErqMs7WzwDS+qVhSnQrzLF3Fqkz4H2yqidwp2vwQM4MTfKc7eNjdAsqidDvXxiTg2R/8j1mRJHvLP+nRZ6U4VAXFIs+jUznXvrZHpeVGjSFcOxAHINA0q51HoCaflteGdt6/fuc7zNyNI+h4gY+NyqKWqWP37xpwOKUUy9t65QEd0TOgoGidfc904awf9cFqbxm5oxKOqw5qoeO9uba3xCVy/hWiD+5rKZ3Kvt2ksqtmO920NI0FMeUxryHvU+tvXBK94M0xdHJEbIjAnXE9lTt4+ahqb9UuxX67XHr8jNsI1WJONiSolOOlCaLaxKsjmobwG88+zYTXiZKUXOf2NvJA/5hzRvU5HLvadgTHfvZoNdlfGY3o8+ezDhgoy3tB1/Ur9d3obsfBYEZQRVWMaKs/HEhSuuXRmrhOaJ9mez5v/YjSV0OlPP5hS6bvkayLL40auNkkdP6f2nZ8AVFbSnJE25U3IVkhAYfqxfLaaRdiQuoQZg8/VOTacOmyN7b76b3+0idS3aPFhTywUqHB5/78jy1SWNrSt1OYRNnBOqj6lV72V5WOkpNoj1oa4YQdIdoKAwYCsFLtS/9e2TIztqC8uYRwAstv11yvZO6/L5VondaCLDVKzRPDFHUTj6uKdiREoI/FpKtw7HqfcRijpO+JMsOVAQA0Qui+3sDzWWBHJ3MLQtg4OJjI2eUq4lHtnkfqQdS/LgRoPvOErG/mQe/T03mHbZGrcRWgZ6wnUf9A5rmkKl1hZl8PlyPg977x2+TQNlSquvBhmsQIlyi6nJDWnWJkpWlbAFsrYE9nQI29ItZ27DUeqjABLTsBXJodgW2fCbtPWjW1a6jYfUa3UO78RpA+t7lTk/ajxOE+2G9X+PVydTYgMlipOFBPrlawUVV5R+nAuRpaFA+IRR+jrm+6uvXNxJab1PnMhZkIxVAailT5CNjecvojH80JX+RVRbkksGY9OZPYTCu7qKbp2H0Cs22JaPm2DAKf/W/fcmG9unpoTiwbrMkDVSpUr5IuqEc3R8SO3VyuKyEpOuOUwtEuyiTFHJyXNesGPc7KF7TdGsuNop1D82OZ0H2oQ2Jga7s
w8LI+k9XWNTTRyoJcl49NpYYb8yKqJ9TwpDk30TBWO/5km5eodOmBDnH5IeI+VkxHANAr1dBZKVZv5lTwvvzd8Yk3nBcb9xr7gIbWkFNdV/pI/nx9rCjgLXk5lxPmtQgDKaHHqtk2T5hCfqEVv11ZDXG37doCHRs4mENqrQSswhSJmCrDbCTFfNUy2C6KW/oOZEnzUuw+k5SiCExlpwX/rqOh57sai/5mrr3t69TFYbgYymVgCUPAWK6vUOp82Fq+i5RxzpZSxlJ1MCmi/9HfTm8IPBLrRS4kTU9tazumgVHuTkUBSzi0Uju724jV3cv9xPypzAL2bMlyfympKigOHKFmvRFpzKzeSFYssStQyfTGeeZ5HJ7QI8MSVrPHSg/KphaclFysh+PZXfCF9FGzWwVb02J9J9kBcW6jFfaT4mdlU8cYBJNdKVyUbIBkT6BlkvSQXvfdvz9nYMXa6qi4LnH2cVxAw0p46Eo9J/I0DWyzlyv6snlzNYrHi+cMN47ORAnvuT3lC4add6cnyPiPNuJPES45nvrl94GM4Q+JOij0lqCtmfJYeNFv+GGvV1whtx+vNmwe9Uczc+xk1iACv/UfXre6p1AxULvfptSbdNV6WuNswt2YEkgCsT5mpkyUNMwyMqPgbw07h51CvnK/EzNjNstYNPepuKPxG0WveqWXgmptZVO33K6P5RS2WSvteq+Jv0x9flc8axe9Xx58/WyUfXs6Pnv5TJP6kDXF1MTQo3xtoTXQkYVvqMAnadmjbIcIcYMLw62pO63QvDmP548XuP/FBY/Sk6bU4WFXx8B3TlnPBFgMIrQMvGUjxj9vdSBQOOHiQKDZUlqicnBiZmIoLgokxQplu+1N/XG/PmW86hfdei0jGpCJE5/Bm5pietBoEjLGyENZ16KYppR3DFQh6q7StNqhnzTAzDmBDqm2Hmgq/kDQLx25cUZsF//iyr6qOXy3Wa392kEVFjT6/fBWcyqOz/02XdvXZl15VFhEpBY5pHlqC9t59ch5VD3JDmzFzEumxWlR9UCjNYu+lBxceCAzk1MWX2xf01zAJ5Rf4WfBl4//oH88DaAdctrA71sEFqzDMoWqJsiSxz3nN1klsHPh3j3pnxruZp5xLf/aWAN2Ot6lURuS2filDDgrYFhXdeakwc+CC93qztV8yM6paH6dJQb38g2EJdj/oLBkGPVm242gMRCihfq2zH2um0jdrmJHDVuNj03K+VlltSuSdVb6aQNf9SEue0k8mFq5acYlBY3IPcZkfyypf0Ok+XDs0tjYfxz+xc1V+/ikr31FJddbiuBfvPlS2emgwyv9lueTEsbpd4tP6rvUPoi1S9kuPiHQ61MmrZB5N6h7pkPH9uCpUZSrNRrpfcQ+w4jbxGAObw+piFDoQMpJKYCzi/bzhnG4SCErko6bMuwsX5j+qj0VUc04KJ5WBQJSkbN9hJdHIyABuBXC76+M/+lj9d+V25pih3KirL9rSTeXvZta8ju1/E+Uk0OZFm/YniWPXjO25wTbyWvf//nklwkmN2gcLjS6ZohZ+Et7lJUY7bgOxZS2E8f3cg/34kwZTdLW0Q3FL+XF+m+3Ez7F2Vi+XUImJhpRfLuC8nsqtSVZkyTUYcjlLcbtv0Hb0lRzH05gEZFYBECYQpIYLwuh9cz5N+d2Cgqrsh45FOWZrhFn+wA0n6gE80hy+J4ffMxLlHaMUaIXoCpkWNLlHq5h4hF2ujWuPe8awCpTZ+gJm9AkmNEUukaHfud8YhBLDPaE8P4PdJaRa1DY99OUOA4BTlx5DN6TfOyjsxYfjhtGrJ13/9aSfZkQrN4TXbO/UePE94VPZIhIOCHl1EaL+JEGFb/BD8LpHwqlD6fUoj5zfB7HORzWDbX6b+d5UxIpAUNOyEfMEt5enVPtm/I7Ch4vaZID4AKvlaQjFwSVd+/I+t8v7NtSkGXUhKhzgmgWQbnKXm9i+78WKRg9vL6GJglz3nUfFR8li10pCWorAvgwfWrs70AD9Tz45p/8HeBV2IgN5paMUntEQlRutIKAodr+wCJ3dhw661aO9mCHTFwfVFcIQxaRqUJrTqeetU+7sUxqazWn/tJgaXVnhVjriy5NOqHqLoePpVLlxUliRacuXQsDMhIRp7s1rSo7JJ5P5AONL2gMe4Yev9YMybrmfoYrsIKlZdx25MT4rLx1K1LnvCPD2oIk0jjoiTCyZmibDw63ccBGnEk3M6WHrZGFoy+cpz5oDX0N0pzImm2wqnqsKlHDa+s5xB617pYT75MdY7YFZFaPrmkKntZoJKzlUYf/eo6m1vrwYbDcDirZrU+jP1k8adFXq3YuMd0U4cZENrCHahCODhqAf15PyZN3Oy+pl5QyBzA2ZB2D+nTxUOZg9kSuvIt+mOshceBOH1Y3RBNGyj+Qy4B1cPX2TMgqHu/4u2sX18MYr1EoOrhKALOI1qXwTOSrWs9162uP6bQPQ+n7j5PNVfBP/79/UgQRFq5VUFAoMp3ZtZfpN/+iM3iy9j0hvKQeACgdjPfCF4OCRz6mbsB6J2HucIkShNsOTZXeDmqUnXG2t3/VZCdERWW3k+3HkUjJrBYB9FESK1y+Gf/jvz778Q+vr2p8YvbpypBurijOuONLl1SL4/SDwoVdjQHK6LgpvnN1X6WZ1QjXywZS6Z0HCm2MEfMq0oLIEploFcTFKAvUccCkP6lH/8p/D4kTfzSiUoNv/Pt7muMPoHsBq8VZ5KXc/iF+yhlTEaZcebgiaepYPx86vK49O9rICSKwrJYUUGBUAVv7Gfbu6r7Hplerc3CzEAXtRLAVOkfw0hzRVNZu2eQD9QTSDG9+ZrIumYQ8+JnQe6KcJZ4xx7nGcQp5v1oc042E7rK2RaAeYMySpAj21pnukG5jlxvr78RWJIszqq4k4uo5ow1tW1wrV/PiJddoEARNG5YVUX1wNFj5FwfUk36gvAajpoGqTMyKS/Q019ihRwJ17U7uAUneS1e5pvxxo2mlpW01rUIzRc/ubcHG7UiUD2qecttxpONwBHJHQeOhDvCu8Vr1BYdhb8/njYp7Xem6+uypvnqRU8ayqSELpsfy1QovvjPc6/V/XVV54TpEDNwd3FQhjmRx9tQ9rXVutod89qJueLZhWJqKq8zWBDgRK6GkIYhG0f4Rh8mXTvn84ujRqgJieg/Lq98GkcPRQbu/0y6jVK6LIFEJ4tLQdqBtqnEcKV9VPXKJnL0DTd0pNCE94CMMyE6v3wXUdbFum6swnDT+ts/x7c5WIzJzZynT94GwawyWq+CoU3OQzetnu1jmq181fvZ2QudUAwxESCw4pHz2087+mBhc8A3+QVo3ItZBCOtVqoqTYIJtDYNv1rHlJjJMB8xVjPiaMnfHrs81q5BHOLlFb+WdcaRA1rd6G5bKME6/MKaHXoPUNVuVmVkj1SuMvitk28Szn7aHHpYVpGpjsvQUJar5ukAaYfqrO0p4DZqd5z3KS2xjiuzZg81GVzlXU0QFVQ2+PjsiZk7m3DZhfOxZQ7enDZDMI7SAbkTkrcHZI4gek3MLY1sUfHBrLN
FevCCqZmozbnIKllbQagoTApwmvxMX6uVsQx8OJCMLIvcLIuZuqhnNBwZEMuu9u3DV/+pVNYIiTXlIK789KTl79901qKKNo3SF3OkTMk82exPRcip8MNwKtEtZP60qC5fbu4RZ3R2n1Du1Nu32srrkIRyckCU3RfUgVOlBDQ0cbR1zGPE5//udM/BQQ/YEkOqIo73KvKQ3RiraPWpXfJjLyNQ4XKUgT4NCFFpA+dCx3W+tjxfAw3V22WVqn9ySQczwFwPRVAmPxl2OPrcDoS/sLhvtqSPM2KEOXbMH9G1JVl79tgRq71zTR8/rcUpnvgpV7i70YX1SX6Nil4PtqEbLZ1ZFKM4Mv2In9VkV2ppvTVkauS5jVFF3Nzd7HtXQA7UHnx/9IMK1ehsdPITJ0P1wW0qqmsk7sWkIqHQq5etI8k0RZNXUaKyNeicW7SKabrQlTQmHCIKuK0lJJyr5+rGfp3GNhR5LreGot4xeKhVnx+woYnEUuBIeHtqiBFYlnArKU2y0Z1UtEph907u617ooLv0VxFbfPVimgt8ATzz9nal+W1bNsX6j3vMpVwKs4HR7LJfYU0wMkKzVEdhrsNIdUWpIChflLQah4fJjonbyqOjzDwgCQLV8j3BN8fgN2Nh0pCwbuzGrs1OM6M9PnvhAd/f8JrQ7e1ISuRu7C3VAMWaETG4p0AOTkA21/3ydP+Le5kQwcQwXp1ShDg17s+Z7BG5UJGtZm3e4WweacFEsrjKwCqvWEevXe8KnoyT5UShpxk8TBx8ovjgCubDXX/yu4gTTGfEcw2t6GibrOhZOqz12Vegws7qdLhuTA3m5FgCYXjVFDj74j1XpBl4b+50JbKZ7tT1dr4Xkr5jpW1SevLFva1h1vhJaSOsul7eefo/i+5UUeGX71iHegA1zKkRb36mX2pEYmghB8Re+zCn1Tcd4yJawFMDiEth4+l7hLln244lhkSH061yx3V/p8WWG1B7DqhUZHf5tL4xdSpVaeZ2CvUSeNjb5t6HB09v/cjH1nuSrUuGeBZMItBgsiiPtGtKtDKWlcH5u4raRN3U1ZUGCTQzFo8euHrlzZKHrG5ebRGK3xxCtWvIdW3H6zxo31H1vPMp5NR7cGJtqzHZ3Lipz1z5GiXcV5PQ54byk297WVjwAQEtJBSPhFT6IeSh0NGlh8OfMocD4o+X8ZenBvH0Fr0BiT8U8RYj1oe+pZqeMfOD3qNJYxrSIxMjNCPohFedmprudXuGWCDI0+NclGiX/TpY7fCTGOA1jkLOTpuEK0ylP0v3ARjZfZWJMSMqmOXC/ff7AIBy7cKDg9u8DehF1oLMP4GQ35f+X61hMK/ybr993sThRcQLlmBhRE/M257Z0pAdmhWSi26y6PkQqZKiMkLgIVtx9/F4s+0k/Q0egM5VO5YCUjHsx48u1zOySapKtLXTqrEuq/Qkr3OOc0xbtACAnKC/Oto046CB7j4yiCXX3NLUI7YLMpaTOGDLry18wbMXmKLio5OMdRFJr26tzeZVWmeskgxc2lc1IuHtPAUnfD21KtuWrFp22BtSaw37o+lWeBaZ2lPLLier04/3TfTe0WkJW48w9DdgGJvIIjamrY+IQodL/EZDJ6OBiyjxX3nEkraXXXswP4NkQz8KqE7NEvrFo/PUkP0MrnE11Rz0Erm+tUszfDuO6jEE47OSxL/Q+t+jlVndkB8LxPhaa2B54AjLOhogbsTTqu0Z8jEjcaXSA4HRj7asH7HzBdofI31sdUcx89fk5SE65l4HYe004+aKep9BDPrJb/8ILd0f3uadDahf67/zW721Vg5ciKRHjNqSeXOuqe2VcNuDtkwxJ+RTQmodViQ1weHBYrXl1Z3NT73z3cVOw6DoUjk4k9e06uIoOSBKmn1WmSxXswEZDHF+E4IJI9g0FmqrHwhgGKGJhV2+Ek/1y0bsO/Wtkw7f2+Pw/QtaL6Mv2CCuVTpYD/FSu08vn95p0erRH/KYV8cY7dwZHBEMoYKPXhvPC6HiHdaOxLKPQsr0Qbtu/n7ptm4mxLAp0+tfBP1n4ug+vpDQt/RsbLzXblQa9d/h+Oht3syO9L3XYbnOUIU3U0EGpovy+0foS0DcqqmsQlhjrUovF8XYLt64W+lw0UC8CjSQi7ZOOnjDeidvjQyOFU+s42/o9YYj/v833hbcUJ7YVlFRmexgkobzx8Z+O95soT7TN9jETiVpdfQI/UJPi7HO17V+Nap9Portv31SvfCrnkH4VAxu7SrSpE4NY14EVXH1uj7ioNrDBWNXwn2742ea8p9tcxByaPuPAcncaNFUUZqUqrmEsFxPu3IwWXDdePeR2+7O380T9M01tO9VQVhuCxXaoyqlgRDSX2ViBrYAQr1IE3T5JG9XuUzEgGNz9HovJK1/SeAxPFeZPTEgGT5wnkB5z3zgD/fRB26i0z5oQ6LHUJ4V3Nef7tBwZqq+0S9vo5KuyCgHfHIqccM78jUJcy0+gzTxyv4VS4FNKoX8qymhN6SoraMs66WA0UiO6D1zFgeJ7Xf6d2PqFob7rpJKcQcF2Xcbe632XqDGNmqZ9RnUurT7Tnumd6gOGzn6xl5HDVi2q0Bobt9JXIyZDRMsjdTUmhRxwM/Gwbvm7qhBFhuYf07mlqXIElHYvdwyGO2ikQLlUR9bq0Pm/vh2+vbobvU00f3m2KIdvdCMRVmcHpodUYlO8Nmp+dro4h4gOcZt0rnFQUXFtD+/GoB/RfWs5V20r72hSIrGp8yBh7WhtYuVabtYN37gPNoWzM/pjuzweyI4wtUNy2P1TVry+j4nfIuYPh2Fh6HbpQTz/Tsv3nKwU2oSxU2pTXGNLElEkulOpmAZNfc1rbrHlhdvdl0HR46SzEnxQ7kKSXkIkPtkGWrrCea57ECFdLujSBx4+f4jg2qf1RbMAVUURuyHvpJT7RRGy3mnuc/87CIO9LPFWmN1yFTptjTJFJfDavMFvbnC7nx/W9YOHam3HJgXVAQZ3nfMvk1J9ihOVq+FYR1KR67nwwsJAVpKSfGQ0dMUHr4IJWSLdSitBg5RtN9fpvE0yyqlb8e3TwfCZa8Xi02qoL/YpfGg+HSzvrh7Li/G9YLD5mNWr6+aYXXOIlDdmE1ajHMMlI/VyDXIxBUYvO6JefGT70X3M3v/8JFgCu111wQxBjEh7OrNARPSioyJr5W4t+qRcokOijmxNHDrodlOPUrSYR+uD+bib2km2PRvN1dy0aOWIndqpaIrjKKiFqgU4/6BerQK4/HV5CeH2ttYbf6jpiiY7qlRhDs3QZzOxY1SXyRG6xhiXxOwYNRjNApNVGOswii/WFSbU/xg9+YzG0l/CLN1TFToJyzfnS0xEtg09si3i4NjffW0pyMOnpqGPk6V+jtms8Me8eL7xkJV+H3VvqIt46BK28cV7L2SM6edcyBsPVr8YUEfOzOLtcrPfbOMSkLjXPYB4G9+CjTKOshyc63b8Kz1+QtxAFipUbt6At60DEmB5+VCmDOVvjQBfiRAJ1G1jNsdE39w1vAOdmMpeY1GpbRYrJQPRfbN1uj/BtkwSk
T2Z2scAuTBIxuLhEVgNoBikiRso4kaOU1i1OmPA+Vvou61vAIhlPFo3Gp1y1IkYy2O7Vc6EPaUBLzHVfOhhHB2rjFLGj0w9bPXtzpK0qfz6K8cxBtTypr1irTXCLsfzqpV1XoKptbW4IQG8/clCY8r7kTyJcLzRoWWtttadjr668C1JV4w5vtzGAiDpaUjN+kW/NcoxeD7ETsjxllOe1uUHnupUerFlubKVRDghbyaM4oEQtbLt7/b7hAwvQbKNyeQYc2/0ht1eVu8aDkYhrqHgxLVDlAvFBWgzVufge8hV/raQTjVV1gBpllQ1WF2+yZwt0YmAzoDGv7COXIu2PYUUXpd3LLQqx7mK1fGzfXHHvaJvbU/lHD7ym3ZoTHFjGlRwaPZwZeVAembznb76DXoaH91cTGglbrFPXyhpV2eU29iABw+xSVHaUf2lby2Z41X2Of9xkV4MNRrx6J5hj8jVXDj6/4y+lZZsqTJhxkwbrjHfCz/J/A97Z5xmq+D/evvfkrtRBCnfLGJPJeq/6Hm+OS8qQdfLB7gM1sb0Xqehy+QqMoQAkjKFKE51y3sdSTM/PXDCbwZamFgXK5f7pFp5r4mZhop78U5G4J7uM3+yStn7sx0f5JyRMa/YNNtOlNnf77HHUSpzHdT0YbJGO0p0JY9hjqS0KnSIqqo41cR/hYfsCP7lfmNkE2qhkqZIJG+1S+fgW9frCruFWf1M3TDHTtybk3wnWd6Xa9nTUIOXWAcV1/ynz0fG7/5fz+v4qY87VmgW8pw55TTu6DCgy0YILsmlE8GT1O/neMpz9C8DNEXMBe5YiJ6o2a6jDdUv9o2IbNOQQ7rGKfPQ8zcv4PeHn9nd4AJ0XnT394nz//TWR8UbHV9hXL0TahUnnpHXVIG8w103CNBudSOy5iB0eWQtV1MHj8+BZ8Sf/m0HC7h0q2oEowIAY78F099e/sU3Lv/S/gb0TCwsot7oiq+Qb8GyS5md/x9Blqz3JR6dAgAAAABJRU5ErkJggg==", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Display train image for reference\n", "train_image_display = train_image * 0.5 + 0.5\n", @@ -266,30 +171,9 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAk/klEQVR4nAXB17YkWWIY1uPPCR+ZkT6vK9vV0+MHILikEZb4RP6I/ooP/A3phZKWAAyJAWZNT3d1V9Wt6+9NGxneH6O94f/xX//bor4v7dU3En4lRlbJhZluyh/yYW2PVivRPrP2Erh9b+vkurJmY3h9rJZhKJ6FXCkHZKeiepqNZ3r5Ifl+Z3lxSS7RSr+h9sNTjKAn4TGqcV6CTh3cIEhSvbf1GUZGTqjXcxLP89XuDVna5HpnA9Iv45t/kbPlNH/V+2rKW4Grkn2wm+t8ZMth2x/+fmIGYGd0achts7PQe6e1ts+vcq3K5HD/PG/rukPy1pXO9Zp9VSd9VnSCdSQ/xHEe8bZPLWEleAeXWW4qBSTVbNVLGH2fpTAzAdOfH6xT3v73PcdsyDc6XqQWtZytmXhbkO7bItIm5Pym/VQ/fnVG4918X2+SJLmhThrCpHLBe+sebkjDnuhh6zwNDgZ/ur4heUrhZkzgtu5eEtPuiyzuyO0j4tsfYizodHhScPRQVgPKvGHx+jcMWVHZ9yHrAayrcN/3QUCO3bYylrPEePYEUIsxqHyU5Xedtp4On0dDyDXlE2gh+vALcyue7SXNt3tStiZ5dVC/gF8yNAEU+teDdFOH2Ct9AE4yLXVR3flnpoj/qR7IW9TNFb13uz8WYdiQThaKeK8eip9J2eY8257QHHlkRnQhf+Dv0O2NI/LF0z+PBRvhKVLARA0Z6MNVRf75xJgcgiDs5IuF8FH45/GFblywCR395gVPY53zfXYZ/LIcu4e11coHePTI5bwQyfL6M/nZ3ncR+4cL4JBPHPzGLoRvliqA4GHOm7OJchZYtOx5/rC6QftpsP/hFVwvYCwJiUOwKPFnlWaa4zb7PPt4cuI2huBDPz+P+g1Mn358KeLu775LkHax+V9HZrlr8ocCTSiwFJPasHgaRkt/SuEPLZ0Z0KyIG4hP9Ha9fKA1LpdA3idjFHaIVbq0XyqHLtQyysNeH3O01Z+dOfymvnDoUr7gcXV+WWwOd109VRxbbNlmpCgbUfV10DW7Vf4PgdOdHv/+OpJF35UZ170nkRGtN1Okm5fr/ev7rNhzOKsNq/VKVSlZ18I9e3weo4j6Mr+eOXIRhrOZBwTxOdKuZifbdqx834+K5GOVhfu+ONQXzv/2sTIv+L782IVOo/os95jl/pzu7gWUTq10yYlqDv347FKf3am+3GVKnx469XRokZQLaOOkSo9NXoJp/ZMnkXRJXy2aeuw5b3ZDPv3CV3nC/WhAmHQzduB1Hc/S54M9v/oP1S65jk88bzj1JtpjxW66qhCdBeYzP7YvYUnaoqiS0jXPQz2cVGWl2W7oi+PTBbSr+tNZwFp2N67cgZE+KWJiMOynQdPX35JfXeo2ywmCuqkDG0Nx+Or+RfDqMzOFAObtYCLQ9/Slv70v7HcgGJ9oF7k3ehBoiexKj5CgemBR8G2xmcKjOHBTlYKKed/aJBN+yIjL/NCfTBthmr0h+TMKXkfZtkPOk9Vi2kvnUe6RMiq2/q9DWhSCVe59JnvcR4NXiVGJVyaHjXGK7sa5HXfvw8t+knZos49J34L2aIg67BGlOVrxpZ26MFtaHL1jsi1oeudbLTIBHLtwarMdl+L1S8QX4HD6fABRxzqnOJXQwCReIb9ZeamUanLJ85f25OpGZwvokmJA49NTPFRD27jw7Kpmauoqjf5dlwa1dkm6GlNP2FG90q5rpxp99cbDyJJwvorvk3PXK2yYxs9w9fthUQRJZtYBKcmE4NaburmfIPrW1zJ08V9ILGtQ6km67UAsvAY6api4cEyEAD7x/0J7ouGkcmM8G6/xiZw5lqaC9yGg7NbWpq4sUfnHmooHuxSCBCq1DK33z8/03J6XnTXftEeAe5U03EWWfQJKlUhL0PdDXxM7bBYgpqZuJthvhSr6j8gvTsq4PRIu80gr1FDQsjcHy+qzNIBUo76D3FdWmT3EwAXCvJv4k279nX8iDYTYg4438Sc+Dmej6eSbInzaFpGp3StXMSSQ7ATQjwXtba1x5XZWABsB5PE4/HH04dBCMXkD0VOcR2/XLgBH3BEVVBLKrPXZBPFYZ81kMXhQk7avuHaVAxCvlQp
51xwbjduJQA7LZFPlbPBVRnIpgiPhg+c5zGKTpt/tAQskmNSTlRUUktVde32WCf3IRoSUUTD6VZuevF44Ihu1AInGnY15IKu4M9KlWfH1oOUaNF3Ogt6yJ7Btpys+MG3+dsycCiTtrh3Gv/7OLv+W68FZFeJd5TEg/DanBcrGNzZoX+wiJ8zVUUWTpmuMtZaB1KkR9mCprLWs5lAkRBMKPTQIjiuHG9LF5XGAB6cyAeUlBE/Hr4/FNXVP+TSd/U/TF7+0vPcwaAdza/WCmhOAr1LVFJD5CnQ1hcZVqjtYD1OdBSk502/70CHZDw06wqM+bvhG58dSAEYcS4N2cgRviYuv/9l0zBWNWx7GHfiI5sl5p/pBtq+CsbR0USds1Co1zNXk5142G1ISzvIkm1ja4RPZdLxp86KDyE7M0QiuSIuwNA+4+Zm//YZPPFTKxQg+0nKb3hfpj5YqyOBr66WR5vPh3wDMRAuR3duqbKayL19uP+1hc59799/zj82D5KP3vveGN9RbXVumQRUlNFDCHqwNyZvtnkUfDHrMhbhpw3by6RK9hMobM8LDpzJWp9xBtFNeRPXhDbI8NbGU4E5HqpF/c5XZz86DquZ9MAp58ETwjAZuXaUvlMuI1Vb9+BNN/ZIt6IsSy6lrClNZDEy6Rp7SRdLyq/CPgEDcZz7yc2CLPHX8xRvgfK0fb0p0pS2CKdcUaVuN+pkcoxA4pG3anuFz0fbDaDHyj9HBPcj6/36wRsim7zvkgYnTogrtCIUtQyPsnK1+VcFK//ASMNufjtsqo9VI49e3+FMtL8dTMjuqFBynXmBN83DsL6PFwUOMQZ+c8zDw+V18l9OyVR4eyqbuXMBt1qcsCx246dM6XxDs7U/5MJ+6HUikoVP2v58NZ4DkCOOia6hOGqlKNIVg7Nu2yyHXSR5qTyYBmNNRN27FeYWy53/99395JrQGeVCH/WsKi0bl7vz8a5zpXI99x2hw2lXYtVSLsk8s+tXr6Xa7d8ExgO/F78Qhkflui0MmHyK0rgVGbdcKUyZZtJh2WRLZ7/Cs74pGDX3t0o6+OIhJt4/QRXNMpsEVyP9HskPWbGGjrpeHbBtiq5MgkNF1Id3fOHDQ9n3Twk/fHy/4yMy6tjuxcmyZVLUjjzfB+yvFUMwep0Mjg0ZqIJxxnPbN8R7Llg7zXncMEYhdAIReUPe5v/Xsek9ZM8Tx7ktVbOzJibYn8Kpvh4h+Fyyj0y4PvatVcqER9d2WdMbGOgugUOR8FDyD3P7y2gbKYItb3VAHLvV7lndq6EVYkzob6hP8ok8j2NN7kv5Q1CI25up3kXNRJk9HeX8BxKMenXWm05mjvWp6j34T/EOhXOvQFn1q4w4gyw9McDErArvn3qDyXjWIHtazzUnvV9LhFtYN69JPrld2yLaAZYmB6m8SdHyUtxeE67p8En/uQJ+q0rJD3zcsIa5LB0CcfoQvP2b1PSOs1qkE7w3bu4UYqPRzNuFSqLi8a8q2m9bj+sX2lbEuIw5Nd9St92oeejXRfxB253LbD1RDmGZou5TU4ZTBEaHpY2rpAU1Xzik5E4NwnAlvus1eh6uu4N5onDkWdeh4Puaz77BV85Yjl0M7NdE2aQq78geNMdJtw6SxuF5saZ7FxVxQK/UqJUd18oWMHOj0AhDcPQ5/VvZlT6nD5mn/9XgNClmX/T1G8ISfua7MYA7qoMDe0Kl7LtpNCAErRedODEUWm590z0n/RbK62hDKbet8EZ1W7asf07/JatF2lR02yo7OjMwo1KMcJKCNnTnUBSGYqX5CnboqGpuHhLc6x5AgDjHterVYvhLOQe8ubx2wsPfxZ9dd8kfT4/S8X3U+m0q4eRmW6xGEMu4IKT4zMk+CvDykDCHW59OJ/6Xvl0lI+kF5oUPE8/e1G2YGwEMZ4/F7IgKQJJVto/cTc8yrCGl7CL6ap25bbt99bU0D+bSmKitpLZeeKTRtkdGo89vd0IEO0y2tM4BZ54pSnvq46IWBptCL2PSdnQDmWws12uI6PuUD9lMpD6g48VVLm3zIi9OL8g3T1kQsGSYAeR2PkXaw6kdg9NJOP3/thwLKVx0mA+zjlD96l3UB/axvZsObPv45//HRo1/axy9ow/9drRbKBlOnRy+uLs4f/7S2sZfe344K4FrjGTYpkgOQLnMqVkDfCwnqBxwbghjyytzx31vjMddGWZg/OmmjXr3DTCXQ1K7LCfPDNy4ubseqGjvKwpYAzp4wc6C1tO+TW1jC2IVcRZh/8cUVnAXgmD0DGDiEn7AZFJBwNfhkcBamPX5u2/LT8Xiu507P5m/Pfovs7vfokGngNUhaK6tyDFEhyUnBaabyCWUxTU1rCLVylF62gE/57a00TRcy1xoowKNydWhwOLYiX5xbE1vMnK8xjVPToGYDGxA4yYvG7eueadSjYlrX+0HJ6WwBaK8tyT2RLPOmH0inLuAHw6kF/U3U9ffHtIZV01acsh21uELXRgfz8TPtr55jw1tg3Mz5CT1t/8UBcSpBzhqKjWrT4HJnTSGzPAiCFpS69CgQsG8cPM5WJtoGPCw1Y1I0p67KECDXiXv+4ob2Tftil0+v6ISpzhBMuB7V3yUNnFWkVaWgQU2tC+JZGgjwU5zDBpSIIYMcULuSct+akUoq8LNANcoUKsBT+kLuqob34UbVKPvGcW0yJ3Rh/3oSkE5XiCcBuk2WbO6v+FId0WA4ciuIfNt2bYWI5eaDjVkDUdq3SLVq+Lr/a/x/rqN5U8vn5kjeM5ejtbfAn2cP5j7K8g0+ryw9Wn5bdUQwhucvpI3z7tijdv6yGIeeXenM2uFh1Hl02LMBe2M2pDPT9jnpG9F4Opsb/E6c88lpqIuiq/Ptn4m16JfGP2CBIyKVnas9rEw9dD1EJFpYFvBPDz9NJ16ph3HJBAfPExMYn/al8F2RkwMqCeWRcwGDqc5ABw7Px9vfk4vtyzBrzbNjygWbbyEfT3oLdArbj48RVVXmg/Au6GYZ0XwafZ5/oYcZT+thTAgaa51n8AkOIlfWOfJPlA/MtG1mtKwAnVCvjEbRp6/dNxML5z8PcNxT5g9VvX8/E1nyhrJPceeNwzVqc28oFbWm3RirFsmR1crrUaa6PAaSkGTY70sWTKK5twXxS++m3qftYz7S88IerkpPgx51R+/pus3zCoSKOpB0laU6N03jlsbjASvGl2Y4BfUE78se9uSutxzIVADkyDezGp79JC08JZTUxBCADcJtB/P+VG/WHrF0LY11fMFFB6rrID89H+zu2eDjbNrjvO2RQx8GkWCrqp8BJIN1HNz1RB0zYHpiUUxLdk5YZ1Cmx4as05vjgVHqyLHAlnPSp00/nu1BWQAYLGp8pDnqMW5L0Bau6+bGTFrW9dmfRxBRU7yIQ98r4oXHl40yt7bV7vSATWxjJU8VkkwUumH+lE4eds0jv/Fhg66rBmwAjzoPqWHSUkfs++bVS+rl5fm4pgpF3VkHcW4KK2QsCMmVG79IySYEgp7AE2jNgDEgVQWK/nBbDizDPZQMNMDgJ3BYLU97I2bR+3DhjI
5jNjqWfcZGBZa9I0at1wut/Ys/DGwWt9LSa6U6i7egAWkRdaTznYVNPJUeIAZHcRq82pF1A2oncM7CD3HVxFZkd3N5ineG8LptEe4v4jBZO9wN9kfLDCh2bh3TiWg0ivAld2guMcT2pCOBs2bAJjbjtAE5EVkrIeBA2g1usgBIxfEVW6H2KhiNR2fbbpupSV0cjwp7ncIh8QcJTFF3Gto7xC0ezM7CBYiWv7Ima2TZSCrbDkfx/vpTwpE8HkpY+xIRSCs4VKP7/K4xqzMt9cdN9XzT1WQTb712wsa2PldBGncGeyKgFL1pjaLcSxPTdpvmJW0fx96Am3E1R3LIkENYU/Fi2OcGtNgfGiodhUVBGNrDTnAgZROB08mRFpCWy57b/pgPLU1MkaQ5QIgg1lp53X7pNo+oZ8SpcJY1Ktfd39Ky2kCl0/bwpXIuCx94QYiPLTJoMEZfJk0lo6NLl8t2ai4b625a4JrGXla3XTuMmmPT2kWC5OKXV/odIQsbuaBHmURlZ3e6Nrf1Katy+E2/DmymWQmI6XtK9UlmMXMuxWyGAca3zRcVK26g5Lj1LecP/8uKa0Ubz0g8Gkt5ogNu5G1ZSGfwdaez+lQVokxQ1q7YdjrNqLfIxRUvbsOpaL7f78/tEUHlD78eGut+GBAEE5Qsh0k+2os/p9XpWR9ng7JDZzsO4bgTCJ3y64MTm4BstsWpOditNcHM1doWvHFRWO50NlQ45R8yhOhovZp9CEevNN3zKizUMQlJxynz0LONen6//9vXT9sNdJCv7C11tc5kG0jzHM1m76vL3RIBdcZSdPA0i/x+8PXjuGlg7fRhMOpui1TQN9V4sJvpb4fl5Jv1BwEA23c+tp9fdpCNyjsPZtUmR7uTJFsJ4vuKIWTmnoSqr7BIBJ6t1/MGphVFWMNI/SGcNOyuoNtftWzEcfEkn5y2CG0f1X627RVHUcc3QrbVuFfQspyIIXCO0MnsT6KJ02OOo+1bMLI/fz3SYfrYtPlfiOyXwi2Lk5pLGlzXdW63QQ/BBACfAf42C7sPZx+It4TTVImz6dkSD6Vljfm3mRttpY9s9HW/x0MEInGOJ8ikVVKehjxHgesw3tSyNVPGAHbN2dXQO5f0yonF6dvrXTgV9qO5KeOkNXhSN/U9KOqM+G4XkNAdgTdYo1pqUeAcPVgwKNIBMHb95raY6LFlQK0BMAxsa6fUjFNjnfzy2n1mCyvCFSGl0/ERVZ8Tye+PW/D8cHdsuoLFGKY477oxufX5JniF7Juvs5HrH04vPx335jOy7KrFJC33pnJTYUDjuKFPEKTQuds+Nd3XQU0X6/ldts5TTmgdzmdTWg/5S+UuWchROFGkzAG4Sz9vqxPGCKCjDsdE6LAVnq3Hf/qSbElRqViLFBrdmWk1dB4hY4VA8WGy0KfJki2zTrcybXXV46w8pNfHFxQGTb8pvdNnNwl1tJ/sL9lv7Wb2wx4C1bw4zng5CD5DKPqO5y11Q2ZTLvzF7JRv9qVsSatt+wX/ZNruNqsTsmzLvibzqSlxqgxKmGW3EjdurrygyfgPpgVNG5OaVphQZDCmVVSB47a4/PUok0TwJo1B1WsBjTWYoujTCnfeLjOXkai3Iu7kvkhQPLT1ZjuYYJi1IV6NR6QJFmYqIMAySsmmOX0OL+es8OVw2P4TG0/v0SH5sSYlo1PM1zhDj8e+5Rnh7lgMH/9E+ve2sr25DrKhBfoJPnLMyJQa6HJysLj4pfRM+n1b3kxdEtr1DFJMMB3J8KlXogadmITghCLkmV7RWDQJN6hjZYDWxhVl+kTcMJ7t/jze8FoJ11aoNmak9dCe1OT4r8oglb7EpMRW5b8AcU7qVjtI9MwcuvPZf7J1/R45KiTjd57f1zc/9bKSxoxJZHX8c3F88wtHgH7ZST3NLEc7+BJM+grqdF9QMIxWFzP0ArMbez19R0D3cIrJwfEH0ZZ5v/OqbLHk3jv3cMrboch0CocQmBXEqj8YX5a2bVt0NGQ1jZtoXwivdCvVDXgpIAMGLCLCL0yNJRJW2TNAd3Lf/z+Yi6JXP21QcvHOW7Y7xgMkzjrfg0xOGwvB3b4xfuYcy+Sz24tBmjoen6w27JwixxjY+/tTW59ca44NSqg5POY/L89CS3U2LCcNo06VxyeNYDeUWH3/ktLsSb2yVtyySRj4XifYKa7K6Twptuhrq3U9PJOhFqeENbnfFHowx+fZweNjWOnjfdxS4AgbUoZAi+2xFy1KxcL96LuLyI73efJKmC0x0tLxqaJcEQBCS7s3r9rf/H+Bu2r+O4gvgmjfKPuuvT/krXZ6Go4CzjGERR06p71w8GLfl1dulW+pPbU6Igc3UCDuyUiP9FKU+XV+XLKzl83na31C8NUwWkyx+dh7q/1o7fQbVcVVDA2XTq/uG8vxZ/pPNba69RUvxcwg5HDbIUWZd8IjEqYc2OiX7+3+/31K9NF157cWarezZip0l4wZFR4XO7HnydzdcW+9mHLmmAFmxy38xYj6pLFMq+OsAYoufcPNdedJOhm+YrN/TeE4UzJggHfQAek+rHV8o8sS3v31p9vGlIsX4rE7e6hEXxIt0/L4P1VfV5K2AVU1q4yZTywxwMuLHC71MOQltVzfZmDErIm7ijyfQMJfT88o61D8j9T+4zrq5E2PFP3Kn7SxxgpjpeohNMNBzupOxN0BoU1RIMnPw+io7KHnUJXDKN0bJzme+pM4G02xbgI/L1cgONlq/GIY/2qxw2kb5XiQ1tFeTF3ZDqask4lDKeqadjCbqVbjV6I/ezHQfRqdKAyzYbGKveDqFbGz30WX7010MW/9IBr4ZFCUOzWpxXQ+DdOiPRU4Xw/CKH2Uh18xVExCaRE1wDLr2ragg8V7r5fzWnEqjlZuMclSWjqwE54+CZ7EViiJYbnmte4eVPqnnVZq36Tr5/Ni9LVP8PHGoAXmeaPUtGH9t++E3nNhcSapoD6Di6snPeQuXMbBvW4J8WcQzeuBrUmHdAR/reghJVSmfiHFbwWAFNc1MlN4ng5sb6eA1WpXbN19Gen3kYO+tVzp9EN0xODZoA5VPXt9r73njiCWzdWVhO1RmR5pt46ROprlEo3FZ91GRIt90wA+60CquQSP88BDy9r3xHL8JoSGpmiIcYmPMdfBZM0DLPB8liqUbUadXN2YrqyiqT1HpEJtJXQBabXLZFueEh1vTqcFX7+B+uuhzOrjCFvk+aVpD++ebYRIDU6UPS6t9fEmRor1BChx9PDwzqBJVVbZaapOsm0zzVtz06cktMFI0qzq2jCEoshNNqPlwbSZbjowUFTJ82nA5unDMEGRtlZWr1AO+7VtzdxQG1nzDknh2JYxKBgktl0jdl5ih3DWovIgt75vVrEAePo2zZLMVKU9jsYjNPy0uyd2PteT0oIHL3YVrez+l5x0h7uwfWvz3lh3qD9EvvBpIgefA5NTCoptb8gweTfDtEJ4mMGp3bWdYnw4HA/at6ePaAxn09tYKp0aWFNK8kxjJupsV4XaDG1a3dH695Y/8STk0JoszmyilRygRoAibAHWsJkZ9
PTzAJiLI51hshjwXVXNlz7jttvjCqfjXde1Ll3mtJKZ9uwVZVE2jlNzI/R/1Mh82e365Fu0OX4uY3Ca2eVBEaMGG4aW0cxkzzVEvvPNVe6qvT+AgYPRbBKxx1Sj5a6Rro8A7xwjOdZijVFWExM6+HN1aFHfCfWLTbT1DnPV8nWkYFLuyzJVbYWoS6UCEEh/+vz8a18KVXd06fWDlYf4nj90eetoUTVFX4wxttqhVoYqTw8wBeNzi76n7tv+qmPuhdUB/5sO+fTD2z+6ZxjPZiuhLYCYdABqmO+K7JSd9k3Pq6CnghIIdcuD7e8668Xo0U++rlujUlT3Q31D+uskY0+Z07ssbnCTi5/Lp7d/7aVF2/7T402GBurNGfKaVHayrEmVo+biPbVsEM4qv7VLXC9ItXqpHkmT8CKkIv6S1ybqFZRauEvWdLCNP+oK3paV6gVVj9Yq0OAezc/SLz8XN/Q11o+UUAwZ6lklJdKdXVHVL/yNq4S2WoSpP2rL+ZfZ82otjyPqcer6vNGs1KIKfiefWfhhV3vMM3Y67SZq91dU3twi+InwaI0v0KBjVo/k0FMqJIE05vIFKFwa73OToPpQNd0zz9dhwiV3ZtAJMWT1H8KzyEHxrWKsJTYhLoiAE3nEowtDTPNkiJDgErNi+NwYUBAM4SAo4nTctCc4xV6yrZqGgj5DrB740Kz1RXLx+ZRPkoMRvEu5H5Gui4//2tNqNXw8OCjtpu44lSdMPTIFTkyQwBWsgRTSfcJy2nfI8RDhL4tH/+fGG80yMNegHuzwbCrbSA/l5WiJpKPRaS7s60Gn3DnzCBxUazwsG8emolcnTs6JFGCBjgrdZpSl3B7esGM+NnAgpjXQyN7rZ4WFTdUfj19irtJMPgtNf062/MBjz7IHpbtZNlzfdUO92FN+c6qnZ5ORs7c5YlB5ygYITSQxzJJjVBI7/A7nJWX9EKybrZOH6vdvwZIOJRHEuVoRKNfzmzOxgta459IOwMv+Pu3/aUncxgVJU+ALeBh7nh8d9xpjF8BRMVojPt6Zu7hExzvwGeBRrQKAhT9sahXV9kYnz1uPzb95P1Z08J5h9rEl+ZkFG1SY9KjfvXsYneT50Rvj3ybO77Oufs4HS00m3VWKsVHY8SaBmAgyELjEok7wZHrmeB+OySmcP36zaE1YkuW8F8DgzJ6EMvwvcqEbmDoaAFPrIcP0A8JtfdpjrFiReXosyQ9nwU79NT4k+7PJJ2Tf6VdurSn3ixRGvTZtexpkPm7NV5XaGXqcTYxXxW77EjZqMjU4hE2jCJc6R8FpWzzyeQOgDeQioX938W1ld50LvV/nbzVRGubVo2qHMV7abyFnZoXDqZnVBob/OF//ls+WjhVZnjvm/AytAKLuqfF9CGUFhkIzz1pd+C/tKJtE7UWMVX9DaPFv3c/tR3t9JIaI8T7DqCVXH0du0IWN1B728761lzylmDZX9e7HFfsug6cf7uD+wBAeCYlpj7UiFBHpsTYIJ2NPUW9IC2X9clsYvZ4DvaajsWeHBziGthFWaA2etJ9rv5356Gg/5uQYtccWmUfaJ2XrKiQCt2FSAETndXpQPjBF22Ogz2HEJ+cntjvE/wqhLd/wSLyvVhdTMrl8S1EnH+0Z+j6pXHc0dSG+7J6L6+DVzEU86zdGJnT9Gw4Zegirs2j99x6X7QjKymwMlOTKpcGsIp7XdZo5g0ODqh602nmlV3yG4Mc8vhl5G8exF4zIFgxb83f2oIqeyLl90fIzPQaRD0fLK1gTadtUOGy4L++TQFTMQGlhzj0ytE/A+l52OQ7H5+6Y1yBB0NyoMNiBopVja3Wq0dUvT+XhOU02HP7ny8GBmz6rtD2Ck8i17EbP7Nfn6pyJmrl0rtMOITMGAUUftRv3brPJ3RYQ5A773gV8xMQhn3r05l4Oej0PzgrAi8GXfe2Oxj9IuD805AtbVBE07q54qstPMn8F2zxB+MwKUmo1JgZZ3Cs/9Eb1YyDcX2zUK4Qu7ECVR2UINiWiFwb0vD/TKjlxN0L3D7ZqKuMFb5jCA6xh4VlNIL1CApN1OtU/ZQCBjn4qYeLUGnDcDhXrpiLzqu7lqVTOuHcFcBlildTBPD8x5OQD7Luq1EgioYv1alLgEt04Aob5G5yaNK8yDgMAVLcdGodHwGqr3TLNJ1vVvPaIW77gwrvIiv3LVHGYlrG5dCJL1sPJSinrJ6UrsMJk3OkOeb0yHFwEFwf/3w5ceOsRqKFBohGQSCqWfaRLhy+1Wzw2xTeMCpoFTY2LfGiKQyFthLUna9skRRNkaN9MYTK3dNbzziOKy6ow5bZvNKTuO0sinW4e+/tGqMDnFkYuHT+O63mndCVpt1A9drkcZ7UeN4YYbb3uhavTK7l8a7t+ZWx5NsgG4WRSqIwdwUs45ajxY9+wg5t3PzvapeWP8Za2jeyPHZ64l31BhCnxxzoB7rn7DwW4j+/e/TQ0CgNhh29W71wgeG10J2ygiaemitJ3de7LDtl7oKlmPPRtEi3cSZUpW6CwXCavaD0kg96fxvWCinwc4KrqO+NczKazw5BUgTvLCFPEJYdBDtk0J95fDnmObAMp8RWkxc+pqdSleXMov6K/07832PGLxrF9oj0psmBfDS7+RsDrIa8ODwWd1pNe3DqUHX8Y1qx1fvHNGwKRwHZtlAr0S5ry51gQ5tgq43n36etCLrwnfjKQ+74HbFc/A9Et3goozPg9Jglj9kBse106FVQ4z+CYEOnbuaijV7BtmyOhpsbaNC520Cjtt8+x8mJTKi7LIaom3ePk1hm14mAdZ8hgG3Cdqy5JlNme2tbc1dfqpk3TiloJtrARSNK9Ou/a085zQPtux+5P0itr0NiDpaxiEI1aXoWlf3d82m5IMDr/SSWRvp8C1sv0+PiU4PX+hssDWJDF6ax0mEtzq6vwNhR76AYk1xWr3f5jvG2LnniLeMY9hvouy45WQpvWZ/Q/jD8WaEeKQrKO3Zu/T877f7PqyQAboqe523vdkD+i7Ko3p3b53dmr2tzKksmYV3YUWlDW0MNSO1aghq4yDzodPs39s5KZmpRh6EHfNbVKzuivx6pQB3B+PqutY9AMp+rAwpkp5uPo4CEn7cFl5tThAyIOpKW9aRB/2y0obiAi9Yy87dBng7x1+QscYokatlDEeEizVX2PVsgn6thV92lxtr0k34275MkhayQ1Yr59k66/Ld5s/vGbYFhuUKjHLLx87bidZVkatwA0qumyM/AYCTC7+G3u1uR1vZsQ7A5aWzL68WQDmridA4OVv/r2+Mj2k/ebLT70Pd3s6vVfP911z6ioxmyTysZ7zQcn7sKoD3ocl7ts52Ov5rt6sJLlEl7Sm38azMllsTBU5oPhB6w+xJ9+c/3j6AP4tjgK+Q6RW+tvlP/wXPz/hmo+5+Row6oAAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - 
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAjE0lEQVR4nAXBx65lWYIQ0O3N8efa58JkpKnMMtBINEiICeILkPgFxnwMMz6IFgy6q9pUp42IFxHPXX/s9nuzFvxf/+N/H8mv2R/+dH7QBphlA+zphPL8t5//7t9V23szLDavBG3FYvn5n/7FQzTfgteDspHkP0D7K8oZf6jJeZwIsa+ems4/6Yx1vz1sfvc9ou2yQbnoz39pPhfPy++XWToPPy2/+re1O44Pv37c0fY/fN/EpxCXyHdqWtxdF1eeDj8+/fXm7d9sWlU98d3xgDfHX36E3//xv4B8IE9jd/4g7rZQ5y1v3eJH8qH9rXizrtgT0Zm7nuZzopIod2rzdVffLqdOQPnJ9+gnZbypedFf4O7TTP90W12uweI0GrTRBJ3H5Eu3NuYZ0xHrZdLzLAqhfJHO1efiMaOo2bccEtuC/uNlL+J6wbBbftyPmzpFX2CWoeC68y+Jg+YC3f0T6SuNJlOtV8mJNy8v45+XHwq1KXFTj1qyqHfIrT5u0a17K8zNNCDWzCeXMZl5fUHIrbP8qzyaqgotX7vmCCexuIl3czUc7JuUY5JFuyBfNcX1mTo76p42c2cBhUYjQW45FSyqAfVdGbiDyaesZsC7CwCo8HQ+rBd/giUrhwzI716vrtC6LkzvEAadt3C62S66qp0ww3F+mZdK3TD0yN89dGgXmF9VDqQhPe93zWf05x/qxpq0t2d/XsVHY7ToNmV29TmWus9Cd368yPv8L+s06HkfjuWQSEXvNsUrJ/HIf9HUZllKt9m2ykS90gZ76lvKvLkoECsl9p+mjvia8QHcn8uXosEwtbJ91WVo0XblHRWD/vxwwQl0HqNEXOgWm98nw8Vo9PESnU3WLRVc7JcT+AP++DHUpdU4nnxsJ4JrlJtD9qJeKplIfppQ7PvhZphfXKvNnhjW1vT8ofjcHhDEdiI08jw5aDrdP+J4Bbciltk8lqwFhYlHZa2BxHfIBh+ly+rGIvhQwhkULe4LZF/fmtySieLk5t6TJa9WoIm2oMDPEa/meIppGLKOVh76c988QW9rEI398RyTGi98c4M+D33sDmUbHNQzRvDL57+nv3DVE77FDluQSk8F2uVrpSm0bmxhSITmTqqT57YIfu9PuKQFCdPj+XMmTYh/vG5vKC8u6WLT0Ip200oPIqb73XuyOW58yGHo8oI3XLzMv87HC3LJBI1MPE/HGF3JdZ5QQUycZcb/0Bqw+/I0gOq6+i5bMY2srO5CGaE2ZaoM41icZX5O2uM1Q4uvvmvpAhprp3vrFMoS9XCoBjNj0D1FiPfCIJ7ieS6vAbNnuPwdkVPf74u9SOD6YZ7j7A3CUo9Iwaxt6R1193S7ovELWf1uMb+aPcZLhKdEp996Yv9QmZthChZSKqL0UQcUOD2b3MtJ5ClG0425ph4oV4Ol6lGWGMCCBEqHNLeRgmfwk4AW3RZGRICIVckjFDM/YR9Dit63Lu4anxankNi+93POxYoQi0Wzx5A9W8JrtJDyWmyyMqrxIN7DwY7WnPwcjEcnkN2+7oZqj9XDTw+dosbuP/ZxUFOx/iO/QpfqCU1HT5J16OKjxyT6MrIuHJEIxYj7vCRV3rTS7dRZJKAkZZ6nZEl9Gj+9nLUAb/Ps2Pz9Uf/0D39BNQuxEsbPKvTeGxgATUvGKFTP2Uh+E3hRo6UlcZmq53epN6M9FYBabw0IA+kxjkVWVAkYTzFMAaa4EdTdm2nsJT89TSnM1xbNdgBwb74+o2xPjz5sy7IJmT1zHHiwjOYQZk4q46dIUxYDjfnZqaEfbOlqL8Q6GegGcyNc6THMPmbmzbl4dq+XW/TzJ+XMJASAimGaCcGS7w3HE4A+x7dOXvaLUA54b7tX3RkfcEIzM7TEk97ZZK0LyPq9GboPn9ws292dPG3PrI9kTmicMoc16iM6dKN6DnDv0oXz7QKCyj2HCUSvVHDJnD4QmJ+n9xmzCJKXKaZigCH1MEZEFEv3n409NGscPI1l+5V8fX34uXNZ/vM/vyc4hpxIEzXPERIiJH/WFKToEuI+tVnzGzHkiTq0EtOjxPU48Xaqmv77Wf5GDJEs84fHn9iMkOTp0sXu9jQgxznM2OkI2TgPNAZEZBkJOKnnJccqZDIgqHOKE1oyOOhYi9B3K/jGJN7wumyEP08fMH4duJ+Sm7tNHn3VXuy+L9Kyuz9Vw/W2Hd0Xnm5QG14Ol10YlELwoKd+d4DcY7Lvp0EK1oleRvVS/waujUDXNqYK6vzq0eK/Pxx9nzQ0p3l4Uu93kuUWG9amf27sr/jwPF/SAlrmfIyePifTz5bzlHaDMSAwgfulM/QyP3zpwuOghpQOkQJIWsrFaZwvH05vOCJmxBixDIKUsth5LRfQQrdTs+JArvCb23JCk6yZAVrFyYXWZaC4cP1yeuiEpCD5EZFw3yezpqk/0t2GE9xgD2/PiPPyxAE6fno+1Bmmei1GXBdpMa+ffTvmt1nNQXmKgZuJoFcVhAXTx+MjYoeaJEPg0hYF/1rKpR9TfTHqMrUo8MZbrcW6QN+2XJIzF27Wbkg2STMpcEu8kpV/Q2zliDNJj4VEjL2TmBKe94Ao1btxNdvV+u6t5zhluTR6ypyJUV3K7ATHyJw+1Gi4/lIle6O7B8tp/8scqfl4GmqswdngMgzI43rduKmdkrxKc6p9xsMA9Xk6dKnLgkzZzNTVHKb5M7XVLLmQSwWUmS9DgEm5doCTBgJHKiAqicfTSKjbh2XB9/VWCG8nxDCiokbB7efCjXHa2iljXFlwvGAAQCtLa/vZQZ4LPXq5zGeqsJ+X2+/2Pb4vLkTvXqb54fE5q6y70KXMVJ1hi7vBbd/QiX/+BHFkC++uivPsR5zc3BfZa1f7E3FGyeD3yxnABSliNCSw7Livkc04tmqc5gX0s+VQYpdGo7NUwgg8g3szUXdENECkUdnkFKNn/z7tdX0rspaeTagZKCtPMr2fDrGDxEZk0nYhE/4cURqEn88q+AlG8zD0+ikvWaUvKM58Fzv7YTCUsoxO5zg/g40XzjhanummzSHrueGX9NP5qb4e/KTiZPWMF2vJOETQoWCuVpnNcffQY36xKXjMbZhOzi5EHkjBV3ArE1kxbmYHUsxX7kW2zxa9mm6eJpc/VzSVa4mH21yf+8sRx3NhuY/JsaIc0JEcfVCGa4ehnhyLDl+JIlRfQm4hSFUx6Q5BKYEZxQypU1VpVZFROVEVBA+6nnL0OiskJ3L886Bqc/CWGxugIHMRKPBIgRUGh4ei8iaV4cIVmLr+LBe1pyPqB+4qa1A1EsQLHTr+MJDv45frJdK++7ZYgKtutP5kju63X+Q6i0SUwvAEecHC1IMzUugrz+ur5tZ7bcLTlYgYoNqnMFAbuIGEJVRDHl3+5DyaqNfN2J1v6VLKVSTPtaaBY5TFMkzPx741GInmaOdnXZTrGnfo4uXp+UsM2PGWTTBrQQYBIUVQQNoWNdKbgyP9ZdjFuqX0qrkB8/1pyx/QUZA99
37wlUhluBayUpTkWvGmoBRjs5Jv85BTjvoWgQnMIUmr6/uj73o7khuGIwwDjyxUabZjqmnSrIco6r4UgcPb7HqB+/VVvamGrv9XNcYCrDOQnVvAdefq8PRs/nUkauguC9WiTUGRC8x2l12YyPK2KB20stdmimLVoEXMbvs4Ya12OpIGTv4OrZwL/fOT9VOvd4tO9gWcp16bLBHKcy0eLMAIomVcxtZOo43EkF131k+jf+lHA2EUTGCGI7A2wjhqWuIcUkwIE8tYP1+cecpPz77/V3V0ueuIo5f49BE8j0eec4EyfaGme/qP379z19ztEg12nju9F4Yn+9swp9FOyNpnGPsx6kXw6xFO+FIwjy3IuEFh57L6uzQRpW08QLPmdNKHJgHnj+pCn4rzR08dU4VyNsMOdiOMOC+gOHWsmp0yNLCggg5DcAZigZBJtCbIYdDA8ejPP12Wz2M3nrOEoxiJPlo063EEp6eM4y6N4Lx7/fUbQrIMoJklA00IV9/e5PWn87ENO0LgOJXiao0hIfQgBp330Jlusj4kqAAKN623GhpPkEA8VDPjG7E8R8dnHCEuxhITg4LZszAWZ9fBInkXK1MsUHTYa3BgrGakUcEh6JhBRiR19D1N8dxTUbGr1+jffC+AyyeGdGMinFiWg9K08Ln/ET1d6uUfLybXl0tEZNM2JSLXNbuo0f/+e34//LHXc3yUuFaiTtWpMiWwYpW/rmWd0boHFDkSypTF9gxm5aMQC3l8SsOUJrcAVuUJDmi8zIcZzTE9E5LnkdBSfRH9kfm9T1kqodPU2U3D85QCDxPuqnpNoQ0TcURXH6I+7Rq90q8w4b4ym7LJdoM6/QS9emfKIsuMGC5TiQJhpchAWA7ufBo64D/Rb/79uIHCfYOvXfQKna5jDUQBIQznZKbnX6QQSIcT1sPr53J8U9Vb8un4otPOFQGMcUzk3oc4z1zniBrvQMSkU8o6E3tdaf5VNt7ijWQY8jgvhNzU5jKYgxjUEZDU4XMMyb8zS/ONeuXkcGGewXV//Yy9firIx2i/eACmRPmWoSkKWts4kywiBGMmpJIakn7g7g3CxzRaqIDm+8pE4i26URPL2uOvf0U3x+wxTB9r1zo4Hm1CPhpUPIVYjloNRgXgKMLKDzAgTGdTAoIoX2YMVUYiYUU/AaNtIh9F70crl6hI2Vwf2Hmxx81VufQ+5Z+mLsC4hivG6niIL4RXfXY7T5iGgZ1AbAuGfIBHrbEN3fOHxyV6dbaZPt5v4IwOftQCCwwZQHeJqN64ycgy0c3i6i387//tf9I6VDDPZZiNs1DPkK8C3ntU6ZSCgRVKIEGIvNcKQunjumQ40qNXXaIkTN5C4IUgNkHb9/SHbNXzJ1BVkHE38wgeyZiRdcFdCQKEKRn7cYghhqy8Eu2iXKQFQbuQX2NROgDHfUTTS4K0WmRF3ej+yVYNgBACR8CW465Xc10uE5zQnH6iFSpekeTohGNn+sQi0nCBCOBwhYK+67OyXK5GJhNGMcuyVkISsomQS/SJ2Dyoa1lgTRe5NATMDucV0CtB1q+sAQtYFwu2YC3iy5BlvuV4Qz6M7ymqDGgMQPPzbFU+vZyHcC3yBVMaJ8hqCyIT5B1cXl08fzxLfuKqm8gxx0pT56gohacpWQPQH4eIWpxkNgl7YpCFCEGGQHAGEESj2AuwJB+HTDpoGUkI2keZrSpgy5uvbxjVhVbShOWVDeGS47gpSiTcAIIN3roAcigpf9dcw+sehSTBI3g55eWiS322w9uWoixL08nJuc65ZyUAIxhndzwrvHOvdEU/10tcNqGHD8u8mBmPVduzprwEBwA6+GG2o6Ao55VkCxhyUhBvvfDAJ18LoBJXEuF5fO2yxdV3JNhZp/ZdMetHauKXj7u6b9ym6cuKlaXDJclJjJSTltjLAC54JFZLZ5TKuDN1IZug4kWCAjKI8lipwaslQhxifpU5qdR0Mr50wEcncbHK5jHP8PLnT4N+gVn+EG2dXVA6v8mgzefc08HnGXRL1CPjfGSBLngcMrlO3MCiWcV5iuqIfMIUiZt8GHaMyxVQX+aXsBf8d12V5UPNsKJH9dL7jBUQBx/T0IAsJI0+m8XvzLn4x849Hncjyb3v9p4ueYCOdRH5VIICNDMnebVAl9BfPOr25iWNgeWN5oHlvj30+f9Fu9zeiIktfB6SHm2Ygf3ENrPuzPGvIOcn1CP8oSI25cDGTFeze3P1Nn1VrlYUr16BICmvtAmB8IApareb9TebP14XL3XwDsYECKGRi8fTy68nhKoRghSAgcV0mR/2PP51hdx6B3AxMeJ6LiiH1KmAlcn5Fpo5BN0D/hSxU0fvi9XvJm3u2/r7rvRKdLg7goTp63wHzxF88Qpey/40d//Om5fTY/HmrpQ0OYHqv/XCiMA4Mm7Em/dpEHYPDzITScJhsiqhGOFIBpUW/GuMxLT7jBglGmGKU8ojdpXY3lIPKsHQizvB7khlnpUNgLiuFmVBAZZZ+8p0YPaj54Y2HFZDCHkGpR1g+5xHY8OL+ucX8YO7Gfr3WW+1m1EISG1Gcy9AHZt3omCXfo9je06gDyXJuD9xQjxyDyXZQO8dwSJHseB1PF8/l0OaTyiXvMnx6CihnqPTWQ/gaETeKkdgkxqheKRN7WHjDHbmnHPIXbIExkUBdidUU1+MuR9lhQPHhiNU55JsCrIB90rcUYp4XkW87c002G16U7gfxZUYCVrVUoGHyWz7sVVvViJBlRLgSb4Jwb+YTmQeHNBc9xAs0bzQeioAY57ZidNJ7Vdzh+ECzDTbitKLxBHygEFentw4O+BwKiCmEJ9J3TYaxowOqfAB6MmSAEIEkjmP5IRwkGPW8RCAhnoqYuQTIzjYOPrrms9YAQI9H8wQQI639uFM5+rDS9pVHbESZzmf7zhllylBmUYQcLCQwngtYMBsEgdks5fwAT30ofhGlEXaNMWdzGWiB4rSydOlaDAoSJkyAKEDjJdLYQN3wEYMYCI1c8lVbPBmsDJr0UxZWMQ8lUGupGAM1YRo6yXu6cEkhqMdAEHRjiQADJQpaBJK2RfW/gAaebq3OEzfjtbNcyomGxAWZVWtBK5ETkKvz7Cj0PmQ7kkGxCiFwFy3p2/Qq9cs608RLfhyebmMoQiA0nIFwOz6S7IxRiwYpJIEk5QeLY8EIs54HImP85k2SC7ojayWN3VeqoYWcoEVGnCWNctCDBMJjalZmmx36JHyas6FRCLCXE3QuTFp0J39kKghtA/TSiyv30IiEKfm8GVAugeI2eShYjMkU7wQfL0XV0PQaXEawv38jDJm1bRNfo6XzFMLHcqBp4SRBEFGH6RamtrxjEJGAhUVsucDFQ/KIpbnHmroEcnz/XgBqB3NDlPZyDTPLwzlbMDniq8nSkyTlxnmJImuEIFBmJJVIBBOCsRogXmwVMR8uZx/TmbypvOVGZYn6g87ThcBUBcorZYHyrfBe3ax9VZGxLW9ziGa4eWrTJNq2alPFeJ5tiVFgfK8WDkk5WohAA1xhkkYGQUTS1DTAt2lkgLlYMSi5ITnAICU
XpDdMErm0VyvviWz7STMGDgPQeGjP5jEeQqk9Gg0LAYCEknaQje/vzwPydpx+rX7OMCqeAcoDHZVzH+7qvItMS87N2cCTzFeLYH3KiUABMYqTAfQUYvML+mv5nz8+ccq36JVjSuCk0kE4u3WP87p6GNusUyM18fBgSrsQuFYT6aU5Ossj84R2vmswnyIIOrL80fZ1IjWoKIh6N15nHiIl/m3yyEaB3ChlxjgRGVRSdQlN0ZOnvf6c6JNllO3LV/YwBzAaMoup6xm0FpkYeZg8a3J+8EBlAsKYwpPF7z6vta0J++up/c7HiOZcqd6c+O1stbhOZ0RvislnqCtUB2jHWRFDk9+U0Z3YCxNhlJgIZB5pB2Ly5T3dDjwq699RAQD67GPECC7SPKkLQ/30b5KYo/CpHs0J485lDmjZM0XX5+yk/6oaHuGpMKTi4lFlJZYf/C332xj3E9VkV3cPkHnkydEegs3hXLPfANq9H7O53RZgZpewHoGI4JhUQmYJkarRz9P3LjZDp3IvK58g6x0C4TyY8zJ3ts8B96Fz9lp0mcA0OZmZQsY7flyRhAxoeg0XKKaCaTu95eGm0F0+abgf8jFFoKMoevj55+n4ZfIE1yhenwzta//OcacguAUmOkdnbppKrMY025CmECfkQqLqhSSb4qyAbSWaH6GdcxN7tORCp6JPEkXZuQzF906Zb4blZ8KfznzVspMMIjR4M+oJHSbPI98BO61q+tQsrryoMuPlNDFZouZsR62sjUhUioyPOdUtNu8MRhWJ0zYffnV6dmV7WZSeabqzO7Ppb/Me6YYCSYvGizhJCHWbA7+44FwNJNQA0IQTAjqPmEFQGMZqmLB8jzbLJoNnd5toCXBEC4LhhJMbN00jYzjvyAs5M0oA2UhO0aC26h9oPkxduwT0j28oJypSuARs4xXDic4yzAiwVmxEhQ5tw0V1MMYgXCzcScxpZcPZXk+pxe8hKauUv11xuwGlk1iL6wQRFFWkAiXxc4T8fU286HngHuqu2QkLsu4IFlhR4HY18H7kTDbyX76+GK174NKCaGyyXip0JbIZXNDolePfA+H08sU5CyMly0k8GmkQ4XImK8aMegjn8r5YALkKGptPA8zBRqwgD1Hb2vZRqOtKQTS2REVt9hSMg1kDZcL00/q6AWOpWPYvCrRybIEdxtb6PPC7wbVg1HmYzppKwsPNI4udnDu7RVEt3JjQQrHhxuFqmVj+IREyUVeEIEZGPx5FGG1zBtWpoRi5a7IljVjFsdfx2n4UwYyn6E1xWCCSwxIQFmUxroBN6uCZVngaT+zaun8pfOzL7CIaLQlyBJ5gcCKm+uGiUu+ppaLtvHYAla5y3wSbgy+OYuuL8X6Ftrp4d1uCXzBrSEVQqjJ1W9OsloQdNidTMCbxX8dXi+v7EzZ4rZpNvVr23omYVngSveibAONPISkBJB27NBHPXB3gh9gkZMMsSnExRKNowNe4qRvmpZgCCnVwWZ1yM+VLvqwsNzBAQUQTHIqFSnLQWmz3lX2+Vy5edrXmUKTx1EUEncgQDAJXtldL9u7775gHIJjLDKLU+juZVuK0s4UIdnfrZXb7Ksv3UmQ63orY9zvD0RBWWCBr0q4ouXircpMjyDkWs/HNF7TlcHV7J8iLjhIIiRNRZGLIpc5Yr3KCKBg2jFZe5ORr2ZjkXPSYrdISGQB4DITC4EEzkle3BatuC3/GF3+6H/eTNlSZYb9QISMELvZCCp2xT3lfSlC57EC6mG3v2UVuGQ1Zui124bwFTyKc9a4Mfa+H7okt68ZW9gXKJTbIQy+7Du2+PHy4JVqEr85+cEf5dH/UL8GkCuPYVvAnFIscVWolNT5Eryy9aaUnqWF+jJDvJAzbRY292fLs7IE0AJWs0xIGkdD5EC64sq9Kr/RSzJwF/1LnGaCRuy5L4biU48ZemZhFQ60Z9U+8JTste6iQYdyZEj7qAjwLQrn444sjub053Cv5q7v5LBZ4LEYPzf/59vVNr9a+TaUG26X/Lo6Pwm+DahPgANs+n6HIvMyQEaLFMAsdNmdIvg9xjVrQcrYBturMxCIRYpYvuFZXWJclKs6cKShihpFLiavOClyVjsKBqcda5njQb71Q6yGAx2K/dDX3/H94y/rfTgXjJw6SDdyzfMUwA6FO/Kfkxxae3iU90Rc504kQPEwmukqy848QB1JbwoxpKndvlmH8V++FLfXbjL7Y2zbTOkdQ5xWy2QddzrdLpqOBUsdQ5imQVgeLQUZlgIEcj5KDbqbAKu6jdHQmPMCzKNAIHlwwIGgHBO/r1357M9tvvinB/wIdzVeNGnjbsnE5+2ZokVbF4H1vZ+n56arsQxeT+cBVPL7Zd7aQ3C4AFLcJdzZCiNIvBMlIQRkVXkY/V7cBUEiI+3XhRiwr/NEoQEcYCYrXlGq7WgyEVDqx2OpQFHdRBtIvoY4guy4hFHBERtkgcRxwudQcLtlWZcqfrkdSWZMcUaPqew+vb9/25ZVVtev1qOe3dnpYf3rckIjYFGy3BHOrlxmBvfZ2SDLPZgvoxkX74g0c5PnpgW5BM49MIDypSiIOD1OZHKLsovaSYeTOuEVo53SCQiCFsiQhC5956kHIZQYV/W6oEtfBBiMJ7Pt9yzM/JoX2x+QHOOsOsCKlfFAjN4JHfXXUzFhx3oW3tHTm+r332SNXqPEMeryoXpdGjG93SNEujO2+HitKiisjX2AMBYJXEFYZRHO02xCeD9PwZAMYnQpXeAJBwOWlPl51N2cEBTH7su8n+LloEPwfkZz9IjOxqUnECMNmARRYIRN4czg0GKT2cSWtxFibEi633HTVLRtF5vTpQKKjjpLwoZ/vdDJMxQU71EzFKFMY01LSbVp5sX9ZcoHxFefEGogQzMAcJ91ki/WUYiVPhB/9y38HBw/v9owm6OceCwAcyXHGfQdKOnEE3H8wGUak3o5WStxStI4GEISAj/xXCuZxD0MJ5mySOArfdFKcRSX0AJoc5Nw+opjUNwYX0baIKQvCLd1HV/ijK24LNMF5CIxDAOhAmd7VGmQFzb6menvQIux8uYK9R0dAsbGlLy+XYyT3+txzkrwfvehQn68O33ycUOVzWZbZnpGUmKWCwqAEqldfzPMYNYf4BXEuKZSCxIEzXWcwW/ztgZ8dUPzesAken1quVMpTht1YS4TnqGiTZ6UsypDwCAEG3CqeqV8o3XMSZ4IJnNkCkVnbQxalqx089lkMGAekIWcJMHQpC/j05RbxM7peUZcygLUOEket4xfhxcAx+6zQTXJ1DjyRZcmCkY8taJprsW09K9f3eR3f7r629eB9ZeJr2pZQsvw9vXw2GtZNADz25j8lNqJBY42pZ4EAYYAhhkjxAVE2zkhgULDBI3JsIRqrPcna38F75+0XKUkCSQTGDWw1fpVK5gcZZ941CHOAH2/eVuWxIDlzFWaI22XQZyEYQ7MrJrzqot515K1wlNVgjry99On2LjaFoR7mBXfZscFJxM3ZzTN4buML2hAagR2wIs1l8ALZ07Mtqg5jbgcLjCLrEQQMWzCOEwyr6d
CvaoDAlUoHPQAT5CGhNndd+1X/JsrevQgmqhCTX0fbJr7D7qmORg+7yzG8pqjz5Tgq7qkevwY/Bqxbg99bWEf3ezV4dNoOF7DitN5Mc3JmvYm/xoYGTnlqLXB4/CDkRs47QOB7679mhVZeV2wQMiSAxojirohYqnzHpLP4O6tPSHuOQyTSEOOmjG5OoFhziftJa4glgVJmPpG6kNghVCp0SiDXlivGItuZ2388MlZ278REYjut2fEWRABazXTq479ZCNZKaAKxYZE3Ud7xVdimv1phtlABWGb7EIN7/YRmZBMsUFQTypQnDns81DQWbg0UnEjZEEI5Xido7wv3KXbTaa5wwOcc7OtixBFkG/fh78UJk4TR9yJEs5gIsE+YUpRPKkYIIlIirFAKmGgIvERFHUi1ZZq2W30qsWGjgmR+5OHVs1Eyr+BjeuHmfoGN7CVNf+6HkxtWcP9xwnDxHUEcx6JykTwAZIqwxxEp7HqD/PqjiFSaTvDtXIXBCuk0mCPE8zk4EebWWgmAc4Y8H56nB776XH8wf2naHUc3ts+Y6vp8PzhUsArTEaQqqrJy3j2QIFDiikEjJOJPE7FeTouFEm3DiF87JFEGvDDR2VeF5ScIwx5JRN5GbVXevAOiOrFQ7QHMv3V8IQHO/P5WIPXCOYa7r8cnqxYLpp3lLeypzCfSOLWjolzZnXwkaFojKka+q5444XE5RYQHEllNol/gxU8weRIsVGQzGMzoVW5U5CVHAtAd9MRFHqOoYYUQ1akgs7vu9avli0VO7n3phsreZ0jQw4Vd4uJvDx8ssFi4wG5cepJQkE5pY8O42XCE1wkODfl5AnHn8rPcXjKLvM8zd356fEf/m7a9T8HCS9FSh+ZEZLC5DE45xkQWSRhSkq6LAUzTKxA2MVvCgki9XB6QTpnZE3G5ktaF4tPRfAPzx7CdDGCodA0a7BWrCL+4l5s+F36cIbtncvhcsAnUPYuOoRnZxGXVST51zdFAycIdSD5W8sjtNK/2WJ+qgIXd7zj/tJwp1Ruu1132o2hMZs6Mv5quW6EVKeIdna8BkWGIw+YBvo0e0tNpTzEg6YZ5y1wx84KMEUQ5lOEsaKrYALE6bjwGcRVyMftYu3Z/L5zJXTP5kv5ZTmdGF3xgr6Z8ht+7M6nU9mVMYXp2kiJqEHTLU//CMpv6vMkfuzRfgWdGpfs5sJg0IqgxLJ13721n/Dtp7MTa4FvtquvGwnRGyQ3zQLbw+ZqVbf97Mma2BNyMLEVzuO7l6k7yYxV0uHN5DlhMj2PeVvSkAipN9vt4yP+hT9hQzrCppQ4KL348C+xX779AQZWMfT2w9UDxCY6OYs/N/4iKCddzR5eJDwS/un//cP/B+y41x4pgCFdAAAAAElFTkSuQmCC", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "display(Image.fromarray(images_processed[0]))\n", "display(Image.fromarray(images_processed[1]))" @@ -331,7 +215,7 @@ "orig_nbformat": 4, "vscode": { "interpreter": { - "hash": "ec31fe64df66491ba3476a226e6c778cf4c96edadc68db7b2b237ac062a20c97" + "hash": "77f6871a522595648ebba7232d315a2f946cc4cd5f56470cb61e517ec9b94e2e" } } }, diff --git a/src/diffusers/utils/dummy_pt_objects.py b/src/diffusers/utils/dummy_pt_objects.py index 93834c76550b..2b9b5b1af805 100644 --- a/src/diffusers/utils/dummy_pt_objects.py +++ b/src/diffusers/utils/dummy_pt_objects.py @@ -182,6 +182,21 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) +class DistillationPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + class KarrasVePipeline(metaclass=DummyObject): _backends = ["torch"] From 1ab27a42df16526013a7ca659870993b99f7602a Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Mon, 31 Oct 2022 11:02:55 -0700 Subject: [PATCH 110/133] Update docs/source/api/models.mdx --- docs/source/api/models.mdx | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/source/api/models.mdx b/docs/source/api/models.mdx index a6d342f575a9..c3f5e65edfbd 100644 --- a/docs/source/api/models.mdx +++ b/docs/source/api/models.mdx @@ -37,12 +37,6 @@ The models are built on the base class ['ModelMixin'] that is a `torch.nn.module ## DecoderOutput [[autodoc]] models.vae.DecoderOutput -## UNet1DModel -[[autodoc]] UNet1DModel - -## UNet1DOutput -[[autodoc]] models.unet_1d.UNet1DOutput - ## VQEncoderOutput [[autodoc]] models.vae.VQEncoderOutput From 9bb7818a81ff91150f7a6d84f7aa6a895baee8a0 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 1 Nov 2022 09:28:41 -0400 Subject: [PATCH 111/133] code cleanup and start writing tests --- 
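Notes: a minimal standalone sketch (illustrative, not taken from this diff) of
how the `get_alpha_sigma` quantities renamed below combine into the
v-prediction target that the new distillation tests exercise. The linear-beta
schedule, tensor shapes, and timesteps are assumptions for the example; the
schedulers in this series actually use `beta_schedule="squaredcos_cap_v2"`.

    import torch

    # toy 1000-step linear-beta schedule; alphas_cumprod[t] is the cumulative
    # product of (1 - beta) up to step t (illustrative values only)
    alphas_cumprod = torch.cumprod(1.0 - torch.linspace(1e-4, 2e-2, 1000), dim=0)

    def get_alpha_sigma(sample, timesteps):
        # broadcast the per-timestep scalars to the sample shape, analogous to
        # what expand_to_shape does in the diff below
        alpha = alphas_cumprod[timesteps].sqrt().view(-1, 1, 1, 1)
        sigma = (1.0 - alphas_cumprod[timesteps]).sqrt().view(-1, 1, 1, 1)
        return alpha, sigma

    x = torch.randn(2, 3, 8, 8)        # clean images
    eps = torch.randn_like(x)          # Gaussian noise
    t = torch.tensor([10, 500])        # one timestep per sample
    alpha_t, sigma_t = get_alpha_sigma(x, t)
    z_t = alpha_t * x + sigma_t * eps  # noised input; alpha_t**2 + sigma_t**2 == 1
    v = alpha_t * eps - sigma_t * x    # v-prediction target (Appendix D of the
                                       # progressive distillation paper)
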
.../image_diffusion.ipynb | 73 ++++++++-- .../pipeline_progressive_distillation.py | 2 +- src/diffusers/schedulers/scheduling_ddim.py | 14 +- src/diffusers/schedulers/scheduling_ddpm.py | 18 +-- .../test_progressive_distillation.py | 125 ++++++++++++++++++ 5 files changed, 205 insertions(+), 27 deletions(-) create mode 100644 tests/pipelines/progressive_distillation/test_progressive_distillation.py diff --git a/examples/progressive_distillation/image_diffusion.ipynb b/examples/progressive_distillation/image_diffusion.ipynb index ba086cc86701..74289d1835b1 100644 --- a/examples/progressive_distillation/image_diffusion.ipynb +++ b/examples/progressive_distillation/image_diffusion.ipynb @@ -2,9 +2,18 @@ "cells": [ { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n", + "WARNING:torch.distributed.elastic.multiprocessing.redirects:NOTE: Redirects are currently not supported in Windows or MacOs.\n" + ] + } + ], "source": [ "import torch\n", "from PIL import Image\n", @@ -32,16 +41,27 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "torch.manual_seed(0)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -50,7 +70,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -62,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -72,7 +92,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -90,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -100,9 +120,38 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2b23b591496741a299b75e4e9448b29a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Downloading: 0%| | 0.00/455M [00:00 0 else self.one + alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample @@ -255,7 +257,7 @@ def step( # 1. 
compute alphas, betas alpha_prod_t = self.alphas_cumprod[t] - alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one + alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev @@ -323,7 +325,7 @@ def add_noise( def __len__(self): return self.config.num_train_timesteps - def get_alpha_sigma(self, x, t, device): - alpha = E_(self.sqrt_alphas_cumprod, t, x.shape, device) - sigma = E_(self.sqrt_one_minus_alphas_cumprod, t, x.shape, device) + def get_alpha_sigma(self, sample, timesteps, device): + alpha = expand_to_shape(self.sqrt_alphas_cumprod, timesteps, sample.shape, device) + sigma = expand_to_shape(self.sqrt_one_minus_alphas_cumprod, timesteps, sample.shape, device) return alpha, sigma diff --git a/tests/pipelines/progressive_distillation/test_progressive_distillation.py b/tests/pipelines/progressive_distillation/test_progressive_distillation.py new file mode 100644 index 000000000000..a57e154a8bf6 --- /dev/null +++ b/tests/pipelines/progressive_distillation/test_progressive_distillation.py @@ -0,0 +1,125 @@ +import gc +import unittest + +import numpy as np +import torch +from torch.utils.data import Dataset +from diffusers import DistillationPipeline, UNet2DModel, DDPMScheduler +from diffusers.utils import slow, torch_device +from diffusers.utils.testing_utils import require_torch_gpu + + +torch.backends.cuda.matmul.allow_tf32 = False + + +class SingleImageDataset(Dataset): + def __init__(self, image, batch_size): + self.image = image + self.batch_size = batch_size + + def __len__(self): + return self.batch_size + + def __getitem__(self, idx): + return self.image + + +class PipelineFastTests(unittest.TestCase): + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + torch.cuda.empty_cache() + + @property + def dummy_unet(self): + torch.manual_seed(0) + model = UNet2DModel( + sample_size=64, + in_channels=3, + out_channels=3, + layers_per_block=2, + block_out_channels=(128, 128, 256, 256, 512, 512), + down_block_types=( + "DownBlock2D", + "DownBlock2D", + "DownBlock2D", + "DownBlock2D", + "AttnDownBlock2D", + "DownBlock2D", + ), + up_block_types=( + "UpBlock2D", + "AttnUpBlock2D", + "UpBlock2D", + "UpBlock2D", + "UpBlock2D", + "UpBlock2D", + ), + ) + return model + + def test_dance_diffusion(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + pipe = DistillationPipeline() + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + # create a dummy dataset with a random image + generator = torch.Generator(device=device).manual_seed(0) + output = pipe(distiller=self.dummy_unet, N=100, epochs=1) + + audio_slice = audio[0, -3:, -3:] + audio_from_tuple_slice = audio_from_tuple[0, -3:, -3:] + + assert audio.shape == (1, 2, self.dummy_unet.sample_size) + expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000]) + assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(audio_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + +@slow +@require_torch_gpu +class PipelineIntegrationTests(unittest.TestCase): + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + torch.cuda.empty_cache() + + def test_dance_diffusion(self): + device = torch_device + + pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", device_map="auto") + pipe = pipe.to(device) + 
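# the fixed-seed generator below makes the audio-slice assertions reproducible
+        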
pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=device).manual_seed(0) + output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) + audio = output.audios + + audio_slice = audio[0, -3:, -3:] + + assert audio.shape == (1, 2, pipe.unet.sample_size) + expected_slice = np.array([-0.1576, -0.1526, -0.127, -0.2699, -0.2762, -0.2487]) + assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 + + def test_dance_diffusion_fp16(self): + device = torch_device + + pipe = DanceDiffusionPipeline.from_pretrained( + "harmonai/maestro-150k", torch_dtype=torch.float16, device_map="auto" + ) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=device).manual_seed(0) + output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) + audio = output.audios + + audio_slice = audio[0, -3:, -3:] + + assert audio.shape == (1, 2, pipe.unet.sample_size) + expected_slice = np.array([-0.1693, -0.1698, -0.1447, -0.3044, -0.3203, -0.2937]) + assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 From c584c3c1db22b00478976bfc2382ba97473504eb Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 1 Nov 2022 10:06:22 -0400 Subject: [PATCH 112/133] fast test passing --- .../pipeline_progressive_distillation.py | 13 ++- .../test_progressive_distillation.py | 79 +++++-------------- 2 files changed, 31 insertions(+), 61 deletions(-) diff --git a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py index 371911bcdcd9..9cc756f72d1a 100644 --- a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py +++ b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py @@ -36,7 +36,8 @@ def __call__( ema_power=3 / 4, ema_max_decay=0.9999, use_ema=True, - permute_samples=(0, 1, 2), + permute_samples=(0, 1, 2, 3), + generator=None, ): # Initialize our accelerator for training accelerator = Accelerator( @@ -90,6 +91,8 @@ def __call__( ) = accelerator.prepare( teacher, student, optimizer, lr_scheduler, train_data, teacher_scheduler, student_scheduler ) + if generator: + generator = accelerator.prepare(generator) ema_model = EMAModel( student, inv_gamma=ema_inv_gamma, @@ -107,12 +110,16 @@ def __call__( for batch in train_dataloader: with accelerator.accumulate(student): batch = batch.to(accelerator.device) - noise = torch.randn(batch.shape).to(accelerator.device) + noise = torch.randn(batch.shape, generator=generator).to(accelerator.device) bsz = batch.shape[0] # Sample a random timestep for each image timesteps = ( torch.randint( - 0, student_scheduler.config.num_train_timesteps, (bsz,), device=batch.device + 0, + student_scheduler.config.num_train_timesteps, + (bsz,), + device=batch.device, + generator=generator, ).long() * 2 ) diff --git a/tests/pipelines/progressive_distillation/test_progressive_distillation.py b/tests/pipelines/progressive_distillation/test_progressive_distillation.py index a57e154a8bf6..03547a9cd6db 100644 --- a/tests/pipelines/progressive_distillation/test_progressive_distillation.py +++ b/tests/pipelines/progressive_distillation/test_progressive_distillation.py @@ -4,7 +4,7 @@ import numpy as np import torch from torch.utils.data import Dataset -from diffusers import DistillationPipeline, UNet2DModel, DDPMScheduler +from diffusers import DistillationPipeline, UNet2DModel, 
DDPMScheduler, DDPMPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu @@ -59,67 +59,30 @@ def dummy_unet(self): ) return model - def test_dance_diffusion(self): + def test_progressive_distillation(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator pipe = DistillationPipeline() - pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) - - # create a dummy dataset with a random image generator = torch.Generator(device=device).manual_seed(0) - output = pipe(distiller=self.dummy_unet, N=100, epochs=1) - - audio_slice = audio[0, -3:, -3:] - audio_from_tuple_slice = audio_from_tuple[0, -3:, -3:] - - assert audio.shape == (1, 2, self.dummy_unet.sample_size) - expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000]) - assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(audio_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - -@slow -@require_torch_gpu -class PipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_dance_diffusion(self): - device = torch_device - - pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", device_map="auto") - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device=device).manual_seed(0) - output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) - audio = output.audios - - audio_slice = audio[0, -3:, -3:] - - assert audio.shape == (1, 2, pipe.unet.sample_size) - expected_slice = np.array([-0.1576, -0.1526, -0.127, -0.2699, -0.2762, -0.2487]) - assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 - - def test_dance_diffusion_fp16(self): - device = torch_device - - pipe = DanceDiffusionPipeline.from_pretrained( - "harmonai/maestro-150k", torch_dtype=torch.float16, device_map="auto" + # create a dummy dataset with a random image + image = torch.rand(3, 64, 64, device=device, generator=generator) + dataset = SingleImageDataset(image, batch_size=2) + teacher, distilled_ema, distill_accelrator = pipe( + teacher=self.dummy_unet, train_data=dataset, n_teacher_trainsteps=100, epochs=1, generator=generator + ) + new_scheduler = DDPMScheduler(num_train_timesteps=50, beta_schedule="squaredcos_cap_v2") + pipeline = DDPMPipeline( + unet=distill_accelrator.unwrap_model(distilled_ema.averaged_model), + scheduler=new_scheduler, ) - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device=device).manual_seed(0) - output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) - audio = output.audios - - audio_slice = audio[0, -3:, -3:] - assert audio.shape == (1, 2, pipe.unet.sample_size) - expected_slice = np.array([-0.1693, -0.1698, -0.1447, -0.3044, -0.3203, -0.2937]) - assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 + # run pipeline in inference (sample random noise and denoise) + images = pipeline(generator=generator, batch_size=2, output_type="numpy").images + image_slice = images[0, -3:, -3:].flatten()[:10] + print(image_slice) + assert images.shape == (2, 64, 64, 3) + expected_slice = np.array( + [0.11791468, 0.04737437, 0.0, 0.74979293, 0.3200513, 0.43817604, 0.83634996, 0.10667279, 0.0, 0.29753304] + ) + assert np.abs(image_slice - expected_slice).max() < 1e-2 From 
1261c2f4094c4af4059e5e248f8aa7d0661c774f Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 1 Nov 2022 13:12:49 -0400 Subject: [PATCH 113/133] accomodate dict collation --- .../pipeline_progressive_distillation.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py index 9cc756f72d1a..8c084bbcded0 100644 --- a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py +++ b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py @@ -109,6 +109,8 @@ def __call__( progress_bar.set_description(f"Epoch {epoch}") for batch in train_dataloader: with accelerator.accumulate(student): + if isinstance(batch, dict): + batch = batch["images"] batch = batch.to(accelerator.device) noise = torch.randn(batch.shape, generator=generator).to(accelerator.device) bsz = batch.shape[0] From 47d29139c024a9e1ce494e4253f3fc78302c0d69 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 1 Nov 2022 13:23:06 -0400 Subject: [PATCH 114/133] colab script for debugging --- examples/progressive_distillation/colab.py | 67 ++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 examples/progressive_distillation/colab.py diff --git a/examples/progressive_distillation/colab.py b/examples/progressive_distillation/colab.py new file mode 100644 index 000000000000..ad6ef177dcf7 --- /dev/null +++ b/examples/progressive_distillation/colab.py @@ -0,0 +1,67 @@ +from dataclasses import dataclass + + +@dataclass +class TrainingConfig: + image_size = 128 # the generated image resolution + train_batch_size = 16 + eval_batch_size = 16 # how many images to sample during evaluation + num_epochs = 50 + gradient_accumulation_steps = 1 + learning_rate = 1e-4 + lr_warmup_steps = 500 + save_image_epochs = 10 + save_model_epochs = 30 + mixed_precision = "no" # `no` for float32, `fp16` for automatic mixed precision + output_dir = "ddpm-butterflies-128" # the model namy locally and on the HF Hub + + push_to_hub = True # whether to upload the saved model to the HF Hub + hub_private_repo = False + overwrite_output_dir = True # overwrite the old model when re-running the notebook + seed = 0 + + +config = TrainingConfig() + +from datasets import load_dataset + +config.dataset_name = "huggan/smithsonian_butterflies_subset" +dataset = load_dataset(config.dataset_name, split="train") +from torchvision import transforms + +preprocess = transforms.Compose( + [ + transforms.Resize((config.image_size, config.image_size)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ] +) + + +def transform(examples): + images = [preprocess(image.convert("RGB")) for image in examples["image"]] + return {"images": images} + + +dataset.set_transform(transform) +import torch +import os + +from diffusers import UNet2DModel, DistillationPipeline, DDPMPipeline, DDPMScheduler +from accelerate import Accelerator + +teacher = UNet2DModel.from_pretrained("bglick13/ddpm-butterflies-128", subfolder="unet") + +accelerator = Accelerator( + mixed_precision=config.mixed_precision, + gradient_accumulation_steps=config.gradient_accumulation_steps, + log_with="tensorboard", + logging_dir=os.path.join(config.output_dir, "logs"), +) +teacher = accelerator.prepare(teacher) +distiller = DistillationPipeline() +n_teacher_trainsteps = 1000 +new_teacher, distilled_ema, distill_accelrator = 
distiller( + teacher, n_teacher_trainsteps, dataset, epochs=50, batch_size=config.train_batch_size, mixed_precision="no" +) From 97801e3943c7508dbbf10da8965c69ace192cffd Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 2 Nov 2022 18:45:59 -0400 Subject: [PATCH 115/133] some v diffusion support --- examples/progressive_distillation/colab.py | 41 ++- .../train_butterflies.py | 239 ++++++++++++++++++ .../pipeline_progressive_distillation.py | 76 +++++- src/diffusers/schedulers/scheduling_ddpm.py | 23 +- 4 files changed, 354 insertions(+), 25 deletions(-) create mode 100644 examples/progressive_distillation/train_butterflies.py diff --git a/examples/progressive_distillation/colab.py b/examples/progressive_distillation/colab.py index ad6ef177dcf7..92b5d3f39d10 100644 --- a/examples/progressive_distillation/colab.py +++ b/examples/progressive_distillation/colab.py @@ -12,7 +12,7 @@ class TrainingConfig: lr_warmup_steps = 500 save_image_epochs = 10 save_model_epochs = 30 - mixed_precision = "no" # `no` for float32, `fp16` for automatic mixed precision + mixed_precision = "fp16" # `no` for float32, `fp16` for automatic mixed precision output_dir = "ddpm-butterflies-128" # the model namy locally and on the HF Hub push_to_hub = True # whether to upload the saved model to the HF Hub @@ -53,15 +53,38 @@ def transform(examples): teacher = UNet2DModel.from_pretrained("bglick13/ddpm-butterflies-128", subfolder="unet") -accelerator = Accelerator( - mixed_precision=config.mixed_precision, - gradient_accumulation_steps=config.gradient_accumulation_steps, - log_with="tensorboard", - logging_dir=os.path.join(config.output_dir, "logs"), -) -teacher = accelerator.prepare(teacher) +# accelerator = Accelerator( +# mixed_precision=config.mixed_precision, +# gradient_accumulation_steps=config.gradient_accumulation_steps, +# log_with="tensorboard", +# logging_dir=os.path.join(config.output_dir, "logs"), +# ) +# teacher = accelerator.prepare(teacher) distiller = DistillationPipeline() n_teacher_trainsteps = 1000 new_teacher, distilled_ema, distill_accelrator = distiller( - teacher, n_teacher_trainsteps, dataset, epochs=50, batch_size=config.train_batch_size, mixed_precision="no" + teacher, + n_teacher_trainsteps, + dataset, + epochs=100, + batch_size=1, + mixed_precision="fp16", + sample_every=1, + gamma=0.0, + lr=0.3 * 5e-5, +) +new_scheduler = DDPMScheduler(num_train_timesteps=500, beta_schedule="squaredcos_cap_v2") +pipeline = DDPMPipeline( + unet=distill_accelrator.unwrap_model(distilled_ema.averaged_model), + scheduler=new_scheduler, ) + +# run pipeline in inference (sample random noise and denoise) +images = pipeline(batch_size=4, output_type="numpy", generator=torch.manual_seed(0)).images + +# denormalize the images and save to tensorboard +images_processed = (images * 255).round().astype("uint8") +from PIL import Image + +img = Image.fromarray(images_processed[0]) +img.save("denoised.png") diff --git a/examples/progressive_distillation/train_butterflies.py b/examples/progressive_distillation/train_butterflies.py new file mode 100644 index 000000000000..7d17d066732f --- /dev/null +++ b/examples/progressive_distillation/train_butterflies.py @@ -0,0 +1,239 @@ +from dataclasses import dataclass + + +@dataclass +class TrainingConfig: + image_size = 128 # the generated image resolution + train_batch_size = 16 + eval_batch_size = 16 # how many images to sample during evaluation + num_epochs = 50 + gradient_accumulation_steps = 1 + learning_rate = 1e-4 + lr_warmup_steps = 500 + save_image_epochs = 10 + 
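# cadence (in epochs) for writing model checkpoints to output_dir
+    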
save_model_epochs = 30 + mixed_precision = "fp16" # `no` for float32, `fp16` for automatic mixed precision + output_dir = "ddpm-butterflies-128" # the model namy locally and on the HF Hub + + push_to_hub = True # whether to upload the saved model to the HF Hub + hub_private_repo = False + overwrite_output_dir = True # overwrite the old model when re-running the notebook + seed = 0 + + +config = TrainingConfig() +from datasets import load_dataset + +config.dataset_name = "huggan/smithsonian_butterflies_subset" +dataset = load_dataset(config.dataset_name, split="train") + +import matplotlib.pyplot as plt + +from torchvision import transforms + +preprocess = transforms.Compose( + [ + transforms.Resize((config.image_size, config.image_size)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ] +) + + +def transform(examples): + images = [preprocess(image.convert("RGB")) for image in examples["image"]] + return {"images": images} + + +dataset.set_transform(transform) + +import torch + +train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True) + +from diffusers import UNet2DModel + + +model = UNet2DModel( + sample_size=config.image_size, # the target image resolution + in_channels=3, # the number of input channels, 3 for RGB images + out_channels=3, # the number of output channels + layers_per_block=2, # how many ResNet layers to use per UNet block + block_out_channels=(128, 128, 256, 256, 512, 512), # the number of output channes for each UNet block + down_block_types=( + "DownBlock2D", # a regular ResNet downsampling block + "DownBlock2D", + "DownBlock2D", + "DownBlock2D", + "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention + "DownBlock2D", + ), + up_block_types=( + "UpBlock2D", # a regular ResNet upsampling block + "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention + "UpBlock2D", + "UpBlock2D", + "UpBlock2D", + "UpBlock2D", + ), +) + +from diffusers import DDPMScheduler + +noise_scheduler = DDPMScheduler( + num_train_timesteps=1000, + beta_schedule="squaredcos_cap_v2", + variance_type="fixed_small_log", +) + +import torch +import torch.nn.functional as F + +from PIL import Image + +optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate) + + +from diffusers.optimization import get_cosine_schedule_with_warmup + +lr_scheduler = get_cosine_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=config.lr_warmup_steps, + num_training_steps=(len(train_dataloader) * config.num_epochs), +) + +from diffusers import DDPMPipeline + +import math + + +def make_grid(images, rows, cols): + w, h = images[0].size + grid = Image.new("RGB", size=(cols * w, rows * h)) + for i, image in enumerate(images): + grid.paste(image, box=(i % cols * w, i // cols * h)) + return grid + + +def evaluate(config, epoch, pipeline): + # Sample some images from random noise (this is the backward diffusion process). 
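+    # pipeline() runs the scheduler's full reverse loop with a fixed seed,
+    # so grids from successive epochs are directly comparable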
+ # The default pipeline output type is `List[PIL.Image]` + images = pipeline( + batch_size=config.eval_batch_size, + generator=torch.manual_seed(config.seed), + ).images + + # Make a grid out of the images + image_grid = make_grid(images, rows=4, cols=4) + + # Save the images + test_dir = os.path.join(config.output_dir, "samples") + os.makedirs(test_dir, exist_ok=True) + image_grid.save(f"{test_dir}/{epoch:04d}.png") + + +from accelerate import Accelerator +from diffusers.hub_utils import init_git_repo, push_to_hub + +from tqdm.auto import tqdm +import os + + +def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler): + # Initialize accelerator and tensorboard logging + accelerator = Accelerator( + mixed_precision=config.mixed_precision, + gradient_accumulation_steps=config.gradient_accumulation_steps, + log_with="tensorboard", + logging_dir=os.path.join(config.output_dir, "logs"), + ) + if accelerator.is_main_process: + if config.push_to_hub: + repo = init_git_repo(config, at_init=True) + accelerator.init_trackers("train_example") + + # Prepare everything + # There is no specific order to remember, you just need to unpack the + # objects in the same order you gave them to the prepare method. + model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, lr_scheduler + ) + + global_step = 0 + + # Now you train the model + for epoch in range(config.num_epochs): + progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process) + progress_bar.set_description(f"Epoch {epoch}") + + for step, batch in enumerate(train_dataloader): + clean_images = batch["images"] + # Sample noise to add to the images + noise = torch.randn(clean_images.shape).to(clean_images.device) + bs = clean_images.shape[0] + + # Sample a random timestep for each image + timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bs,), device=clean_images.device).long() + + with accelerator.accumulate(model): + # Predict the noise residual + alpha_t, sigma_t = noise_scheduler.get_alpha_sigma(clean_images, timesteps, accelerator.device) + z_t = alpha_t * clean_images + sigma_t * noise + noise_pred = model(z_t, timesteps).sample + v = alpha_t * noise - sigma_t * clean_images + loss = F.mse_loss(noise_pred, v) + accelerator.backward(loss) + + accelerator.clip_grad_norm_(model.parameters(), 1.0) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + progress_bar.update(1) + logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} + progress_bar.set_postfix(**logs) + accelerator.log(logs, step=global_step) + global_step += 1 + + # After each epoch you optionally sample some demo images with evaluate() and save the model + if accelerator.is_main_process: + pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) + + if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1: + evaluate(config, epoch, pipeline) + + if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1: + if config.push_to_hub: + push_to_hub(config, pipeline, repo, commit_message=f"Epoch {epoch}", blocking=True) + else: + pipeline.save_pretrained(config.output_dir) + + +"""## Let's train! 
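+
+(Assuming Accelerate's documented launcher API: `notebook_launcher(train_loop, args, num_processes=2)` runs this same loop on two GPUs; the direct `train_loop(*args)` call below is the single-process form.)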
+ +Let's launch the training (including multi-GPU training) from the notebook using Accelerate's `notebook_launcher` function: +""" + +from accelerate import notebook_launcher + +args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler) + +train_loop(*args) + +"""Let's have a look at the final image grid produced by the trained diffusion model:""" + +import glob + +sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png")) +Image.open(sample_images[-1]) + +"""Not bad! There's room for improvement of course, so feel free to play with the hyperparameters, model definition and image augmentations 🤗 + +If you've chosen to upload the model to the Hugging Face Hub, its repository should now look like so: +https://huggingface.co/anton-l/ddpm-butterflies-128 + +If you want to dive deeper into the code, we also have more advanced training scripts with features like Exponential Moving Average of model weights here: + +https://github.com/huggingface/diffusers/tree/main/examples +""" diff --git a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py index 8c084bbcded0..6e880a6a419e 100644 --- a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py +++ b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py @@ -1,15 +1,17 @@ import copy - +from random import sample +import os import numpy as np import torch import torch.nn.functional as F from torch.utils.data import DataLoader - -import tqdm +from PIL import Image +from tqdm.auto import tqdm from accelerate import Accelerator from diffusers import DiffusionPipeline from diffusers.optimization import get_scheduler from diffusers.schedulers.scheduling_ddpm import DDPMScheduler +from diffusers.pipelines.ddpm import DDPMPipeline from diffusers.training_utils import EMAModel @@ -38,12 +40,17 @@ def __call__( use_ema=True, permute_samples=(0, 1, 2, 3), generator=None, + accelerator=None, + sample_every: int = None, + sample_path: str = "distillation_samples", ): # Initialize our accelerator for training - accelerator = Accelerator( - gradient_accumulation_steps=gradient_accumulation_steps, - mixed_precision=mixed_precision, - ) + os.makedirs(os.path.join(sample_path, f"{n_teacher_trainsteps}"), exist_ok=True) + if accelerator is None: + accelerator = Accelerator( + gradient_accumulation_steps=gradient_accumulation_steps, + mixed_precision=mixed_precision, + ) if accelerator.is_main_process: run = "distill" @@ -53,9 +60,15 @@ def __call__( train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True) # Setup the noise schedulers for the teacher and student - teacher_scheduler = DDPMScheduler(num_train_timesteps=n_teacher_trainsteps, beta_schedule="squaredcos_cap_v2") + teacher_scheduler = DDPMScheduler( + num_train_timesteps=n_teacher_trainsteps, + beta_schedule="squaredcos_cap_v2", + variance_type="fixed_small_log", + ) student_scheduler = DDPMScheduler( - num_train_timesteps=n_teacher_trainsteps // 2, beta_schedule="squaredcos_cap_v2" + num_train_timesteps=n_teacher_trainsteps // 2, + beta_schedule="squaredcos_cap_v2", + variance_type="fixed_small_log", ) # Initialize the student model as a direct copy of the teacher @@ -101,11 +114,24 @@ def __call__( ) global_step = 0 + # run pipeline in inference (sample random noise and denoise) on our teacher model as a baseline + pipeline = DDPMPipeline( + unet=teacher, + 
scheduler=teacher_scheduler, + ) + + images = pipeline(batch_size=4, output_type="numpy", generator=torch.manual_seed(0)).images + + # denormalize the images and save to tensorboard + images_processed = (images * 255).round().astype("uint8") + for sample_number, img in enumerate(images_processed): + img = Image.fromarray(img) + + img.save(os.path.join(sample_path, f"{n_teacher_trainsteps}", f"baseline_sample_{sample_number}.png")) + # Train the student for epoch in range(epochs): - progress_bar = tqdm.tqdm( - total=len(train_data) // batch_size, disable=not accelerator.is_local_main_process - ) + progress_bar = tqdm(total=len(train_data) // batch_size, disable=not accelerator.is_local_main_process) progress_bar.set_description(f"Epoch {epoch}") for batch in train_dataloader: with accelerator.accumulate(student): @@ -147,8 +173,6 @@ def __call__( noise_pred_t_prime = teacher(z_t_prime.permute(*permute_samples), timesteps).sample.permute( *permute_samples ) - if permute_samples: - noise_pred_t_prime = noise_pred_t_prime.permute(*permute_samples) rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1) # V prediction per Appendix D @@ -182,6 +206,30 @@ def __call__( progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) progress_bar.close() + if sample_every is not None: + if (epoch + 1) % sample_every == 0: + new_scheduler = DDPMScheduler( + num_train_timesteps=n_teacher_trainsteps // 2, + beta_schedule="squaredcos_cap_v2", + variance_type="fixed_small_log", + ) + pipeline = DDPMPipeline( + unet=accelerator.unwrap_model(ema_model.averaged_model if use_ema else student), + scheduler=new_scheduler, + ) + + # run pipeline in inference (sample random noise and denoise) + images = pipeline(batch_size=4, output_type="numpy", generator=torch.manual_seed(0)).images + # denormalize the images and save to tensorboard + images_processed = (images * 255).round().astype("uint8") + for sample_number, img in enumerate(images_processed): + img = Image.fromarray(img) + + img.save( + os.path.join( + sample_path, f"{n_teacher_trainsteps}", f"epoch_{epoch}_sample_{sample_number}.png" + ) + ) accelerator.wait_for_everyone() return student, ema_model, accelerator diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 6738eb18eeba..d54abe6ce7a7 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -227,6 +227,7 @@ def step( predict_epsilon=True, generator=None, return_dict: bool = True, + v_prediction: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion @@ -263,11 +264,26 @@ def step( # 2. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf - if predict_epsilon: + if v_prediction: + # x_recon in p_mean_variance + pred_original_sample = ( + sample * self.sqrt_alphas_cumprod[timestep] + - model_output * self.sqrt_one_minus_alphas_cumprod[timestep] + ) + eps = ( + model_output * self.sqrt_alphas_cumprod[timestep] + - sample * self.sqrt_one_minus_alphas_cumprod[timestep] + ) + elif predict_epsilon: pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) else: pred_original_sample = model_output + # pred_original_sample = ( + # sample * self.sqrt_alphas_cumprod[timestep] - model_output * self.sqrt_one_minus_alphas_cumprod[timestep] + # ) + # eps = model_output * self.sqrt_alphas_cumprod[timestep] - sample * self.sqrt_one_minus_alphas_cumprod[timestep] + # 3. Clip "predicted x_0" if self.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) @@ -288,7 +304,10 @@ def step( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) if self.variance_type == "fixed_small_log": - variance = self._get_variance(t, predicted_variance=predicted_variance) * noise + if v_prediction: + variance = torch.exp(0.5 * self._get_variance(t, predicted_variance)) * noise + else: + variance = self._get_variance(t, predicted_variance=predicted_variance) * noise else: variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise From 2aec5b1c32241906d611de3c7a7536943d5e7287 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 3 Nov 2022 10:48:13 -0400 Subject: [PATCH 116/133] v diffusion and training on butterflies example with it --- .../progressive_distillation/train_butterflies.py | 6 +++--- src/diffusers/schedulers/scheduling_ddpm.py | 15 ++++++++++----- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/examples/progressive_distillation/train_butterflies.py b/examples/progressive_distillation/train_butterflies.py index 7d17d066732f..aa3fffdd9026 100644 --- a/examples/progressive_distillation/train_butterflies.py +++ b/examples/progressive_distillation/train_butterflies.py @@ -8,14 +8,14 @@ class TrainingConfig: eval_batch_size = 16 # how many images to sample during evaluation num_epochs = 50 gradient_accumulation_steps = 1 - learning_rate = 1e-4 + learning_rate = 5e-5 lr_warmup_steps = 500 save_image_epochs = 10 save_model_epochs = 30 mixed_precision = "fp16" # `no` for float32, `fp16` for automatic mixed precision output_dir = "ddpm-butterflies-128" # the model namy locally and on the HF Hub - push_to_hub = True # whether to upload the saved model to the HF Hub + push_to_hub = False # whether to upload the saved model to the HF Hub hub_private_repo = False overwrite_output_dir = True # overwrite the old model when re-running the notebook seed = 0 @@ -84,7 +84,7 @@ def transform(examples): noise_scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", - variance_type="fixed_small_log", + variance_type="v_diffusion", ) import torch diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 996675b4818d..baa898b918d0 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -211,7 +211,6 @@ def _get_variance(self, t, predicted_variance=None, variance_type=None): # for rl-diffuser 
https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": variance = torch.log(torch.clamp(variance, min=1e-20)) - variance = torch.exp(0.5 * variance) elif variance_type == "fixed_large": variance = self.betas[t] elif variance_type == "fixed_large_log": @@ -224,6 +223,8 @@ def _get_variance(self, t, predicted_variance=None, variance_type=None): max_log = self.betas[t] frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log + elif variance_type == "v_diffusion": + variance = torch.log(self.betas[t] * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) return variance @@ -312,10 +313,9 @@ def step( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) if self.variance_type == "fixed_small_log": - if v_prediction: - variance = torch.exp(0.5 * self._get_variance(t, predicted_variance)) * noise - else: - variance = self._get_variance(t, predicted_variance=predicted_variance) * noise + variance = self._get_variance(t, predicted_variance=predicted_variance) * noise + elif self.variance_type == "v_diffusion": + variance = torch.exp(0.5 * self._get_variance(t, predicted_variance)) * noise else: variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise @@ -332,6 +332,11 @@ def add_noise( noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: + if self.variance_type == "v_diffusion": + alpha, sigma = self.get_alpha_sigma(original_samples, timesteps, original_samples.device) + z_t = alpha * original_samples + sigma * noise + return z_t + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) From a48b0266e170747f35d02be7d5e394f3ed796357 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 3 Nov 2022 11:00:04 -0400 Subject: [PATCH 117/133] v diffusion support for ddpm --- src/diffusers/schedulers/scheduling_ddpm.py | 92 ++++++++++++++------- 1 file changed, 60 insertions(+), 32 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index ee4f608e09aa..19acf3dc2ff5 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -16,16 +16,25 @@ import math from dataclasses import dataclass -from typing import Optional, Tuple, Union - +from typing import Optional, Tuple, Union, Literal import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput, deprecate +from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin +def expand_to_shape(input, timesteps, shape, device): + """ + Helper indexes a 1D tensor `input` using a 1D index tensor `timesteps`, then reshapes the result to broadcast nicely with `shape`. Useful for parellizing operations over `shape[0]` number of diffusion steps at once. 
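+
+    Illustrative example (hypothetical values, for documentation only): gathering
+    per-timestep betas for a batch of 3 images of shape (3, 3, 64, 64) returns a
+    tensor of shape (3, 1, 1, 1) that broadcasts against the batch:
+
+        >>> betas = torch.linspace(1e-4, 2e-2, 1000)
+        >>> t = torch.tensor([0, 499, 999])
+        >>> expand_to_shape(betas, t, (3, 3, 64, 64), "cpu").shape
+        torch.Size([3, 1, 1, 1])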
+ """ + out = torch.gather(input.to(device), 0, timesteps.to(device)) + reshape = [shape[0]] + [1] * (len(shape) - 1) + out = out.reshape(*reshape) + return out + + @dataclass class DDPMSchedulerOutput(BaseOutput): """ @@ -102,6 +111,14 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin): """ + _compatible_classes = [ + "DDIMScheduler", + "PNDMScheduler", + "LMSDiscreteScheduler", + "EulerDiscreteScheduler", + "EulerAncestralDiscreteScheduler", + ] + @register_to_config def __init__( self, @@ -112,15 +129,7 @@ def __init__( trained_betas: Optional[np.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, - **kwargs, ): - deprecate( - "tensor_format", - "0.6.0", - "If you're running your code in PyTorch, you can safely remove this argument.", - take_from=kwargs, - ) - if trained_betas is not None: self.betas = torch.from_numpy(trained_betas) elif beta_schedule == "linear": @@ -142,8 +151,8 @@ def __init__( self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - self.sigmas = 1 - self.alphas**2 - self.one = torch.tensor(1.0) + self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod) + self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1 - self.alphas_cumprod) # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 @@ -185,11 +194,11 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic def _get_variance(self, timestep, predicted_variance=None, variance_type=None): alpha_prod_t = self.alphas_cumprod[timestep] - alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else self.one + alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else torch.tensor(1.0) - # For timestep > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample - # x_{timestep-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[timestep] if variance_type is None: @@ -213,6 +222,8 @@ def _get_variance(self, timestep, predicted_variance=None, variance_type=None): max_log = self.betas[timestep] frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log + elif variance_type == "v_diffusion": + variance = torch.log(self.betas[timestep] * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) return variance @@ -221,9 +232,10 @@ def step( model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, - prediction_type: str = "epsilon", + prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", generator=None, return_dict: bool = True, + v_prediction: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion @@ -234,10 +246,8 @@ def step( timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. 
- prediction_type (`str`): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample), or `v` (see section 2.4 - https://imagen.research.google/video/paper.pdf) + prediction_type (`Literal["epsilon", "sample", "v"]`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process), `sample` (directly predicting the noisy sample`) or `v` (see section 2.4 https://imagen.research.google/video/paper.pdf) generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class @@ -254,23 +264,26 @@ def step( # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[timestep] - alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else self.one + alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else torch.tensor(1.0) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf - if prediction_type == "epsilon": + if prediction_type == "v": + # x_recon in p_mean_variance + pred_original_sample = ( + sample * self.sqrt_alphas_cumprod[timestep] + - model_output * self.sqrt_one_minus_alphas_cumprod[timestep] + ) + eps = ( + model_output * self.sqrt_alphas_cumprod[timestep] + - sample * self.sqrt_one_minus_alphas_cumprod[timestep] + ) + elif prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) - elif prediction_type == "sample": - pred_original_sample = model_output - elif prediction_type == "v": - # v_t = alpha_t * epsilon - sigma_t * x - # need to merge the PRs for sigma to be available in DDPM - pred = sample * self.alphas[timestep] - model_output * self.sigmas[timestep] - eps = model_output * self.alphas[timestep] - sample * self.sigmas[timestep] else: - raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") + pred_original_sample = model_output # 3. 
Clip "predicted x_0" if self.config.clip_sample: @@ -291,7 +304,12 @@ def step( noise = torch.randn( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) - variance = (self._get_variance(timestep, predicted_variance=predicted_variance) ** 0.5) * noise + if self.variance_type == "fixed_small_log": + variance = self._get_variance(t, predicted_variance=predicted_variance) * noise + elif self.variance_type == "v_diffusion": + variance = torch.exp(0.5 * self._get_variance(timestep, predicted_variance)) * noise + else: + variance = (self._get_variance(timestep, predicted_variance=predicted_variance) ** 0.5) * noise pred_prev_sample = pred_prev_sample + variance @@ -306,6 +324,11 @@ def add_noise( noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: + if self.variance_type == "v_diffusion": + alpha, sigma = self.get_alpha_sigma(original_samples, timesteps, original_samples.device) + z_t = alpha * original_samples + sigma * noise + return z_t + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) @@ -325,3 +348,8 @@ def add_noise( def __len__(self): return self.config.num_train_timesteps + + def get_alpha_sigma(self, sample, timesteps, device): + alpha = expand_to_shape(self.sqrt_alphas_cumprod, timesteps, sample.shape, device) + sigma = expand_to_shape(self.sqrt_one_minus_alphas_cumprod, timesteps, sample.shape, device) + return alpha, sigma From 3d702c6d652cb4d7a50d9cea750ca7115b3c4375 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 3 Nov 2022 11:03:26 -0400 Subject: [PATCH 118/133] quality and style --- src/diffusers/schedulers/scheduling_ddpm.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 19acf3dc2ff5..c3c72171f1cc 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -16,7 +16,8 @@ import math from dataclasses import dataclass -from typing import Optional, Tuple, Union, Literal +from typing import Literal, Optional, Tuple, Union + import numpy as np import torch @@ -27,7 +28,8 @@ def expand_to_shape(input, timesteps, shape, device): """ - Helper indexes a 1D tensor `input` using a 1D index tensor `timesteps`, then reshapes the result to broadcast nicely with `shape`. Useful for parellizing operations over `shape[0]` number of diffusion steps at once. + Helper indexes a 1D tensor `input` using a 1D index tensor `timesteps`, then reshapes the result to broadcast + nicely with `shape`. Useful for parellizing operations over `shape[0]` number of diffusion steps at once. """ out = torch.gather(input.to(device), 0, timesteps.to(device)) reshape = [shape[0]] + [1] * (len(shape) - 1) @@ -247,7 +249,9 @@ def step( sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. 
prediction_type (`Literal["epsilon", "sample", "v"]`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process), `sample` (directly predicting the noisy sample`) or `v` (see section 2.4 https://imagen.research.google/video/paper.pdf) + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v` (see section 2.4 + https://imagen.research.google/video/paper.pdf) generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class @@ -276,10 +280,6 @@ def step( sample * self.sqrt_alphas_cumprod[timestep] - model_output * self.sqrt_one_minus_alphas_cumprod[timestep] ) - eps = ( - model_output * self.sqrt_alphas_cumprod[timestep] - - sample * self.sqrt_one_minus_alphas_cumprod[timestep] - ) elif prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) else: @@ -305,7 +305,7 @@ def step( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) if self.variance_type == "fixed_small_log": - variance = self._get_variance(t, predicted_variance=predicted_variance) * noise + variance = self._get_variance(timestep, predicted_variance=predicted_variance) * noise elif self.variance_type == "v_diffusion": variance = torch.exp(0.5 * self._get_variance(timestep, predicted_variance)) * noise else: From 0889fd1d1178fc203321e5a095b737a111c9c9fb Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 3 Nov 2022 11:04:44 -0400 Subject: [PATCH 119/133] variable name consistency --- src/diffusers/schedulers/scheduling_ddpm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index c3c72171f1cc..25b081da87f1 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -198,9 +198,9 @@ def _get_variance(self, timestep, predicted_variance=None, variance_type=None): alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else torch.tensor(1.0) - # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # For timestep > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample - # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + # x_{timestep-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[timestep] if variance_type is None: From f7c709518fe1c036866ea60b09119fd3013b534b Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 3 Nov 2022 11:07:38 -0400 Subject: [PATCH 120/133] missing base case --- src/diffusers/schedulers/scheduling_ddpm.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 25b081da87f1..878d67e817d1 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -282,8 +282,11 @@ def step( ) elif prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) - else: + + 
elif prediction_type == "sample": pred_original_sample = model_output + else: + raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") # 3. Clip "predicted x_0" if self.config.clip_sample: From 0c23e1162ad31fee62b327ce6ce4ab5ba7a82188 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 3 Nov 2022 11:23:35 -0400 Subject: [PATCH 121/133] pass prediction type along in the pipeline --- src/diffusers/pipelines/ddpm/pipeline_ddpm.py | 7 +++++-- src/diffusers/schedulers/scheduling_ddpm.py | 3 ++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/diffusers/pipelines/ddpm/pipeline_ddpm.py b/src/diffusers/pipelines/ddpm/pipeline_ddpm.py index aae29737aae3..3d5afc94e3df 100644 --- a/src/diffusers/pipelines/ddpm/pipeline_ddpm.py +++ b/src/diffusers/pipelines/ddpm/pipeline_ddpm.py @@ -14,7 +14,7 @@ # limitations under the License. -from typing import Optional, Tuple, Union +from typing import Optional, Tuple, Union, Literal import torch @@ -44,6 +44,7 @@ def __call__( generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, + prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", **kwargs, ) -> Union[ImagePipelineOutput, Tuple]: r""" @@ -80,7 +81,9 @@ def __call__( model_output = self.unet(image, t).sample # 2. compute previous image: x_t -> t_t-1 - image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample + image = self.scheduler.step( + model_output, t, image, generator=generator, prediction_type=prediction_type + ).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 878d67e817d1..1813592a069d 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -237,7 +237,6 @@ def step( prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", generator=None, return_dict: bool = True, - v_prediction: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion @@ -261,6 +260,8 @@ def step( returning a tuple, the first element is the sample tensor. 
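
         Sketch of typical usage in a sampling loop (illustrative only, assuming a
         `unet` whose output matches the configured `prediction_type`):

             scheduler.set_timesteps(50)
             for t in scheduler.timesteps:
                 model_output = unet(sample, t).sample
                 sample = scheduler.step(model_output, t, sample).prev_sample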
""" + if self.variance_type == "v_diffusion": + assert prediction_type == "v", "Need to use v prediction with v_diffusion" if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: From d887e580faef076fca3a2917e83b959e5f350f75 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Thu, 3 Nov 2022 17:13:18 -0400 Subject: [PATCH 122/133] correct variance type --- .../pipeline_progressive_distillation.py | 55 ++++++++++--------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py index 6e880a6a419e..eb6920ebc9b7 100644 --- a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py +++ b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py @@ -63,12 +63,12 @@ def __call__( teacher_scheduler = DDPMScheduler( num_train_timesteps=n_teacher_trainsteps, beta_schedule="squaredcos_cap_v2", - variance_type="fixed_small_log", + variance_type="v_diffusion", ) student_scheduler = DDPMScheduler( num_train_timesteps=n_teacher_trainsteps // 2, beta_schedule="squaredcos_cap_v2", - variance_type="fixed_small_log", + variance_type="v_diffusion", ) # Initialize the student model as a direct copy of the teacher @@ -76,14 +76,15 @@ def __call__( student.load_state_dict(teacher.state_dict()) student = accelerator.prepare(student) student.train() + teacher.eval() # Setup the optimizer for the student optimizer = torch.optim.AdamW( student.parameters(), lr=lr, - betas=(adam_beta1, adam_beta2), - weight_decay=adam_weight_decay, - eps=adam_epsilon, + # betas=(adam_beta1, adam_beta2), + # weight_decay=adam_weight_decay, + # eps=adam_epsilon, ) lr_scheduler = get_scheduler( "linear", @@ -115,19 +116,19 @@ def __call__( global_step = 0 # run pipeline in inference (sample random noise and denoise) on our teacher model as a baseline - pipeline = DDPMPipeline( - unet=teacher, - scheduler=teacher_scheduler, - ) + # pipeline = DDPMPipeline( + # unet=teacher, + # scheduler=teacher_scheduler, + # ) - images = pipeline(batch_size=4, output_type="numpy", generator=torch.manual_seed(0)).images + # images = pipeline(batch_size=4, output_type="numpy", generator=torch.manual_seed(0)).images - # denormalize the images and save to tensorboard - images_processed = (images * 255).round().astype("uint8") - for sample_number, img in enumerate(images_processed): - img = Image.fromarray(img) + # # denormalize the images and save to tensorboard + # images_processed = (images * 255).round().astype("uint8") + # for sample_number, img in enumerate(images_processed): + # img = Image.fromarray(img) - img.save(os.path.join(sample_path, f"{n_teacher_trainsteps}", f"baseline_sample_{sample_number}.png")) + # img.save(os.path.join(sample_path, f"{n_teacher_trainsteps}", f"baseline_sample_{sample_number}.png")) # Train the student for epoch in range(epochs): @@ -187,18 +188,18 @@ def __call__( loss = F.mse_loss(noise_pred * w, z_t_prime_2 * w) accelerator.backward(loss) - if accelerator.sync_gradients: - accelerator.clip_grad_norm_(student.parameters(), 1.0) - optimizer.step() - lr_scheduler.step() - if use_ema: - ema_model.step(student) - optimizer.zero_grad() + if accelerator.sync_gradients: + accelerator.clip_grad_norm_(student.parameters(), 1.0) + optimizer.step() + 
lr_scheduler.step() + if use_ema: + ema_model.step(student) + optimizer.zero_grad() - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + global_step += 1 logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} if use_ema: @@ -211,7 +212,7 @@ def __call__( new_scheduler = DDPMScheduler( num_train_timesteps=n_teacher_trainsteps // 2, beta_schedule="squaredcos_cap_v2", - variance_type="fixed_small_log", + variance_type="v_diffusion", ) pipeline = DDPMPipeline( unet=accelerator.unwrap_model(ema_model.averaged_model if use_ema else student), From b46327e89e079b2eaaa2351a27c86900d77c168d Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 7 Nov 2022 09:19:22 -0500 Subject: [PATCH 123/133] put prediction type in scheduler config --- src/diffusers/pipelines/ddpm/pipeline_ddpm.py | 7 ++----- src/diffusers/schedulers/scheduling_ddpm.py | 16 ++++++++++------ 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/diffusers/pipelines/ddpm/pipeline_ddpm.py b/src/diffusers/pipelines/ddpm/pipeline_ddpm.py index 3d5afc94e3df..a9284063e884 100644 --- a/src/diffusers/pipelines/ddpm/pipeline_ddpm.py +++ b/src/diffusers/pipelines/ddpm/pipeline_ddpm.py @@ -14,7 +14,7 @@ # limitations under the License. -from typing import Optional, Tuple, Union, Literal +from typing import Literal, Optional, Tuple, Union import torch @@ -44,7 +44,6 @@ def __call__( generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, - prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", **kwargs, ) -> Union[ImagePipelineOutput, Tuple]: r""" @@ -81,9 +80,7 @@ def __call__( model_output = self.unet(image, t).sample # 2. 
compute previous image: x_t -> t_t-1 - image = self.scheduler.step( - model_output, t, image, generator=generator, prediction_type=prediction_type - ).prev_sample + image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 1813592a069d..0327c44e3c4a 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -131,6 +131,7 @@ def __init__( trained_betas: Optional[np.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, + prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", ): if trained_betas is not None: self.betas = torch.from_numpy(trained_betas) @@ -164,6 +165,7 @@ def __init__( self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.variance_type = variance_type + self.prediction_type = prediction_type def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ @@ -234,7 +236,7 @@ def step( model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, - prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", + # prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", generator=None, return_dict: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: @@ -261,7 +263,7 @@ def step( """ if self.variance_type == "v_diffusion": - assert prediction_type == "v", "Need to use v prediction with v_diffusion" + assert self.prediction_type == "v", "Need to use v prediction with v_diffusion" if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: @@ -275,19 +277,21 @@ def step( # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf - if prediction_type == "v": + if self.prediction_type == "v": # x_recon in p_mean_variance pred_original_sample = ( sample * self.sqrt_alphas_cumprod[timestep] - model_output * self.sqrt_one_minus_alphas_cumprod[timestep] ) - elif prediction_type == "epsilon": + elif self.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) - elif prediction_type == "sample": + elif self.prediction_type == "sample": pred_original_sample = model_output else: - raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") + raise ValueError( + f"prediction_type given as {self.prediction_type} must be one of `epsilon`, `sample`, or `v`" + ) # 3. Clip "predicted x_0" if self.config.clip_sample: From 45c36c85d71ae278ca7d7ad87263394128b30eb1 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Mon, 7 Nov 2022 09:20:16 -0500 Subject: [PATCH 124/133] style --- src/diffusers/pipelines/ddpm/pipeline_ddpm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/ddpm/pipeline_ddpm.py b/src/diffusers/pipelines/ddpm/pipeline_ddpm.py index a9284063e884..aae29737aae3 100644 --- a/src/diffusers/pipelines/ddpm/pipeline_ddpm.py +++ b/src/diffusers/pipelines/ddpm/pipeline_ddpm.py @@ -14,7 +14,7 @@ # limitations under the License. 
-from typing import Literal, Optional, Tuple, Union +from typing import Optional, Tuple, Union import torch From f00d896a1e693b371858f120f311b4bc536105c3 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 9 Nov 2022 14:33:15 -0500 Subject: [PATCH 125/133] DDPM changes to support v diffusion (#1121) * v diffusion support for ddpm * quality and style * variable name consistency * missing base case * pass prediction type along in the pipeline * put prediction type in scheduler config * style --- src/diffusers/schedulers/scheduling_ddpm.py | 88 +++++++++++++++------ 1 file changed, 62 insertions(+), 26 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index ee4f608e09aa..0327c44e3c4a 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -16,16 +16,27 @@ import math from dataclasses import dataclass -from typing import Optional, Tuple, Union +from typing import Literal, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput, deprecate +from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin +def expand_to_shape(input, timesteps, shape, device): + """ + Helper indexes a 1D tensor `input` using a 1D index tensor `timesteps`, then reshapes the result to broadcast + nicely with `shape`. Useful for parellizing operations over `shape[0]` number of diffusion steps at once. + """ + out = torch.gather(input.to(device), 0, timesteps.to(device)) + reshape = [shape[0]] + [1] * (len(shape) - 1) + out = out.reshape(*reshape) + return out + + @dataclass class DDPMSchedulerOutput(BaseOutput): """ @@ -102,6 +113,14 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin): """ + _compatible_classes = [ + "DDIMScheduler", + "PNDMScheduler", + "LMSDiscreteScheduler", + "EulerDiscreteScheduler", + "EulerAncestralDiscreteScheduler", + ] + @register_to_config def __init__( self, @@ -112,15 +131,8 @@ def __init__( trained_betas: Optional[np.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, - **kwargs, + prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", ): - deprecate( - "tensor_format", - "0.6.0", - "If you're running your code in PyTorch, you can safely remove this argument.", - take_from=kwargs, - ) - if trained_betas is not None: self.betas = torch.from_numpy(trained_betas) elif beta_schedule == "linear": @@ -142,8 +154,8 @@ def __init__( self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - self.sigmas = 1 - self.alphas**2 - self.one = torch.tensor(1.0) + self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod) + self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1 - self.alphas_cumprod) # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 @@ -153,6 +165,7 @@ def __init__( self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.variance_type = variance_type + self.prediction_type = prediction_type def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ @@ -185,7 +198,7 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic def _get_variance(self, timestep, predicted_variance=None, variance_type=None): alpha_prod_t = self.alphas_cumprod[timestep] - alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else self.one + alpha_prod_t_prev = 
self.alphas_cumprod[timestep - 1] if timestep > 0 else torch.tensor(1.0) # For timestep > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample @@ -213,6 +226,8 @@ def _get_variance(self, timestep, predicted_variance=None, variance_type=None): max_log = self.betas[timestep] frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log + elif variance_type == "v_diffusion": + variance = torch.log(self.betas[timestep] * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) return variance @@ -221,7 +236,7 @@ def step( model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, - prediction_type: str = "epsilon", + # prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", generator=None, return_dict: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: @@ -234,9 +249,9 @@ def step( timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. - prediction_type (`str`): + prediction_type (`Literal["epsilon", "sample", "v"]`, optional): prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample), or `v` (see section 2.4 + process), `sample` (directly predicting the noisy sample`) or `v` (see section 2.4 https://imagen.research.google/video/paper.pdf) generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class @@ -247,6 +262,8 @@ def step( returning a tuple, the first element is the sample tensor. """ + if self.variance_type == "v_diffusion": + assert self.prediction_type == "v", "Need to use v prediction with v_diffusion" if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: @@ -254,23 +271,27 @@ def step( # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[timestep] - alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else self.one + alpha_prod_t_prev = self.alphas_cumprod[timestep - 1] if timestep > 0 else torch.tensor(1.0) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf - if prediction_type == "epsilon": + if self.prediction_type == "v": + # x_recon in p_mean_variance + pred_original_sample = ( + sample * self.sqrt_alphas_cumprod[timestep] + - model_output * self.sqrt_one_minus_alphas_cumprod[timestep] + ) + elif self.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) - elif prediction_type == "sample": + + elif self.prediction_type == "sample": pred_original_sample = model_output - elif prediction_type == "v": - # v_t = alpha_t * epsilon - sigma_t * x - # need to merge the PRs for sigma to be available in DDPM - pred = sample * self.alphas[timestep] - model_output * self.sigmas[timestep] - eps = model_output * self.alphas[timestep] - sample * self.sigmas[timestep] else: - raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") + raise ValueError( + f"prediction_type given as {self.prediction_type} must be one of `epsilon`, `sample`, or `v`" + ) # 3. 
Clip "predicted x_0" if self.config.clip_sample: @@ -291,7 +312,12 @@ def step( noise = torch.randn( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) - variance = (self._get_variance(timestep, predicted_variance=predicted_variance) ** 0.5) * noise + if self.variance_type == "fixed_small_log": + variance = self._get_variance(timestep, predicted_variance=predicted_variance) * noise + elif self.variance_type == "v_diffusion": + variance = torch.exp(0.5 * self._get_variance(timestep, predicted_variance)) * noise + else: + variance = (self._get_variance(timestep, predicted_variance=predicted_variance) ** 0.5) * noise pred_prev_sample = pred_prev_sample + variance @@ -306,6 +332,11 @@ def add_noise( noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: + if self.variance_type == "v_diffusion": + alpha, sigma = self.get_alpha_sigma(original_samples, timesteps, original_samples.device) + z_t = alpha * original_samples + sigma * noise + return z_t + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) @@ -325,3 +356,8 @@ def add_noise( def __len__(self): return self.config.num_train_timesteps + + def get_alpha_sigma(self, sample, timesteps, device): + alpha = expand_to_shape(self.sqrt_alphas_cumprod, timesteps, sample.shape, device) + sigma = expand_to_shape(self.sqrt_one_minus_alphas_cumprod, timesteps, sample.shape, device) + return alpha, sigma From 56164f56fb0ab92a212335b0f112a47766559b41 Mon Sep 17 00:00:00 2001 From: Nathan Lambert Date: Wed, 9 Nov 2022 11:53:25 -0800 Subject: [PATCH 126/133] quality --- src/diffusers/schedulers/scheduling_ddpm.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 03659040d769..d403c4f5959c 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -274,8 +274,6 @@ def step( new_config["predict_epsilon"] = predict_epsilon self._internal_dict = FrozenDict(new_config) - t = timestep - if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: From 13404a6857e96a2314df4a4d82b268a1ccb4922d Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 9 Nov 2022 15:16:21 -0500 Subject: [PATCH 127/133] try to train on ddim --- examples/v_prediction/train_butterflies.py | 239 ++++++++++++++++++++ src/diffusers/schedulers/scheduling_ddim.py | 83 ++++--- 2 files changed, 295 insertions(+), 27 deletions(-) create mode 100644 examples/v_prediction/train_butterflies.py diff --git a/examples/v_prediction/train_butterflies.py b/examples/v_prediction/train_butterflies.py new file mode 100644 index 000000000000..8eaa971c80d3 --- /dev/null +++ b/examples/v_prediction/train_butterflies.py @@ -0,0 +1,239 @@ +from dataclasses import dataclass + + +@dataclass +class TrainingConfig: + image_size = 128 # the generated image resolution + train_batch_size = 16 + eval_batch_size = 16 # how many images to sample during evaluation + num_epochs = 50 + gradient_accumulation_steps = 1 + learning_rate = 5e-5 + lr_warmup_steps = 500 + save_image_epochs = 10 + save_model_epochs = 30 + mixed_precision = "fp16" # `no` for float32, `fp16` 
for automatic mixed precision
+    output_dir = "ddim-butterflies-128-v-diffusion"  # the model name locally and on the HF Hub
+
+    push_to_hub = False  # whether to upload the saved model to the HF Hub
+    hub_private_repo = False
+    overwrite_output_dir = True  # overwrite the old model when re-running the notebook
+    seed = 0
+
+
+config = TrainingConfig()
+from datasets import load_dataset
+
+config.dataset_name = "huggan/smithsonian_butterflies_subset"
+dataset = load_dataset(config.dataset_name, split="train")
+
+import matplotlib.pyplot as plt
+
+from torchvision import transforms
+
+preprocess = transforms.Compose(
+    [
+        transforms.Resize((config.image_size, config.image_size)),
+        transforms.RandomHorizontalFlip(),
+        transforms.ToTensor(),
+        transforms.Normalize([0.5], [0.5]),
+    ]
+)
+
+
+def transform(examples):
+    images = [preprocess(image.convert("RGB")) for image in examples["image"]]
+    return {"images": images}
+
+
+dataset.set_transform(transform)
+
+import torch
+
+train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True)
+
+from diffusers import UNet2DModel
+
+
+model = UNet2DModel(
+    sample_size=config.image_size,  # the target image resolution
+    in_channels=3,  # the number of input channels, 3 for RGB images
+    out_channels=3,  # the number of output channels
+    layers_per_block=2,  # how many ResNet layers to use per UNet block
+    block_out_channels=(128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
+    down_block_types=(
+        "DownBlock2D",  # a regular ResNet downsampling block
+        "DownBlock2D",
+        "DownBlock2D",
+        "DownBlock2D",
+        "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
+        "DownBlock2D",
+    ),
+    up_block_types=(
+        "UpBlock2D",  # a regular ResNet upsampling block
+        "AttnUpBlock2D",  # a ResNet upsampling block with spatial self-attention
+        "UpBlock2D",
+        "UpBlock2D",
+        "UpBlock2D",
+        "UpBlock2D",
+    ),
+)
+
+from diffusers import DDPMScheduler, DDIMPipeline, DDIMScheduler
+
+noise_scheduler = DDIMScheduler(
+    num_train_timesteps=1000,
+    beta_schedule="squaredcos_cap_v2",
+    variance_type="v_diffusion",
+)
+
+import torch
+import torch.nn.functional as F
+
+from PIL import Image
+
+optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
+
+
+from diffusers.optimization import get_cosine_schedule_with_warmup
+
+lr_scheduler = get_cosine_schedule_with_warmup(
+    optimizer=optimizer,
+    num_warmup_steps=config.lr_warmup_steps,
+    num_training_steps=(len(train_dataloader) * config.num_epochs),
+)
+
+from diffusers import DDPMPipeline
+
+import math
+
+
+def make_grid(images, rows, cols):
+    w, h = images[0].size
+    grid = Image.new("RGB", size=(cols * w, rows * h))
+    for i, image in enumerate(images):
+        grid.paste(image, box=(i % cols * w, i // cols * h))
+    return grid
+
+
+def evaluate(config, epoch, pipeline):
+    # Sample some images from random noise (this is the backward diffusion process).
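+    # Note: DDIMPipeline's default eta is 0.0, so with the fixed generator seed
+    # below this evaluation pass should be deterministic.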
+ # The default pipeline output type is `List[PIL.Image]` + images = pipeline( + batch_size=config.eval_batch_size, + generator=torch.manual_seed(config.seed), + ).images + + # Make a grid out of the images + image_grid = make_grid(images, rows=4, cols=4) + + # Save the images + test_dir = os.path.join(config.output_dir, "samples") + os.makedirs(test_dir, exist_ok=True) + image_grid.save(f"{test_dir}/{epoch:04d}.png") + + +from accelerate import Accelerator +from diffusers.hub_utils import init_git_repo, push_to_hub + +from tqdm.auto import tqdm +import os + + +def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler): + # Initialize accelerator and tensorboard logging + accelerator = Accelerator( + mixed_precision=config.mixed_precision, + gradient_accumulation_steps=config.gradient_accumulation_steps, + log_with="tensorboard", + logging_dir=os.path.join(config.output_dir, "logs"), + ) + if accelerator.is_main_process: + if config.push_to_hub: + repo = init_git_repo(config, at_init=True) + accelerator.init_trackers("train_example") + + # Prepare everything + # There is no specific order to remember, you just need to unpack the + # objects in the same order you gave them to the prepare method. + model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, lr_scheduler + ) + + global_step = 0 + + # Now you train the model + for epoch in range(config.num_epochs): + progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process) + progress_bar.set_description(f"Epoch {epoch}") + + for step, batch in enumerate(train_dataloader): + clean_images = batch["images"] + # Sample noise to add to the images + noise = torch.randn(clean_images.shape).to(clean_images.device) + bs = clean_images.shape[0] + + # Sample a random timestep for each image + timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bs,), device=clean_images.device).long() + + with accelerator.accumulate(model): + # Predict the noise residual + alpha_t, sigma_t = noise_scheduler.get_alpha_sigma(clean_images, timesteps, accelerator.device) + z_t = alpha_t * clean_images + sigma_t * noise + noise_pred = model(z_t, timesteps).sample + v = alpha_t * noise - sigma_t * clean_images + loss = F.mse_loss(noise_pred, v) + accelerator.backward(loss) + + accelerator.clip_grad_norm_(model.parameters(), 1.0) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + progress_bar.update(1) + logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} + progress_bar.set_postfix(**logs) + accelerator.log(logs, step=global_step) + global_step += 1 + + # After each epoch you optionally sample some demo images with evaluate() and save the model + if accelerator.is_main_process: + pipeline = DDIMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) + + if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1: + evaluate(config, epoch, pipeline) + + if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1: + if config.push_to_hub: + push_to_hub(config, pipeline, repo, commit_message=f"Epoch {epoch}", blocking=True) + else: + pipeline.save_pretrained(config.output_dir) + + +"""## Let's train! 
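+
+One reference point worth keeping in mind: under the v parameterization, the
+deterministic (eta = 0) DDIM update can be written entirely in terms of the noised
+sample z_t and the predicted v, since x0_hat = alpha_t * z_t - sigma_t * v and
+eps_hat = sigma_t * z_t + alpha_t * v. A minimal sketch of one such step,
+independent of the scheduler classes above (illustrative only):
+
+```python
+import torch
+
+def ddim_v_step(z_t, v_pred, alpha_t, sigma_t, alpha_prev, sigma_prev):
+    # One deterministic DDIM step under the v parameterization (sketch).
+    x0_hat = alpha_t * z_t - sigma_t * v_pred   # predicted clean image
+    eps_hat = sigma_t * z_t + alpha_t * v_pred  # predicted noise
+    return alpha_prev * x0_hat + sigma_prev * eps_hat  # re-noise to the previous level
+```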
+ +Let's launch the training (including multi-GPU training) from the notebook using Accelerate's `notebook_launcher` function: +""" + +from accelerate import notebook_launcher + +args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler) + +train_loop(*args) + +"""Let's have a look at the final image grid produced by the trained diffusion model:""" + +import glob + +sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png")) +Image.open(sample_images[-1]) + +"""Not bad! There's room for improvement of course, so feel free to play with the hyperparameters, model definition and image augmentations 🤗 + +If you've chosen to upload the model to the Hugging Face Hub, its repository should now look like so: +https://huggingface.co/anton-l/ddpm-butterflies-128 + +If you want to dive deeper into the code, we also have more advanced training scripts with features like Exponential Moving Average of model weights here: + +https://github.com/huggingface/diffusers/tree/main/examples +""" diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py index abdcb3e81a58..177bd65dc517 100644 --- a/src/diffusers/schedulers/scheduling_ddim.py +++ b/src/diffusers/schedulers/scheduling_ddim.py @@ -17,7 +17,7 @@ import math from dataclasses import dataclass -from typing import Optional, Tuple, Union +from typing import Optional, Tuple, Union, Literal import numpy as np import torch @@ -27,11 +27,21 @@ from .scheduling_utils import SchedulerMixin +def expand_to_shape(input, timesteps, shape, device): + """ + Helper indexes a 1D tensor `input` using a 1D index tensor `timesteps`, then reshapes the result to broadcast + nicely with `shape`. Useful for parellizing operations over `shape[0]` number of diffusion steps at once. + """ + out = torch.gather(input.to(device), 0, timesteps.to(device)) + reshape = [shape[0]] + [1] * (len(shape) - 1) + out = out.reshape(*reshape) + return out + + @dataclass class DDIMSchedulerOutput(BaseOutput): """ Output class for the scheduler's step function output. - Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the @@ -49,16 +59,12 @@ def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999) -> torch.Tensor """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. - - Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. - Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ @@ -78,14 +84,11 @@ class DDIMScheduler(SchedulerMixin, ConfigMixin): """ Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with non-Markovian guidance. - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. 
[`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and [`~ConfigMixin.from_config`] functions. - For more details, see the original paper: https://arxiv.org/abs/2010.02502 - Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the starting `beta` value of inference. @@ -105,7 +108,6 @@ class DDIMScheduler(SchedulerMixin, ConfigMixin): an offset added to the inference steps. You can use a combination of `offset=1` and `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in stable diffusion. - """ @register_to_config @@ -118,7 +120,9 @@ def __init__( trained_betas: Optional[np.ndarray] = None, clip_sample: bool = True, set_alpha_to_one: bool = True, + variance_type: str = "fixed", steps_offset: int = 0, + prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", **kwargs, ): deprecate( @@ -159,35 +163,42 @@ def __init__( # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + self.variance_type = variance_type + self.prediction_type = prediction_type def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. - Args: sample (`torch.FloatTensor`): input sample timestep (`int`, optional): current timestep - Returns: `torch.FloatTensor`: scaled input sample """ return sample def _get_variance(self, timestep, prev_timestep): - alpha_prod_t = self.alphas_cumprod[timestep] - alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod - beta_prod_t = 1 - alpha_prod_t - beta_prod_t_prev = 1 - alpha_prod_t_prev - - variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + if self.variance_type == "fixed": + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + elif self.variance_type == "v_diffusion": + # If eta > 0, adjust the scaling factor for the predicted noise + # downward according to the amount of additional noise to add + ddim_sigma = (self.sigmas[timestep + 1] ** 2 / self.sigmas[timestep] ** 2).sqrt() * ( + 1 - self.alphas[timestep] ** 2 / self.alphas[timestep + 1] ** 2 + ).sqrt() + variance = (self.sigmas[timestep + 1] ** 2 - ddim_sigma**2).sqrt() return variance def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, **kwargs): """ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. - Args: num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. @@ -219,7 +230,6 @@ def step( """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). - Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. 
@@ -233,12 +243,10 @@ def step( use_clipped_model_output (`bool`): TODO generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class - Returns: [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - """ if self.num_inference_steps is None: raise ValueError( @@ -295,19 +303,31 @@ def step( model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output + if self.prediction_type == "epsilon": + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output - # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + eps * pred_sample_direction + # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + eps * pred_sample_direction + else: + if timestep < len(self.timesteps) - 1: + prev_sample = pred_original_sample + self.alphas[timestep + 1] + eps * variance + else: + prev_sample = None if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 device = model_output.device if torch.is_tensor(model_output) else "cpu" noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device) - variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise + if self.prediction_type == "epsilon": - prev_sample = prev_sample + variance + variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise + prev_sample = prev_sample + variance + else: + ddim_sigma = (self.sigmas[timestep + 1] ** 2 / self.sigmas[timestep] ** 2).sqrt() * ( + 1 - self.alphas[timestep] ** 2 / self.alphas[timestep + 1] ** 2 + ).sqrt() + prev_sample = prev_sample + ddim_sigma * noise if not return_dict: return (prev_sample,) @@ -319,6 +339,10 @@ def add_noise( noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: + if self.variance_type == "v_diffusion": + alpha, sigma = self.get_alpha_sigma(original_samples, timesteps, original_samples.device) + z_t = alpha * original_samples + sigma * noise + return z_t # Make sure alphas_cumprod and timestep have same device and dtype as original_samples self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) @@ -338,3 +362,8 @@ def add_noise( def __len__(self): return self.config.num_train_timesteps + + def get_alpha_sigma(self, sample, timesteps, device): + alpha = expand_to_shape(self.sqrt_alphas_cumprod, timesteps, sample.shape, device) + sigma = expand_to_shape(self.sqrt_one_minus_alphas_cumprod, timesteps, sample.shape, device) + return alpha, sigma From 1fa3cc8ad7099d929adb7982508087cb5f6afb7a Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Tue, 15 Nov 2022 12:27:10 -0500 Subject: [PATCH 128/133] changes to ddim --- src/diffusers/schedulers/scheduling_ddim.py | 45 ++++++++++++++------- 1 file changed, 30 insertions(+), 15 deletions(-) diff --git 
a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py index 177bd65dc517..eebd1f2cd975 100644 --- a/src/diffusers/schedulers/scheduling_ddim.py +++ b/src/diffusers/schedulers/scheduling_ddim.py @@ -80,6 +80,14 @@ def alpha_bar(time_step): return torch.tensor(betas) +def t_to_alpha_sigma(num_diffusion_timesteps): + """Returns the scaling factors for the clean image and for the noise, given + a timestep.""" + alphas = torch.cos(torch.tensor([t * math.pi / 2 for t in range(num_diffusion_timesteps)])) + sigmas = torch.sin(torch.tensor([t * math.pi / 2 for t in range(num_diffusion_timesteps)])) + return alphas, sigmas + + class DDIMScheduler(SchedulerMixin, ConfigMixin): """ Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising @@ -149,7 +157,8 @@ def __init__( self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - self.sigmas = 1 - self.alphas**2 + if prediction_type == "v": + self.alphas, self.sigmas = t_to_alpha_sigma(num_train_timesteps) # At every step in ddim, we are looking into the previous alphas_cumprod # For the final step, there is no previous alphas_cumprod because we are already at 0 @@ -178,7 +187,7 @@ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = """ return sample - def _get_variance(self, timestep, prev_timestep): + def _get_variance(self, timestep, prev_timestep, eta=0): if self.variance_type == "fixed": alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod @@ -189,9 +198,14 @@ def _get_variance(self, timestep, prev_timestep): elif self.variance_type == "v_diffusion": # If eta > 0, adjust the scaling factor for the predicted noise # downward according to the amount of additional noise to add - ddim_sigma = (self.sigmas[timestep + 1] ** 2 / self.sigmas[timestep] ** 2).sqrt() * ( - 1 - self.alphas[timestep] ** 2 / self.alphas[timestep + 1] ** 2 - ).sqrt() + if eta: + numerator = ( + eta * (self.sigmas[timestep + 1] ** 2 / self.sigmas[timestep] ** 2).clamp(min=1.0e-7).sqrt() + ) + else: + numerator = 0 + denominator = (self.alphas[timestep + 1] / self.alphas[timestep]).clamp(min=1.0e-7).sqrt() + ddim_sigma = (numerator / denominator).clamp(min=1.0e-7) variance = (self.sigmas[timestep + 1] ** 2 - ddim_sigma**2).sqrt() return variance @@ -221,7 +235,6 @@ def step( model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, - prediction_type: str = "epsilon", eta: float = 0.0, use_clipped_model_output: bool = False, generator=None, @@ -275,19 +288,21 @@ def step( # 3. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - if prediction_type == "epsilon": + if self.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) eps = torch.tensor(1) - elif prediction_type == "sample": + elif self.prediction_type == "sample": pred_original_sample = model_output eps = torch.tensor(1) - elif prediction_type == "v": + elif self.prediction_type == "v": # v_t = alpha_t * epsilon - sigma_t * x # need to merge the PRs for sigma to be available in DDPM pred_original_sample = sample * self.alphas[timestep] - model_output * self.sigmas[timestep] - eps = model_output * self.alphas[timestep] - sample * self.sigmas[timestep] + eps = model_output * self.alphas[timestep] + sample * self.sigmas[timestep] else: - raise ValueError(f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`") + raise ValueError( + f"prediction_type given as {self.prediction_type} must be one of `epsilon`, `sample`, or `v`" + ) # 4. Clip "predicted x_0" if self.config.clip_sample: @@ -295,7 +310,7 @@ def step( # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) - variance = self._get_variance(timestep, prev_timestep) + variance = self._get_variance(timestep, prev_timestep, eta) std_dev_t = eta * variance ** (0.5) if use_clipped_model_output: @@ -309,7 +324,7 @@ def step( # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + eps * pred_sample_direction else: - if timestep < len(self.timesteps) - 1: + if timestep < len(self.alphas) - 1: prev_sample = pred_original_sample + self.alphas[timestep + 1] + eps * variance else: prev_sample = None @@ -364,6 +379,6 @@ def __len__(self): return self.config.num_train_timesteps def get_alpha_sigma(self, sample, timesteps, device): - alpha = expand_to_shape(self.sqrt_alphas_cumprod, timesteps, sample.shape, device) - sigma = expand_to_shape(self.sqrt_one_minus_alphas_cumprod, timesteps, sample.shape, device) + alpha = expand_to_shape(self.alphas, timesteps, sample.shape, device) + sigma = expand_to_shape(self.sigmas, timesteps, sample.shape, device) return alpha, sigma From 0b60c2b427450506bff7672550327fe22649ff7a Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 16 Nov 2022 09:49:12 -0500 Subject: [PATCH 129/133] ddim v prediction works to train butterflies example --- examples/v_prediction/train_butterflies.py | 40 +++++++++++++++--- src/diffusers/schedulers/scheduling_ddim.py | 46 +++++++++++---------- 2 files changed, 59 insertions(+), 27 deletions(-) diff --git a/examples/v_prediction/train_butterflies.py b/examples/v_prediction/train_butterflies.py index 8eaa971c80d3..bb87671a238b 100644 --- a/examples/v_prediction/train_butterflies.py +++ b/examples/v_prediction/train_butterflies.py @@ -81,11 +81,20 @@ def transform(examples): from diffusers import DDPMScheduler, DDIMPipeline, DDIMScheduler -noise_scheduler = DDIMScheduler( - num_train_timesteps=1000, - beta_schedule="squaredcos_cap_v2", - variance_type="v_diffusion", -) +if config.output_dir.startswith("ddpm"): + noise_scheduler = DDPMScheduler( + num_train_timesteps=1000, + beta_schedule="squaredcos_cap_v2", + variance_type="v_diffusion", + prediction_type="v", + ) +else: + noise_scheduler = DDIMScheduler( + num_train_timesteps=1000, + 
beta_schedule="squaredcos_cap_v2", + variance_type="v_diffusion", + prediction_type="v", + ) import torch import torch.nn.functional as F @@ -162,6 +171,21 @@ def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_s global_step = 0 + if config.output_dir.startswith("ddpm"): + + pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) + else: + pipeline = DDIMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) + + def t_to_alpha_sigma(t): + """Returns the scaling factors for the clean image and for the noise, given + a timestep.""" + return torch.cos(t * math.pi / 2), torch.sin(t * math.pi / 2) + + alpha_sigmas = [t_to_alpha_sigma(t) for t in noise_scheduler.timesteps] + + evaluate(config, 0, pipeline) + # Now you train the model for epoch in range(config.num_epochs): progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process) @@ -198,7 +222,11 @@ def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_s # After each epoch you optionally sample some demo images with evaluate() and save the model if accelerator.is_main_process: - pipeline = DDIMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) + if config.output_dir.startswith("ddpm"): + + pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) + else: + pipeline = DDIMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1: evaluate(config, epoch, pipeline) diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py index eebd1f2cd975..b6651e7c8daf 100644 --- a/src/diffusers/schedulers/scheduling_ddim.py +++ b/src/diffusers/schedulers/scheduling_ddim.py @@ -83,8 +83,12 @@ def alpha_bar(time_step): def t_to_alpha_sigma(num_diffusion_timesteps): """Returns the scaling factors for the clean image and for the noise, given a timestep.""" - alphas = torch.cos(torch.tensor([t * math.pi / 2 for t in range(num_diffusion_timesteps)])) - sigmas = torch.sin(torch.tensor([t * math.pi / 2 for t in range(num_diffusion_timesteps)])) + alphas = torch.cos( + torch.tensor([(t / num_diffusion_timesteps) * math.pi / 2 for t in range(num_diffusion_timesteps)]) + ) + sigmas = torch.sin( + torch.tensor([(t / num_diffusion_timesteps) * math.pi / 2 for t in range(num_diffusion_timesteps)]) + ) return alphas, sigmas @@ -155,6 +159,7 @@ def __init__( else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") + self.variance_type = variance_type self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) if prediction_type == "v": @@ -165,6 +170,7 @@ def __init__( # `set_alpha_to_one` decides whether we set this parameter simply to one or # whether we use the final alpha of the "non-previous" one. 
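+ # For the v-diffusion path, `final_sigma` below makes the matching choice for
+ # the noise scale: `set_alpha_to_one` pins the terminal sigma to zero, i.e. a
+ # fully denoised final step.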
self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + self.final_sigma = torch.tensor(0.0) if set_alpha_to_one else self.sigmas[0] # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 @@ -188,26 +194,29 @@ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = return sample def _get_variance(self, timestep, prev_timestep, eta=0): + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + if self.variance_type == "fixed": - alpha_prod_t = self.alphas_cumprod[timestep] - alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod - beta_prod_t = 1 - alpha_prod_t - beta_prod_t_prev = 1 - alpha_prod_t_prev variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) elif self.variance_type == "v_diffusion": # If eta > 0, adjust the scaling factor for the predicted noise # downward according to the amount of additional noise to add + # variance = torch.log(self.betas[timestep] * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) + alpha_prev = self.alphas[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + sigma_prev = self.sigmas[prev_timestep] if prev_timestep >= 0 else self.final_sigma if eta: - numerator = ( - eta * (self.sigmas[timestep + 1] ** 2 / self.sigmas[timestep] ** 2).clamp(min=1.0e-7).sqrt() - ) + numerator = eta * (sigma_prev**2 / self.sigmas[timestep] ** 2).clamp(min=1.0e-7).sqrt() else: numerator = 0 - denominator = (self.alphas[timestep + 1] / self.alphas[timestep]).clamp(min=1.0e-7).sqrt() - ddim_sigma = (numerator / denominator).clamp(min=1.0e-7) - variance = (self.sigmas[timestep + 1] ** 2 - ddim_sigma**2).sqrt() - + denominator = (1 - self.alphas[timestep] ** 2 / alpha_prev**2).clamp(min=1.0e-7).sqrt() + ddim_sigma = (numerator * denominator).clamp(min=1.0e-7) + variance = (sigma_prev**2 - ddim_sigma**2).sqrt() + if torch.isnan(variance): + variance = 0 return variance def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, **kwargs): @@ -324,10 +333,8 @@ def step( # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + eps * pred_sample_direction else: - if timestep < len(self.alphas) - 1: - prev_sample = pred_original_sample + self.alphas[timestep + 1] + eps * variance - else: - prev_sample = None + alpha_prev = self.alphas[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + prev_sample = pred_original_sample * alpha_prev + eps * variance if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 @@ -339,10 +346,7 @@ def step( prev_sample = prev_sample + variance else: - ddim_sigma = (self.sigmas[timestep + 1] ** 2 / self.sigmas[timestep] ** 2).sqrt() * ( - 1 - self.alphas[timestep] ** 2 / self.alphas[timestep + 1] ** 2 - ).sqrt() - prev_sample = prev_sample + ddim_sigma * noise + prev_sample = prev_sample + variance * noise if not return_dict: return (prev_sample,) From 8311d8980547ee5a6e4559573407236958220d11 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Wed, 16 Nov 2022 10:03:07 -0500 Subject: [PATCH 130/133] fix bad merge, style and quality --- examples/v_prediction/train_butterflies.py | 70 +++++---------------- src/diffusers/schedulers/scheduling_ddim.py | 61 ++++++++---------- 2 files changed, 40 insertions(+), 91 deletions(-) diff --git a/examples/v_prediction/train_butterflies.py b/examples/v_prediction/train_butterflies.py index bb87671a238b..5074ece86a98 100644 --- a/examples/v_prediction/train_butterflies.py +++ b/examples/v_prediction/train_butterflies.py @@ -1,5 +1,19 @@ +import glob +import os from dataclasses import dataclass +import torch +import torch.nn.functional as F + +from accelerate import Accelerator +from datasets import load_dataset +from diffusers import DDIMPipeline, DDIMScheduler, DDPMPipeline, DDPMScheduler, UNet2DModel +from diffusers.hub_utils import init_git_repo, push_to_hub +from diffusers.optimization import get_cosine_schedule_with_warmup +from PIL import Image +from torchvision import transforms +from tqdm.auto import tqdm + @dataclass class TrainingConfig: @@ -22,14 +36,11 @@ class TrainingConfig: config = TrainingConfig() -from datasets import load_dataset + config.dataset_name = "huggan/smithsonian_butterflies_subset" dataset = load_dataset(config.dataset_name, split="train") -import matplotlib.pyplot as plt - -from torchvision import transforms preprocess = transforms.Compose( [ @@ -48,12 +59,9 @@ def transform(examples): dataset.set_transform(transform) -import torch train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True) -from diffusers import UNet2DModel - model = UNet2DModel( sample_size=config.image_size, # the target image resolution @@ -79,7 +87,6 @@ def transform(examples): ), ) -from diffusers import DDPMScheduler, DDIMPipeline, DDIMScheduler if config.output_dir.startswith("ddpm"): noise_scheduler = DDPMScheduler( @@ -96,26 +103,16 @@ def transform(examples): prediction_type="v", ) -import torch -import torch.nn.functional as F - -from PIL import Image optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate) -from diffusers.optimization import get_cosine_schedule_with_warmup - lr_scheduler = get_cosine_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=config.lr_warmup_steps, num_training_steps=(len(train_dataloader) * config.num_epochs), ) -from diffusers import DDPMPipeline - -import math - def make_grid(images, rows, cols): w, h = images[0].size @@ 
-142,13 +139,6 @@ def evaluate(config, epoch, pipeline): image_grid.save(f"{test_dir}/{epoch:04d}.png") -from accelerate import Accelerator -from diffusers.hub_utils import init_git_repo, push_to_hub - -from tqdm.auto import tqdm -import os - - def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler): # Initialize accelerator and tensorboard logging accelerator = Accelerator( @@ -172,18 +162,10 @@ def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_s global_step = 0 if config.output_dir.startswith("ddpm"): - pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) else: pipeline = DDIMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) - def t_to_alpha_sigma(t): - """Returns the scaling factors for the clean image and for the noise, given - a timestep.""" - return torch.cos(t * math.pi / 2), torch.sin(t * math.pi / 2) - - alpha_sigmas = [t_to_alpha_sigma(t) for t in noise_scheduler.timesteps] - evaluate(config, 0, pipeline) # Now you train the model @@ -223,7 +205,6 @@ def t_to_alpha_sigma(t): # After each epoch you optionally sample some demo images with evaluate() and save the model if accelerator.is_main_process: if config.output_dir.startswith("ddpm"): - pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) else: pipeline = DDIMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) @@ -238,30 +219,9 @@ def t_to_alpha_sigma(t): pipeline.save_pretrained(config.output_dir) -"""## Let's train! - -Let's launch the training (including multi-GPU training) from the notebook using Accelerate's `notebook_launcher` function: -""" - -from accelerate import notebook_launcher - args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler) train_loop(*args) -"""Let's have a look at the final image grid produced by the trained diffusion model:""" - -import glob - sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png")) Image.open(sample_images[-1]) - -"""Not bad! There's room for improvement of course, so feel free to play with the hyperparameters, model definition and image augmentations 🤗 - -If you've chosen to upload the model to the Hugging Face Hub, its repository should now look like so: -https://huggingface.co/anton-l/ddpm-butterflies-128 - -If you want to dive deeper into the code, we also have more advanced training scripts with features like Exponential Moving Average of model weights here: - -https://github.com/huggingface/diffusers/tree/main/examples -""" diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py index 9e4dc2ee0627..a41ba49cb156 100644 --- a/src/diffusers/schedulers/scheduling_ddim.py +++ b/src/diffusers/schedulers/scheduling_ddim.py @@ -17,7 +17,7 @@ import math from dataclasses import dataclass -from typing import Optional, Tuple, Union, Literal +from typing import Literal, Optional, Tuple, Union import numpy as np import torch @@ -42,8 +42,8 @@ def expand_to_shape(input, timesteps, shape, device): # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class DDIMSchedulerOutput(BaseOutput): """ - Output class for the scheduler's step function output. Args: + Output class for the scheduler's step function output. prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. 
`prev_sample` should be used as next model input in the denoising loop. @@ -58,13 +58,12 @@ class DDIMSchedulerOutput(BaseOutput): def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999) -> torch.Tensor: """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the + cumulative product of (1-beta) up to that part of the diffusion process. + num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; + use values lower than 1 to prevent singularities. Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs @@ -95,18 +94,15 @@ def t_to_alpha_sigma(num_diffusion_timesteps): class DDIMScheduler(SchedulerMixin, ConfigMixin): """ - Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising - diffusion probabilistic models (DDPMs) with non-Markovian guidance. - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`~ConfigMixin`] also provides general loading and saving functionality via the [`~ConfigMixin.save_config`] and - [`~ConfigMixin.from_config`] functions. - For more details, see the original paper: https://arxiv.org/abs/2010.02502 Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): + Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising + diffusion probabilistic models (DDPMs) with non-Markovian guidance. [`~ConfigMixin`] takes care of storing all + config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can + be accessed via `scheduler.config.num_train_timesteps`. [`~ConfigMixin`] also provides general loading and saving + functionality via the [`~ConfigMixin.save_config`] and [`~ConfigMixin.from_config`] functions. For more details, + see the original paper: https://arxiv.org/abs/2010.02502 + num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the + starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, optional): @@ -186,11 +182,10 @@ def __init__( def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ + Args: Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. 
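The `prev_sample` output these docstrings describe is consumed in a loop of exactly this shape at inference time (a sketch using the stock epsilon parameterization; the UNet is randomly initialized purely so the snippet is self-contained and runnable):

import torch

from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = DDIMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 32, 32)  # start from pure noise
for t in scheduler.timesteps:
    with torch.no_grad():
        model_output = unet(sample, t).sample
    # prev_sample is x_{t-1}; it becomes the model input on the next iteration
    sample = scheduler.step(model_output, t, sample).prev_sample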
- Args: - sample (`torch.FloatTensor`): input sample - timestep (`int`, optional): current timestep + sample (`torch.FloatTensor`): input sample timestep (`int`, optional): current timestep Returns: `torch.FloatTensor`: scaled input sample """ @@ -203,7 +198,6 @@ def _get_variance(self, timestep, prev_timestep, eta=0): beta_prod_t_prev = 1 - alpha_prod_t_prev if self.variance_type == "fixed": - variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) elif self.variance_type == "v_diffusion": # If eta > 0, adjust the scaling factor for the predicted noise @@ -224,8 +218,8 @@ def _get_variance(self, timestep, prev_timestep, eta=0): def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ - Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. Args: + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. """ @@ -249,24 +243,23 @@ def step( return_dict: bool = True, ) -> Union[DDIMSchedulerOutput, Tuple]: """ + Args: Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor`): + model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current + discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. prediction_type (`str`): prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process), `sample` (directly predicting the denoised sample), or `v` (see section 2.4 https://imagen.research.google/video/paper.pdf) - eta (`float`): weight of noise for added noise in diffusion step. - use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped + eta (`float`): weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`): if + `True`, compute "corrected" `model_output` from the clipped predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would coincide with the one provided as input and `use_clipped_model_output` will have no effect. - generator: random number generator. - variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we + generator: random number generator. variance_noise (`torch.FloatTensor`): instead of generating noise for + the variance using `generator`, we can directly provide the noise for the variance itself. This is useful for methods such as CycleDiffusion.
(https://arxiv.org/abs/2210.05559) return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class @@ -362,10 +355,6 @@ def step( variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * variance_noise prev_sample = prev_sample + variance - - prev_sample = prev_sample + variance - else: - prev_sample = prev_sample + variance * noise if not return_dict: return (prev_sample,) From f0c0dee34272dfd3440278fa0f574979f234fe20 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Fri, 18 Nov 2022 16:06:15 -0500 Subject: [PATCH 131/133] closest yet --- .gitignore | 1 + examples/progressive_distillation/colab.py | 15 +-- .../pipeline_progressive_distillation.py | 94 +++++++++++-------- 3 files changed, 63 insertions(+), 47 deletions(-) diff --git a/.gitignore b/.gitignore index b5a276da1d4b..1964b12f941e 100644 --- a/.gitignore +++ b/.gitignore @@ -164,3 +164,4 @@ tags # DS_Store (MacOS) .DS_Store +*.png diff --git a/examples/progressive_distillation/colab.py b/examples/progressive_distillation/colab.py index 92b5d3f39d10..511481142c8c 100644 --- a/examples/progressive_distillation/colab.py +++ b/examples/progressive_distillation/colab.py @@ -48,10 +48,11 @@ def transform(examples): import torch import os -from diffusers import UNet2DModel, DistillationPipeline, DDPMPipeline, DDPMScheduler +from diffusers import UNet2DModel, DistillationPipeline, DDPMPipeline, DDPMScheduler, DDIMPipeline, DDIMScheduler from accelerate import Accelerator -teacher = UNet2DModel.from_pretrained("bglick13/ddpm-butterflies-128", subfolder="unet") + +teacher = UNet2DModel.from_pretrained("bglick13/ddim-butterflies-128-v-diffusion", subfolder="unet") # accelerator = Accelerator( # mixed_precision=config.mixed_precision, @@ -67,14 +68,16 @@ def transform(examples): n_teacher_trainsteps, dataset, epochs=100, - batch_size=1, + batch_size=32, mixed_precision="fp16", sample_every=1, gamma=0.0, - lr=0.3 * 5e-5, + lr=1e-4, +) +new_scheduler = DDIMScheduler( + num_train_timesteps=500, beta_schedule="squaredcos_cap_v2", variance_type="v_diffusion", prediction_type="v" ) -new_scheduler = DDPMScheduler(num_train_timesteps=500, beta_schedule="squaredcos_cap_v2") -pipeline = DDPMPipeline( +pipeline = DDIMPipeline( unet=distill_accelrator.unwrap_model(distilled_ema.averaged_model), scheduler=new_scheduler, ) diff --git a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py index eb6920ebc9b7..e5fa91321c82 100644 --- a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py +++ b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py @@ -11,7 +11,9 @@ from diffusers import DiffusionPipeline from diffusers.optimization import get_scheduler from diffusers.schedulers.scheduling_ddpm import DDPMScheduler +from diffusers.schedulers.scheduling_ddim import DDIMScheduler from diffusers.pipelines.ddpm import DDPMPipeline +from diffusers.pipelines.ddim import DDIMPipeline from diffusers.training_utils import EMAModel @@ -60,15 +62,17 @@ def __call__( train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True) # Setup the noise schedulers for the teacher and student - teacher_scheduler = DDPMScheduler( + teacher_scheduler = DDIMScheduler( num_train_timesteps=n_teacher_trainsteps, beta_schedule="squaredcos_cap_v2", variance_type="v_diffusion", + prediction_type="v", ) - student_scheduler = DDPMScheduler( + 
student_scheduler = DDIMScheduler( num_train_timesteps=n_teacher_trainsteps // 2, beta_schedule="squaredcos_cap_v2", variance_type="v_diffusion", + prediction_type="v", ) # Initialize the student model as a direct copy of the teacher @@ -116,19 +120,18 @@ def __call__( global_step = 0 # run pipeline in inference (sample random noise and denoise) on our teacher model as a baseline - # pipeline = DDPMPipeline( - # unet=teacher, - # scheduler=teacher_scheduler, - # ) + pipeline = DDIMPipeline( + unet=teacher, + scheduler=teacher_scheduler, + ) - # images = pipeline(batch_size=4, output_type="numpy", generator=torch.manual_seed(0)).images + images = pipeline(batch_size=4, generator=torch.manual_seed(0)).images - # # denormalize the images and save to tensorboard + # denormalize the images and save to tensorboard # images_processed = (images * 255).round().astype("uint8") - # for sample_number, img in enumerate(images_processed): - # img = Image.fromarray(img) + for sample_number, img in enumerate(images): - # img.save(os.path.join(sample_path, f"{n_teacher_trainsteps}", f"baseline_sample_{sample_number}.png")) + img.save(os.path.join(sample_path, f"{n_teacher_trainsteps}", f"baseline_sample_{sample_number}.png")) # Train the student for epoch in range(epochs): @@ -142,50 +145,57 @@ def __call__( noise = torch.randn(batch.shape, generator=generator).to(accelerator.device) bsz = batch.shape[0] # Sample a random timestep for each image - timesteps = ( - torch.randint( - 0, - student_scheduler.config.num_train_timesteps, - (bsz,), - device=batch.device, - generator=generator, - ).long() - * 2 - ) + + timesteps = torch.randint( + 1, + n_teacher_trainsteps - 1, + (bsz,), + device=batch.device, + generator=generator, + ).long() with torch.no_grad(): # Add noise to the image based on noise scheduler a t=timesteps alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps + 1, accelerator.device) z_t = alpha_t * batch + sigma_t * noise # Take the first diffusion step with the teacher - noise_pred_t = teacher(z_t.permute(*permute_samples), timesteps + 1).sample.permute( + v_pred_t = teacher(z_t.permute(*permute_samples), timesteps + 1).sample.permute( *permute_samples ) - x_teacher_z_t = (alpha_t * z_t - sigma_t * noise_pred_t).clip(-1, 1) + + # reconstruct the image at timesteps using v diffusion + x_teacher_z_t = (alpha_t * z_t - sigma_t * v_pred_t).clip(-1, 1) + # eps = (z - alpha*x)/sigma. 
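+ # When the reconstruction is not clipped, (z - alpha*x)/sigma is algebraically
+ # identical to sigma*z + alpha*v, since alpha**2 + sigma**2 == 1 for this schedule.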
+ eps_pred = (z_t - alpha_t * x_teacher_z_t) / sigma_t # Add noise to the image based on noise scheduler a t=timesteps-1, to prepare for the next diffusion step alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma( batch, timesteps, accelerator.device ) - z_t_prime = alpha_t_prime * x_teacher_z_t + (sigma_t_prime / sigma_t) * ( - z_t - alpha_t * x_teacher_z_t - ) + z_t_prime = alpha_t_prime * x_teacher_z_t + sigma_t_prime * eps_pred # Take the second diffusion step with the teacher - noise_pred_t_prime = teacher(z_t_prime.permute(*permute_samples), timesteps).sample.permute( + v_pred_t_prime = teacher(z_t_prime.permute(*permute_samples), timesteps).sample.permute( *permute_samples ) - rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * noise_pred_t_prime).clip(-1, 1) + rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * v_pred_t_prime).clip(-1, 1) + eps_pred = (z_t_prime - alpha_t_prime * rec_t_prime) / sigma_t_prime # V prediction per Appendix D - alpha_t_prime2, sigma_t_prime2 = student_scheduler.get_alpha_sigma( - batch, timesteps // 2, accelerator.device + alpha_t_prime2, sigma_t_prime2 = teacher_scheduler.get_alpha_sigma( + batch, (timesteps - 1), accelerator.device ) - x_teacher_z_t_prime = (z_t - alpha_t_prime2 * rec_t_prime) / sigma_t_prime2 - z_t_prime_2 = alpha_t_prime2 * x_teacher_z_t_prime - sigma_t_prime2 * rec_t_prime + z_teacher = alpha_t_prime2 * rec_t_prime + sigma_t_prime2 * eps_pred + sigma_frac = sigma_t_prime / sigma_t + + x_target = (z_teacher - sigma_frac * z_t) / (alpha_t_prime2 - sigma_frac * alpha_t) + eps_target = (z_teacher - alpha_t_prime2 * x_target) / sigma_t_prime2 + v_target = alpha_t * eps_target - sigma_t * x_target - noise_pred = student(z_t.permute(*permute_samples), timesteps).sample.permute(*permute_samples) + noise_pred = student(z_t.permute(*permute_samples), timesteps // 2).sample.permute( + *permute_samples + ) w = torch.pow(1 + alpha_t_prime2 / sigma_t_prime2, gamma) - loss = F.mse_loss(noise_pred * w, z_t_prime_2 * w) + loss = F.mse_loss(noise_pred * w, v_target * w) accelerator.backward(loss) if accelerator.sync_gradients: @@ -209,24 +219,26 @@ def __call__( progress_bar.close() if sample_every is not None: if (epoch + 1) % sample_every == 0: - new_scheduler = DDPMScheduler( + new_scheduler = DDIMScheduler( num_train_timesteps=n_teacher_trainsteps // 2, beta_schedule="squaredcos_cap_v2", variance_type="v_diffusion", + prediction_type="v", ) - pipeline = DDPMPipeline( + pipeline = DDIMPipeline( unet=accelerator.unwrap_model(ema_model.averaged_model if use_ema else student), scheduler=new_scheduler, ) # run pipeline in inference (sample random noise and denoise) - images = pipeline(batch_size=4, output_type="numpy", generator=torch.manual_seed(0)).images + images = pipeline( + batch_size=4, + generator=torch.manual_seed(0), + num_inference_steps=n_teacher_trainsteps // 2, + ).images # denormalize the images and save to tensorboard - images_processed = (images * 255).round().astype("uint8") - for sample_number, img in enumerate(images_processed): - img = Image.fromarray(img) - + for sample_number, img in enumerate(images): img.save( os.path.join( sample_path, f"{n_teacher_trainsteps}", f"epoch_{epoch}_sample_{sample_number}.png" From d50cd2f5bfd540eb00abfee1037458d15f231eae Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Sat, 19 Nov 2022 17:40:53 -0500 Subject: [PATCH 132/133] weird torch.rand bug --- .../pipeline_progressive_distillation.py | 78 +++++++++++++------ src/diffusers/schedulers/scheduling_ddim.py | 28 
+++++-- 2 files changed, 75 insertions(+), 31 deletions(-) diff --git a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py index e5fa91321c82..24b1021d338d 100644 --- a/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py +++ b/src/diffusers/pipelines/progressive_distillation/pipeline_progressive_distillation.py @@ -17,6 +17,31 @@ from diffusers.training_utils import EMAModel +def logsnr_schedule(t, logsnr_min=-20, logsnr_max=20): + logsnr_min = torch.tensor(logsnr_min, dtype=torch.float32) + logsnr_max = torch.tensor(logsnr_max, dtype=torch.float32) + b = torch.arctan(torch.exp(-0.5 * logsnr_max)) + a = torch.arctan(torch.exp(-0.5 * logsnr_min)) - b + return -2.0 * torch.log(torch.tan(a * t + b)) + + +def continuous_to_discrete_time(u, num_timesteps): + return (u * (num_timesteps - 1)).float().round().long() + + +def predict_x_from_v(*, z, v, logsnr): + logsnr = utils.broadcast_from_left(logsnr, z.shape) + alpha_t = torch.sqrt(F.sigmoid(logsnr)) + sigma_t = torch.sqrt(F.sigmoid(-logsnr)) + return alpha_t * z - sigma_t * v + + +def alpha_sigma_from_logsnr(logsnr): + alpha_t = torch.sqrt(F.sigmoid(logsnr)) + sigma_t = torch.sqrt(F.sigmoid(-logsnr)) + return alpha_t, sigma_t + + class DistillationPipeline(DiffusionPipeline): def __init__(self): pass @@ -109,8 +134,10 @@ def __call__( ) = accelerator.prepare( teacher, student, optimizer, lr_scheduler, train_data, teacher_scheduler, student_scheduler ) - if generator: - generator = accelerator.prepare(generator) + if not generator: + generator = torch.Generator().manual_seed(0) + + # generator = accelerator.prepare(generator) ema_model = EMAModel( student, inv_gamma=ema_inv_gamma, @@ -146,54 +173,55 @@ def __call__( bsz = batch.shape[0] # Sample a random timestep for each image - timesteps = torch.randint( - 1, - n_teacher_trainsteps - 1, - (bsz,), - device=batch.device, - generator=generator, - ).long() + u = torch.rand(size=(bsz,), generator=generator).to(accelerator.device) + u_1 = u - (0.5 / (n_teacher_trainsteps // 2)) + u_2 = u - (1 / (n_teacher_trainsteps // 2)) + # logsnr = logsnr_schedule(u) + # alpha_t, sigma_t = alpha_sigma_from_logsnr(logsnr) with torch.no_grad(): # Add noise to the image based on noise scheduler a t=timesteps - alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps + 1, accelerator.device) + timesteps = continuous_to_discrete_time(u, n_teacher_trainsteps) + alpha_t, sigma_t = teacher_scheduler.get_alpha_sigma(batch, timesteps, accelerator.device) z_t = alpha_t * batch + sigma_t * noise + # z_t = batch * torch.sqrt(F.sigmoid(logsnr)) + noise * torch.sqrt(F.sigmoid(-logsnr)) + # teach_out_start = teacher(z_t, continuous_to_discrete_time(u, n_teacher_trainsteps)) + # x_pred = predict_x_from_v(teach_out_start) # Take the first diffusion step with the teacher - v_pred_t = teacher(z_t.permute(*permute_samples), timesteps + 1).sample.permute( - *permute_samples - ) + v_pred_t = teacher(z_t.permute(*permute_samples), timesteps).sample.permute(*permute_samples) # reconstruct the image at timesteps using v diffusion - x_teacher_z_t = (alpha_t * z_t - sigma_t * v_pred_t).clip(-1, 1) + x_teacher_z_t = alpha_t * z_t - sigma_t * v_pred_t # eps = (z - alpha*x)/sigma. 
eps_pred = (z_t - alpha_t * x_teacher_z_t) / sigma_t # Add noise to the image based on noise scheduler a t=timesteps-1, to prepare for the next diffusion step + timesteps = continuous_to_discrete_time(u_1, n_teacher_trainsteps) alpha_t_prime, sigma_t_prime = teacher_scheduler.get_alpha_sigma( batch, timesteps, accelerator.device ) - z_t_prime = alpha_t_prime * x_teacher_z_t + sigma_t_prime * eps_pred + z_mid = alpha_t_prime * x_teacher_z_t + sigma_t_prime * eps_pred # Take the second diffusion step with the teacher - v_pred_t_prime = teacher(z_t_prime.permute(*permute_samples), timesteps).sample.permute( + v_pred_mid = teacher(z_mid.permute(*permute_samples), timesteps).sample.permute( *permute_samples ) - rec_t_prime = (alpha_t_prime * z_t_prime - sigma_t_prime * v_pred_t_prime).clip(-1, 1) + x_pred_mid = alpha_t_prime * z_mid - sigma_t_prime * v_pred_mid - eps_pred = (z_t_prime - alpha_t_prime * rec_t_prime) / sigma_t_prime - # V prediction per Appendix D + eps_pred = (z_mid - alpha_t_prime * x_pred_mid) / sigma_t_prime + + timesteps = continuous_to_discrete_time(u_2, n_teacher_trainsteps) alpha_t_prime2, sigma_t_prime2 = teacher_scheduler.get_alpha_sigma( - batch, (timesteps - 1), accelerator.device + batch, timesteps, accelerator.device ) - z_teacher = alpha_t_prime2 * rec_t_prime + sigma_t_prime2 * eps_pred - sigma_frac = sigma_t_prime / sigma_t + z_teacher = alpha_t_prime2 * x_pred_mid + sigma_t_prime2 * eps_pred + sigma_frac = sigma_t / sigma_t_prime2 x_target = (z_teacher - sigma_frac * z_t) / (alpha_t_prime2 - sigma_frac * alpha_t) eps_target = (z_teacher - alpha_t_prime2 * x_target) / sigma_t_prime2 v_target = alpha_t * eps_target - sigma_t * x_target - noise_pred = student(z_t.permute(*permute_samples), timesteps // 2).sample.permute( - *permute_samples - ) + timesteps = continuous_to_discrete_time(u_2, n_teacher_trainsteps // 2) + noise_pred = student(z_t.permute(*permute_samples), timesteps).sample.permute(*permute_samples) w = torch.pow(1 + alpha_t_prime2 / sigma_t_prime2, gamma) loss = F.mse_loss(noise_pred * w, v_target * w) accelerator.backward(loss) diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py index a41ba49cb156..d3f40a9fb7a7 100644 --- a/src/diffusers/schedulers/scheduling_ddim.py +++ b/src/diffusers/schedulers/scheduling_ddim.py @@ -80,15 +80,26 @@ def alpha_bar(time_step): return torch.tensor(betas) +def _logsnr_schedule_cosine(t, logsnr_min=-20, logsnr_max=20): + logsnr_min = torch.tensor(logsnr_min, dtype=torch.float32) + logsnr_max = torch.tensor(logsnr_max, dtype=torch.float32) + b = torch.arctan(torch.exp(-0.5 * logsnr_max)) + a = torch.arctan(torch.exp(-0.5 * logsnr_min)) - b + return -2.0 * torch.log(torch.tan(a * t + b)) + + def t_to_alpha_sigma(num_diffusion_timesteps): """Returns the scaling factors for the clean image and for the noise, given a timestep.""" - alphas = torch.cos( - torch.tensor([(t / num_diffusion_timesteps) * math.pi / 2 for t in range(num_diffusion_timesteps)]) - ) - sigmas = torch.sin( - torch.tensor([(t / num_diffusion_timesteps) * math.pi / 2 for t in range(num_diffusion_timesteps)]) - ) + out = torch.FloatTensor([_logsnr_schedule_cosine(t) for t in torch.linspace(0, 1, 1000)]) + alphas = torch.sqrt(torch.sigmoid(out)) + sigmas = torch.sqrt(torch.sigmoid(-out)) + # alphas = torch.cos( + # torch.tensor([(t / num_diffusion_timesteps) * math.pi / 2 for t in range(num_diffusion_timesteps)]) + # ) + # sigmas = torch.sin( + # torch.tensor([(t / num_diffusion_timesteps) * math.pi / 2 for 
t in range(num_diffusion_timesteps)]) + # ) return alphas, sigmas @@ -394,3 +405,8 @@ def get_alpha_sigma(self, sample, timesteps, device): alpha = expand_to_shape(self.alphas, timesteps, sample.shape, device) sigma = expand_to_shape(self.sigmas, timesteps, sample.shape, device) return alpha, sigma + + def get_alpha_sigma_from_logsnr(self, sample, logsnr, device): + alpha = expand_to_shape(self.alphas, logsnr, sample.shape, device) + sigma = expand_to_shape(self.sigmas, logsnr, sample.shape, device) + return alpha, sigma From eeb8e311af91d27cfcca59efb6961c6ec905ed60 Mon Sep 17 00:00:00 2001 From: Ben Glickenhaus Date: Sun, 20 Nov 2022 11:11:08 -0500 Subject: [PATCH 133/133] remove literals --- src/diffusers/schedulers/scheduling_ddim.py | 4 ++-- src/diffusers/schedulers/scheduling_ddpm.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py index d3f40a9fb7a7..8fdbe44cdcf6 100644 --- a/src/diffusers/schedulers/scheduling_ddim.py +++ b/src/diffusers/schedulers/scheduling_ddim.py @@ -17,7 +17,7 @@ import math from dataclasses import dataclass -from typing import Literal, Optional, Tuple, Union +from typing import Optional, Tuple, Union import numpy as np import torch @@ -151,7 +151,7 @@ def __init__( set_alpha_to_one: bool = True, variance_type: str = "fixed", steps_offset: int = 0, - prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", + prediction_type: str = "epsilon", **kwargs, ): if trained_betas is not None: diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py index 8ae50a45060d..b5602eb69c56 100644 --- a/src/diffusers/schedulers/scheduling_ddpm.py +++ b/src/diffusers/schedulers/scheduling_ddpm.py @@ -16,7 +16,7 @@ import math from dataclasses import dataclass -from typing import Literal, Optional, Tuple, Union +from typing import Optional, Tuple, Union import numpy as np import torch @@ -135,7 +135,7 @@ def __init__( trained_betas: Optional[np.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, - prediction_type: Literal["epsilon", "sample", "v"] = "epsilon", + prediction_type: str = "epsilon", ): if trained_betas is not None: self.betas = torch.from_numpy(trained_betas)
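With the `Literal` annotations removed, `prediction_type` is only validated at runtime inside `step()`. If earlier feedback is wanted, a plain-string guard in `__init__` would be a small follow-up (a sketch, not part of these patches; it simply mirrors the ValueError that `step()` already raises):

# Hypothetical guard for DDIMScheduler.__init__ / DDPMScheduler.__init__:
if prediction_type not in ("epsilon", "sample", "v"):
    raise ValueError(
        f"prediction_type given as {prediction_type} must be one of `epsilon`, `sample`, or `v`"
    )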