Merged
67 commits
bfabd0a
remove module from pyproject toml
jxtngx Jul 11, 2022
56b69ec
add error report
jxtngx Jul 11, 2022
9741a1d
fix _param_requires_grad_state
jxtngx Jul 11, 2022
41f8fbf
fix all: Function is missing a return type annotation
jxtngx Jul 11, 2022
7f2b74a
fix all: Function is missing a type annotation
jxtngx Jul 11, 2022
9ea9b96
fix: Return value expected
jxtngx Jul 11, 2022
bb85416
fix: return type of a generator function should be "Generator"
jxtngx Jul 11, 2022
97ea817
fix: Missing positional argument
jxtngx Jul 11, 2022
9a5fe10
undo change in log_grad_norm
jxtngx Jul 12, 2022
a5f261f
Merge branch 'Lightning-AI:master' into lightningmodule/code-quality/…
jxtngx Jul 16, 2022
80c45ea
update lr_schedulers; add LRSchedulerPLType
jxtngx Jul 16, 2022
90cc952
Delete module_typehint_errors.md
jxtngx Jul 16, 2022
43a77a1
add ignores
jxtngx Jul 16, 2022
dec3ee4
add Tuple
jxtngx Jul 16, 2022
8737df1
pre-commit & mypy conflict
jxtngx Jul 16, 2022
6b287d1
add ignore
jxtngx Jul 16, 2022
d36232f
remove type hints
jxtngx Jul 16, 2022
3ece027
update ignores from CI checks
jxtngx Jul 16, 2022
66e662e
update ignores
jxtngx Jul 16, 2022
73f2fc8
revert to original lr_scheduler_step and ignore
jxtngx Jul 16, 2022
c6bcfc0
Merge branch 'Lightning-AI:master' into lightningmodule/code-quality/…
jxtngx Jul 20, 2022
e9a9d5b
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Jul 22, 2022
273594c
import pl
jxtngx Jul 22, 2022
ddb260c
make bagua.py match upstream
jxtngx Jul 22, 2022
4572dbd
update to match upstream
jxtngx Jul 22, 2022
abf5b43
update
jxtngx Jul 23, 2022
f48821b
update
jxtngx Jul 23, 2022
736cbeb
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Jul 23, 2022
205e546
clean
jxtngx Jul 24, 2022
4b5ec09
Update src/pytorch_lightning/core/module.py
jxtngx Jul 24, 2022
ca4f160
update
jxtngx Jul 24, 2022
f53509e
update
jxtngx Jul 26, 2022
38aeef8
resolve merge conflicts
jxtngx Jul 26, 2022
2e6bec2
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Jul 27, 2022
cf4aeb1
update lightningmodule.logger
jxtngx Jul 27, 2022
2d4a5db
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Jul 28, 2022
d2742ce
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Jul 28, 2022
df17937
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Jul 28, 2022
f839283
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Jul 28, 2022
049ace9
update
jxtngx Jul 28, 2022
4ddf841
update
jxtngx Jul 29, 2022
024906c
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Jul 29, 2022
d59f1cc
promote DeviceDtypeModuleMixin.device to torch.device
jxtngx Jul 29, 2022
fd94b0d
resolve redundant cast
jxtngx Jul 29, 2022
669464f
update
jxtngx Jul 29, 2022
b0989f3
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Aug 1, 2022
1d9520d
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Aug 1, 2022
db8a9e2
update
jxtngx Aug 1, 2022
2fb7fd2
update
jxtngx Aug 1, 2022
8a8e86f
update
jxtngx Aug 1, 2022
d92a63a
resolve merge conflicts
jxtngx Aug 2, 2022
afc43cd
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Aug 3, 2022
e2753c6
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Aug 3, 2022
058e018
resolve merge conflicts
jxtngx Aug 5, 2022
9c49f8c
resolve merge conflicts
jxtngx Aug 8, 2022
57ebb77
Merge branch 'master' into lightningmodule/code-quality/type-hints
jxtngx Aug 9, 2022
849e6a7
Self suggestions
carmocca Aug 10, 2022
d606a02
fix merge conflict
jxtngx Aug 12, 2022
f954534
update
jxtngx Aug 12, 2022
b9ed56e
update
jxtngx Aug 12, 2022
dc76adc
move module_optimizers to types file
awaelchli Aug 18, 2022
87730f9
Revert "move module_optimizers to types file"
awaelchli Aug 18, 2022
f65d3ec
Merge branch 'master' into lightningmodule/code-quality/type-hints
awaelchli Aug 18, 2022
2eb738b
ignore trainer assignment
awaelchli Aug 18, 2022
ea4eb49
ignore mypy bug
awaelchli Aug 18, 2022
d0692d0
avoid ignore for split_x
awaelchli Aug 18, 2022
c1bf538
fix tuple
awaelchli Aug 18, 2022
1 change: 0 additions & 1 deletion pyproject.toml
@@ -52,7 +52,6 @@ module = [
"pytorch_lightning.callbacks.progress.rich_progress",
"pytorch_lightning.callbacks.quantization",
"pytorch_lightning.core.datamodule",
"pytorch_lightning.core.module",
"pytorch_lightning.demos.boring_classes",
"pytorch_lightning.demos.mnist_datamodule",
"pytorch_lightning.profilers.base",
2 changes: 1 addition & 1 deletion src/pytorch_lightning/core/mixins/device_dtype_mixin.py
@@ -37,7 +37,7 @@ def dtype(self, new_dtype: Union[str, torch.dtype]) -> None:
raise RuntimeError("Cannot set the dtype explicitly. Please use module.to(new_dtype).")

@property
def device(self) -> Union[str, torch.device]:
def device(self) -> torch.device:
device = self._device

# make this more explicit to always include the index
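Narrowing the return type from Union[str, torch.device] to torch.device lets downstream call sites drop their casts (see the data_parallel.py change at the end of this diff). A minimal sketch of the pattern, not part of the PR and not the actual mixin implementation:

import torch

class _DeviceMixinSketch:
    """Illustrative stand-in for DeviceDtypeModuleMixin; not the real class."""

    def __init__(self) -> None:
        self._device: torch.device = torch.device("cpu")

    @property
    def device(self) -> torch.device:
        # Returning a concrete torch.device (never a str) means callers can pass
        # the value straight to tensor factories without a typing.cast.
        return self._device

x = torch.zeros(1, device=_DeviceMixinSketch().device)  # type-checks without cast()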
105 changes: 56 additions & 49 deletions src/pytorch_lightning/core/module.py
@@ -22,7 +22,7 @@
import weakref
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, overload, Sequence, Tuple, Union
from typing import Any, Callable, Dict, Generator, List, Mapping, Optional, overload, Sequence, Tuple, Union

import torch
from torch import ScriptModule, Tensor
@@ -47,12 +47,20 @@
from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_11, _TORCH_GREATER_EQUAL_1_13
from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_deprecation, rank_zero_warn
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.types import _METRIC_COLLECTION, EPOCH_OUTPUT, LRSchedulerTypeUnion, STEP_OUTPUT
from pytorch_lightning.utilities.types import (
_METRIC_COLLECTION,
EPOCH_OUTPUT,
LRSchedulerPLType,
LRSchedulerTypeUnion,
STEP_OUTPUT,
)
from pytorch_lightning.utilities.warnings import WarningCache

warning_cache = WarningCache()
log = logging.getLogger(__name__)

MODULE_OPTIMIZERS = Union[Optimizer, LightningOptimizer, List[Optimizer], List[LightningOptimizer]]


class LightningModule(
DeviceDtypeModuleMixin,
@@ -104,7 +112,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
self._current_fx_name: Optional[str] = None
self._automatic_optimization: bool = True
self._truncated_bptt_steps: int = 0
self._param_requires_grad_state = {}
self._param_requires_grad_state: Dict[str, bool] = {}
self._metric_attributes: Optional[Dict[int, str]] = None
self._should_prevent_trainer_and_dataloaders_deepcopy: bool = False
# TODO: remove in 1.8
@@ -121,14 +129,10 @@ def optimizers(self, use_pl_optimizer: Literal[False]) -> Union[Optimizer, List[
...

@overload
def optimizers(
self, use_pl_optimizer: bool
) -> Union[Optimizer, LightningOptimizer, List[Optimizer], List[LightningOptimizer]]:
def optimizers(self, use_pl_optimizer: bool) -> MODULE_OPTIMIZERS:
...

def optimizers(
self, use_pl_optimizer: bool = True
) -> Union[Optimizer, LightningOptimizer, List[Optimizer], List[LightningOptimizer]]:
def optimizers(self, use_pl_optimizer: bool = True) -> MODULE_OPTIMIZERS:
"""Returns the optimizer(s) that are being used during training. Useful for manual optimization.

Args:
@@ -140,7 +144,7 @@ def optimizers(
A single optimizer, or a list of optimizers in case multiple ones are present.
"""
if use_pl_optimizer:
opts = list(self.trainer.strategy._lightning_optimizers.values())
opts: MODULE_OPTIMIZERS = list(self.trainer.strategy._lightning_optimizers.values())
else:
opts = self.trainer.optimizers

@@ -150,7 +154,7 @@ def optimizers(
# multiple opts
return opts
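The MODULE_OPTIMIZERS alias plus the Literal overloads above let mypy narrow the return type at each call site. A self-contained sketch of that overload pattern, not from the PR, using illustrative placeholder classes instead of the real Optimizer/LightningOptimizer types:

from typing import List, Literal, Union, overload

class _Plain: ...       # stands in for torch.optim.Optimizer
class _Wrapped: ...     # stands in for LightningOptimizer

_ALL = Union[_Plain, _Wrapped, List[_Plain], List[_Wrapped]]

class _Owner:
    @overload
    def optimizers(self, use_pl_optimizer: Literal[True] = True) -> Union[_Wrapped, List[_Wrapped]]:
        ...

    @overload
    def optimizers(self, use_pl_optimizer: Literal[False]) -> Union[_Plain, List[_Plain]]:
        ...

    @overload
    def optimizers(self, use_pl_optimizer: bool) -> _ALL:
        ...

    def optimizers(self, use_pl_optimizer: bool = True) -> _ALL:
        # The implementation signature must be compatible with every overload.
        return _Wrapped() if use_pl_optimizer else _Plain()

opt = _Owner().optimizers(use_pl_optimizer=False)  # mypy infers Union[_Plain, List[_Plain]]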

def lr_schedulers(self) -> Optional[Union[LRSchedulerTypeUnion, List[LRSchedulerTypeUnion]]]:
def lr_schedulers(self) -> Union[None, List[LRSchedulerPLType], LRSchedulerPLType]:
"""Returns the learning rate scheduler(s) that are being used during training. Useful for manual
optimization.

@@ -162,7 +166,7 @@ def lr_schedulers(self) -> Optional[Union[LRSchedulerTypeUnion, List[LRScheduler
return None

# ignore other keys "interval", "frequency", etc.
lr_schedulers = [config.scheduler for config in self.trainer.lr_scheduler_configs]
lr_schedulers: List[LRSchedulerPLType] = [config.scheduler for config in self.trainer.lr_scheduler_configs]

# single scheduler
if len(lr_schedulers) == 1:
@@ -175,13 +179,13 @@ def lr_schedulers(self) -> Optional[Union[LRSchedulerTypeUnion, List[LRScheduler
def trainer(self) -> "pl.Trainer":
if not self._running_torchscript and self._trainer is None:
raise RuntimeError(f"{self.__class__.__qualname__} is not attached to a `Trainer`.")
return self._trainer
return self._trainer # type: ignore[return-value]

@trainer.setter
def trainer(self, trainer: Optional["pl.Trainer"]) -> None:
for v in self.children():
if isinstance(v, LightningModule):
v.trainer = trainer
v.trainer = trainer # type: ignore[assignment]
if trainer is not None and not isinstance(trainer, weakref.ProxyTypes):
trainer = weakref.proxy(trainer)
self._trainer = trainer
@@ -228,7 +232,7 @@ def local_rank(self) -> int:
return self.trainer.local_rank if self._trainer else 0

@property
def on_gpu(self):
def on_gpu(self) -> bool:
"""Returns ``True`` if this model is currently located on a GPU.

Useful to set flags around the LightningModule for different CPU vs GPU behavior.
@@ -264,7 +268,7 @@ def logger(self) -> Optional[Logger]:
# this should match the implementation of `trainer.logger`
# we don't reuse it so we can properly set the deprecation stacklevel
if self._trainer is None:
return
return None
loggers = self.trainer.loggers
if len(loggers) == 0:
return None
@@ -287,15 +291,15 @@ def loggers(self) -> List[Logger]:
"""Reference to the list of loggers in the Trainer."""
return self.trainer.loggers if self._trainer else []

def _call_batch_hook(self, hook_name, *args) -> Any:
def _call_batch_hook(self, hook_name: str, *args: Any) -> Any:
if self._trainer:
datahook_selector = self._trainer._data_connector._datahook_selector
obj = datahook_selector.get_instance(hook_name)
trainer_method = (
self._trainer._call_lightning_module_hook
if isinstance(obj, self.__class__)
else self._trainer._call_lightning_datamodule_hook
)
if isinstance(obj, self.__class__):
trainer_method = self._trainer._call_lightning_module_hook
else:
trainer_method = self._trainer._call_lightning_datamodule_hook

return trainer_method(hook_name, *args)
else:
hook = getattr(self, hook_name)
@@ -312,7 +316,7 @@ def _apply_batch_transfer_handler(
batch = self._call_batch_hook("on_after_batch_transfer", batch, dataloader_idx)
return batch

def print(self, *args, **kwargs) -> None:
def print(self, *args: Any, **kwargs: Any) -> None:
r"""
Prints only from process 0. Use this in any distributed mode to log only once.

@@ -463,7 +467,7 @@ def log(
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
reduce_fx=reduce_fx, # type: ignore[arg-type]
enable_graph=enable_graph,
add_dataloader_idx=add_dataloader_idx,
batch_size=batch_size,
@@ -578,7 +582,9 @@ def log_grad_norm(self, grad_norm_dict):
"""
self.log_dict(grad_norm_dict, on_step=True, on_epoch=True, prog_bar=False, logger=True)

def all_gather(self, data: Union[Tensor, Dict, List, Tuple], group: Optional[Any] = None, sync_grads: bool = False):
def all_gather(
self, data: Union[Tensor, Dict, List, Tuple], group: Optional[Any] = None, sync_grads: bool = False
) -> Union[Tensor, Dict, List, Tuple]:
r"""
Allows users to call ``self.all_gather()`` from the LightningModule, thus making the ``all_gather`` operation
accelerator agnostic. ``all_gather`` is a function provided by accelerators to gather a tensor from several
@@ -598,7 +604,7 @@ def all_gather(self, data: Union[Tensor, Dict, List, Tuple], group: Optional[Any
data = convert_to_tensors(data, device=self.device)
return apply_to_collection(data, Tensor, all_gather, group=group, sync_grads=sync_grads)

def forward(self, *args, **kwargs) -> Any:
def forward(self, *args: Any, **kwargs: Any) -> Any:
r"""
Same as :meth:`torch.nn.Module.forward()`.

@@ -611,7 +617,7 @@ def forward(self, *args, **kwargs) -> Any:
"""
return super().forward(*args, **kwargs)

def training_step(self, *args, **kwargs) -> STEP_OUTPUT:
def training_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT:
r"""
Here you compute and return the training loss and some additional metrics for e.g.
the progress bar or logger.
@@ -769,7 +775,7 @@ def training_epoch_end(self, training_step_outputs):
...
"""

def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
def validation_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]:
r"""
Operates on a single batch of data from the validation set.
In this step you might generate examples or calculate anything of interest like accuracy.
@@ -858,7 +864,7 @@ def validation_step(self, batch, batch_idx, dataloader_idx=0):
the model goes back to training mode and gradients are enabled.
"""

def validation_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
def validation_step_end(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]:
"""Use this when validating with dp because :meth:`validation_step` will operate on only part of the batch.
However, this is still optional and only needed for things like softmax or NCE loss.

@@ -955,7 +961,7 @@ def validation_epoch_end(self, outputs):
self.log("final_metric", final_value)
"""

def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
def test_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]:
r"""
Operates on a single batch of data from the test set.
In this step you'd normally generate examples or calculate anything of interest
@@ -1035,7 +1041,7 @@ def test_step(self, batch, batch_idx, dataloader_idx=0):
to training mode and gradients are enabled.
"""

def test_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
def test_step_end(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]:
"""Use this when testing with DP because :meth:`test_step` will operate on only part of the batch. However,
this is still optional and only needed for things like softmax or NCE loss.

@@ -1200,7 +1206,7 @@ def configure_callbacks(self):
"""
return []

def configure_optimizers(self):
def configure_optimizers(self) -> Any:
r"""
Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
@@ -1374,7 +1380,7 @@ def configure_optimizers(self):
"""
rank_zero_warn("`configure_optimizers` must be implemented to be used with the Lightning Trainer")

def manual_backward(self, loss: Tensor, *args, **kwargs) -> None:
def manual_backward(self, loss: Tensor, *args: Any, **kwargs: Any) -> None:
"""Call this directly from your :meth:`training_step` when doing optimizations manually. By using this,
Lightning can ensure that all the proper scaling gets applied when using mixed precision.

@@ -1399,7 +1405,7 @@ def training_step(...):
self.trainer.strategy.backward(loss, None, None, *args, **kwargs)

def backward(
self, loss: Tensor, optimizer: Optional[Optimizer], optimizer_idx: Optional[int], *args, **kwargs
self, loss: Tensor, optimizer: Optional[Optimizer], optimizer_idx: Optional[int], *args: Any, **kwargs: Any
) -> None:
"""Called to perform backward on the loss returned in :meth:`training_step`. Override this hook with your
own implementation if you need to.
@@ -1442,7 +1448,7 @@ def toggle_optimizer(self, optimizer: Union[Optimizer, LightningOptimizer], opti

# Then iterate over the current optimizer's parameters and set its `requires_grad`
# properties accordingly
for group in optimizer.param_groups:
for group in optimizer.param_groups: # type: ignore[union-attr]
for param in group["params"]:
param.requires_grad = param_requires_grad_state[param]
self._param_requires_grad_state = param_requires_grad_state
@@ -1469,7 +1475,7 @@ def clip_gradients(
optimizer: Optimizer,
gradient_clip_val: Optional[Union[int, float]] = None,
gradient_clip_algorithm: Optional[str] = None,
):
) -> None:
"""Handles gradient clipping internally.

Note:
@@ -1523,7 +1529,7 @@ def configure_gradient_clipping(
optimizer_idx: int,
gradient_clip_val: Optional[Union[int, float]] = None,
gradient_clip_algorithm: Optional[str] = None,
):
) -> None:
"""Perform gradient clipping for the optimizer parameters. Called before :meth:`optimizer_step`.

Args:
@@ -1584,7 +1590,7 @@ def lr_scheduler_step(self, scheduler, optimizer_idx, metric):

"""
if metric is None:
scheduler.step()
scheduler.step() # type: ignore[call-arg]
else:
scheduler.step(metric)

@@ -1672,7 +1678,7 @@ def optimizer_step(
"""
optimizer.step(closure=optimizer_closure)

def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int) -> None:
"""Override this method to change the default behaviour of ``optimizer.zero_grad()``.

Args:
@@ -1741,12 +1747,11 @@ def tbptt_split_batch(self, batch, split_size):
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
split_x: Union[Tensor, List[Tensor]]
if isinstance(x, Tensor):
split_x = x[:, t : t + split_size]
elif isinstance(x, collections.abc.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t : t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [x[batch_idx][t : t + split_size] for batch_idx in range(len(x))]

batch_split.append(split_x)
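The split_x rewrite above avoids a type: ignore by declaring the Union once and then assigning either branch. A standalone sketch of the same pattern (function name and arguments are illustrative):

from typing import List, Sequence, Union

import torch
from torch import Tensor

def split_chunk(x: Union[Tensor, Sequence[Tensor]], t: int, size: int) -> Union[Tensor, List[Tensor]]:
    # Declare the Union-typed variable up front, then assign in either branch;
    # mypy accepts both assignments without an ignore.
    split_x: Union[Tensor, List[Tensor]]
    if isinstance(x, Tensor):
        split_x = x[:, t : t + size]
    else:
        split_x = [xi[t : t + size] for xi in x]
    return split_x

chunk = split_chunk(torch.ones(2, 8), t=0, size=4)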

@@ -1782,15 +1787,15 @@ def unfreeze(self) -> None:

self.train()

def _verify_is_manual_optimization(self, fn_name):
def _verify_is_manual_optimization(self, fn_name: str) -> None:
if self.automatic_optimization:
raise MisconfigurationException(
f"to use {fn_name}, please disable automatic optimization:"
" set model property `automatic_optimization` as False"
)

@torch.no_grad()
def to_onnx(self, file_path: Union[str, Path], input_sample: Optional[Any] = None, **kwargs):
def to_onnx(self, file_path: Union[str, Path], input_sample: Optional[Any] = None, **kwargs: Any) -> None:
"""Saves the model in ONNX format.

Args:
@@ -1829,7 +1834,7 @@ def to_onnx(self, file_path: Union[str, Path], input_sample: Optional[Any] = Non

if not _TORCH_GREATER_EQUAL_1_10 and "example_outputs" not in kwargs:
self.eval()
if isinstance(input_sample, Tuple):
if isinstance(input_sample, tuple):
kwargs["example_outputs"] = self(*input_sample)
else:
kwargs["example_outputs"] = self(input_sample)
@@ -1843,7 +1848,7 @@ def to_torchscript(
file_path: Optional[Union[str, Path]] = None,
method: Optional[str] = "script",
example_inputs: Optional[Any] = None,
**kwargs,
**kwargs: Any,
) -> Union[ScriptModule, Dict[str, ScriptModule]]:
"""By default compiles the whole model to a :class:`~torch.jit.ScriptModule`. If you want to use tracing,
please provide the argument ``method='trace'`` and make sure that either the `example_inputs` argument is
@@ -1953,7 +1958,7 @@ def use_amp(self, use_amp: bool) -> None:
self._use_amp = use_amp

@contextmanager
def _prevent_trainer_and_dataloaders_deepcopy(self) -> None:
def _prevent_trainer_and_dataloaders_deepcopy(self) -> Generator[None, None, None]:
self._should_prevent_trainer_and_dataloaders_deepcopy = True
yield
self._should_prevent_trainer_and_dataloaders_deepcopy = False
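The Generator[None, None, None] annotation above follows from how @contextmanager works: the decorated function is itself a generator. A minimal standalone sketch, not from the PR:

from contextlib import contextmanager
from typing import Generator

@contextmanager
def flag_guard() -> Generator[None, None, None]:
    # Yields nothing, accepts nothing via send(), and returns nothing,
    # hence Generator[None, None, None] rather than a plain None return type.
    print("flag set")
    yield
    print("flag cleared")

with flag_guard():
    pass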
@@ -1988,4 +1993,6 @@ def _register_sharded_tensor_state_dict_hooks_if_available(self) -> None:
self._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
else:
# We need to make sure the self inside the method is a weakref proxy
self.__class__._register_load_state_dict_pre_hook(weakref.proxy(self), pre_load_state_dict_hook, True)
self.__class__._register_load_state_dict_pre_hook(
weakref.proxy(self), pre_load_state_dict_hook, True # type: ignore[arg-type]
)
4 changes: 2 additions & 2 deletions src/pytorch_lightning/overrides/data_parallel.py
@@ -13,7 +13,7 @@
# limitations under the License.
import numbers
import warnings
from typing import Any, cast, Optional, Union
from typing import Any, Optional, Union

import torch
from torch import Tensor
@@ -77,7 +77,7 @@ def forward(self, *inputs: Any, **kwargs: Any) -> Any:
output = super().forward(*inputs, **kwargs)

def output_transform(data: Any) -> Any:
device = cast(torch.device, self.lightning_module.device)
device = self.lightning_module.device
data = python_scalar_to_tensor(data, device)
data = unsqueeze_scalar_tensor(data)
return data