From c0bc26411d7ccda327dd1cc12a4d3ef85e250307 Mon Sep 17 00:00:00 2001 From: Jerome Date: Tue, 28 Jun 2022 06:03:34 +0300 Subject: [PATCH 01/89] Add auto_device_count and device name support Signed-off-by: Jerome --- src/pytorch_lightning/accelerators/hpu.py | 24 +++++++++++++++---- .../strategies/hpu_parallel.py | 2 +- .../strategies/single_hpu.py | 1 - tests/tests_pytorch/accelerators/test_hpu.py | 6 +++++ 4 files changed, 27 insertions(+), 6 deletions(-) diff --git a/src/pytorch_lightning/accelerators/hpu.py b/src/pytorch_lightning/accelerators/hpu.py index 76fdb02b307b8..040c8d91414d4 100644 --- a/src/pytorch_lightning/accelerators/hpu.py +++ b/src/pytorch_lightning/accelerators/hpu.py @@ -21,6 +21,8 @@ from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.rank_zero import rank_zero_debug +if _HPU_AVAILABLE: + import habana_frameworks.torch.hpu as torch_hpu class HPUAccelerator(Accelerator): """Accelerator for HPU devices.""" @@ -52,13 +54,27 @@ def get_parallel_devices(devices: int) -> List[torch.device]: @staticmethod def auto_device_count() -> int: - """Get the devices when set to auto.""" - # TODO(@kaushikb11): Update this when api is exposed by the Habana team - return 8 + """Returns the number of HPU devices when the devices is set to auto.""" + try: + return torch_hpu.device_count() + except: + return 0 @staticmethod def is_available() -> bool: - return _HPU_AVAILABLE + """Returns a bool indicating if HPU is currently available.""" + try: + return torch_hpu.is_available() + except: + return False + + @staticmethod + def get_device_name() -> str: + """Returns the name of the HPU device.""" + try: + return torch_hpu.get_device_name() + except: + return "" @classmethod def register_accelerators(cls, accelerator_registry: Dict) -> None: diff --git a/src/pytorch_lightning/strategies/hpu_parallel.py b/src/pytorch_lightning/strategies/hpu_parallel.py index 9e0dc84402cca..35f40b9cef4a4 100644 --- a/src/pytorch_lightning/strategies/hpu_parallel.py +++ b/src/pytorch_lightning/strategies/hpu_parallel.py @@ -32,7 +32,7 @@ if _HPU_AVAILABLE: import habana_frameworks.torch.core as htcore - import habana_frameworks.torch.core.hccl # noqa: F401 + import habana_frameworks.torch.distributed.hccl # noqa: F401 log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/strategies/single_hpu.py b/src/pytorch_lightning/strategies/single_hpu.py index 7190db0061f0a..cbc9dee12640f 100644 --- a/src/pytorch_lightning/strategies/single_hpu.py +++ b/src/pytorch_lightning/strategies/single_hpu.py @@ -24,7 +24,6 @@ if _HPU_AVAILABLE: import habana_frameworks.torch.core as htcore - import habana_frameworks.torch.core.hccl # noqa: F401 class SingleHPUStrategy(SingleDeviceStrategy): diff --git a/tests/tests_pytorch/accelerators/test_hpu.py b/tests/tests_pytorch/accelerators/test_hpu.py index 96e312f3fbfb8..9fd886b1a4621 100644 --- a/tests/tests_pytorch/accelerators/test_hpu.py +++ b/tests/tests_pytorch/accelerators/test_hpu.py @@ -40,6 +40,11 @@ def test_availability(): assert HPUAccelerator.is_available() +@RunIf(hpu=True) +def test_device_name(): + assert HPUAccelerator.get_device_name() in ["GAUDI"] + + @pytest.mark.skipif(_HPU_AVAILABLE, reason="test requires non-HPU machine") def test_fail_if_no_hpus(): with pytest.raises(MisconfigurationException, match="HPUAccelerator can not run on your system"): @@ -239,6 +244,7 @@ def test_inference_only(tmpdir, hpus): trainer.predict(model) +@RunIf(hpu=True) def test_hpu_auto_device_count(): assert 
HPUAccelerator.auto_device_count() == 8 From 24cbfc15c6efd3ab1834e20c998a5307438b5643 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 03:07:58 +0000 Subject: [PATCH 02/89] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/pytorch_lightning/accelerators/hpu.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pytorch_lightning/accelerators/hpu.py b/src/pytorch_lightning/accelerators/hpu.py index 040c8d91414d4..7eddde6f36f19 100644 --- a/src/pytorch_lightning/accelerators/hpu.py +++ b/src/pytorch_lightning/accelerators/hpu.py @@ -24,6 +24,7 @@ if _HPU_AVAILABLE: import habana_frameworks.torch.hpu as torch_hpu + class HPUAccelerator(Accelerator): """Accelerator for HPU devices.""" From c103cff7846977c4557ced7f5b1af6b2b81e4869 Mon Sep 17 00:00:00 2001 From: Jerome Date: Tue, 28 Jun 2022 07:47:48 +0300 Subject: [PATCH 03/89] Update change log Signed-off-by: Jerome --- src/pytorch_lightning/CHANGELOG.md | 3 +++ src/pytorch_lightning/accelerators/hpu.py | 6 +++--- tests/tests_pytorch/accelerators/test_hpu.py | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 314fa55a61f98..63c5fb25cb5e4 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -122,6 +122,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - The `WandbLogger` will now use the run name in the logs folder if it is provided, and otherwise the project name ([#12604](https://github.com/PyTorchLightning/pytorch-lightning/pull/12604)) +- Updated hpus's auto_device_count, device available & name usage based on the updated torch habana package ([#13423](https://github.com/PyTorchLightning/pytorch-lightning/pull/13423)) + + - diff --git a/src/pytorch_lightning/accelerators/hpu.py b/src/pytorch_lightning/accelerators/hpu.py index 7eddde6f36f19..53c76c425d170 100644 --- a/src/pytorch_lightning/accelerators/hpu.py +++ b/src/pytorch_lightning/accelerators/hpu.py @@ -58,7 +58,7 @@ def auto_device_count() -> int: """Returns the number of HPU devices when the devices is set to auto.""" try: return torch_hpu.device_count() - except: + except AttributeError: return 0 @staticmethod @@ -66,7 +66,7 @@ def is_available() -> bool: """Returns a bool indicating if HPU is currently available.""" try: return torch_hpu.is_available() - except: + except AttributeError: return False @staticmethod @@ -74,7 +74,7 @@ def get_device_name() -> str: """Returns the name of the HPU device.""" try: return torch_hpu.get_device_name() - except: + except AttributeError: return "" @classmethod diff --git a/tests/tests_pytorch/accelerators/test_hpu.py b/tests/tests_pytorch/accelerators/test_hpu.py index 9fd886b1a4621..eaa0ca9b65e27 100644 --- a/tests/tests_pytorch/accelerators/test_hpu.py +++ b/tests/tests_pytorch/accelerators/test_hpu.py @@ -42,7 +42,7 @@ def test_availability(): @RunIf(hpu=True) def test_device_name(): - assert HPUAccelerator.get_device_name() in ["GAUDI"] + assert HPUAccelerator.get_device_name() in ["GAUDI", "GAUDI HL2000M"] @pytest.mark.skipif(_HPU_AVAILABLE, reason="test requires non-HPU machine") From ca009a4ef1b9f159965bee2f9400af8a65415967 Mon Sep 17 00:00:00 2001 From: Jerome Date: Wed, 29 Jun 2022 05:24:01 +0300 Subject: [PATCH 04/89] return default device count on failure Signed-off-by: Jerome --- src/pytorch_lightning/accelerators/hpu.py | 3 
++- tests/tests_pytorch/accelerators/test_hpu.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/pytorch_lightning/accelerators/hpu.py b/src/pytorch_lightning/accelerators/hpu.py index 53c76c425d170..06c543b4a0c93 100644 --- a/src/pytorch_lightning/accelerators/hpu.py +++ b/src/pytorch_lightning/accelerators/hpu.py @@ -59,7 +59,8 @@ def auto_device_count() -> int: try: return torch_hpu.device_count() except AttributeError: - return 0 + rank_zero_debug("HPU auto_device_count failed, returning default count of 8.") + return 8 @staticmethod def is_available() -> bool: diff --git a/tests/tests_pytorch/accelerators/test_hpu.py b/tests/tests_pytorch/accelerators/test_hpu.py index eaa0ca9b65e27..9fd886b1a4621 100644 --- a/tests/tests_pytorch/accelerators/test_hpu.py +++ b/tests/tests_pytorch/accelerators/test_hpu.py @@ -42,7 +42,7 @@ def test_availability(): @RunIf(hpu=True) def test_device_name(): - assert HPUAccelerator.get_device_name() in ["GAUDI", "GAUDI HL2000M"] + assert HPUAccelerator.get_device_name() in ["GAUDI"] @pytest.mark.skipif(_HPU_AVAILABLE, reason="test requires non-HPU machine") From 3c3fb0b571c8ee106958348d7f1c22fad0011ecd Mon Sep 17 00:00:00 2001 From: Jerome Anand <88475913+jerome-habana@users.noreply.github.com> Date: Wed, 29 Jun 2022 15:38:48 +0530 Subject: [PATCH 05/89] Update src/pytorch_lightning/CHANGELOG.md Co-authored-by: Kaushik B <45285388+kaushikb11@users.noreply.github.com> --- src/pytorch_lightning/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 63c5fb25cb5e4..67ec1d604a89e 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -122,7 +122,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- The `WandbLogger` will now use the run name in the logs folder if it is provided, and otherwise the project name ([#12604](https://github.com/PyTorchLightning/pytorch-lightning/pull/12604)) -- Updated hpus's auto_device_count, device available & name usage based on the updated torch habana package ([#13423](https://github.com/PyTorchLightning/pytorch-lightning/pull/13423)) +- Updated Habana Accelerator's `auto_device_count`, `is_available` & `get_device_name` methods based on the latest torch habana package ([#13423](https://github.com/PyTorchLightning/pytorch-lightning/pull/13423)) - From b4565649d1ca9674a89c0fef170080bf8ae0544c Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Wed, 29 Jun 2022 15:20:34 +0200 Subject: [PATCH 06/89] Apply suggestions from code review Co-authored-by: Rohit Gupta Co-authored-by: Justus Schock <12886177+justusschock@users.noreply.github.com> --- src/pytorch_lightning/accelerators/hpu.py | 2 +- tests/tests_pytorch/accelerators/test_hpu.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pytorch_lightning/accelerators/hpu.py b/src/pytorch_lightning/accelerators/hpu.py index 06c543b4a0c93..b58599bece7f8 100644 --- a/src/pytorch_lightning/accelerators/hpu.py +++ b/src/pytorch_lightning/accelerators/hpu.py @@ -59,7 +59,7 @@ def auto_device_count() -> int: try: return torch_hpu.device_count() except AttributeError: - rank_zero_debug("HPU auto_device_count failed, returning default count of 8.") + rank_zero_debug("HPU `auto_device_count` failed, returning default count of 8.") return 8 @staticmethod diff --git a/tests/tests_pytorch/accelerators/test_hpu.py b/tests/tests_pytorch/accelerators/test_hpu.py index 9fd886b1a4621..0ef63de417907 100644 --- a/tests/tests_pytorch/accelerators/test_hpu.py +++ b/tests/tests_pytorch/accelerators/test_hpu.py @@ -42,7 +42,7 @@ def test_availability(): @RunIf(hpu=True) def test_device_name(): - assert HPUAccelerator.get_device_name() in ["GAUDI"] + assert HPUAccelerator.get_device_name() == "GAUDI" @pytest.mark.skipif(_HPU_AVAILABLE, reason="test requires non-HPU machine") From 19e4601621b02e6a083a6bcead123b8e5652f163 Mon Sep 17 00:00:00 2001 From: Sean Naren Date: Tue, 28 Jun 2022 13:41:08 +0100 Subject: [PATCH 07/89] Remove unnecessary endpoint logic, rename `collaborative` to `hivemind` (#13392) * Remove endpoint after collaborate app/dht CLI * Fix references, change filename * Add CHANGELOG.md * Address review Co-authored-by: Jirka --- docs/source-pytorch/common_usecases.rst | 2 +- docs/source-pytorch/extensions/strategy.rst | 2 +- docs/source-pytorch/index.rst | 4 +- ...ollaborative_training.rst => hivemind.rst} | 8 +- ..._training_basic.rst => hivemind_basic.rst} | 2 +- ...raining_expert.rst => hivemind_expert.rst} | 2 +- ...rmediate.rst => hivemind_intermediate.rst} | 2 +- src/pytorch_lightning/CHANGELOG.md | 1 + src/pytorch_lightning/strategies/__init__.py | 2 +- .../{collaborative.py => hivemind.py} | 258 +++--------------- ...test_collaborative.py => test_hivemind.py} | 71 +---- 11 files changed, 50 insertions(+), 304 deletions(-) rename docs/source-pytorch/strategies/{collaborative_training.rst => hivemind.rst} (86%) rename docs/source-pytorch/strategies/{collaborative_training_basic.rst => hivemind_basic.rst} (98%) rename docs/source-pytorch/strategies/{collaborative_training_expert.rst => hivemind_expert.rst} (98%) rename docs/source-pytorch/strategies/{collaborative_training_intermediate.rst => hivemind_intermediate.rst} (98%) rename src/pytorch_lightning/strategies/{collaborative.py => 
hivemind.py} (60%) rename tests/tests_pytorch/strategies/{test_collaborative.py => test_hivemind.py} (79%) diff --git a/docs/source-pytorch/common_usecases.rst b/docs/source-pytorch/common_usecases.rst index 93646296d2cf8..307a32f03be41 100644 --- a/docs/source-pytorch/common_usecases.rst +++ b/docs/source-pytorch/common_usecases.rst @@ -127,7 +127,7 @@ Customize and extend Lightning for things like custom hardware or distributed st :header: Train on multiple machines over the internet :description: Train on local machines or unreliable GPUs across the internet. :col_css: col-md-12 - :button_link: strategies/collaborative_training + :button_link: strategies/hivemind :height: 100 .. displayitem:: diff --git a/docs/source-pytorch/extensions/strategy.rst b/docs/source-pytorch/extensions/strategy.rst index 95c48e09496e6..0cc426225ca36 100644 --- a/docs/source-pytorch/extensions/strategy.rst +++ b/docs/source-pytorch/extensions/strategy.rst @@ -77,7 +77,7 @@ The below table lists all relevant strategies available in Lightning with their - Strategy for training using the Bagua library, with advanced distributed training algorithms and system optimizations. :ref:`Learn more. ` * - collaborative - :class:`~pytorch_lightning.strategies.HivemindStrategy` - - Strategy for training collaboratively on local machines or unreliable GPUs across the internet. :ref:`Learn more. ` + - Strategy for training collaboratively on local machines or unreliable GPUs across the internet. :ref:`Learn more. ` * - fsdp - :class:`~pytorch_lightning.strategies.DDPFullyShardedStrategy` - Strategy for Fully Sharded Data Parallel provided by FairScale. :ref:`Learn more. ` diff --git a/docs/source-pytorch/index.rst b/docs/source-pytorch/index.rst index 40a3d63d787e0..b9bf1861f5a59 100644 --- a/docs/source-pytorch/index.rst +++ b/docs/source-pytorch/index.rst @@ -203,7 +203,7 @@ Current Lightning Users clouds/cluster Save and load model progress Save memory with half-precision - Training over the internet + Training over the internet advanced/model_parallel clouds/cloud_training Train on single or multiple GPUs @@ -248,7 +248,7 @@ Current Lightning Users Metrics Model Model Parallel - Collaborative Training + Collaborative Training Plugins Progress bar Production diff --git a/docs/source-pytorch/strategies/collaborative_training.rst b/docs/source-pytorch/strategies/hivemind.rst similarity index 86% rename from docs/source-pytorch/strategies/collaborative_training.rst rename to docs/source-pytorch/strategies/hivemind.rst index 72e9d13f9133a..5695f5695fcaf 100644 --- a/docs/source-pytorch/strategies/collaborative_training.rst +++ b/docs/source-pytorch/strategies/hivemind.rst @@ -1,4 +1,4 @@ -.. _collaborative_training: +.. _hivemind: ##################################################### Training on unreliable mixed GPUs across the internet @@ -17,7 +17,7 @@ Training on unreliable mixed GPUs across the internet :header: 1: Training across multiple machines over the internet :description: Quick setup to start training on multiple machines. :col_css: col-md-4 - :button_link: collaborative_training_basic.html + :button_link: hivemind_basic.html :height: 200 :tag: basic @@ -25,7 +25,7 @@ Training on unreliable mixed GPUs across the internet :header: 2: Speed up training by enabling under-the-hood optimizations :description: Learn which flags to use with the HivemindStrategy to speed up training. 
:col_css: col-md-4 - :button_link: collaborative_training_intermediate.html + :button_link: hivemind_intermediate.html :height: 200 :tag: intermediate @@ -33,7 +33,7 @@ Training on unreliable mixed GPUs across the internet :header: 3: Optimize Memory and Communication using compression hooks :description: Enable gradient buffer optimizations and communication improvements to reduce bottlenecks in communication. :col_css: col-md-4 - :button_link: collaborative_training_expert.html + :button_link: hivemind_expert.html :height: 200 :tag: expert diff --git a/docs/source-pytorch/strategies/collaborative_training_basic.rst b/docs/source-pytorch/strategies/hivemind_basic.rst similarity index 98% rename from docs/source-pytorch/strategies/collaborative_training_basic.rst rename to docs/source-pytorch/strategies/hivemind_basic.rst index 108f6197fdd09..98e90cbfe94cd 100644 --- a/docs/source-pytorch/strategies/collaborative_training_basic.rst +++ b/docs/source-pytorch/strategies/hivemind_basic.rst @@ -1,6 +1,6 @@ :orphan: -.. _collaborative_training_basic: +.. _hivemind_basic: Training on unreliable mixed GPUs across the internet (Basic) ============================================================= diff --git a/docs/source-pytorch/strategies/collaborative_training_expert.rst b/docs/source-pytorch/strategies/hivemind_expert.rst similarity index 98% rename from docs/source-pytorch/strategies/collaborative_training_expert.rst rename to docs/source-pytorch/strategies/hivemind_expert.rst index 5b8a5e8b4c49e..3fa55afb132fd 100644 --- a/docs/source-pytorch/strategies/collaborative_training_expert.rst +++ b/docs/source-pytorch/strategies/hivemind_expert.rst @@ -1,6 +1,6 @@ :orphan: -.. _collaborative_training_expert: +.. _hivemind_expert: Training on unreliable mixed GPUs across the internet (Expert) ============================================================== diff --git a/docs/source-pytorch/strategies/collaborative_training_intermediate.rst b/docs/source-pytorch/strategies/hivemind_intermediate.rst similarity index 98% rename from docs/source-pytorch/strategies/collaborative_training_intermediate.rst rename to docs/source-pytorch/strategies/hivemind_intermediate.rst index 38d6c6a3421b6..cec004219f5d5 100644 --- a/docs/source-pytorch/strategies/collaborative_training_intermediate.rst +++ b/docs/source-pytorch/strategies/hivemind_intermediate.rst @@ -1,6 +1,6 @@ :orphan: -.. _collaborative_training_intermediate: +.. _hivemind_intermediate: Training on unreliable mixed GPUs across the internet (Intermediate) ==================================================================== diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 67ec1d604a89e..fbad93167198c 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -44,6 +44,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Hivemind Strategy * Added `CollaborativeStrategy` ([#12842](https://github.com/PyTorchLightning/pytorch-lightning/pull/12842)) * Renamed `CollaborativeStrategy` to `HivemindStrategy` ([#13388](https://github.com/PyTorchLightning/pytorch-lightning/pull/13388)) + * Removed unnecessary endpoint logic, renamed `collaborative` to `hivemind` ([#13392](https://github.com/PyTorchLightning/pytorch-lightning/pull/13392)) - Include a version suffix for new "last" checkpoints of later runs in the same directory ([#12902](https://github.com/PyTorchLightning/pytorch-lightning/pull/12902)) diff --git a/src/pytorch_lightning/strategies/__init__.py b/src/pytorch_lightning/strategies/__init__.py index f59d976edf439..ab79bd4fd70d9 100644 --- a/src/pytorch_lightning/strategies/__init__.py +++ b/src/pytorch_lightning/strategies/__init__.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. from pytorch_lightning.strategies.bagua import BaguaStrategy # noqa: F401 -from pytorch_lightning.strategies.collaborative import HivemindStrategy # noqa: F401 from pytorch_lightning.strategies.ddp import DDPStrategy # noqa: F401 from pytorch_lightning.strategies.ddp2 import DDP2Strategy # noqa: F401 from pytorch_lightning.strategies.ddp_spawn import DDPSpawnStrategy # noqa: F401 @@ -20,6 +19,7 @@ from pytorch_lightning.strategies.dp import DataParallelStrategy # noqa: F401 from pytorch_lightning.strategies.fully_sharded import DDPFullyShardedStrategy # noqa: F401 from pytorch_lightning.strategies.fully_sharded_native import DDPFullyShardedNativeStrategy # noqa: F401 +from pytorch_lightning.strategies.hivemind import HivemindStrategy # noqa: F401 from pytorch_lightning.strategies.horovod import HorovodStrategy # noqa: F401 from pytorch_lightning.strategies.hpu_parallel import HPUParallelStrategy # noqa: F401 from pytorch_lightning.strategies.ipu import IPUStrategy # noqa: F401 diff --git a/src/pytorch_lightning/strategies/collaborative.py b/src/pytorch_lightning/strategies/hivemind.py similarity index 60% rename from src/pytorch_lightning/strategies/collaborative.py rename to src/pytorch_lightning/strategies/hivemind.py index b594704aba605..34e2f40b2ec40 100644 --- a/src/pytorch_lightning/strategies/collaborative.py +++ b/src/pytorch_lightning/strategies/hivemind.py @@ -1,22 +1,15 @@ -import http import ipaddress import logging import os import platform -import re -import threading -import time -import warnings -from http.server import BaseHTTPRequestHandler from typing import Any, Callable, Dict, List, Optional, Union -import requests import torch from torch import Tensor import pytorch_lightning as pl from pytorch_lightning.strategies.strategy import Strategy, TBroadcast -from pytorch_lightning.utilities import rank_zero_only, rank_zero_warn +from pytorch_lightning.utilities import rank_zero_warn from pytorch_lightning.utilities.data import extract_batch_size from pytorch_lightning.utilities.enums import PrecisionType from pytorch_lightning.utilities.exceptions import MisconfigurationException @@ -33,6 +26,8 @@ class HivemindStrategy(Strategy): + INITIAL_PEERS_ENV: str = "PL_INITIAL_PEERS" + def __init__( self, target_batch_size: int, @@ -50,18 +45,11 @@ def __init__( averager_opts: Optional[Dict] = None, host_maddrs: Optional[List] = None, initial_peers: Optional[Union[str, List]] = None, - endpoint: Optional[bool] = None, - peer_endpoint: Optional[str] = None, - persistent: bool = True, - host: Optional[str] = None, - port: Optional[int] = None, - 
retry_endpoint_attempts: int = 5, - retry_endpoint_sleep_duration: int = 5, **optimizer_kwargs: Any, ): """Provides capabilities to train using the Hivemind Library, training collaboratively across the internet with unreliable machines. For more information, `refer to the docs `__. + lightning.readthedocs.io/en/latest/strategies/hivemind.html>`__. .. warning:: ``HivemindStrategy`` is experimental and subject to change. @@ -81,11 +69,11 @@ def __init__( corresponding :meth:`hivemind.Optimizer.step` call. delay_optimizer_step: Run optimizer in background, apply results in future .step. requires - :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.offload_optimizer`. + :paramref:`~pytorch_lightning.strategies.hivemind.HivemindStrategy.offload_optimizer`. delay_grad_averaging: Average gradients in background; requires - :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.offload_optimizer` and - :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.delay_optimizer_step`. + :paramref:`~pytorch_lightning.strategies.hivemind.HivemindStrategy.offload_optimizer` and + :paramref:`~pytorch_lightning.strategies.hivemind.HivemindStrategy.delay_optimizer_step`. offload_optimizer: Offload the optimizer to host memory, saving GPU memory for parameters and gradients. @@ -118,26 +106,6 @@ def __init__( initial_peers: If connecting to a running process, a list of initial peers needs to be passed in. This can also be set via the env variable ``INITIAL_PEERS``. - endpoint: Enable if a side-car endpoint server is required on the process to server initial peers. - This is useful when using some form of orchestration such as torchelastic. - - peer_endpoint: The endpoint to request initial peers from. - - persistent: When using an endpoint, this controls whether other processes that are not the endpoint - server log/checkpoint. If ``persistent`` is True, we do not log/checkpoint from other processes. - - host: When creating the endpoint, the host IP to use. - - port: When creating the endpoint, the host port to use. - - retry_endpoint_attempts: When connecting to the - :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.peer_endpoint`, - how many time to retry before raising an exception. - - retry_endpoint_sleep_duration: When connecting to the - :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.peer_endpoint`, - how long to wait between retries. - **optimizer_kwargs: kwargs are passed to the :class:`hivemind.Optimizer` class. """ if not _HIVEMIND_AVAILABLE or platform.system() != "Linux": @@ -147,17 +115,7 @@ def __init__( ) super().__init__() - self.dht_manager = DHTManager( - persistent=persistent, - endpoint=endpoint, - peer_endpoint=peer_endpoint, - host=host, - port=port, - host_maddrs=host_maddrs, - initial_peers=initial_peers, - retry_endpoint_attempts=retry_endpoint_attempts, - retry_endpoint_sleep_duration=retry_endpoint_sleep_duration, - ) + self._initial_peers = initial_peers self._target_batch_size = target_batch_size self._batch_size = batch_size self._scheduler_fn = scheduler_fn @@ -179,28 +137,38 @@ def __init__( **optimizer_kwargs, ) - # a bit of a hack to only log from the stable server - if self.dht_manager.disable_logging_checkpointing: - warnings.warn( - "This machine is not a persistent machine. 
Checkpointing/Logging has been disabled.", UserWarning + self._parse_env_initial_peers() + + self.dht = hivemind.DHT( + start=True, + initial_peers=initial_peers, + host_maddrs=host_maddrs if host_maddrs is not None else ["/ip4/0.0.0.0/tcp/0", "/ip4/0.0.0.0/udp/0/quic"], + ) + + visible_addresses = [ + str(a) for a in self.dht.get_visible_maddrs() if not ipaddress.ip_address(a.values()[0]).is_loopback + ] + + if initial_peers is None: + log.info( + "\nOther machines can connect running the same command:\n" + f"INITIAL_PEERS={','.join(visible_addresses)} python ...\n" + "or passing the peers to the strategy:\n" + f"HivemindStrategy(initial_peers='{','.join(visible_addresses)}')" ) - rank_zero_only.rank = 1 if self.dht_manager.disable_logging_checkpointing else 0 + self._hivemind_initialized = False + def _parse_env_initial_peers(self) -> None: + initial_peers = os.environ.get(self.INITIAL_PEERS_ENV, self._initial_peers) + self._initial_peers = initial_peers.split(",") if isinstance(initial_peers, str) else self._initial_peers + @property def num_peers(self) -> int: if self._opt: return self._opt.tracker.global_progress.num_peers return 1 - @property - def dht(self) -> "hivemind.DHT": - """Hivemind Distributed Hash Table which stores values across all peers. - - See documentation for more details: `https://learning-at-home.readthedocs.io/en/latest/modules/dht.html` - """ - return self.dht_manager.dht - @property def root_device(self) -> torch.device: from pytorch_lightning.accelerators.cpu import CPUAccelerator @@ -361,167 +329,3 @@ def load_state_dict(self, state_dict: Dict) -> None: def state_dict(self) -> Dict: return self.scheduler.state_dict() - - -class DHTManager: - ENDPOINT_ENV: str = "PL_ENDPOINT" - PEER_ENDPOINT_ENV: str = "PL_PEER_ENDPOINT" - INITIAL_PEERS_ENV: str = "PL_INITIAL_PEERS" - HOST_ENV: str = "PL_HOST" - PORT_ENV: str = "PL_PORT" - DEFAULT_HOST: str = "0.0.0.0" - DEFAULT_PORT: int = 1440 - - def __init__( - self, - host_maddrs: Optional[List], - initial_peers: Optional[Union[str, List]], - persistent: bool, - endpoint: Optional[bool], - peer_endpoint: Optional[str], - host: Optional[str], - port: Optional[int], - retry_endpoint_attempts: int = 5, - retry_endpoint_sleep_duration: int = 5, - ) -> None: - """Manages the `hivemind.DHT` connection and provides a side-car endpoint server for initial peer access. 
- - Arguments: - - host_maddrs: :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.host_maddrs` - - initial_peers: :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.initial_peers` - - persistent: :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.persistent` - - endpoint: :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.endpoint` - - peer_endpoint: :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.peer_endpoint` - - host: :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.host` - - port: :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.port` - - retry_endpoint_attempts: - :paramref:`~pytorch_lightning.strategies.collaborative.HivemindStrategy.retry_endpoint_attempts` - - retry_endpoint_sleep_duration: - :paramref: - `~pytorch_lightning.strategies.collaborative.HivemindStrategy.retry_endpoint_sleep_duration` - """ - self._persistent = persistent - self._endpoint = endpoint - self._initial_peers = initial_peers - self._peer_endpoint = peer_endpoint - self._host = host - self._port = port - - self._parse_env_vars() - - if self._peer_endpoint and self._initial_peers is None: - self._initial_peers = self._get_initial_peers_from_endpoint( - retry_initial_peers=retry_endpoint_attempts, retry_peer_sleep_duration=retry_endpoint_sleep_duration - ) - - self.dht = hivemind.DHT( - start=True, - initial_peers=self._initial_peers, - host_maddrs=host_maddrs if host_maddrs is not None else ["/ip4/0.0.0.0/tcp/0", "/ip4/0.0.0.0/udp/0/quic"], - ) - - visible_addresses = [ - str(a) for a in self.dht.get_visible_maddrs() if not ipaddress.ip_address(a.values()[0]).is_loopback - ] - - if self._endpoint: - self._host = self._host if self._host is not None else self.DEFAULT_HOST - self._port = self._port if self._port is not None else self.DEFAULT_PORT - self._start_server_process(self._host, self._port) - self._log_endpoint_helper_message(visible_addresses) - elif self._peer_endpoint: - log.info("Machine received initial peers from endpoint.") - elif self._initial_peers is None: - log.info( - "\nOther machines can connect running the same command:\n" - f"INITIAL_PEERS={','.join(visible_addresses)} python ...\n" - "or passing the peers to the strategy:\n" - f"HivemindStrategy(initial_peers='{','.join(visible_addresses)}')" - ) - - def _log_endpoint_helper_message(self, visible_addresses: List[str]) -> None: - assert self._host is not None - resolved_host = self._host - if "0.0.0.0" in self._host: - # use the visible multi-addresses to figure out the IP that has been exposed - # todo (sean): this is pretty hacky, worth investigating. - p = re.compile(r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+") - # todo (sean): we select one address from here, could we have multiple? 
- resolved_host = {p.findall(maddr)[0] for maddr in visible_addresses}.pop() - log.info( - "\nSidecar endpoint enabled to serve peers.\n" - "Other peers can connect via:\n" - f"PEER_ENDPOINT={resolved_host}:{self._port} python ...\n" - "or pass the peer endpoint address to the strategy:\n" - f"HivemindStrategy(peer_endpoint='{resolved_host}:{self._port}')" - ) - - def _start_server_process(self, host: str, port: int) -> None: - dht = self.dht - - class DHTHandler(BaseHTTPRequestHandler): - def do_GET(self) -> None: - """Respond to a GET request.""" - self.send_response(200) - self.send_header("Content-type", "text/html") - self.end_headers() - - visible_peers = [ - str(a) for a in dht.get_visible_maddrs() if not ipaddress.ip_address(a.values()[0]).is_loopback - ] - - self.wfile.write("\n".join(visible_peers).encode()) - - server = http.server.ThreadingHTTPServer((host, int(port)), DHTHandler) - thread = threading.Thread(target=server.serve_forever) - thread.daemon = True - thread.start() - - def _get_initial_peers_from_endpoint(self, retry_initial_peers: int, retry_peer_sleep_duration: int) -> List: - peers = None - for _ in range(retry_initial_peers): - try: - peers = self._get_peers() - break - except requests.exceptions.RequestException: - log.info(f"Failed to get peers, retrying in {retry_peer_sleep_duration} seconds...") - time.sleep(retry_peer_sleep_duration) - if peers is None: - raise MisconfigurationException( - f"Unable to get peers. Tried {retry_initial_peers} times waiting {retry_peer_sleep_duration}s." - f"These parameters can be extended by passing " - "to the strategy (HivemindStrategy(retry_connection=x, retry_sleep_duration=y))." - ) - log.info(f"Received initial peers from collaborative server: {peers}") - return peers - - def _get_peers(self) -> List[str]: - assert self._peer_endpoint is not None - url = f"http://{self._peer_endpoint}" if not self._peer_endpoint.startswith("http://") else self._peer_endpoint - r = requests.get(url) - return r.text.split(",") - - def _parse_env_vars(self) -> None: - endpoint = os.environ.get(self.ENDPOINT_ENV, self._endpoint) - self._endpoint = endpoint == "1" if isinstance(endpoint, str) else endpoint - self._peer_endpoint = os.environ.get(self.PEER_ENDPOINT_ENV, self._peer_endpoint) - initial_peers = os.environ.get(self.INITIAL_PEERS_ENV, self._initial_peers) - self._initial_peers = initial_peers.split(",") if isinstance(initial_peers, str) else initial_peers - - port = os.environ.get(self.PORT_ENV, self._port) - self._port = int(port) if isinstance(port, str) else port - self._host = os.environ.get(self.HOST_ENV, self._host) - - @property - def disable_logging_checkpointing(self) -> bool: - # if this node is a peer, we do not log/checkpoint in persistent mode. 
- return self._persistent and (self._initial_peers is not None or self._peer_endpoint is not None) diff --git a/tests/tests_pytorch/strategies/test_collaborative.py b/tests/tests_pytorch/strategies/test_hivemind.py similarity index 79% rename from tests/tests_pytorch/strategies/test_collaborative.py rename to tests/tests_pytorch/strategies/test_hivemind.py index 8e4ef88f03fb4..58ae523858d53 100644 --- a/tests/tests_pytorch/strategies/test_collaborative.py +++ b/tests/tests_pytorch/strategies/test_hivemind.py @@ -6,15 +6,13 @@ from unittest.mock import PropertyMock import pytest -import requests import torch from torch.optim import Optimizer import pytorch_lightning as pl from pytorch_lightning.demos.boring_classes import BoringModel -from pytorch_lightning.plugins.environments.lightning_environment import find_free_network_port from pytorch_lightning.strategies import HivemindStrategy -from pytorch_lightning.strategies.collaborative import HiveMindScheduler +from pytorch_lightning.strategies.hivemind import HiveMindScheduler from pytorch_lightning.utilities import _HIVEMIND_AVAILABLE from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.types import STEP_OUTPUT @@ -24,7 +22,7 @@ import hivemind -@mock.patch("pytorch_lightning.strategies.collaborative._HIVEMIND_AVAILABLE", False) +@mock.patch("pytorch_lightning.strategies.hivemind._HIVEMIND_AVAILABLE", False) def test_raise_exception_if_hivemind_unavailable(): """Test that we raise an exception when Hivemind is not available.""" with pytest.raises(MisconfigurationException, match="you must have Hivemind installed"): @@ -39,37 +37,6 @@ def test_strategy(mock_dht): assert trainer.strategy == strategy -@RunIf(hivemind=True) -@mock.patch("hivemind.DHT", autospec=True) -@mock.patch("pytorch_lightning.strategies.collaborative.DHTManager._get_peers", autospec=True) -@pytest.mark.parametrize( - "initial_peers,peer_endpoint", - [(["TEST"], None), (None, "localhost:153")], -) -def test_logging_disabled_when_second_peer(mock_dht, mock_http, initial_peers, peer_endpoint): - """Test when we are a second peer (passing initial peers or peer endpoint) we warn the user that - logging/checkpointing will be disabled.""" - with pytest.warns(UserWarning, match="This machine is not a persistent machine"): - HivemindStrategy(target_batch_size=1, initial_peers=initial_peers, peer_endpoint=peer_endpoint) - - -@RunIf(hivemind=True) -@mock.patch.dict( - os.environ, - {"HIVEMIND_MEMORY_SHARING_STRATEGY": "file_descriptor", "PL_PORT": str(find_free_network_port())}, - clear=True, -) -@pytest.mark.parametrize( - "endpoint,expected_message", - [(False, "INITIAL_PEERS"), (True, "Sidecar endpoint enabled to serve peers.")], -) -def test_initial_peer_message(caplog, endpoint, expected_message): - model = BoringModel() - trainer = pl.Trainer(strategy=HivemindStrategy(target_batch_size=1, endpoint=endpoint), fast_dev_run=True) - trainer.fit(model) - assert expected_message in caplog.text - - @RunIf(hivemind=True) @mock.patch.dict(os.environ, {"HIVEMIND_MEMORY_SHARING_STRATEGY": "file_descriptor"}, clear=True) def test_optimizer_wrapped(): @@ -109,24 +76,14 @@ def configure_optimizers(self): { "HIVEMIND_MEMORY_SHARING_STRATEGY": "file_descriptor", "PL_INITIAL_PEERS": "TEST_PEERS", - "PL_HOST": "TEST_HOST", - "PL_PORT": "1300", - "PL_ENDPOINT": "1", - "PL_PEER_ENDPOINT": "TEST_PEER_ENDPOINT", }, clear=True, ) @mock.patch("hivemind.DHT", autospec=True) 
-@mock.patch("pytorch_lightning.strategies.collaborative.DHTManager._get_peers", autospec=True) -@mock.patch("http.server.ThreadingHTTPServer", autospec=True) -def test_env_variables_parsed(mock_dht, mock_peers, mock_server): +def test_env_variables_parsed(mock_dht): """Test that env variables are parsed correctly.""" strategy = HivemindStrategy(target_batch_size=1) - assert strategy.dht_manager._initial_peers == ["TEST_PEERS"] - assert strategy.dht_manager._host == "TEST_HOST" - assert strategy.dht_manager._port == 1300 - assert strategy.dht_manager._endpoint - assert strategy.dht_manager._peer_endpoint == "TEST_PEER_ENDPOINT" + assert strategy._initial_peers == ["TEST_PEERS"] @RunIf(hivemind=True) @@ -204,9 +161,8 @@ def test_warn_if_argument_passed(delay_grad_averaging, delay_state_averaging, de @RunIf(hivemind=True) @mock.patch.dict(os.environ, {"HIVEMIND_MEMORY_SHARING_STRATEGY": "file_descriptor"}, clear=True) -@mock.patch("http.server.ThreadingHTTPServer", autospec=True) -@mock.patch("pytorch_lightning.strategies.collaborative.HivemindStrategy.num_peers", new_callable=PropertyMock) -def test_args_passed_to_optimizer(mock_peers, mock_server): +@mock.patch("pytorch_lightning.strategies.hivemind.HivemindStrategy.num_peers", new_callable=PropertyMock) +def test_args_passed_to_optimizer(mock_peers): """Test to ensure arguments are correctly passed to the hivemind optimizer wrapper.""" mock_peers.return_value = 1 compression = hivemind.ScaledFloat16Compression() @@ -353,18 +309,3 @@ def on_fit_start(self) -> None: ) with pytest.raises(SystemExit): trainer.fit(model) - - -@RunIf(hivemind=True) -def test_raise_when_peer_endpoint_unsuccessful(caplog): - port = find_free_network_port() - with pytest.raises(MisconfigurationException, match="Unable to get peers"): - with mock.patch("requests.get", wraps=requests.get) as requests_mock: - HivemindStrategy( - target_batch_size=1, - peer_endpoint=f"localhost:{port}", - retry_endpoint_attempts=10, - retry_endpoint_sleep_duration=0, - ) - assert "Failed to get peers, retrying" in caplog.text - assert requests_mock.call_count == 10 From fc6c27c09547d96c162a6d0f9a0a673f0be3d8c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 10:31:38 -0400 Subject: [PATCH 08/89] Update neptune-client requirement from <0.16.3,>=0.10.0 to >=0.10.0,<0.16.4 in /requirements (#13416) Update neptune-client requirement in /requirements Updates the requirements on [neptune-client](https://github.com/neptune-ai/neptune-client) to permit the latest version. - [Release notes](https://github.com/neptune-ai/neptune-client/releases) - [Changelog](https://github.com/neptune-ai/neptune-client/blob/master/CHANGELOG.md) - [Commits](https://github.com/neptune-ai/neptune-client/compare/0.10.0...0.16.3) --- updated-dependencies: - dependency-name: neptune-client dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/pytorch/loggers.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/pytorch/loggers.txt b/requirements/pytorch/loggers.txt index ebe425f8a34cd..8c4482e396658 100644 --- a/requirements/pytorch/loggers.txt +++ b/requirements/pytorch/loggers.txt @@ -1,6 +1,6 @@ # all supported loggers -neptune-client>=0.10.0, <0.16.3 +neptune-client>=0.10.0, <0.16.4 comet-ml>=3.1.12, <=3.28.2 mlflow>=1.0.0, <1.27.0 test_tube>=0.7.5, <=0.7.5 From 7638bb73a2f03116fcdc9f8a4ecd195df3f19203 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 10:31:51 -0400 Subject: [PATCH 09/89] Update numpy requirement from <1.22.5,>=1.17.2 to >=1.17.2,<1.23.1 in /requirements (#13413) Update numpy requirement in /requirements Updates the requirements on [numpy](https://github.com/numpy/numpy) to permit the latest version. - [Release notes](https://github.com/numpy/numpy/releases) - [Changelog](https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst) - [Commits](https://github.com/numpy/numpy/compare/v1.17.2...v1.23.0) --- updated-dependencies: - dependency-name: numpy dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/pytorch/base.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/pytorch/base.txt b/requirements/pytorch/base.txt index 0984bd79f3c9f..846cf5ec95d11 100644 --- a/requirements/pytorch/base.txt +++ b/requirements/pytorch/base.txt @@ -1,4 +1,4 @@ -numpy>=1.17.2, <1.22.5 +numpy>=1.17.2, <1.23.1 torch>=1.9.*, <=1.11.0 tqdm>=4.57.0, <=4.63.0 PyYAML>=5.4, <=6.0 From fa7e854450126d90867d010829cc2001d1e77b3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 28 Jun 2022 17:39:17 +0200 Subject: [PATCH 10/89] [CLI] Support custom trainers without callbacks (#13138) --- src/pytorch_lightning/CHANGELOG.md | 3 ++ src/pytorch_lightning/utilities/cli.py | 46 +++++++++++++---------- tests/tests_pytorch/utilities/test_cli.py | 16 ++++++++ 3 files changed, 45 insertions(+), 20 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index fbad93167198c..da9e9df30edb0 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -73,6 +73,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added `teardown()` method to `Accelerator` ([#11935](https://github.com/PyTorchLightning/pytorch-lightning/pull/11935)) +- Added support for using custom Trainers that don't include callbacks using the CLI ([#13138](https://github.com/PyTorchLightning/pytorch-lightning/pull/13138)) + + - Added a `timeout` argument to `DDPStrategy`. ([#13244](https://github.com/PyTorchLightning/pytorch-lightning/pull/13244)) diff --git a/src/pytorch_lightning/utilities/cli.py b/src/pytorch_lightning/utilities/cli.py index a5d5fe3b66960..54e92d55491af 100644 --- a/src/pytorch_lightning/utilities/cli.py +++ b/src/pytorch_lightning/utilities/cli.py @@ -602,30 +602,36 @@ def instantiate_trainer(self, **kwargs: Any) -> Trainer: kwargs: Any custom trainer arguments. 
""" extra_callbacks = [self._get(self.config_init, c) for c in self._parser(self.subcommand).callback_keys] - trainer_config = {**self._get(self.config_init, "trainer"), **kwargs} + trainer_config = {**self._get(self.config_init, "trainer", default={}), **kwargs} return self._instantiate_trainer(trainer_config, extra_callbacks) def _instantiate_trainer(self, config: Dict[str, Any], callbacks: List[Callback]) -> Trainer: - if config["callbacks"] is None: - config["callbacks"] = [] - elif not isinstance(config["callbacks"], list): - config["callbacks"] = [config["callbacks"]] - assert isinstance(config["callbacks"], list) # to handle mypy false positive - config["callbacks"].extend(callbacks) - if "callbacks" in self.trainer_defaults: - if isinstance(self.trainer_defaults["callbacks"], list): - config["callbacks"].extend(self.trainer_defaults["callbacks"]) - else: - config["callbacks"].append(self.trainer_defaults["callbacks"]) - if self.save_config_callback and not config["fast_dev_run"]: - config_callback = self.save_config_callback( - self._parser(self.subcommand), - self.config.get(str(self.subcommand), self.config), - self.save_config_filename, - overwrite=self.save_config_overwrite, - multifile=self.save_config_multifile, + key = "callbacks" + if key in config: + if config[key] is None: + config[key] = [] + elif not isinstance(config[key], list): + config[key] = [config[key]] + config[key].extend(callbacks) + if key in self.trainer_defaults: + if isinstance(self.trainer_defaults[key], list): + config[key].extend(self.trainer_defaults[key]) + else: + config[key].append(self.trainer_defaults[key]) + if self.save_config_callback and not config.get("fast_dev_run", False): + config_callback = self.save_config_callback( + self._parser(self.subcommand), + self.config.get(str(self.subcommand), self.config), + self.save_config_filename, + overwrite=self.save_config_overwrite, + multifile=self.save_config_multifile, + ) + config[key].append(config_callback) + else: + rank_zero_warn( + f"The `{self.trainer_class.__qualname__}` class does not expose the `{key}` argument so they will" + " not be included." ) - config["callbacks"].append(config_callback) return self.trainer_class(**config) def _parser(self, subcommand: Optional[str]) -> LightningArgumentParser: diff --git a/tests/tests_pytorch/utilities/test_cli.py b/tests/tests_pytorch/utilities/test_cli.py index 655d9849a64ca..95092c5cbee7d 100644 --- a/tests/tests_pytorch/utilities/test_cli.py +++ b/tests/tests_pytorch/utilities/test_cli.py @@ -1483,6 +1483,22 @@ def test_cli_auto_seeding(): assert cli.config["seed_everything"] == 123 # the original seed is kept +def test_cli_trainer_no_callbacks(): + class MyTrainer(Trainer): + def __init__(self): + super().__init__() + + class MyCallback(Callback): + ... 
+ + match = "MyTrainer` class does not expose the `callbacks" + with mock.patch("sys.argv", ["any.py"]), pytest.warns(UserWarning, match=match): + cli = LightningCLI( + BoringModel, run=False, trainer_class=MyTrainer, trainer_defaults={"callbacks": MyCallback()} + ) + assert not any(isinstance(cb, MyCallback) for cb in cli.trainer.callbacks) + + def test_unresolvable_import_paths(): class TestModel(BoringModel): def __init__(self, a_func: Callable = torch.softmax): From 053b0df5be0f4b413f1aac647122b3a7e990c156 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 28 Jun 2022 17:59:31 +0200 Subject: [PATCH 11/89] Better errors for logging corner cases (#13164) --- src/pytorch_lightning/CHANGELOG.md | 6 ++++ src/pytorch_lightning/core/module.py | 12 ++++++-- .../connectors/logger_connector/result.py | 16 ++++++---- .../core/test_metric_result_integration.py | 30 +++++++++++++++++++ .../logging_/test_train_loop_logging.py | 23 ++++++++++---- 5 files changed, 75 insertions(+), 12 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index da9e9df30edb0..88971af7adeec 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -49,6 +49,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Include a version suffix for new "last" checkpoints of later runs in the same directory ([#12902](https://github.com/PyTorchLightning/pytorch-lightning/pull/12902)) +- Show a better error message when a Metric that does not return a Tensor is logged ([#13164](https://github.com/PyTorchLightning/pytorch-lightning/pull/13164)) + + - Added missing `predict_dataset` argument in `LightningDataModule.from_datasets` to create predict dataloaders ([#12942](https://github.com/PyTorchLightning/pytorch-lightning/pull/12942)) @@ -123,6 +126,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - `DataLoader` instantiated inside a `*_dataloader` hook will not set the passed arguments as attributes anymore ([#12981](https://github.com/PyTorchLightning/pytorch-lightning/pull/12981)) +- When a multi-element tensor is logged, an error is now raised instead of silently taking the mean of all elements ([#13164](https://github.com/PyTorchLightning/pytorch-lightning/pull/13164)) + + - The `WandbLogger` will now use the run name in the logs folder if it is provided, and otherwise the project name ([#12604](https://github.com/PyTorchLightning/pytorch-lightning/pull/12604)) diff --git a/src/pytorch_lightning/core/module.py b/src/pytorch_lightning/core/module.py index 90c5716138729..b4867ac7d32ff 100644 --- a/src/pytorch_lightning/core/module.py +++ b/src/pytorch_lightning/core/module.py @@ -396,6 +396,7 @@ def log( ) value = apply_to_collection(value, numbers.Number, self.__to_tensor) + apply_to_collection(value, torch.Tensor, self.__check_numel_1, name) if self.trainer._logger_connector.should_reset_tensors(self._current_fx_name): # if we started a new epoch (running its first batch) the hook name has changed @@ -518,11 +519,10 @@ def log_dict( ) @staticmethod - def __check_not_nested(value: dict, name: str) -> dict: + def __check_not_nested(value: dict, name: str) -> None: # self-imposed restriction. 
for simplicity if any(isinstance(v, dict) for v in value.values()): raise ValueError(f"`self.log({name}, {value})` was called, but nested dictionaries cannot be logged") - return value @staticmethod def __check_allowed(v: Any, name: str, value: Any) -> None: @@ -531,6 +531,14 @@ def __check_allowed(v: Any, name: str, value: Any) -> None: def __to_tensor(self, value: numbers.Number) -> Tensor: return torch.tensor(value, device=self.device) + @staticmethod + def __check_numel_1(value: torch.Tensor, name: str) -> None: + if not torch.numel(value) == 1: + raise ValueError( + f"`self.log({name}, {value})` was called, but the tensor must have a single element." + f" You can try doing `self.log({name}, {value}.mean())`" + ) + def log_grad_norm(self, grad_norm_dict: Dict[str, float]) -> None: """Override this method to change the default behaviour of ``log_grad_norm``. diff --git a/src/pytorch_lightning/trainer/connectors/logger_connector/result.py b/src/pytorch_lightning/trainer/connectors/logger_connector/result.py index b1d8a064e122e..a33359a3fe5e9 100644 --- a/src/pytorch_lightning/trainer/connectors/logger_connector/result.py +++ b/src/pytorch_lightning/trainer/connectors/logger_connector/result.py @@ -244,12 +244,12 @@ def update(self, value: _IN_METRIC, batch_size: int) -> None: # type: ignore[ov # perform accumulation with reduction if self.meta.is_mean_reduction: # do not use `+=` as it doesn't do type promotion - self.value = self.value + value.mean() * batch_size + self.value = self.value + value * batch_size self.cumulated_batch_size = self.cumulated_batch_size + batch_size elif self.meta.is_max_reduction or self.meta.is_min_reduction: - self.value = self.meta.reduce_fx(self.value, value.mean()) + self.value = self.meta.reduce_fx(self.value, value) elif self.meta.is_sum_reduction: - self.value = self.value + value.mean() + self.value = self.value + value else: value = cast(Metric, value) self.value = value @@ -528,8 +528,14 @@ def _get_cache(result_metric: _ResultMetric, on_step: bool) -> Optional[Tensor]: result_metric.compute() result_metric.meta.sync.should = should cache = result_metric._computed - if cache is not None and not result_metric.meta.enable_graph: - return cache.detach() + if cache is not None: + if not isinstance(cache, torch.Tensor): + raise ValueError( + f"The `.compute()` return of the metric logged as {result_metric.meta.name!r} must be a tensor." 
+ f" Found {cache}" + ) + if not result_metric.meta.enable_graph: + return cache.detach() return cache def valid_items(self) -> Generator: diff --git a/tests/tests_pytorch/core/test_metric_result_integration.py b/tests/tests_pytorch/core/test_metric_result_integration.py index e6b3f8a1c682e..12247e27e8c9d 100644 --- a/tests/tests_pytorch/core/test_metric_result_integration.py +++ b/tests/tests_pytorch/core/test_metric_result_integration.py @@ -629,3 +629,33 @@ def test_result_metric_max_min(reduce_fx, expected): rm = _ResultMetric(metadata, is_tensor=True) rm.update(torch.tensor(expected), 1) assert rm.compute() == expected + + +def test_compute_not_a_tensor_raises(): + class RandomMetric(Metric): + def update(self): + pass + + def compute(self): + return torch.tensor(1.0), torch.tensor(2.0) + + class MyModel(BoringModel): + def __init__(self): + super().__init__() + self.metric = RandomMetric() + + def on_train_start(self): + self.log("foo", self.metric) + + model = MyModel() + trainer = Trainer( + limit_train_batches=1, + limit_val_batches=0, + max_epochs=1, + enable_progress_bar=False, + enable_checkpointing=False, + logger=False, + enable_model_summary=False, + ) + with pytest.raises(ValueError, match=r"compute\(\)` return of.*foo' must be a tensor"): + trainer.fit(model) diff --git a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py index 13dc7f52a22b7..5855eba4c86af 100644 --- a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py @@ -631,7 +631,16 @@ class TestModel(BoringModel): def training_step(self, batch, batch_idx): self.log("foo/dataloader_idx_0", -1) - trainer = Trainer(default_root_dir=tmpdir) + trainer = Trainer( + default_root_dir=tmpdir, + limit_train_batches=1, + limit_val_batches=0, + max_epochs=1, + enable_progress_bar=False, + enable_checkpointing=False, + logger=False, + enable_model_summary=False, + ) model = TestModel() with pytest.raises(MisconfigurationException, match="`self.log` with the key `foo/dataloader_idx_0`"): trainer.fit(model) @@ -640,7 +649,6 @@ class TestModel(BoringModel): def training_step(self, batch, batch_idx): self.log("foo", Accuracy()) - trainer = Trainer(default_root_dir=tmpdir) model = TestModel() with pytest.raises(MisconfigurationException, match="fix this by setting an attribute for the metric in your"): trainer.fit(model) @@ -653,7 +661,6 @@ def __init__(self): def training_step(self, batch, batch_idx): self.log("foo", Accuracy()) - trainer = Trainer(default_root_dir=tmpdir) model = TestModel() with pytest.raises( MisconfigurationException, @@ -667,7 +674,6 @@ def training_step(self, *args): self.log("foo", -1, prog_bar=True) return super().training_step(*args) - trainer = Trainer(default_root_dir=tmpdir) model = TestModel() with pytest.raises(MisconfigurationException, match=r"self.log\(foo, ...\)` twice in `training_step`"): trainer.fit(model) @@ -677,11 +683,18 @@ def training_step(self, *args): self.log("foo", -1, reduce_fx=torch.argmax) return super().training_step(*args) - trainer = Trainer(default_root_dir=tmpdir) model = TestModel() with pytest.raises(MisconfigurationException, match=r"reduce_fx={min,max,mean,sum}\)` are supported"): trainer.fit(model) + class TestModel(BoringModel): + def on_train_start(self): + self.log("foo", torch.tensor([1.0, 2.0])) + + model = TestModel() + with pytest.raises(ValueError, match="tensor must have a single element"): + 
trainer.fit(model) + def test_sanity_metrics_are_reset(tmpdir): class TestModel(BoringModel): From a3d76746fd6fa7fc05692df4a40e1caf26693aaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Tue, 28 Jun 2022 20:57:44 +0200 Subject: [PATCH 12/89] Rename old references to training type plugin in tests (#13421) --- .../test_accelerator_connector.py | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/tests/tests_pytorch/accelerators/test_accelerator_connector.py b/tests/tests_pytorch/accelerators/test_accelerator_connector.py index 20cac155f9915..206e0fc69a773 100644 --- a/tests/tests_pytorch/accelerators/test_accelerator_connector.py +++ b/tests/tests_pytorch/accelerators/test_accelerator_connector.py @@ -304,22 +304,22 @@ def test_accelerator_gpu(): assert isinstance(trainer.accelerator, GPUAccelerator) -@pytest.mark.parametrize(["devices", "plugin"], [(1, SingleDeviceStrategy), (5, DDPSpawnStrategy)]) -def test_accelerator_cpu_with_devices(devices, plugin): +@pytest.mark.parametrize(["devices", "strategy_class"], [(1, SingleDeviceStrategy), (5, DDPSpawnStrategy)]) +def test_accelerator_cpu_with_devices(devices, strategy_class): trainer = Trainer(accelerator="cpu", devices=devices) assert trainer.num_devices == devices - assert isinstance(trainer.strategy, plugin) + assert isinstance(trainer.strategy, strategy_class) assert isinstance(trainer.accelerator, CPUAccelerator) @RunIf(min_cuda_gpus=2) @pytest.mark.parametrize( - ["devices", "plugin"], [(1, SingleDeviceStrategy), ([1], SingleDeviceStrategy), (2, DDPSpawnStrategy)] + ["devices", "strategy_class"], [(1, SingleDeviceStrategy), ([1], SingleDeviceStrategy), (2, DDPSpawnStrategy)] ) -def test_accelerator_gpu_with_devices(devices, plugin): +def test_accelerator_gpu_with_devices(devices, strategy_class): trainer = Trainer(accelerator="gpu", devices=devices) assert trainer.num_devices == len(devices) if isinstance(devices, list) else devices - assert isinstance(trainer.strategy, plugin) + assert isinstance(trainer.strategy, strategy_class) assert isinstance(trainer.accelerator, GPUAccelerator) @@ -356,7 +356,7 @@ def test_exception_invalid_strategy(): @pytest.mark.parametrize( - ["strategy", "plugin"], + ["strategy", "strategy_class"], [ ("ddp_spawn", DDPSpawnStrategy), ("ddp_spawn_find_unused_parameters_false", DDPSpawnStrategy), @@ -364,20 +364,20 @@ def test_exception_invalid_strategy(): ("ddp_find_unused_parameters_false", DDPStrategy), ], ) -def test_strategy_choice_cpu_str(tmpdir, strategy, plugin): +def test_strategy_choice_cpu_str(strategy, strategy_class): trainer = Trainer(strategy=strategy, accelerator="cpu", devices=2) - assert isinstance(trainer.strategy, plugin) + assert isinstance(trainer.strategy, strategy_class) -@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy]) -def test_strategy_choice_cpu_plugin(tmpdir, plugin): - trainer = Trainer(strategy=plugin(), accelerator="cpu", devices=2) - assert isinstance(trainer.strategy, plugin) +@pytest.mark.parametrize("strategy_class", [DDPSpawnStrategy, DDPStrategy]) +def test_strategy_choice_cpu_instance(strategy_class): + trainer = Trainer(strategy=strategy_class(), accelerator="cpu", devices=2) + assert isinstance(trainer.strategy, strategy_class) @RunIf(min_cuda_gpus=2) @pytest.mark.parametrize( - ["strategy", "plugin"], + ["strategy", "strategy_class"], [ ("ddp_spawn", DDPSpawnStrategy), ("ddp_spawn_find_unused_parameters_false", DDPSpawnStrategy), @@ -390,29 +390,29 @@ def 
test_strategy_choice_cpu_plugin(tmpdir, plugin): pytest.param("deepspeed", DeepSpeedStrategy, marks=RunIf(deepspeed=True)), ], ) -def test_strategy_choice_gpu_str(tmpdir, strategy, plugin): +def test_strategy_choice_gpu_str(strategy, strategy_class): trainer = Trainer(strategy=strategy, accelerator="gpu", devices=2) - assert isinstance(trainer.strategy, plugin) + assert isinstance(trainer.strategy, strategy_class) @RunIf(min_cuda_gpus=2) -@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy]) -def test_strategy_choice_gpu_plugin(tmpdir, plugin): - trainer = Trainer(strategy=plugin(), accelerator="gpu", devices=2) - assert isinstance(trainer.strategy, plugin) +@pytest.mark.parametrize("strategy_class", [DDPSpawnStrategy, DDPStrategy]) +def test_strategy_choice_gpu_instance(strategy_class): + trainer = Trainer(strategy=strategy_class(), accelerator="gpu", devices=2) + assert isinstance(trainer.strategy, strategy_class) @RunIf(min_cuda_gpus=2) -@pytest.mark.parametrize("plugin", [DDPSpawnStrategy, DDPStrategy]) -def test_device_type_when_training_plugin_gpu_passed(tmpdir, plugin): +@pytest.mark.parametrize("strategy_class", [DDPSpawnStrategy, DDPStrategy]) +def test_device_type_when_strategy_instance_gpu_passed(strategy_class): - trainer = Trainer(strategy=plugin(), accelerator="gpu", devices=2) - assert isinstance(trainer.strategy, plugin) + trainer = Trainer(strategy=strategy_class(), accelerator="gpu", devices=2) + assert isinstance(trainer.strategy, strategy_class) assert isinstance(trainer.accelerator, GPUAccelerator) @pytest.mark.parametrize("precision", [1, 12, "invalid"]) -def test_validate_precision_type(tmpdir, precision): +def test_validate_precision_type(precision): with pytest.raises(MisconfigurationException, match=f"Precision {repr(precision)} is invalid"): Trainer(precision=precision) @@ -423,7 +423,7 @@ def test_amp_level_raises_error_with_native(): _ = Trainer(amp_level="O2", amp_backend="native", precision=16) -def test_strategy_choice_ddp_spawn_cpu(tmpdir): +def test_strategy_choice_ddp_spawn_cpu(): trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cpu", devices=2) assert isinstance(trainer.accelerator, CPUAccelerator) assert isinstance(trainer.strategy, DDPSpawnStrategy) From c1ad29858c093e52e0880d6b11abf34cb8944b1e Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Wed, 29 Jun 2022 15:35:57 +0200 Subject: [PATCH 13/89] CI: fix requirements freeze (#13441) * allow freeze * ci * typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ipu Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .azure/gpu-benchmark.yml | 1 + .azure/gpu-tests.yml | 1 + .azure/hpu-tests.yml | 1 + .azure/ipu-tests.yml | 1 + .github/workflows/ci-pytorch_test-conda.yml | 1 + .github/workflows/ci-pytorch_test-full.yml | 1 + .github/workflows/ci-pytorch_test-slow.yml | 1 + .github/workflows/docs-checks.yml | 2 ++ dockers/base-ipu/Dockerfile | 12 +++++------- dockers/tpu-tests/tpu_test_cases.jsonnet | 2 ++ requirements/pytorch/base.txt | 4 ++-- src/lightning/__setup__.py | 5 ++++- src/lightning_app/__setup__.py | 10 ++++++---- src/pytorch_lightning/__setup__.py | 14 ++++++++------ 14 files changed, 36 insertions(+), 20 deletions(-) diff --git a/.azure/gpu-benchmark.yml b/.azure/gpu-benchmark.yml index 9f432ae2f85ad..4d3eaddd41f90 100644 --- a/.azure/gpu-benchmark.yml +++ b/.azure/gpu-benchmark.yml @@ -40,6 +40,7 @@ jobs: pip list env: PACKAGE_NAME: pytorch + 
FREEZE_REQUIREMENTS: 1 displayName: 'Install package' - bash: python -m pytest benchmarks -v --durations=0 diff --git a/.azure/gpu-tests.yml b/.azure/gpu-tests.yml index dc315ca88c87e..f84463a6615b3 100644 --- a/.azure/gpu-tests.yml +++ b/.azure/gpu-tests.yml @@ -56,6 +56,7 @@ jobs: pip list env: PACKAGE_NAME: pytorch + FREEZE_REQUIREMENTS: 1 displayName: 'Install dependencies' - bash: | diff --git a/.azure/hpu-tests.yml b/.azure/hpu-tests.yml index ef1982da8e8e6..a3041ce32daae 100644 --- a/.azure/hpu-tests.yml +++ b/.azure/hpu-tests.yml @@ -33,6 +33,7 @@ jobs: pip install -e .[extra] -r requirements/pytorch/test.txt env: PACKAGE_NAME: pytorch + FREEZE_REQUIREMENTS: 1 displayName: 'Install dependencies' - bash: | diff --git a/.azure/ipu-tests.yml b/.azure/ipu-tests.yml index 9a97ae41b914b..418a70d6fa72e 100644 --- a/.azure/ipu-tests.yml +++ b/.azure/ipu-tests.yml @@ -57,6 +57,7 @@ jobs: pip list env: PACKAGE_NAME: pytorch + FREEZE_REQUIREMENTS: 1 displayName: 'Install dependencies' - bash: | diff --git a/.github/workflows/ci-pytorch_test-conda.yml b/.github/workflows/ci-pytorch_test-conda.yml index b9645720f4012..d0913ee0c2549 100644 --- a/.github/workflows/ci-pytorch_test-conda.yml +++ b/.github/workflows/ci-pytorch_test-conda.yml @@ -40,6 +40,7 @@ jobs: - name: Update base dependencies env: PACKAGE_NAME: pytorch + FREEZE_REQUIREMENTS: 1 run: | conda info conda list diff --git a/.github/workflows/ci-pytorch_test-full.yml b/.github/workflows/ci-pytorch_test-full.yml index 35f669cee508b..d857cacf5fac0 100644 --- a/.github/workflows/ci-pytorch_test-full.yml +++ b/.github/workflows/ci-pytorch_test-full.yml @@ -81,6 +81,7 @@ jobs: - name: Install dependencies env: PACKAGE_NAME: pytorch + FREEZE_REQUIREMENTS: 1 run: | flag=$(python -c "print('--pre' if '${{matrix.release}}' == 'pre' else '')" 2>&1) url=$(python -c "print('test/cpu/torch_test.html' if '${{matrix.release}}' == 'pre' else 'cpu/torch_stable.html')" 2>&1) diff --git a/.github/workflows/ci-pytorch_test-slow.yml b/.github/workflows/ci-pytorch_test-slow.yml index 9b66d4fa0f947..c907a2c3d6410 100644 --- a/.github/workflows/ci-pytorch_test-slow.yml +++ b/.github/workflows/ci-pytorch_test-slow.yml @@ -52,6 +52,7 @@ jobs: - name: Install dependencies env: PACKAGE_NAME: pytorch + FREEZE_REQUIREMENTS: 1 run: | # adjust versions according installed Torch version python ./requirements/pytorch/adjust-versions.py requirements.txt ${{ matrix.pytorch-version }} diff --git a/.github/workflows/docs-checks.yml b/.github/workflows/docs-checks.yml index 6e60a34c05b71..25a9b17d6914b 100644 --- a/.github/workflows/docs-checks.yml +++ b/.github/workflows/docs-checks.yml @@ -38,6 +38,7 @@ jobs: - name: Install dependencies env: PACKAGE_NAME: pytorch + FREEZE_REQUIREMENTS: 1 run: | sudo apt-get update sudo apt-get install -y cmake pandoc @@ -82,6 +83,7 @@ jobs: - name: Install dependencies env: PACKAGE_NAME: pytorch + FREEZE_REQUIREMENTS: 1 run: | sudo apt-get update sudo apt-get install -y cmake pandoc diff --git a/dockers/base-ipu/Dockerfile b/dockers/base-ipu/Dockerfile index 256cd4031b0b7..af1f615034e69 100644 --- a/dockers/base-ipu/Dockerfile +++ b/dockers/base-ipu/Dockerfile @@ -73,20 +73,18 @@ ENV \ CONDA_DEFAULT_ENV=${CONDA_ENV} \ MKL_THREADING_LAYER=GNU -COPY ./requirements/pytorch/extra.txt requirements-extra.txt -COPY ./requirements/pytorch/test.txt requirements-test.txt -COPY ./requirements/pytorch/adjust-versions.py adjust_versions.py +COPY ./requirements/pytorch/* requirements/ COPY ./.actions/assistant.py assistant.py RUN \ pip list | grep 
torch && \ python -c "import torch; print(torch.__version__)" && \ - python adjust_versions.py requirements-extra.txt && \ pip install -q fire && \ + python requirements/adjust-versions.py requirements/extra.txt && \ # Install remaining requirements - pip install -r requirements-extra.txt --no-cache-dir && \ - pip install -r requirements-test.txt --no-cache-dir && \ - rm requirements* + pip install -r requirements/extra.txt --no-cache-dir && \ + pip install -r requirements/test.txt --no-cache-dir && \ + rm -rf requirements/ RUN \ # Show what we have diff --git a/dockers/tpu-tests/tpu_test_cases.jsonnet b/dockers/tpu-tests/tpu_test_cases.jsonnet index 338d09d0b6612..620deffae5275 100644 --- a/dockers/tpu-tests/tpu_test_cases.jsonnet +++ b/dockers/tpu-tests/tpu_test_cases.jsonnet @@ -29,6 +29,8 @@ local tputests = base.BaseTest { git ls-remote --refs origin git fetch origin "refs/pull/{PR_NUMBER}/head:pr/{PR_NUMBER}" && git checkout "pr/{PR_NUMBER}" git checkout {SHA} + export PACKAGE_NAME=pytorch + export FREEZE_REQUIREMENTS=1 pip install -e . echo $KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS export XRT_TPU_CONFIG="tpu_worker;0;${KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7}" diff --git a/requirements/pytorch/base.txt b/requirements/pytorch/base.txt index 846cf5ec95d11..fc20b78a58d3f 100644 --- a/requirements/pytorch/base.txt +++ b/requirements/pytorch/base.txt @@ -1,5 +1,5 @@ numpy>=1.17.2, <1.23.1 -torch>=1.9.*, <=1.11.0 +torch>=1.9.*, <=1.11.0 # strict tqdm>=4.57.0, <=4.63.0 PyYAML>=5.4, <=6.0 fsspec[http]>=2021.05.0, !=2021.06.0, <2022.6.0 @@ -8,4 +8,4 @@ torchmetrics>=0.7.0, <0.9.2 # needed for using fixed compare_version pyDeprecate>=0.3.1, <=0.3.2 packaging>=17.0, <=21.3 typing-extensions>=4.0.0, <4.2.1 -protobuf<=3.20.1 # strict. TODO: Remove after tensorboard gets compatible https://github.com/tensorflow/tensorboard/issues/5708 +protobuf<=3.20.1 # strict TODO: Remove after tensorboard gets compatible https://github.com/tensorflow/tensorboard/issues/5708 diff --git a/src/lightning/__setup__.py b/src/lightning/__setup__.py index 8ca323522c655..3b76307bfd39f 100644 --- a/src/lightning/__setup__.py +++ b/src/lightning/__setup__.py @@ -10,6 +10,7 @@ _PROJECT_ROOT = "." 
_SOURCE_ROOT = os.path.join(_PROJECT_ROOT, "src") _PACKAGE_ROOT = os.path.join(_SOURCE_ROOT, "lightning") +_FREEZE_REQUIREMENTS = bool(int(os.environ.get("FREEZE_REQUIREMENTS", 0))) def _load_py_module(name: str, location: str) -> ModuleType: @@ -56,7 +57,9 @@ def _setup_args(**kwargs: Any) -> Dict[str, Any]: else: _include_pkgs = ["*"] _requires = [ - _setup_tools.load_requirements(d) for d in glob.glob(os.path.join("requirements", "*")) if os.path.isdir(d) + _setup_tools.load_requirements(d, unfreeze=not _FREEZE_REQUIREMENTS) + for d in glob.glob(os.path.join("requirements", "*")) + if os.path.isdir(d) ] _requires = list(chain(*_requires)) # todo: consider invaliding some additional arguments from packages, for example if include data or safe to zip diff --git a/src/lightning_app/__setup__.py b/src/lightning_app/__setup__.py index b8bc265756054..e989a17b50412 100644 --- a/src/lightning_app/__setup__.py +++ b/src/lightning_app/__setup__.py @@ -9,6 +9,7 @@ _SOURCE_ROOT = os.path.join(_PROJECT_ROOT, "src") _PACKAGE_ROOT = os.path.join(_SOURCE_ROOT, "lightning_app") _PATH_REQUIREMENTS = os.path.join("requirements", "app") +_FREEZE_REQUIREMENTS = bool(int(os.environ.get("FREEZE_REQUIREMENTS", 0))) def _load_py_module(name: str, location: str) -> ModuleType: @@ -25,11 +26,12 @@ def _prepare_extras(**kwargs: Any) -> Dict[str, Any]: # Define package extras. These are only installed if you specify them. # From remote, use like `pip install pytorch-lightning[dev, docs]` # From local copy of repo, use like `pip install ".[dev, docs]"` + common_args = dict(path_dir=_PATH_REQUIREMENTS, unfreeze=not _FREEZE_REQUIREMENTS) extras = { # 'docs': load_requirements(file_name='docs.txt'), - "cloud": _setup_tools.load_requirements(path_dir=_PATH_REQUIREMENTS, file_name="cloud.txt"), - "ui": _setup_tools.load_requirements(path_dir=_PATH_REQUIREMENTS, file_name="ui.txt"), - "test": _setup_tools.load_requirements(path_dir=_PATH_REQUIREMENTS, file_name="test.txt"), + "cloud": _setup_tools.load_requirements(file_name="cloud.txt", **common_args), + "ui": _setup_tools.load_requirements(file_name="ui.txt", **common_args), + "test": _setup_tools.load_requirements(file_name="test.txt", **common_args), } extras["dev"] = extras["cloud"] + extras["ui"] + extras["test"] # + extras['docs'] extras["all"] = extras["cloud"] + extras["ui"] @@ -83,7 +85,7 @@ def _setup_args(**__: Any) -> Dict[str, Any]: ], }, setup_requires=["wheel"], - install_requires=_setup_tools.load_requirements(_PATH_REQUIREMENTS), + install_requires=_setup_tools.load_requirements(_PATH_REQUIREMENTS, unfreeze=not _FREEZE_REQUIREMENTS), extras_require=_prepare_extras(), project_urls={ "Bug Tracker": "https://github.com/Lightning-AI/lightning/issues", diff --git a/src/pytorch_lightning/__setup__.py b/src/pytorch_lightning/__setup__.py index 48e90e652f67f..4b28df68296a0 100644 --- a/src/pytorch_lightning/__setup__.py +++ b/src/pytorch_lightning/__setup__.py @@ -10,6 +10,7 @@ _SOURCE_ROOT = os.path.join(_PROJECT_ROOT, "src") _PACKAGE_ROOT = os.path.join(_SOURCE_ROOT, "pytorch_lightning") _PATH_REQUIREMENTS = os.path.join("requirements", "pytorch") +_FREEZE_REQUIREMENTS = bool(int(os.environ.get("FREEZE_REQUIREMENTS", 0))) def _load_py_module(name: str, location: str) -> ModuleType: @@ -26,13 +27,14 @@ def _prepare_extras(**kwargs: Any) -> Dict[str, Any]: # Define package extras. These are only installed if you specify them. 
# From remote, use like `pip install pytorch-lightning[dev, docs]` # From local copy of repo, use like `pip install ".[dev, docs]"` + common_args = dict(path_dir=_PATH_REQUIREMENTS, unfreeze=not _FREEZE_REQUIREMENTS) extras = { # 'docs': load_requirements(file_name='docs.txt'), - "examples": _setup_tools.load_requirements(path_dir=_PATH_REQUIREMENTS, file_name="examples.txt"), - "loggers": _setup_tools.load_requirements(path_dir=_PATH_REQUIREMENTS, file_name="loggers.txt"), - "extra": _setup_tools.load_requirements(path_dir=_PATH_REQUIREMENTS, file_name="extra.txt"), - "strategies": _setup_tools.load_requirements(path_dir=_PATH_REQUIREMENTS, file_name="strategies.txt"), - "test": _setup_tools.load_requirements(path_dir=_PATH_REQUIREMENTS, file_name="test.txt"), + "examples": _setup_tools.load_requirements(file_name="examples.txt", **common_args), + "loggers": _setup_tools.load_requirements(file_name="loggers.txt", **common_args), + "extra": _setup_tools.load_requirements(file_name="extra.txt", **common_args), + "strategies": _setup_tools.load_requirements(file_name="strategies.txt", **common_args), + "test": _setup_tools.load_requirements(file_name="test.txt", **common_args), } for req in parse_requirements(extras["strategies"]): extras[req.key] = [str(req)] @@ -83,7 +85,7 @@ def _setup_args(**__: Any) -> Dict[str, Any]: keywords=["deep learning", "pytorch", "AI"], python_requires=">=3.7", setup_requires=[], - install_requires=_setup_tools.load_requirements(_PATH_REQUIREMENTS), + install_requires=_setup_tools.load_requirements(_PATH_REQUIREMENTS, unfreeze=not _FREEZE_REQUIREMENTS), extras_require=_prepare_extras(), project_urls={ "Bug Tracker": "https://github.com/Lightning-AI/lightning/issues", From 29547143383eb923a7aae2763552b87193cb60a2 Mon Sep 17 00:00:00 2001 From: Sean Naren Date: Wed, 29 Jun 2022 15:49:34 +0100 Subject: [PATCH 14/89] Add model summary when using DeepSpeed Stage 3 (#13427) --- src/pytorch_lightning/CHANGELOG.md | 3 + .../callbacks/model_summary.py | 16 +++- .../utilities/deepspeed_model_summary.py | 94 +++++++++++++++++++ .../callbacks/test_deepspeed_model_summary.py | 53 +++++++++++ 4 files changed, 163 insertions(+), 3 deletions(-) create mode 100644 src/pytorch_lightning/utilities/deepspeed_model_summary.py create mode 100644 tests/tests_pytorch/callbacks/test_deepspeed_model_summary.py diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 88971af7adeec..0e583ff931faf 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -283,6 +283,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Fixed `estimated_stepping_batches` requiring distributed comms in `configure_optimizers` for the `DeepSpeedStrategy` ([#13350](https://github.com/PyTorchLightning/pytorch-lightning/pull/13350)) +- Fixed Model Summary when using DeepSpeed Stage 3 ([#13427](https://github.com/PyTorchLightning/pytorch-lightning/pull/13427)) + + - diff --git a/src/pytorch_lightning/callbacks/model_summary.py b/src/pytorch_lightning/callbacks/model_summary.py index 44c4b660c1c79..1b3082a6a9d72 100644 --- a/src/pytorch_lightning/callbacks/model_summary.py +++ b/src/pytorch_lightning/callbacks/model_summary.py @@ -22,11 +22,14 @@ """ import logging -from typing import List, Tuple +from typing import List, Tuple, Union import pytorch_lightning as pl from pytorch_lightning.callbacks.callback import Callback -from pytorch_lightning.utilities.model_summary import _format_summary_table, summarize +from pytorch_lightning.utilities.deepspeed_model_summary import DeepSpeedSummary +from pytorch_lightning.utilities.model_summary import _format_summary_table +from pytorch_lightning.utilities.model_summary import ModelSummary as Summary +from pytorch_lightning.utilities.model_summary import summarize log = logging.getLogger(__name__) @@ -53,7 +56,7 @@ def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") - if not self._max_depth: return None - model_summary = summarize(pl_module, max_depth=self._max_depth) + model_summary = self._summary(trainer, pl_module) summary_data = model_summary._get_summary_data() total_parameters = model_summary.total_parameters trainable_parameters = model_summary.trainable_parameters @@ -62,6 +65,13 @@ def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") - if trainer.is_global_zero: self.summarize(summary_data, total_parameters, trainable_parameters, model_size) + def _summary(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> Union[DeepSpeedSummary, Summary]: + from pytorch_lightning.strategies.deepspeed import DeepSpeedStrategy + + if isinstance(trainer.strategy, DeepSpeedStrategy) and trainer.strategy.zero_stage_3: + return DeepSpeedSummary(pl_module, max_depth=self._max_depth) + return summarize(pl_module, max_depth=self._max_depth) + @staticmethod def summarize( summary_data: List[Tuple[str, List[str]]], diff --git a/src/pytorch_lightning/utilities/deepspeed_model_summary.py b/src/pytorch_lightning/utilities/deepspeed_model_summary.py new file mode 100644 index 0000000000000..fca288be0542b --- /dev/null +++ b/src/pytorch_lightning/utilities/deepspeed_model_summary.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# Copyright 2020 The PyTorch Lightning team and Microsoft Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Utilities that can be used with Deepspeed.""" + +from collections import OrderedDict +from typing import Dict, List, Tuple + +import torch + +from pytorch_lightning.utilities.model_summary import ( + _is_lazy_weight_tensor, + get_human_readable_count, + LayerSummary, + ModelSummary, +) + + +def deepspeed_param_size(p: torch.nn.Parameter) -> int: + assert hasattr(p, "ds_numel") + return p.ds_numel + + +class DeepSpeedLayerSummary(LayerSummary): + @property + def num_parameters(self) -> int: + """Returns the number of parameters in this module.""" + return sum(deepspeed_param_size(p) if not _is_lazy_weight_tensor(p) else 0 for p in self._module.parameters()) + + @property + def average_shard_parameters(self) -> int: + """Returns the number of parameters in this module.""" + return sum(p.partitioned_size() if not _is_lazy_weight_tensor(p) else 0 for p in self._module.parameters()) + + +class DeepSpeedSummary(ModelSummary): + def summarize(self) -> Dict[str, DeepSpeedLayerSummary]: # type: ignore[override] + summary = OrderedDict((name, DeepSpeedLayerSummary(module)) for name, module in self.named_modules) + if self._model.example_input_array is not None: + self._forward_example_input() + for layer in summary.values(): + layer.detach_hook() + + if self._max_depth >= 1: + # remove summary entries with depth > max_depth + for k in [k for k in summary if k.count(".") >= self._max_depth]: + del summary[k] + + return summary + + @property + def total_parameters(self) -> int: + return sum(deepspeed_param_size(p) if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters()) + + @property + def trainable_parameters(self) -> int: + return sum( + deepspeed_param_size(p) if not _is_lazy_weight_tensor(p) else 0 + for p in self._model.parameters() + if p.requires_grad + ) + + @property + def parameters_per_layer(self) -> List[int]: + return [layer.average_shard_parameters for layer in self._layer_summary.values()] + + def _get_summary_data(self) -> List[Tuple[str, List[str]]]: + """Makes a summary listing with: + + Layer Name, Layer Type, Number of Parameters, Input Sizes, Output Sizes, Model Size + """ + arrays = [ + (" ", list(map(str, range(len(self._layer_summary))))), + ("Name", self.layer_names), + ("Type", self.layer_types), + ("Params", list(map(get_human_readable_count, self.param_nums))), + ("Params per Device", list(map(get_human_readable_count, self.parameters_per_layer))), + ] + if self._model.example_input_array is not None: + arrays.append(("In sizes", [str(x) for x in self.in_sizes])) + arrays.append(("Out sizes", [str(x) for x in self.out_sizes])) + + return arrays diff --git a/tests/tests_pytorch/callbacks/test_deepspeed_model_summary.py b/tests/tests_pytorch/callbacks/test_deepspeed_model_summary.py new file mode 100644 index 0000000000000..59733325a741c --- /dev/null +++ b/tests/tests_pytorch/callbacks/test_deepspeed_model_summary.py @@ -0,0 +1,53 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytorch_lightning as pl +from pytorch_lightning import Callback, Trainer +from pytorch_lightning.demos.boring_classes import BoringModel +from pytorch_lightning.strategies import DeepSpeedStrategy +from pytorch_lightning.utilities.deepspeed_model_summary import DeepSpeedSummary +from tests_pytorch.helpers.runif import RunIf + + +@RunIf(min_cuda_gpus=2, deepspeed=True, standalone=True) +def test_deepspeed_summary(tmpdir): + """Test to ensure that the summary contains the correct values when stage 3 is enabled and that the trainer + enables the `DeepSpeedSummary` when DeepSpeed is used.""" + + model = BoringModel() + total_parameters = sum(x.numel() for x in model.parameters()) + + class TestCallback(Callback): + def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: + model_summary = DeepSpeedSummary(pl_module, max_depth=1) + assert model_summary.total_parameters == total_parameters + assert model_summary.trainable_parameters == total_parameters + + # check the additional params per device + summary_data = model_summary._get_summary_data() + params_per_device = summary_data[-1][-1] + assert int(params_per_device[0]) == (model_summary.total_parameters // 2) + + trainer = Trainer( + strategy=DeepSpeedStrategy(stage=3), + default_root_dir=tmpdir, + accelerator="gpu", + fast_dev_run=True, + devices=2, + precision=16, + enable_model_summary=True, + callbacks=[TestCallback()], + ) + + trainer.fit(model) From 26469d96525a5f0bb4b819598b2ab689a91a132d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 29 Jun 2022 17:06:51 +0200 Subject: [PATCH 15/89] Remove support for DDP2 strategy (#12705) Co-authored-by: Rohit Gupta Co-authored-by: Jirka --- docs/source-pytorch/accelerators/gpu_faq.rst | 20 ++--- .../accelerators/gpu_intermediate.rst | 45 ++++------- docs/source-pytorch/api_references.rst | 1 - .../common/lightning_module.rst | 6 +- src/pytorch_lightning/CHANGELOG.md | 3 + src/pytorch_lightning/core/module.py | 18 ++--- .../plugins/training_type/ddp2.py | 8 +- src/pytorch_lightning/strategies/ddp2.py | 77 ++++--------------- .../connectors/accelerator_connector.py | 9 ++- .../test_accelerator_connector.py | 64 +-------------- .../deprecated_api/test_remove_1-8.py | 16 +++- .../tests_pytorch/plugins/test_amp_plugins.py | 2 +- .../plugins/test_cluster_integration.py | 12 +-- .../test_estimated_stepping_batches.py | 3 - tests/tests_pytorch/trainer/test_trainer.py | 13 ---- 15 files changed, 78 insertions(+), 219 deletions(-) diff --git a/docs/source-pytorch/accelerators/gpu_faq.rst b/docs/source-pytorch/accelerators/gpu_faq.rst index c697b2ca7b354..8302665591f4b 100644 --- a/docs/source-pytorch/accelerators/gpu_faq.rst +++ b/docs/source-pytorch/accelerators/gpu_faq.rst @@ -36,25 +36,25 @@ In DDP, DDP_SPAWN, Deepspeed, DDP_SHARDED, or Horovod your effective batch size Trainer(accelerator="gpu", devices=8, num_nodes=10, strategy="ddp_sharded") Trainer(accelerator="gpu", devices=8, num_nodes=10, strategy="horovod") -In DDP2 or DP, your effective batch size will be 7 * num_nodes. -The reason is that the full batch is visible to all GPUs on the node when using DDP2. -.. code-block:: python +.. note:: Huge batch sizes are actually really bad for convergence. 
Check out: + `Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour `_ - # effective batch size = 7 - Trainer(accelerator="gpu", devices=8, strategy="ddp2") - Trainer(accelerator="gpu", devices=8, strategy="dp") +In DP, which does not support multi-node, the effective batch size will be just 7, regardless of how many devices are being used. +The reason is that the full batch gets split evenly between all devices. - # effective batch size = 7 * 10 - Trainer(accelerator="gpu", devices=8, num_nodes=10, strategy="ddp2") +.. code-block:: python + + # effective batch size = 7, each GPU sees a batch size of 1 except the last GPU Trainer(accelerator="gpu", devices=8, strategy="dp") + # effective batch size = 7, first GPU sees a batch size of 4, the other sees batch size 3 + Trainer(accelerator="gpu", devices=2, num_nodes=10, strategy="dp") -.. note:: Huge batch sizes are actually really bad for convergence. Check out: - `Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour `_ ---- + ********************************************************* How do I use multiple GPUs on Jupyter or Colab notebooks? ********************************************************* diff --git a/docs/source-pytorch/accelerators/gpu_intermediate.rst b/docs/source-pytorch/accelerators/gpu_intermediate.rst index c4d9ad8817621..930d4654a4c02 100644 --- a/docs/source-pytorch/accelerators/gpu_intermediate.rst +++ b/docs/source-pytorch/accelerators/gpu_intermediate.rst @@ -23,7 +23,6 @@ Lightning supports multiple ways of doing distributed training. - Data Parallel (``strategy='dp'``) (multiple-gpus, 1 machine) - DistributedDataParallel (``strategy='ddp'``) (multiple-gpus across many machines (python script based)). - DistributedDataParallel (``strategy='ddp_spawn'``) (multiple-gpus across many machines (spawn based)). -- DistributedDataParallel 2 (``strategy='ddp2'``) (DP in a machine, DDP across machines). - Horovod (``strategy='horovod'``) (multi-machine, multi-gpu, configured at runtime) - Bagua (``strategy='bagua'``) (multiple-gpus across many machines with advanced training algorithms) @@ -104,6 +103,12 @@ In these situations you should use `dp` or `ddp_spawn` instead. Distributed Data Parallel 2 ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. warning:: + The DDP2 strategy is no longer supported. For single-node use, we recommend ``strategy='ddp'`` or + ``strategy='dp'`` as a replacement. If you need DDP2, you will need ``torch < 1.9``, + ``pytorch-lightning < 1.5``, and set it as ``accelerator='ddp2'``. + In certain cases, it's advantageous to use all batches on the same machine instead of a subset. For instance, you might want to compute a NCE loss where it pays to have more negative samples. @@ -345,10 +350,10 @@ is described as an ip address followed by a ssh port. See `Bagua Tutorials `_ for more details on installation and advanced features. -DP/DDP2 caveats -^^^^^^^^^^^^^^^ -In DP and DDP2 each GPU within a machine sees a portion of a batch. -DP and ddp2 roughly do the following: +DP caveats +^^^^^^^^^^ +In DP each GPU within a machine sees a portion of a batch. +It does roughly the following: .. testcode:: @@ -375,9 +380,8 @@ you will only be operating on one of those pieces. def training_step(self, batch, batch_idx): y_0 = batch -For most metrics, this doesn't really matter. However, if you want -to add something to your computational graph (like softmax) -using all batch parts you can use the `training_step_end` step. +For most metrics, this doesn't really matter. 
However, if you want to add something to your computational graph using +all batch parts you can use the `training_step_end` step. .. testcode:: @@ -410,29 +414,6 @@ In pseudocode, the full sequence is: # use the full batch for something like softmax full_out = model.training_step_end(all_results) -To illustrate why this is needed, let's look at DataParallel - -.. testcode:: - - def training_step(self, batch, batch_idx): - x, y = batch - y_hat = self(batch) - - # on dp or ddp2 if we did softmax now it would be wrong - # because batch is actually a piece of the full batch - return y_hat - - - def training_step_end(self, step_output): - # step_output has outputs of each part of the batch - - # do softmax here - outputs = torch.cat(outputs, dim=1) - softmax = softmax(outputs, dim=1) - out = softmax.mean() - - return out - If `training_step_end` is defined it will be called regardless of TPU, DP, DDP, etc... which means it will behave the same regardless of the backend. @@ -481,7 +462,7 @@ If you also need to use your own DDP implementation, override :meth:`pytorch_lig Torch Distributed Elastic ------------------------- -Lightning supports the use of Torch Distributed Elastic to enable fault-tolerant and elastic distributed job scheduling. To use it, specify the 'ddp' or 'ddp2' backend and the number of GPUs you want to use in the trainer. +Lightning supports the use of Torch Distributed Elastic to enable fault-tolerant and elastic distributed job scheduling. To use it, specify the 'ddp' backend and the number of GPUs you want to use in the trainer. .. code-block:: python diff --git a/docs/source-pytorch/api_references.rst b/docs/source-pytorch/api_references.rst index 401ba3928ce20..ba95e74428a15 100644 --- a/docs/source-pytorch/api_references.rst +++ b/docs/source-pytorch/api_references.rst @@ -270,7 +270,6 @@ strategies BaguaStrategy HivemindStrategy - DDP2Strategy DDPFullyShardedStrategy DDPShardedStrategy DDPSpawnShardedStrategy diff --git a/docs/source-pytorch/common/lightning_module.rst b/docs/source-pytorch/common/lightning_module.rst index 2e1a0c7d32338..636777ec7e9e5 100644 --- a/docs/source-pytorch/common/lightning_module.rst +++ b/docs/source-pytorch/common/lightning_module.rst @@ -281,7 +281,7 @@ Training with DataParallel ========================== When training using a ``strategy`` that splits data from each batch across GPUs, sometimes you might -need to aggregate them on the main GPU for processing (DP, or DDP2). +need to aggregate them on the main GPU for processing (DP). In this case, implement the :meth:`~pytorch_lightning.core.module.LightningModule.training_step_end` method which will have outputs from all the devices and you can accumulate to get the effective results. @@ -419,8 +419,8 @@ override the :meth:`~pytorch_lightning.core.module.LightningModule.validation_ep Validating with DataParallel ============================ -When training using a ``strategy`` that splits data from each batch across GPUs, sometimes you might -need to aggregate them on the main GPU for processing (DP, or DDP2). +When validating using a ``strategy`` that splits data from each batch across GPUs, sometimes you might +need to aggregate them on the main GPU for processing (DP). In this case, implement the :meth:`~pytorch_lightning.core.module.LightningModule.validation_step_end` method which will have outputs from all the devices and you can accumulate to get the effective results. 
diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 0e583ff931faf..6392f7dae5a1d 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -262,6 +262,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed deprecated `pytorch_lightning.callbacks.lr_monitor.LearningRateMonitor.lr_sch_names` ([#13353](https://github.com/Lightning-AI/lightning/pull/13353)) +- Removed support for the `DDP2Strategy` ([#12705](https://github.com/PyTorchLightning/pytorch-lightning/pull/12705)) + + ### Fixed diff --git a/src/pytorch_lightning/core/module.py b/src/pytorch_lightning/core/module.py index b4867ac7d32ff..ef4a869b3c502 100644 --- a/src/pytorch_lightning/core/module.py +++ b/src/pytorch_lightning/core/module.py @@ -654,8 +654,8 @@ def training_step(self, batch, batch_idx, hiddens): rank_zero_warn("`training_step` must be implemented to be used with the Lightning Trainer") def training_step_end(self, step_output: STEP_OUTPUT) -> STEP_OUTPUT: - """Use this when training with dp or ddp2 because :meth:`training_step` will operate on only part of the - batch. However, this is still optional and only needed for things like softmax or NCE loss. + """Use this when training with dp because :meth:`training_step` will operate on only part of the batch. + However, this is still optional and only needed for things like softmax or NCE loss. Note: If you later switch to ddp or some other mode, this will still be called @@ -674,7 +674,7 @@ def training_step_end(self, step_output: STEP_OUTPUT) -> STEP_OUTPUT: Return: Anything - When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step: + When using the DP strategy, only a portion of the batch is inside the training_step: .. code-block:: python @@ -836,8 +836,8 @@ def validation_step(self, batch, batch_idx, dataloader_idx=0): """ def validation_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: - """Use this when validating with dp or ddp2 because :meth:`validation_step` will operate on only part of - the batch. However, this is still optional and only needed for things like softmax or NCE loss. + """Use this when validating with dp because :meth:`validation_step` will operate on only part of the batch. + However, this is still optional and only needed for things like softmax or NCE loss. Note: If you later switch to ddp or some other mode, this will still be called @@ -859,7 +859,7 @@ def validation_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: .. code-block:: python # WITHOUT validation_step_end - # if used in DP or DDP2, this batch is 1/num_gpus large + # if used in DP, this batch is 1/num_gpus large def validation_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch @@ -1013,8 +1013,8 @@ def test_step(self, batch, batch_idx, dataloader_idx=0): """ def test_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: - """Use this when testing with DP or DDP2 because :meth:`test_step` will operate on only part of the batch. - However, this is still optional and only needed for things like softmax or NCE loss. + """Use this when testing with DP because :meth:`test_step` will operate on only part of the batch. However, + this is still optional and only needed for things like softmax or NCE loss. Note: If you later switch to ddp or some other mode, this will still be called @@ -1036,7 +1036,7 @@ def test_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: .. 
code-block:: python # WITHOUT test_step_end - # if used in DP or DDP2, this batch is 1/num_gpus large + # if used in DP, this batch is 1/num_gpus large def test_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch diff --git a/src/pytorch_lightning/plugins/training_type/ddp2.py b/src/pytorch_lightning/plugins/training_type/ddp2.py index 2660639df5f08..64cd2635bc134 100644 --- a/src/pytorch_lightning/plugins/training_type/ddp2.py +++ b/src/pytorch_lightning/plugins/training_type/ddp2.py @@ -12,13 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from pytorch_lightning.strategies import DDP2Strategy -from pytorch_lightning.utilities import rank_zero_deprecation class DDP2Plugin(DDP2Strategy): - def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] - rank_zero_deprecation( - "The `pl.plugins.training_type.ddp2.DDP2Plugin` is deprecated in v1.6 and will be removed in v1.8." - " Use `pl.strategies.ddp2.DDP2Strategy` instead." - ) - super().__init__(*args, **kwargs) + pass diff --git a/src/pytorch_lightning/strategies/ddp2.py b/src/pytorch_lightning/strategies/ddp2.py index 6046c831a8a69..81ee737e5286c 100644 --- a/src/pytorch_lightning/strategies/ddp2.py +++ b/src/pytorch_lightning/strategies/ddp2.py @@ -11,74 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict -from torch import Tensor -from pytorch_lightning.strategies.ddp import DDPStrategy -from pytorch_lightning.utilities.apply_func import apply_to_collection -from pytorch_lightning.utilities.types import _METRIC_COLLECTION +class DDP2Strategy: + """DDP2 behaves like DP in one node, but synchronization across nodes behaves like in DDP. - -class DDP2Strategy(DDPStrategy): - """DDP2 behaves like DP in one node, but synchronization across nodes behaves like in DDP.""" + .. deprecated:: v1.7 + This strategy is no longer supported in v1.7 will be removed completely in v1.8. For single-node execution, we + recommend the :class:`~pytorch_lightning.strategies.ddp.DDPStrategy` or the + :class:`~pytorch_lightning.strategies.dp.DataParallelStrategy` as a replacement. If you rely on DDP2, you will + need ``torch < 1.9`` and ``pytorch-lightning < 1.5``. + """ strategy_name = "ddp2" - @property - def global_rank(self) -> int: - return self.node_rank - - @property - def world_size(self) -> int: - return self.num_nodes - - def reduce(self, collection: _METRIC_COLLECTION, *args, **kwargs) -> _METRIC_COLLECTION: - """Reduces a collection of tensors from all processes. It can be applied to just a single tensor. In DDP2, - the reduction here is only across local devices within the node. - - Args: - collection: The collection of tensors to sync and reduce. - *args: ignored for DDP2 - **kwargs: ignored for DDP2 - - Return: - Reduced tensor values or the same value if it was not or did not contain a tensor. 
- """ - - def mean(t: Tensor) -> Tensor: - original_dtype = t.dtype - return t.float().mean().to(original_dtype) - - return apply_to_collection(collection, Tensor, mean) - - @property - def root_device(self): - return self.parallel_devices[0] - - def model_to_device(self): - # no need to do anything when model is wrapped in torch.nn.DataParallel - pass - - @property - def distributed_sampler_kwargs(self): - distributed_sampler_kwargs = dict(num_replicas=self.num_nodes, rank=self.global_rank) - return distributed_sampler_kwargs - - @property - def _is_single_process_single_device(self) -> bool: - return False - - def set_world_ranks(self) -> None: - if self.cluster_environment is None: - return - self.cluster_environment.set_global_rank(self.node_rank) - self.cluster_environment.set_world_size(self.num_nodes) - - @classmethod - def register_strategies(cls, strategy_registry: Dict) -> None: - strategy_registry.register( - cls.strategy_name, - cls, - description=f"{cls.__class__.__name__}", + def __new__(cls, *args, **kwargs) -> None: + raise TypeError( + "The `DDP2Strategy`/`DDP2Plugin` is no longer supported in v1.7 and will be removed completely in v1.8." + " For single-node execution, we recommend the `DDPStrategy` or the `DPStrategy`. If you rely on DDP2, you" + " will need `torch < 1.9` and `pytorch-lightning < 1.5`." ) diff --git a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py b/src/pytorch_lightning/trainer/connectors/accelerator_connector.py index 265cfdaf13f08..ab7094c90b1bc 100644 --- a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/src/pytorch_lightning/trainer/connectors/accelerator_connector.py @@ -599,7 +599,7 @@ def _check_strategy_and_fallback(self) -> None: TorchElasticEnvironment.detect() or KubeflowEnvironment.detect() or self._is_slurm_managing_tasks() ): strategy_flag = "ddp" - if strategy_flag in ("dp", "ddp2") and self._accelerator_flag == "cpu": + if strategy_flag == "dp" and self._accelerator_flag == "cpu": rank_zero_warn(f"{strategy_flag!r} is not supported on CPUs, hence setting `strategy='ddp'`.") strategy_flag = "ddp" if ( @@ -641,6 +641,13 @@ def _init_strategy(self) -> None: # TODO lazy initialized and setup horovod strategy `global_rank` self._handle_horovod() if isinstance(self._strategy_flag, str): + if self._strategy_flag == "ddp2": + # TODO: remove this error in v1.8 + raise ValueError( + "The DDP2 strategy is no longer supported. For single-node use, we recommend `strategy='ddp'` or" + " `strategy='dp'` as a replacement. If you need DDP2, you will need `torch < 1.9`," + " `pytorch-lightning < 1.5`, and set it as `accelerator='ddp2'`." 
+ ) self.strategy = StrategyRegistry.get(self._strategy_flag) elif isinstance(self._strategy_flag, Strategy): self.strategy = self._strategy_flag diff --git a/tests/tests_pytorch/accelerators/test_accelerator_connector.py b/tests/tests_pytorch/accelerators/test_accelerator_connector.py index 206e0fc69a773..098870d628905 100644 --- a/tests/tests_pytorch/accelerators/test_accelerator_connector.py +++ b/tests/tests_pytorch/accelerators/test_accelerator_connector.py @@ -36,7 +36,6 @@ from pytorch_lightning.plugins.io import TorchCheckpointIO from pytorch_lightning.strategies import ( DataParallelStrategy, - DDP2Strategy, DDPFullyShardedNativeStrategy, DDPShardedStrategy, DDPSpawnShardedStrategy, @@ -212,9 +211,6 @@ def test_ipython_incompatible_backend_error(_, monkeypatch): with pytest.raises(MisconfigurationException, match=r"strategy='ddp'\)`.*is not compatible"): Trainer(strategy="ddp", accelerator="gpu", devices=2) - with pytest.raises(MisconfigurationException, match=r"strategy='ddp2'\)`.*is not compatible"): - Trainer(strategy="ddp2", accelerator="gpu", devices=2) - with pytest.raises(MisconfigurationException, match=r"strategy='ddp_spawn'\)`.*is not compatible"): Trainer(strategy="ddp_spawn", accelerator="gpu", devices=2) @@ -341,10 +337,9 @@ def test_set_devices_if_none_cpu(): assert trainer.num_devices == 3 -@pytest.mark.parametrize("strategy", ["ddp2", "dp"]) -def test_unsupported_strategy_types_on_cpu(strategy): +def test_unsupported_strategy_types_on_cpu_and_fallback(): with pytest.warns(UserWarning, match="is not supported on CPUs, hence setting `strategy='ddp"): - trainer = Trainer(strategy=strategy, num_processes=2) + trainer = Trainer(strategy="dp", num_processes=2) assert isinstance(trainer.strategy, DDPStrategy) @@ -383,7 +378,6 @@ def test_strategy_choice_cpu_instance(strategy_class): ("ddp_spawn_find_unused_parameters_false", DDPSpawnStrategy), ("ddp", DDPStrategy), ("ddp_find_unused_parameters_false", DDPStrategy), - ("ddp2", DDP2Strategy), ("dp", DataParallelStrategy), ("ddp_sharded", DDPShardedStrategy), ("ddp_sharded_spawn", DDPSpawnShardedStrategy), @@ -474,34 +468,6 @@ def test_strategy_choice_ddp_slurm(setup_distributed_mock, strategy): assert trainer.strategy.local_rank == 1 -@mock.patch.dict( - os.environ, - { - "CUDA_VISIBLE_DEVICES": "0,1", - "SLURM_NTASKS": "2", - "SLURM_JOB_NAME": "SOME_NAME", - "SLURM_NODEID": "0", - "SLURM_PROCID": "1", - "SLURM_LOCALID": "1", - }, -) -@mock.patch("torch.cuda.set_device") -@mock.patch("torch.cuda.device_count", return_value=2) -@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True) -@mock.patch("torch.cuda.is_available", return_value=True) -@pytest.mark.parametrize("strategy", ["ddp2", DDP2Strategy()]) -def test_strategy_choice_ddp2_slurm( - set_device_mock, device_count_mock, setup_distributed_mock, is_available_mock, strategy -): - trainer = Trainer(fast_dev_run=True, strategy=strategy, accelerator="gpu", devices=2) - assert trainer._accelerator_connector._is_slurm_managing_tasks() - assert isinstance(trainer.accelerator, GPUAccelerator) - assert isinstance(trainer.strategy, DDP2Strategy) - assert isinstance(trainer.strategy.cluster_environment, SLURMEnvironment) - assert trainer.strategy.cluster_environment.local_rank() == 1 - assert trainer.strategy.local_rank == 1 - - @mock.patch.dict( os.environ, { @@ -528,32 +494,6 @@ def test_strategy_choice_ddp_te(*_): assert trainer.strategy.local_rank == 1 -@mock.patch.dict( - os.environ, - { - "CUDA_VISIBLE_DEVICES": "0,1", - "WORLD_SIZE": "2", - 
"LOCAL_WORLD_SIZE": "2", - "RANK": "1", - "LOCAL_RANK": "1", - "GROUP_RANK": "0", - "TORCHELASTIC_RUN_ID": "1", - }, -) -@mock.patch("torch.cuda.set_device") -@mock.patch("torch.cuda.device_count", return_value=2) -@mock.patch("torch.cuda.is_available", return_value=True) -@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True) -@mock.patch("torch.cuda.is_available", return_value=True) -def test_strategy_choice_ddp2_te(*_): - trainer = Trainer(fast_dev_run=True, strategy="ddp2", accelerator="gpu", devices=2) - assert isinstance(trainer.accelerator, GPUAccelerator) - assert isinstance(trainer.strategy, DDP2Strategy) - assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment) - assert trainer.strategy.cluster_environment.local_rank() == 1 - assert trainer.strategy.local_rank == 1 - - @mock.patch.dict( os.environ, { diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index 758367d1dd40b..85328372acaa0 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -42,7 +42,7 @@ from pytorch_lightning.plugins.training_type.tpu_spawn import TPUSpawnPlugin from pytorch_lightning.profiler import AbstractProfiler, BaseProfiler from pytorch_lightning.profilers import AdvancedProfiler, Profiler, SimpleProfiler -from pytorch_lightning.strategies import ParallelStrategy +from pytorch_lightning.strategies import DDP2Strategy, ParallelStrategy from pytorch_lightning.trainer.configuration_validator import _check_datamodule_checkpoint_hooks from pytorch_lightning.trainer.states import RunningStage from pytorch_lightning.utilities.apply_func import move_data_to_device @@ -330,7 +330,6 @@ def test_v_1_8_0_deprecated_device_stats_monitor_prefix_metric_keys(): "cls", [ DDPPlugin, - DDP2Plugin, DDPSpawnPlugin, pytest.param(DeepSpeedPlugin, marks=RunIf(deepspeed=True)), DataParallelPlugin, @@ -1145,4 +1144,15 @@ def test_trainer_tpu_cores(monkeypatch): match="`Trainer.tpu_cores` is deprecated in v1.6 and will be removed in v1.8. " "Please use `Trainer.num_devices` instead." 
): - trainer.tpu_cores == 8 + assert trainer.tpu_cores == 8 + + +def test_unsupported_ddp2_strategy(): + with pytest.raises(TypeError, match="The `DDP2Strategy`/`DDP2Plugin` is no longer supported in v1.7 and will be"): + DDP2Strategy() + + with pytest.raises(TypeError, match="The `DDP2Strategy`/`DDP2Plugin` is no longer supported in v1.7 and will be"): + DDP2Plugin() + + with pytest.raises(ValueError, match="The DDP2 strategy is no longer supported."): + Trainer(strategy="ddp2") diff --git a/tests/tests_pytorch/plugins/test_amp_plugins.py b/tests/tests_pytorch/plugins/test_amp_plugins.py index 4f990e74b20aa..5b6b3db334219 100644 --- a/tests/tests_pytorch/plugins/test_amp_plugins.py +++ b/tests/tests_pytorch/plugins/test_amp_plugins.py @@ -47,7 +47,7 @@ class MyApexPlugin(ApexMixedPrecisionPlugin): ) @mock.patch("torch.cuda.is_available", return_value=True) @mock.patch("torch.cuda.device_count", return_value=2) -@pytest.mark.parametrize("strategy,devices", [("ddp", 2), ("ddp2", 2), ("ddp_spawn", 2)]) +@pytest.mark.parametrize("strategy,devices", [("ddp", 2), ("ddp_spawn", 2)]) @pytest.mark.parametrize( "amp,custom_plugin,plugin_cls", [ diff --git a/tests/tests_pytorch/plugins/test_cluster_integration.py b/tests/tests_pytorch/plugins/test_cluster_integration.py index 10ed1d6a4a650..a5c4f7e101761 100644 --- a/tests/tests_pytorch/plugins/test_cluster_integration.py +++ b/tests/tests_pytorch/plugins/test_cluster_integration.py @@ -19,7 +19,7 @@ from pytorch_lightning import Trainer from pytorch_lightning.plugins.environments import LightningEnvironment, SLURMEnvironment, TorchElasticEnvironment -from pytorch_lightning.strategies import DDP2Strategy, DDPShardedStrategy, DDPStrategy, DeepSpeedStrategy +from pytorch_lightning.strategies import DDPShardedStrategy, DDPStrategy, DeepSpeedStrategy from pytorch_lightning.utilities.rank_zero import rank_zero_only from tests_pytorch.helpers.runif import RunIf @@ -57,17 +57,13 @@ def environment_combinations(): @pytest.mark.parametrize( "strategy_cls", - [DDPStrategy, DDPShardedStrategy, DDP2Strategy, pytest.param(DeepSpeedStrategy, marks=RunIf(deepspeed=True))], + [DDPStrategy, DDPShardedStrategy, pytest.param(DeepSpeedStrategy, marks=RunIf(deepspeed=True))], ) @mock.patch("pytorch_lightning.accelerators.gpu.GPUAccelerator.is_available", return_value=True) def test_ranks_available_manual_strategy_selection(mock_gpu_acc_available, strategy_cls): """Test that the rank information is readily available after Trainer initialization.""" num_nodes = 2 for cluster, variables, expected in environment_combinations(): - - if strategy_cls == DDP2Strategy: - expected.update(global_rank=expected["node_rank"], world_size=num_nodes) - with mock.patch.dict(os.environ, variables): strategy = strategy_cls( parallel_devices=[torch.device("cuda", 1), torch.device("cuda", 2)], cluster_environment=cluster @@ -85,7 +81,6 @@ def test_ranks_available_manual_strategy_selection(mock_gpu_acc_available, strat [ dict(strategy="ddp", accelerator="gpu", devices=[1, 2]), dict(strategy="ddp_sharded", accelerator="gpu", devices=[1, 2]), - dict(strategy="ddp2", accelerator="gpu", devices=[1, 2]), dict(strategy="ddp_spawn", accelerator="cpu", devices=2), dict(strategy="ddp_spawn", accelerator="gpu", devices=[1, 2]), ], @@ -98,9 +93,6 @@ def test_ranks_available_automatic_strategy_selection(mock0, mock1, trainer_kwar trainer_kwargs.update(num_nodes=num_nodes) for cluster, variables, expected in environment_combinations(): - - if trainer_kwargs["strategy"] == "ddp2": - 
expected.update(global_rank=expected["node_rank"], world_size=num_nodes) if trainer_kwargs["strategy"] == "ddp_spawn": if isinstance(cluster, (SLURMEnvironment, TorchElasticEnvironment)): # slurm and torchelastic do not work with spawn strategies diff --git a/tests/tests_pytorch/trainer/properties/test_estimated_stepping_batches.py b/tests/tests_pytorch/trainer/properties/test_estimated_stepping_batches.py index ab19002d751a2..4929b2a801a70 100644 --- a/tests/tests_pytorch/trainer/properties/test_estimated_stepping_batches.py +++ b/tests/tests_pytorch/trainer/properties/test_estimated_stepping_batches.py @@ -122,9 +122,6 @@ def test_num_stepping_batches_accumulate_gradients(accumulate_grad_batches, expe ({"strategy": "ddp", "num_nodes": 3}, 4), ({"strategy": "ddp", "num_nodes": 4}, 3), ({"strategy": "dp"}, 64), - ({"strategy": "ddp2", "num_nodes": 1}, 64), - ({"strategy": "ddp2", "num_nodes": 2}, 32), - ({"strategy": "ddp2", "num_nodes": 3}, 22), ], ) def test_num_stepping_batches_gpu(trainer_kwargs, estimated_steps, monkeypatch): diff --git a/tests/tests_pytorch/trainer/test_trainer.py b/tests/tests_pytorch/trainer/test_trainer.py index 5966f4a41267e..d8e9e27d9ec1b 100644 --- a/tests/tests_pytorch/trainer/test_trainer.py +++ b/tests/tests_pytorch/trainer/test_trainer.py @@ -45,7 +45,6 @@ from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler from pytorch_lightning.strategies import ( DataParallelStrategy, - DDP2Strategy, DDPFullyShardedStrategy, DDPShardedStrategy, DDPSpawnShardedStrategy, @@ -1974,7 +1973,6 @@ def training_step(self, batch, batch_idx): ({"strategy": "dp"}, DDPStrategy, "ddp", CPUAccelerator, 1), ({"strategy": "ddp"}, DDPStrategy, "ddp", CPUAccelerator, 1), ({"strategy": "ddp", "num_nodes": 2}, DDPStrategy, "ddp", CPUAccelerator, 1), - ({"strategy": "ddp2"}, DDPStrategy, "ddp", CPUAccelerator, 1), ( {"strategy": None, "accelerator": "gpu", "devices": 1}, SingleDeviceStrategy, @@ -1991,12 +1989,9 @@ def training_step(self, batch, batch_idx): GPUAccelerator, 1, ), - ({"strategy": "ddp2", "accelerator": "gpu", "devices": 1}, DDP2Strategy, "ddp2", GPUAccelerator, 1), ({"strategy": None, "accelerator": "gpu", "devices": 2}, DDPSpawnStrategy, "ddp_spawn", GPUAccelerator, 2), ({"strategy": "dp", "accelerator": "gpu", "devices": 2}, DataParallelStrategy, "dp", GPUAccelerator, 2), ({"strategy": "ddp", "accelerator": "gpu", "devices": 2}, DDPStrategy, "ddp", GPUAccelerator, 2), - ({"strategy": "ddp2", "accelerator": "gpu", "devices": 2}, DDP2Strategy, "ddp2", GPUAccelerator, 2), - ({"strategy": "ddp2", "accelerator": "cpu", "devices": 2}, DDPStrategy, "ddp", CPUAccelerator, 2), ({"strategy": "ddp", "accelerator": "cpu", "devices": 2}, DDPStrategy, "ddp", CPUAccelerator, 2), ( {"strategy": "ddp_spawn", "accelerator": "cpu", "devices": 2}, @@ -2035,7 +2030,6 @@ def training_step(self, batch, batch_idx): ), ({"strategy": DDPStrategy()}, DDPStrategy, "ddp", CPUAccelerator, 1), ({"strategy": DDPStrategy(), "accelerator": "gpu", "devices": 2}, DDPStrategy, "ddp", GPUAccelerator, 2), - ({"strategy": DDP2Strategy(), "accelerator": "gpu", "devices": 2}, DDP2Strategy, "ddp2", GPUAccelerator, 2), ( {"strategy": DataParallelStrategy(), "accelerator": "gpu", "devices": 2}, DataParallelStrategy, @@ -2064,13 +2058,6 @@ def training_step(self, batch, batch_idx): GPUAccelerator, 2, ), - ( - {"strategy": "ddp2", "accelerator": "gpu", "devices": 2, "num_nodes": 2}, - DDP2Strategy, - "ddp2", - GPUAccelerator, - 2, - ), ( {"strategy": "ddp_spawn", 
"accelerator": "gpu", "devices": 2, "num_nodes": 2}, DDPSpawnStrategy, From 2c18643f81246369704e4f383d1ca60fd764e9a6 Mon Sep 17 00:00:00 2001 From: Keiichi Kuroyanagi Date: Thu, 30 Jun 2022 00:18:07 +0900 Subject: [PATCH 16/89] [Docs] Fix README.md in lightning/examples/pl_basics (#13380) * Change the path of the command execution folder from mnist_examples to convert_from_pt_to_pl * Add a guide to add PYTHONPATH * Fix Lightning Lite link * Remove duplicate * Add note Co-authored-by: Akihiro Nitta --- examples/README.md | 6 ++ examples/convert_from_pt_to_pl/README.md | 2 +- examples/pl_basics/README.md | 76 +----------------------- 3 files changed, 10 insertions(+), 74 deletions(-) diff --git a/examples/README.md b/examples/README.md index e16a8bfcf190b..dfb2b0047ac5a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -5,6 +5,12 @@ can be found in our sister library [Lightning Bolts](https://pytorch-lightning.r ______________________________________________________________________ +*Note that some examples may rely on new features that are only available in the development branch and may be incompatible with any releases.* +*If you see any errors, you might want to consider switching to a version tag you would like to run examples with.* +*For example, if you're using `pytorch-lightning==1.6.4` in your environment and seeing issues, run examples of the tag [1.6.4](https://github.com/Lightning-AI/lightning/tree/1.6.4/pl_examples).* + +______________________________________________________________________ + ## MNIST Examples 5 MNIST examples showing how to gradually convert from pure PyTorch to PyTorch Lightning. diff --git a/examples/convert_from_pt_to_pl/README.md b/examples/convert_from_pt_to_pl/README.md index 2cc749509f042..3e0626b70ad24 100644 --- a/examples/convert_from_pt_to_pl/README.md +++ b/examples/convert_from_pt_to_pl/README.md @@ -2,7 +2,7 @@ Here are 5 MNIST examples showing you how to gradually convert from pure PyTorch to PyTorch Lightning. -The transition through [LightningLite](https://pytorch-lightning.readthedocs.io/en/latest/stable/lightning_lite.rst) from pure PyTorch is optional but it might be helpful to learn about it. +The transition through [LightningLite](https://pytorch-lightning.readthedocs.io/en/stable/starter/lightning_lite.html) from pure PyTorch is optional but it might be helpful to learn about it. #### 1. Image Classifier with Vanilla PyTorch diff --git a/examples/pl_basics/README.md b/examples/pl_basics/README.md index c049347c4a5d9..2064a61ce67f0 100644 --- a/examples/pl_basics/README.md +++ b/examples/pl_basics/README.md @@ -2,77 +2,7 @@ Use these examples to test how Lightning works. -## MNIST Examples - -Here are 5 MNIST examples showing you how to gradually convert from pure PyTorch to PyTorch Lightning. - -The transition through [LightningLite](https://pytorch-lightning.readthedocs.io/en/stable/starter/lightning_lite.html) from pure PyTorch is optional but it might be helpful to learn about it. - -#### 1. Image Classifier with Vanilla PyTorch - -Trains a simple CNN over MNIST using vanilla PyTorch. - -```bash -# CPU -python mnist_examples/image_classifier_1_pytorch.py -``` - -______________________________________________________________________ - -#### 2. Image Classifier with LightningLite - -This script shows you how to scale the previous script to enable GPU and multi-GPU training using [LightningLite](https://pytorch-lightning.readthedocs.io/en/stable/starter/lightning_lite.html). 
- -```bash -# CPU / multiple GPUs if available -python mnist_examples/image_classifier_2_lite.py -``` - -______________________________________________________________________ - -#### 3. Image Classifier - Conversion from Lite to Lightning - -This script shows you how to prepare your conversion from [LightningLite](https://pytorch-lightning.readthedocs.io/en/stable/starter/lightning_lite.html) to `LightningModule`. - -```bash -# CPU / multiple GPUs if available -python mnist_examples/image_classifier_3_lite_to_lightning_module.py -``` - -______________________________________________________________________ - -#### 4. Image Classifier with LightningModule - -This script shows you the result of the conversion to the `LightningModule` and finally all the benefits you get from the Lightning ecosystem. - -```bash -# CPU -python mnist_examples/image_classifier_4_lightning_module.py - -# GPUs (any number) -python mnist_examples/image_classifier_4_lightning_module.py --trainer.accelerator 'gpu' --trainer.devices 2 -``` - -______________________________________________________________________ - -#### 5. Image Classifier with LightningModule and LightningDataModule - -This script shows you how to extract the data related components into a `LightningDataModule`. - -```bash -# CPU -python mnist_examples/image_classifier_5_lightning_datamodule.py - -# GPUs (any number) -python mnist_examples/image_classifier_5_lightning_datamodule.py --trainer.accelerator 'gpu' --trainer.devices 2 - -# Distributed Data Parallel (DDP) -python mnist_examples/image_classifier_5_lightning_datamodule.py --trainer.accelerator 'gpu' --trainer.devices 2 --trainer.strategy 'ddp' -``` - -______________________________________________________________________ - -#### AutoEncoder +### AutoEncoder This script shows you how to implement a CNN auto-encoder. @@ -89,7 +19,7 @@ python autoencoder.py --trainer.accelerator 'gpu' --trainer.devices 2 --trainer. ______________________________________________________________________ -#### Backbone Image Classifier +### Backbone Image Classifier This script shows you how to implement a `LightningModule` as a system. A system describes a `LightningModule` which takes a single `torch.nn.Module` which makes exporting to producion simpler. @@ -107,7 +37,7 @@ python backbone_image_classifier.py --trainer.accelerator 'gpu' --trainer.device ______________________________________________________________________ -#### PyTorch Profiler +### PyTorch Profiler This script shows you how to activate the [PyTorch Profiler](https://github.com/pytorch/kineto) with Lightning. From 6338ad5b3b896d4787af16042a0acbf3b6bacf56 Mon Sep 17 00:00:00 2001 From: ananthsub Date: Wed, 29 Jun 2022 10:03:00 -0700 Subject: [PATCH 17/89] Update gather_all_tensors to handle tensors of different sizes (#12630) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jirka Co-authored-by: awaelchli --- src/pytorch_lightning/CHANGELOG.md | 3 + .../utilities/distributed.py | 40 +++++++++++-- .../utilities/test_distributed.py | 58 ++++++++++++++++++- 3 files changed, 96 insertions(+), 5 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 6392f7dae5a1d..3e9da2adbde5a 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -289,6 +289,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
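The `gather_all_tensors` update in this patch pads each rank's tensor to a common shape before `all_gather` and trims the results back afterwards. A minimal single-process sketch of that pad-and-trim idea (the two shapes, `(1, 2)` and `(2, 1)`, are hypothetical stand-ins for what two ranks might hold; no process group is involved):

```python
import torch
import torch.nn.functional as F

# Stand-ins for the tensors two ranks might hold; the shapes differ per "rank".
local_tensors = [torch.ones(1, 2), torch.ones(2, 1)]
local_sizes = [torch.tensor(t.shape) for t in local_tensors]
max_size = torch.stack(local_sizes).max(dim=0).values  # tensor([2, 2])

# Pad each tensor at the end of every dimension up to the common maximum shape.
padded = []
for tensor, size in zip(local_tensors, local_sizes):
    pad_dims = []
    for val in reversed(max_size - size):
        pad_dims.extend([0, int(val)])  # F.pad expects (last-dim-left, last-dim-right, ...)
    padded.append(F.pad(tensor, pad_dims))

# After the real all_gather, every padded tensor is trimmed back to its original shape.
trimmed = [p[tuple(slice(int(d)) for d in size)] for p, size in zip(padded, local_sizes)]
assert [tuple(t.shape) for t in trimmed] == [(1, 2), (2, 1)]
```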
- Fixed Model Summary when using DeepSpeed Stage 3 ([#13427](https://github.com/PyTorchLightning/pytorch-lightning/pull/13427)) +- Fixed `pytorch_lightning.utilities.distributed.gather_all_tensors` to handle tensors of different dimensions ([#12630](https://github.com/PyTorchLightning/pytorch-lightning/pull/12630)) + + - diff --git a/src/pytorch_lightning/utilities/distributed.py b/src/pytorch_lightning/utilities/distributed.py index 20740f952d4bd..ed78559102ac4 100644 --- a/src/pytorch_lightning/utilities/distributed.py +++ b/src/pytorch_lightning/utilities/distributed.py @@ -17,6 +17,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch +import torch.nn.functional as F from torch import Tensor from torch.nn.parallel.distributed import DistributedDataParallel @@ -48,6 +49,9 @@ class group: # type: ignore def gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]: """Function to gather all tensors from several ddp processes onto a list that is broadcasted to all processes. + Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case + tensors are padded, gathered and then trimmed to secure equal workload for all processes. + Args: result: the value to sync group: the process group to gather results from. Defaults to all processes (world) @@ -63,13 +67,41 @@ def gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tens result = result.contiguous() world_size = torch.distributed.get_world_size(group) + torch.distributed.barrier(group=group) - gathered_result = [torch.zeros_like(result) for _ in range(world_size)] + # if the tensor is scalar, things are easy + if result.ndim == 0: + return _simple_gather_all_tensors(result, group, world_size) + + # 1. Gather sizes of all tensors + local_size = torch.tensor(result.shape, device=result.device) + local_sizes = [torch.zeros_like(local_size) for _ in range(world_size)] + torch.distributed.all_gather(local_sizes, local_size, group=group) + max_size = torch.stack(local_sizes).max(dim=0).values + all_sizes_equal = all(all(ls == max_size) for ls in local_sizes) + + # 2. If shapes are all the same, then do a simple gather: + if all_sizes_equal: + return _simple_gather_all_tensors(result, group, world_size) + + # 3. 
If not, we need to pad each local tensor to maximum size, gather and then truncate + pad_dims = [] + pad_by = (max_size - local_size).detach().cpu() + for val in reversed(pad_by): + pad_dims.append(0) + pad_dims.append(val.item()) + result_padded = F.pad(result, pad_dims) + gathered_result = [torch.zeros_like(result_padded) for _ in range(world_size)] + torch.distributed.all_gather(gathered_result, result_padded, group) + for idx, item_size in enumerate(local_sizes): + slice_param = [slice(dim_size) for dim_size in item_size] + gathered_result[idx] = gathered_result[idx][slice_param] + return gathered_result - # sync and broadcast all - torch.distributed.barrier(group=group) - torch.distributed.all_gather(gathered_result, result, group) +def _simple_gather_all_tensors(result: torch.Tensor, group: Any, world_size: int) -> List[torch.Tensor]: + gathered_result = [torch.zeros_like(result) for _ in range(world_size)] + torch.distributed.all_gather(gathered_result, result, group) return gathered_result diff --git a/tests/tests_pytorch/utilities/test_distributed.py b/tests/tests_pytorch/utilities/test_distributed.py index f5ceb96ecbd9b..c3c90b5da6a21 100644 --- a/tests/tests_pytorch/utilities/test_distributed.py +++ b/tests/tests_pytorch/utilities/test_distributed.py @@ -13,11 +13,13 @@ # limitations under the License. import os +import pytest import torch +import torch.distributed import torch.multiprocessing as mp import tests_pytorch.helpers.utils as tutils -from pytorch_lightning.utilities.distributed import _collect_states_on_rank_zero +from pytorch_lightning.utilities.distributed import _collect_states_on_rank_zero, gather_all_tensors from tests_pytorch.helpers.runif import RunIf @@ -42,3 +44,57 @@ def test_collect_states(): """ tutils.set_random_main_port() mp.spawn(_test_collect_states, args=(2,), nprocs=2) + + +def _test_all_gather_uneven_tensors(rank, world_size, backend): + os.environ["MASTER_ADDR"] = "localhost" + + if backend == "nccl": + device = torch.device("cuda", rank) + torch.cuda.set_device(device) + else: + device = torch.device("cpu") + + # initialize the process group + torch.distributed.init_process_group(backend, rank=rank, world_size=world_size) + + tensor = torch.ones(rank, device=device) + result = gather_all_tensors(tensor) + assert len(result) == world_size + for idx in range(world_size): + assert len(result[idx]) == idx + assert (result[idx] == torch.ones_like(result[idx])).all() + + +def _test_all_gather_uneven_tensors_multidim(rank, world_size, backend): + os.environ["MASTER_ADDR"] = "localhost" + + if backend == "nccl": + device = torch.device("cuda", rank) + torch.cuda.set_device(device) + else: + device = torch.device("cpu") + + # initialize the process group + torch.distributed.init_process_group(backend, rank=rank, world_size=world_size) + tensor = torch.ones(rank + 1, 2 - rank, device=device) + result = gather_all_tensors(tensor) + assert len(result) == world_size + for idx in range(world_size): + val = result[idx] + assert val.shape == (idx + 1, 2 - idx) + assert (val == torch.ones_like(val)).all() + + +@RunIf(min_torch="1.10", skip_windows=True) +@pytest.mark.parametrize( + "process", + [ + _test_all_gather_uneven_tensors_multidim, + _test_all_gather_uneven_tensors, + ], +) +@pytest.mark.parametrize("backend", [pytest.param("nccl", marks=RunIf(min_cuda_gpus=2)), "gloo"]) +def test_gather_all_tensors(backend, process): + tutils.set_random_main_port() + mp.spawn(process, args=(2, backend), nprocs=2) From 2e7cff7d47a2145db8bf646e722598e31b544951 Mon Sep 17 
00:00:00 2001 From: Martino Sorbaro Date: Wed, 29 Jun 2022 19:34:23 +0200 Subject: [PATCH 18/89] Modified python version check to accommodate for legacy version styles (#13420) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- src/pytorch_lightning/CHANGELOG.md | 3 +++ src/pytorch_lightning/utilities/imports.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 3e9da2adbde5a..5e34039e77ed0 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -286,6 +286,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Fixed `estimated_stepping_batches` requiring distributed comms in `configure_optimizers` for the `DeepSpeedStrategy` ([#13350](https://github.com/PyTorchLightning/pytorch-lightning/pull/13350)) +- Fixed bug with Python version check that prevented use with development versions of Python ([#13420](https://github.com/PyTorchLightning/pytorch-lightning/pull/13420)) + + - Fixed Model Summary when using DeepSpeed Stage 3 ([#13427](https://github.com/PyTorchLightning/pytorch-lightning/pull/13427)) diff --git a/src/pytorch_lightning/utilities/imports.py b/src/pytorch_lightning/utilities/imports.py index 4055ba03f8ebe..6fbeda8a7c600 100644 --- a/src/pytorch_lightning/utilities/imports.py +++ b/src/pytorch_lightning/utilities/imports.py @@ -123,7 +123,7 @@ def __repr__(self) -> str: _IS_WINDOWS = platform.system() == "Windows" _IS_INTERACTIVE = hasattr(sys, "ps1") # https://stackoverflow.com/a/64523765 -_PYTHON_GREATER_EQUAL_3_8_0 = Version(platform.python_version()) >= Version("3.8.0") +_PYTHON_GREATER_EQUAL_3_8_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 8) _TORCH_GREATER_EQUAL_1_9_1 = _compare_version("torch", operator.ge, "1.9.1") _TORCH_GREATER_EQUAL_1_10 = _compare_version("torch", operator.ge, "1.10.0") _TORCH_LESSER_EQUAL_1_10_2 = _compare_version("torch", operator.le, "1.10.2") From 177d3b4f7eef4d46a877cf9f33158d5ad7a8d039 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Wed, 29 Jun 2022 19:36:36 +0200 Subject: [PATCH 19/89] fix PL release docker (#13439) --- dockers/release/Dockerfile | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/dockers/release/Dockerfile b/dockers/release/Dockerfile index f2c27d5a79ac0..cb393c91dfbe0 100644 --- a/dockers/release/Dockerfile +++ b/dockers/release/Dockerfile @@ -13,7 +13,7 @@ # limitations under the License. ARG PYTHON_VERSION=3.9 -ARG PYTORCH_VERSION=1.9 +ARG PYTORCH_VERSION=1.11 FROM pytorchlightning/pytorch_lightning:base-cuda-py${PYTHON_VERSION}-torch${PYTORCH_VERSION} @@ -21,23 +21,28 @@ LABEL maintainer="Lightning-AI " ARG LIGHTNING_VERSION="" -COPY ./ /home/pytorch-lightning/ +COPY ./ /home/lightning/ + +ENV PACKAGE_NAME=pytorch # install dependencies RUN \ cd /home && \ - mv pytorch-lightning/_notebooks notebooks && \ - mv pytorch-lightning/examples . && \ + mv lightning/_notebooks notebooks && \ + mv lightning/examples . && \ # replace by specific version if asked if [ ! 
-z "$LIGHTNING_VERSION" ] ; then \ - rm -rf pytorch-lightning ; \ + rm -rf lightning ; \ wget https://github.com/Lightning-AI/lightning/archive/${LIGHTNING_VERSION}.zip --progress=bar:force:noscroll ; \ unzip ${LIGHTNING_VERSION}.zip ; \ - mv pytorch-lightning-*/ pytorch-lightning ; \ + mv lightning-*/ lightning ; \ rm *.zip ; \ fi && \ - pip install ./pytorch-lightning["extra","loggers","strategies"] --no-cache-dir && \ - rm -rf pytorch-lightning + # otherwise there is collision with folder name ans pkg name on Pypi + cd lightning && \ + pip install .["extra","loggers","strategies"] --no-cache-dir && \ + cd .. && \ + rm -rf lightning RUN python --version && \ pip --version && \ From bb7d825426c6c5cf468a7f2eef44086bd442c9f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wang=20Ran=20=28=E6=B1=AA=E7=84=B6=29?= Date: Thu, 30 Jun 2022 01:52:09 +0800 Subject: [PATCH 20/89] Fix docstring typo (#13447) --- src/pytorch_lightning/utilities/seed.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pytorch_lightning/utilities/seed.py b/src/pytorch_lightning/utilities/seed.py index 01f6d09966aaa..6648b5a56b2b1 100644 --- a/src/pytorch_lightning/utilities/seed.py +++ b/src/pytorch_lightning/utilities/seed.py @@ -97,8 +97,8 @@ def reset_seed() -> None: def pl_worker_init_function(worker_id: int, rank: Optional[int] = None) -> None: # pragma: no cover - """The worker_init_fn that Lightning automatically adds to your dataloader if you previously set set the seed - with ``seed_everything(seed, workers=True)``. + """The worker_init_fn that Lightning automatically adds to your dataloader if you previously set the seed with + ``seed_everything(seed, workers=True)``. See also the PyTorch documentation on `randomness in DataLoaders `_. From b6a666a6135db3acf5188c236e8a4bc2f3a4d263 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Thu, 30 Jun 2022 03:44:46 +0900 Subject: [PATCH 21/89] Unpin `protobuf` version and update `tensorboard` version (#13259) * Remove protobuf from base req * Update tensorboard version Co-authored-by: Jirka --- requirements/pytorch/base.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements/pytorch/base.txt b/requirements/pytorch/base.txt index fc20b78a58d3f..48374063e03a8 100644 --- a/requirements/pytorch/base.txt +++ b/requirements/pytorch/base.txt @@ -3,9 +3,8 @@ torch>=1.9.*, <=1.11.0 # strict tqdm>=4.57.0, <=4.63.0 PyYAML>=5.4, <=6.0 fsspec[http]>=2021.05.0, !=2021.06.0, <2022.6.0 -tensorboard>=2.2.0, <2.10.0 +tensorboard>=2.9.1, <2.10.0 torchmetrics>=0.7.0, <0.9.2 # needed for using fixed compare_version pyDeprecate>=0.3.1, <=0.3.2 packaging>=17.0, <=21.3 typing-extensions>=4.0.0, <4.2.1 -protobuf<=3.20.1 # strict TODO: Remove after tensorboard gets compatible https://github.com/tensorflow/tensorboard/issues/5708 From 891f8f288b54a36a65cd8a52fc1d41c98d70fe7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 29 Jun 2022 20:51:48 +0200 Subject: [PATCH 22/89] Remove remaining old-style AcceleratorConnector properties (#13412) Co-authored-by: Jirka --- .../trainer/connectors/accelerator_connector.py | 15 +-------------- src/pytorch_lightning/trainer/trainer.py | 2 +- .../deprecated_api/test_remove_1-8.py | 12 ++++++++---- 3 files changed, 10 insertions(+), 19 deletions(-) diff --git a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py b/src/pytorch_lightning/trainer/connectors/accelerator_connector.py index ab7094c90b1bc..2ec0e30a5b739 100644 --- 
a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/src/pytorch_lightning/trainer/connectors/accelerator_connector.py @@ -811,22 +811,9 @@ def _lazy_init_strategy(self) -> None: f" found {self.strategy.__class__.__name__}." ) - """The following properties are here for backward-compatibility and will be deprecated and removed in favor - of accessing this information through the strategy/accelerator directly.""" - # TODO: deprecate all properties below - - @property - def tpu_cores(self) -> Optional[Union[List[int], int]]: - if isinstance(self.accelerator, TPUAccelerator): - return self._tpu_cores # type: ignore - return 0 - - @property - def gpus(self) -> Optional[Union[List[int], str, int]]: - return self._gpus - @property def is_distributed(self) -> bool: + # TODO: deprecate this property # Used for custom plugins. # Custom plugins should implement is_distributed property. if hasattr(self.strategy, "is_distributed") and not isinstance(self.accelerator, TPUAccelerator): diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index 46774395fd5e2..e823ff7e08eb0 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -2180,7 +2180,7 @@ def gpus(self) -> Optional[Union[List[int], str, int]]: "`Trainer.gpus` was deprecated in v1.6 and will be removed in v1.8." " Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead." ) - return self._accelerator_connector.gpus + return self._accelerator_connector._gpus @property def model(self) -> torch.nn.Module: diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index 85328372acaa0..de02cba564c0a 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -1131,8 +1131,10 @@ def test_trainer_gpus(monkeypatch, trainer_kwargs): monkeypatch.setattr(torch.cuda, "device_count", lambda: 4) trainer = Trainer(**trainer_kwargs) with pytest.deprecated_call( - match="`Trainer.gpus` was deprecated in v1.6 and will be removed in v1.8." - " Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead." + match=( + "`Trainer.gpus` was deprecated in v1.6 and will be removed in v1.8." + " Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead." + ) ): assert trainer.gpus == trainer_kwargs["devices"] @@ -1141,8 +1143,10 @@ def test_trainer_tpu_cores(monkeypatch): monkeypatch.setattr(pytorch_lightning.accelerators.tpu.TPUAccelerator, "is_available", lambda _: True) trainer = Trainer(accelerator="tpu", devices=8) with pytest.deprecated_call( - match="`Trainer.tpu_cores` is deprecated in v1.6 and will be removed in v1.8. " - "Please use `Trainer.num_devices` instead." + match=( + "`Trainer.tpu_cores` is deprecated in v1.6 and will be removed in v1.8. " + "Please use `Trainer.num_devices` instead." 
+ ) ): assert trainer.tpu_cores == 8 From d5671cafe872886acc6bea8a54d6e807a69c36b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 29 Jun 2022 21:09:35 +0200 Subject: [PATCH 23/89] Call `set_epoch` for distributed batch samplers (#13396) Co-authored-by: Jirka Co-authored-by: Rohit Gupta --- src/pytorch_lightning/CHANGELOG.md | 3 + .../loops/dataloader/evaluation_loop.py | 11 +--- .../loops/dataloader/prediction_loop.py | 10 +--- src/pytorch_lightning/loops/fit_loop.py | 9 +-- src/pytorch_lightning/loops/utilities.py | 14 +++++ src/pytorch_lightning/trainer/supporters.py | 7 ++- .../loops/test_evaluation_loop.py | 57 ++++++++++++++++--- tests/tests_pytorch/loops/test_utilities.py | 24 +++++++- 8 files changed, 103 insertions(+), 32 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 5e34039e77ed0..f73f31dac4551 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -295,6 +295,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Fixed `pytorch_lightning.utilities.distributed.gather_all_tensors` to handle tensors of different dimensions ([#12630](https://github.com/PyTorchLightning/pytorch-lightning/pull/12630)) +- The loops now call `.set_epoch()` also on batch samplers if the dataloader has one wrapped in a distributed sampler ([#13396](https://github.com/PyTorchLightning/pytorch-lightning/pull/13396)) + + - diff --git a/src/pytorch_lightning/loops/dataloader/evaluation_loop.py b/src/pytorch_lightning/loops/dataloader/evaluation_loop.py index 37406f092dd40..baf65d566d2dc 100644 --- a/src/pytorch_lightning/loops/dataloader/evaluation_loop.py +++ b/src/pytorch_lightning/loops/dataloader/evaluation_loop.py @@ -27,6 +27,7 @@ from pytorch_lightning.callbacks.progress.rich_progress import _RICH_AVAILABLE from pytorch_lightning.loops.dataloader import DataLoaderLoop from pytorch_lightning.loops.epoch import EvaluationEpochLoop +from pytorch_lightning.loops.utilities import _set_sampler_epoch from pytorch_lightning.trainer.connectors.logger_connector.result import _OUT_DICT, _ResultCollection from pytorch_lightning.trainer.states import TrainerFn from pytorch_lightning.utilities.apply_func import apply_to_collection @@ -161,14 +162,8 @@ def advance(self, *args: Any, **kwargs: Any) -> None: self._has_run = True def on_advance_start(self, *args: Any, **kwargs: Any) -> None: - dataloader = self.current_dataloader - if ( - dataloader is not None - and getattr(dataloader, "sampler", None) - and callable(getattr(dataloader.sampler, "set_epoch", None)) - ): - # set seed for distributed sampler (enables shuffling for each epoch) - dataloader.sampler.set_epoch(self.trainer.fit_loop.epoch_progress.current.processed) + if self.current_dataloader is not None: + _set_sampler_epoch(self.current_dataloader, self.trainer.fit_loop.epoch_progress.current.processed) super().on_advance_start(*args, **kwargs) diff --git a/src/pytorch_lightning/loops/dataloader/prediction_loop.py b/src/pytorch_lightning/loops/dataloader/prediction_loop.py index 4ff6543064a6e..ce9ec9008c2db 100644 --- a/src/pytorch_lightning/loops/dataloader/prediction_loop.py +++ b/src/pytorch_lightning/loops/dataloader/prediction_loop.py @@ -5,6 +5,7 @@ from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop from pytorch_lightning.loops.epoch.prediction_epoch_loop import PredictionEpochLoop +from pytorch_lightning.loops.utilities import _set_sampler_epoch from 
pytorch_lightning.strategies import DDPSpawnStrategy from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.types import _PREDICT_OUTPUT @@ -90,13 +91,8 @@ def advance(self, *args: Any, **kwargs: Any) -> None: """Predicts one entire dataloader.""" void(*args, **kwargs) dataloader = self.current_dataloader - if ( - dataloader is not None - and getattr(dataloader, "sampler", None) - and callable(getattr(dataloader.sampler, "set_epoch", None)) - ): - # set seed for distributed sampler (enables shuffling for each epoch) - dataloader.sampler.set_epoch(self.trainer.fit_loop.epoch_progress.current.processed) + if dataloader is not None: + _set_sampler_epoch(dataloader, self.trainer.fit_loop.epoch_progress.current.processed) dataloader = self.trainer.strategy.process_dataloader(dataloader) dataloader_iter = enumerate(dataloader) dl_max_batches = self.max_batches[self.current_dataloader_idx] diff --git a/src/pytorch_lightning/loops/fit_loop.py b/src/pytorch_lightning/loops/fit_loop.py index ac33390a97cec..0771a4a71de9f 100644 --- a/src/pytorch_lightning/loops/fit_loop.py +++ b/src/pytorch_lightning/loops/fit_loop.py @@ -21,7 +21,7 @@ from pytorch_lightning.loops import Loop from pytorch_lightning.loops.epoch import TrainingEpochLoop from pytorch_lightning.loops.epoch.training_epoch_loop import _OUTPUTS_TYPE as _EPOCH_OUTPUTS_TYPE -from pytorch_lightning.loops.utilities import _is_max_limit_reached +from pytorch_lightning.loops.utilities import _is_max_limit_reached, _set_sampler_epoch from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection from pytorch_lightning.trainer.progress import Progress from pytorch_lightning.trainer.supporters import TensorRunningAccum @@ -232,11 +232,8 @@ def on_advance_start(self) -> None: # type: ignore[override] # reset outputs here instead of in `reset` as they are not accumulated between epochs self._outputs = [] - if self.trainer.train_dataloader is not None and callable( - getattr(self.trainer.train_dataloader.sampler, "set_epoch", None) - ): - # set seed for distributed sampler (enables shuffling for each epoch) - self.trainer.train_dataloader.sampler.set_epoch(self.epoch_progress.current.processed) + if self.trainer.train_dataloader is not None: + _set_sampler_epoch(self.trainer.train_dataloader, self.epoch_progress.current.processed) # changing gradient according accumulation_scheduler self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module) diff --git a/src/pytorch_lightning/loops/utilities.py b/src/pytorch_lightning/loops/utilities.py index b5fefcd4b0011..02d9cc2c42552 100644 --- a/src/pytorch_lightning/loops/utilities.py +++ b/src/pytorch_lightning/loops/utilities.py @@ -22,6 +22,7 @@ import torch from torch import Tensor from torch.optim import Optimizer +from torch.utils.data import DataLoader import pytorch_lightning as pl from pytorch_lightning.loops import Loop @@ -220,3 +221,16 @@ def _reset_progress(loop: Loop) -> None: def _v1_8_output_format(fx: Callable) -> bool: parameters = inspect.signature(fx).parameters return "new_format" in parameters and parameters["new_format"].default is True + + +def _set_sampler_epoch(dataloader: DataLoader, epoch: int) -> None: + """Calls the ``set_epoch`` method on either the sampler or the batch sampler of the given dataloader. 
+ + Every PyTorch dataloader has either a sampler or a batch sampler, and if it is wrapped by a + :class:`~torch.utils.data.distributed.DistributedSampler`, ``set_epoch`` must be called at the beginning + of every epoch to ensure shuffling applies a new ordering. This has no effect if shuffling is off. + """ + for sampler_name in ("sampler", "batch_sampler"): + sampler = getattr(dataloader, sampler_name, None) + if sampler is not None and callable(getattr(sampler, "set_epoch", None)): + sampler.set_epoch(epoch) diff --git a/src/pytorch_lightning/trainer/supporters.py b/src/pytorch_lightning/trainer/supporters.py index b8f688892b318..6d3ec88b0be6a 100644 --- a/src/pytorch_lightning/trainer/supporters.py +++ b/src/pytorch_lightning/trainer/supporters.py @@ -438,9 +438,14 @@ class DataLoaderDict(dict): @property def sampler(self) -> Union[Iterable, Sequence, Mapping]: - """Return a collections of samplers extracting from loaders.""" + """Return a collections of samplers extracted from loaders.""" return apply_to_collection(self.loaders, (DataLoader, IterableDataset), getattr, "sampler", None) + @property + def batch_sampler(self) -> Union[Iterable, Sequence, Mapping]: + """Return a collections of batch samplers extracted from loaders.""" + return apply_to_collection(self.loaders, (DataLoader, IterableDataset), getattr, "batch_sampler", None) + def _wrap_loaders_max_size_cycle(self) -> Any: """Wraps all loaders to make sure they are cycled until the longest loader is exhausted. diff --git a/tests/tests_pytorch/loops/test_evaluation_loop.py b/tests/tests_pytorch/loops/test_evaluation_loop.py index 4ab898699f478..80e31a2781d1b 100644 --- a/tests/tests_pytorch/loops/test_evaluation_loop.py +++ b/tests/tests_pytorch/loops/test_evaluation_loop.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
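The `_set_sampler_epoch` helper added in this patch simply forwards `set_epoch` to whichever of `dataloader.sampler` / `dataloader.batch_sampler` defines it. A small single-process sketch of why that call matters; `num_replicas` and `rank` are passed explicitly here so no process group is needed, and the dataset size is arbitrary:

```python
import torch
from torch.utils.data import TensorDataset
from torch.utils.data.distributed import DistributedSampler

dataset = TensorDataset(torch.arange(8))
sampler = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True)

sampler.set_epoch(0)
order_epoch_0 = list(sampler)
sampler.set_epoch(1)
order_epoch_1 = list(sampler)

# Each epoch reseeds the shuffle, so rank 0 typically sees a different index order;
# without set_epoch, every epoch would replay the epoch-0 ordering.
print(order_epoch_0, order_epoch_1)
```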
from unittest import mock -from unittest.mock import Mock +from unittest.mock import call, Mock import torch from torch.utils.data.dataloader import DataLoader -from torch.utils.data.sampler import RandomSampler +from torch.utils.data.sampler import BatchSampler, RandomSampler from pytorch_lightning import Trainer from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset @@ -44,9 +44,8 @@ def test_on_evaluation_epoch_end(eval_epoch_end_mock, tmpdir): assert eval_epoch_end_mock.call_count == 4 -def test_set_epoch_called_eval_predict(tmpdir): - """Tests that set_epoch (if the sampler has one) is called on the DataLoader during evaluation and - prediction.""" +def test_evaluation_loop_sampler_set_epoch_called(tmpdir): + """Tests that set_epoch is called on the dataloader's sampler (if any) during training and validation.""" def _get_dataloader(): dataset = RandomDataset(32, 64) @@ -56,20 +55,60 @@ def _get_dataloader(): model = BoringModel() trainer = Trainer( - default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, max_epochs=2, enable_model_summary=False + default_root_dir=tmpdir, + limit_train_batches=1, + limit_val_batches=1, + max_epochs=2, + enable_model_summary=False, + enable_checkpointing=False, + logger=False, + ) + + train_dataloader = _get_dataloader() + val_dataloader = _get_dataloader() + trainer.fit(model, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader) + # One for each epoch + assert train_dataloader.sampler.set_epoch.call_args_list == [call(0), call(1)] + # One for each epoch + sanity check + assert val_dataloader.sampler.set_epoch.call_args_list == [call(0), call(0), call(1)] + + val_dataloader = _get_dataloader() + trainer.validate(model, val_dataloader) + assert val_dataloader.sampler.set_epoch.call_args_list == [call(2)] + + +def test_evaluation_loop_batch_sampler_set_epoch_called(tmpdir): + """Tests that set_epoch is called on the dataloader's batch sampler (if any) during training and validation.""" + + def _get_dataloader(): + dataset = RandomDataset(32, 64) + sampler = RandomSampler(dataset) + batch_sampler = BatchSampler(sampler, 2, True) + batch_sampler.set_epoch = Mock() + return DataLoader(dataset, batch_sampler=batch_sampler) + + model = BoringModel() + trainer = Trainer( + default_root_dir=tmpdir, + limit_train_batches=1, + limit_val_batches=1, + max_epochs=2, + enable_model_summary=False, + enable_checkpointing=False, + logger=False, ) train_dataloader = _get_dataloader() val_dataloader = _get_dataloader() trainer.fit(model, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader) # One for each epoch - assert train_dataloader.sampler.set_epoch.call_count == 2 + assert train_dataloader.batch_sampler.set_epoch.call_args_list == [call(0), call(1)] # One for each epoch + sanity check - assert val_dataloader.sampler.set_epoch.call_count == 3 + assert val_dataloader.batch_sampler.set_epoch.call_args_list == [call(0), call(0), call(1)] val_dataloader = _get_dataloader() trainer.validate(model, val_dataloader) - assert val_dataloader.sampler.set_epoch.call_count == 1 + assert val_dataloader.batch_sampler.set_epoch.call_args_list == [call(2)] @mock.patch( diff --git a/tests/tests_pytorch/loops/test_utilities.py b/tests/tests_pytorch/loops/test_utilities.py index c5d2e98d008b0..914c1de8e115b 100644 --- a/tests/tests_pytorch/loops/test_utilities.py +++ b/tests/tests_pytorch/loops/test_utilities.py @@ -11,10 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +from unittest.mock import Mock + import pytest import torch -from pytorch_lightning.loops.utilities import _extract_hiddens, _v1_8_output_format +from pytorch_lightning.loops.utilities import _extract_hiddens, _set_sampler_epoch, _v1_8_output_format from pytorch_lightning.utilities.exceptions import MisconfigurationException @@ -61,3 +63,23 @@ def training_epoch_end(outputs, new_format=True): ... assert _v1_8_output_format(training_epoch_end) + + +def test_set_sampler_epoch(): + # No samplers + dataloader = Mock() + dataloader.sampler = None + dataloader.batch_sampler = None + _set_sampler_epoch(dataloader, 55) + + # set_epoch not callable + dataloader = Mock() + dataloader.sampler.set_epoch = None + dataloader.batch_sampler.set_epoch = None + _set_sampler_epoch(dataloader, 55) + + # set_epoch callable + dataloader = Mock() + _set_sampler_epoch(dataloader, 55) + dataloader.sampler.set_epoch.assert_called_once_with(55) + dataloader.batch_sampler.set_epoch.assert_called_once_with(55) From 58b62df8e670ceef815aa831542c4585fe4bf09f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Jun 2022 19:55:47 +0000 Subject: [PATCH 24/89] Update comet-ml requirement from <=3.28.2,>=3.1.12 to >=3.1.12,<3.31.6 in /requirements (#13414) Update comet-ml requirement in /requirements Updates the requirements on [comet-ml](https://www.comet.ml) to permit the latest version. --- updated-dependencies: - dependency-name: comet-ml dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/pytorch/loggers.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/pytorch/loggers.txt b/requirements/pytorch/loggers.txt index 8c4482e396658..77a8ca158e272 100644 --- a/requirements/pytorch/loggers.txt +++ b/requirements/pytorch/loggers.txt @@ -1,7 +1,7 @@ # all supported loggers neptune-client>=0.10.0, <0.16.4 -comet-ml>=3.1.12, <=3.28.2 +comet-ml>=3.1.12, <3.31.6 mlflow>=1.0.0, <1.27.0 test_tube>=0.7.5, <=0.7.5 wandb>=0.8.21, <0.12.19 From 42c371d86a2b4652a4b5bbb92d8bcd634f1a0ef4 Mon Sep 17 00:00:00 2001 From: Rhys Goodall Date: Wed, 29 Jun 2022 15:34:25 -0700 Subject: [PATCH 25/89] Convert validation loop config warnings to `PossibleUserWarning` (#13377) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Jirka Co-authored-by: Adrian Wälchli --- src/pytorch_lightning/CHANGELOG.md | 3 +++ src/pytorch_lightning/trainer/configuration_validator.py | 6 +++++- tests/tests_pytorch/trainer/test_config_validator.py | 3 ++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index f73f31dac4551..b1bd49d410391 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
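This patch recategorizes the "no `val_dataloader`" warning as `PossibleUserWarning` (see the diff that follows). One practical effect, sketched here rather than taken from the patch itself, is that users who intentionally train without validation can silence just that category:

```python
import warnings

from pytorch_lightning.utilities.warnings import PossibleUserWarning

# Intentionally training without a val_dataloader: silence only this category,
# leaving genuine UserWarnings visible.
warnings.filterwarnings("ignore", category=PossibleUserWarning)
```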
### Added +- Converted validation loop config warnings to `PossibleUserWarning` ([#13377](https://github.com/PyTorchLightning/pytorch-lightning/pull/13377)) + + - Added a flag named `log_rank_zero_only` to `EarlyStopping` to disable logging to non-zero rank processes ([#13233](https://github.com/PyTorchLightning/pytorch-lightning/pull/13233)) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index f9820fe7d1d21..78ac390608649 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -19,6 +19,7 @@ from pytorch_lightning.utilities.model_helpers import is_overridden from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature +from pytorch_lightning.utilities.warnings import PossibleUserWarning def verify_loop_configurations(trainer: "pl.Trainer") -> None: @@ -117,7 +118,10 @@ def __verify_train_val_loop_configuration(trainer: "pl.Trainer", model: "pl.Ligh if has_val_loader and not has_val_step: rank_zero_warn("You passed in a `val_dataloader` but have no `validation_step`. Skipping val loop.") if has_val_step and not has_val_loader: - rank_zero_warn("You defined a `validation_step` but have no `val_dataloader`. Skipping val loop.") + rank_zero_warn( + "You defined a `validation_step` but have no `val_dataloader`. Skipping val loop.", + category=PossibleUserWarning, + ) def _check_on_post_move_to_device(model: "pl.LightningModule") -> None: diff --git a/tests/tests_pytorch/trainer/test_config_validator.py b/tests/tests_pytorch/trainer/test_config_validator.py index ab29b10f7cf2d..a2f24f3addc31 100644 --- a/tests/tests_pytorch/trainer/test_config_validator.py +++ b/tests/tests_pytorch/trainer/test_config_validator.py @@ -18,6 +18,7 @@ from pytorch_lightning.callbacks.callback import Callback from pytorch_lightning.demos.boring_classes import BoringDataModule, BoringModel, RandomDataset from pytorch_lightning.utilities.exceptions import MisconfigurationException +from pytorch_lightning.utilities.warnings import PossibleUserWarning def test_wrong_train_setting(tmpdir): @@ -59,7 +60,7 @@ def test_fit_val_loop_config(tmpdir): trainer.fit(model) # has val loop but no val data - with pytest.warns(UserWarning, match=r"You defined a `validation_step` but have no `val_dataloader`"): + with pytest.warns(PossibleUserWarning, match=r"You defined a `validation_step` but have no `val_dataloader`"): model = BoringModel() model.val_dataloader = None trainer.fit(model) From 657099d230afe697112a36e8bdf83458b897a44e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Thu, 30 Jun 2022 00:42:34 +0200 Subject: [PATCH 26/89] Fix validation when accelerator is a string (#13417) Co-authored-by: Rohit Gupta --- src/pytorch_lightning/CHANGELOG.md | 3 ++- .../connectors/accelerator_connector.py | 21 +++++++++++-------- .../test_accelerator_connector.py | 12 +---------- 3 files changed, 15 insertions(+), 21 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index b1bd49d410391..3a689a8cb5d21 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -301,7 +301,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- The loops now call `.set_epoch()` also on batch samplers if the dataloader has one wrapped in a distributed sampler ([#13396](https://github.com/PyTorchLightning/pytorch-lightning/pull/13396)) -- +- Fixed the input validation for the accelerator Trainer argument when passed as a string ([#13417](https://github.com/PyTorchLightning/pytorch-lightning/pull/13417)) + ## [1.6.4] - 2022-06-01 diff --git a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py b/src/pytorch_lightning/trainer/connectors/accelerator_connector.py index 2ec0e30a5b739..f1accaa29ef35 100644 --- a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/src/pytorch_lightning/trainer/connectors/accelerator_connector.py @@ -275,9 +275,18 @@ def _check_config_and_set_final_flags( " you can use `Trainer(strategy='ddp_spawn', accelerator='tpu')` instead." ) - if accelerator is not None: - if accelerator in self._accelerator_types or accelerator == "auto" or isinstance(accelerator, Accelerator): - self._accelerator_flag = accelerator + if ( + accelerator is not None + and accelerator not in self._accelerator_types + and accelerator != "auto" + and not isinstance(accelerator, Accelerator) + ): + raise ValueError( + f"You selected an invalid accelerator name: `accelerator={accelerator!r}`." + f" Available names are: {', '.join(self._accelerator_types)}." + ) + + self._accelerator_flag = accelerator if precision is not None: if str(precision) not in self._precision_types: @@ -496,12 +505,6 @@ def _set_parallel_devices_and_init_accelerator(self) -> None: self.accelerator: Accelerator = self._accelerator_flag else: assert self._accelerator_flag is not None - self._accelerator_flag = self._accelerator_flag.lower() - if self._accelerator_flag not in AcceleratorRegistry: - raise MisconfigurationException( - "When passing string value for the `accelerator` argument of `Trainer`," - f" it can only be one of {self._accelerator_types}." 
- ) self.accelerator = AcceleratorRegistry.get(self._accelerator_flag) if not self.accelerator.is_available(): diff --git a/tests/tests_pytorch/accelerators/test_accelerator_connector.py b/tests/tests_pytorch/accelerators/test_accelerator_connector.py index 098870d628905..100a4cc1d1c7a 100644 --- a/tests/tests_pytorch/accelerators/test_accelerator_connector.py +++ b/tests/tests_pytorch/accelerators/test_accelerator_connector.py @@ -54,12 +54,8 @@ def test_accelerator_choice_cpu(tmpdir): assert isinstance(trainer.strategy, SingleDeviceStrategy) -@pytest.mark.xfail(reason="Should be fixed by #12698") def test_accelerator_invalid_choice(): - with pytest.raises( - MisconfigurationException, - match="When passing string value for the `accelerator` argument of `Trainer`, it can only be one of", - ): + with pytest.raises(ValueError, match="You selected an invalid accelerator name: `accelerator='invalid'`"): Trainer(accelerator="invalid") @@ -326,12 +322,6 @@ def test_accelerator_auto_with_devices_gpu(): assert trainer.num_devices == 1 -def test_validate_accelerator_and_devices(): - trainer = Trainer(accelerator="ddp_cpu", devices=2) - assert isinstance(trainer.accelerator, CPUAccelerator) - assert trainer.num_devices == 2 - - def test_set_devices_if_none_cpu(): trainer = Trainer(accelerator="cpu", devices=3) assert trainer.num_devices == 3 From fd8afbe2d2dbceeed2701c4e89f0ca2e484acf1f Mon Sep 17 00:00:00 2001 From: Siyuan Li Date: Thu, 30 Jun 2022 08:55:06 +0800 Subject: [PATCH 27/89] Set timeout for DDPSpawnStrategy (#13383) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Jirka Co-authored-by: Carlos Mocholí --- src/pytorch_lightning/CHANGELOG.md | 3 +- src/pytorch_lightning/strategies/ddp_spawn.py | 12 +++++++- .../strategies/test_ddp_spawn_strategy.py | 28 +++++++++++++++++++ 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 3a689a8cb5d21..bd5f5baa258a4 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -82,7 +82,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for using custom Trainers that don't include callbacks using the CLI ([#13138](https://github.com/PyTorchLightning/pytorch-lightning/pull/13138)) -- Added a `timeout` argument to `DDPStrategy`. ([#13244](https://github.com/PyTorchLightning/pytorch-lightning/pull/13244)) +- Added a `timeout` argument to `DDPStrategy` and `DDPSpawnStrategy`. ([#13244](https://github.com/PyTorchLightning/pytorch-lightning/pull/13244), [#13383](https://github.com/Lightning-AI/lightning/pull/13383)) - Added `XLAEnvironment` cluster environment plugin ([#11330](https://github.com/PyTorchLightning/pytorch-lightning/pull/11330)) @@ -94,6 +94,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added Apple Silicon Support via `MPSAccelerator` ([#13123](https://github.com/PyTorchLightning/pytorch-lightning/pull/13123)) + ### Changed - Enable validation during overfitting ([#12527](https://github.com/PyTorchLightning/pytorch-lightning/pull/12527)) diff --git a/src/pytorch_lightning/strategies/ddp_spawn.py b/src/pytorch_lightning/strategies/ddp_spawn.py index f9d386ed3fa2b..e3c37aa2f2ff1 100644 --- a/src/pytorch_lightning/strategies/ddp_spawn.py +++ b/src/pytorch_lightning/strategies/ddp_spawn.py @@ -13,11 +13,13 @@ # limitations under the License. 
import logging import os +from datetime import timedelta from typing import Any, Dict, List, Optional, Union import torch import torch.distributed from torch import Tensor +from torch.distributed.constants import default_pg_timeout from torch.nn import Module from torch.nn.parallel.distributed import DistributedDataParallel @@ -68,6 +70,7 @@ def __init__( ddp_comm_hook: Optional[callable] = None, ddp_comm_wrapper: Optional[callable] = None, process_group_backend: Optional[str] = None, + timeout: Optional[timedelta] = default_pg_timeout, **kwargs: Any, ): super().__init__( @@ -84,6 +87,7 @@ def __init__( self._ddp_comm_wrapper = ddp_comm_wrapper self._local_rank = 0 self._process_group_backend: Optional[str] = process_group_backend + self._timeout: Optional[timedelta] = timeout @property def num_nodes(self) -> int: @@ -158,7 +162,13 @@ def _worker_setup(self, process_idx: int): self.set_world_ranks(process_idx) rank_zero_only.rank = self.global_rank self._process_group_backend = self._get_process_group_backend() - init_dist_connection(self.cluster_environment, self._process_group_backend, self.global_rank, self.world_size) + init_dist_connection( + self.cluster_environment, + self._process_group_backend, + self.global_rank, + self.world_size, + timeout=self._timeout, + ) def _get_process_group_backend(self) -> str: return ( diff --git a/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py b/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py index 073eef6069132..9a072368b0136 100644 --- a/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py +++ b/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py @@ -11,7 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
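With the `timeout` argument now threaded from `DDPSpawnStrategy` into `init_dist_connection` in the diff above, user code might configure it as in this sketch (the five-minute value and the two-GPU setup are arbitrary choices for illustration):

```python
from datetime import timedelta

from pytorch_lightning import Trainer
from pytorch_lightning.strategies import DDPSpawnStrategy

# Fail fast if a rank cannot rendezvous within five minutes instead of
# waiting for the default process-group timeout.
trainer = Trainer(
    accelerator="gpu",
    devices=2,
    strategy=DDPSpawnStrategy(timeout=timedelta(minutes=5)),
)
```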
+from datetime import timedelta from pathlib import Path +from unittest import mock from unittest.mock import Mock import pytest @@ -176,3 +178,29 @@ def test_ddp_spawn_transfer_weights(tmpdir, trainer_fn): strategy._launcher._recover_results_in_main_process(spawn_output, trainer) assert model.load_state_dict.call_count == int(spawn_output.weights_path is not None) assert not temp_file.exists() + + +@RunIf(min_cuda_gpus=1) +@mock.patch("torch.distributed.init_process_group") +def test_ddp_spawn_strategy_set_timeout(mock_init_process_group): + """Tests with ddp strategy.""" + test_timedelta = timedelta(seconds=30) + model = BoringModel() + ddp_spawn_strategy = DDPSpawnStrategy(timeout=test_timedelta) + trainer = Trainer( + max_epochs=1, + strategy=ddp_spawn_strategy, + ) + # test wrap the model if fitting + trainer.state.fn = TrainerFn.FITTING + trainer.strategy.connect(model) + trainer.lightning_module.trainer = trainer + trainer.strategy.setup_environment() + trainer.strategy._worker_setup(0) + + process_group_backend = trainer.strategy._get_process_group_backend() + global_rank = trainer.strategy.cluster_environment.global_rank() + world_size = trainer.strategy.cluster_environment.world_size() + mock_init_process_group.assert_called_with( + process_group_backend, rank=global_rank, world_size=world_size, timeout=test_timedelta + ) From 6149abb004b33e0dfd94af7c281f3b3ba6ee7eb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wang=20Ran=20=28=E6=B1=AA=E7=84=B6=29?= Date: Thu, 30 Jun 2022 09:57:03 +0800 Subject: [PATCH 28/89] Remove unused docstring parameter `device` (#13448) --- src/pytorch_lightning/utilities/distributed.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/pytorch_lightning/utilities/distributed.py b/src/pytorch_lightning/utilities/distributed.py index ed78559102ac4..9bc6389ae6525 100644 --- a/src/pytorch_lightning/utilities/distributed.py +++ b/src/pytorch_lightning/utilities/distributed.py @@ -393,7 +393,6 @@ def _collect_states_on_rank_zero(state: Dict[str, Any]) -> Dict[int, Any]: Args: state: Dictionary containing the state of the current process - device: Current process device. 
Returns: states: On global rank 0, a dictionary where the primary keys are From 7fa962dec7032eefd9474f626a8a18d46002d4f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Jun 2022 22:02:07 -0400 Subject: [PATCH 29/89] Update wandb requirement from <0.12.19,>=0.8.21 to >=0.8.21,<0.12.20 in /requirements (#13415) Co-authored-by: Jirka --- requirements/pytorch/loggers.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/pytorch/loggers.txt b/requirements/pytorch/loggers.txt index 77a8ca158e272..2abcb4b2df31f 100644 --- a/requirements/pytorch/loggers.txt +++ b/requirements/pytorch/loggers.txt @@ -4,4 +4,4 @@ neptune-client>=0.10.0, <0.16.4 comet-ml>=3.1.12, <3.31.6 mlflow>=1.0.0, <1.27.0 test_tube>=0.7.5, <=0.7.5 -wandb>=0.8.21, <0.12.19 +wandb>=0.8.21, <0.12.20 From fc91c72e9729ada51f7a68ac37baf07ac2503912 Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Thu, 30 Jun 2022 07:33:43 +0530 Subject: [PATCH 30/89] Add flash[image] dependency in Active learning example (#13442) --- docs/source-pytorch/extensions/loops.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source-pytorch/extensions/loops.rst b/docs/source-pytorch/extensions/loops.rst index 160b000f2c9e6..f92e7efab7dc2 100644 --- a/docs/source-pytorch/extensions/loops.rst +++ b/docs/source-pytorch/extensions/loops.rst @@ -378,7 +378,7 @@ To run the following demo, install Flash and `BaaL Date: Thu, 30 Jun 2022 11:27:06 +0900 Subject: [PATCH 31/89] fixed doc of timer (#13393) * fix doc of timer --- src/pytorch_lightning/callbacks/timer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pytorch_lightning/callbacks/timer.py b/src/pytorch_lightning/callbacks/timer.py index b047dabfbbdd6..ca9a2c9861faa 100644 --- a/src/pytorch_lightning/callbacks/timer.py +++ b/src/pytorch_lightning/callbacks/timer.py @@ -51,6 +51,7 @@ class Timer(Callback): If ``interval`` is not one of the supported choices. 
Example:: + from pytorch_lightning import Trainer from pytorch_lightning.callbacks import Timer From 1e245a935fefde9cf6c3afcf49bc835fa0d2d208 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Thu, 30 Jun 2022 07:39:25 +0200 Subject: [PATCH 32/89] Simplify list extension (#13435) --- src/pytorch_lightning/utilities/cli.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/pytorch_lightning/utilities/cli.py b/src/pytorch_lightning/utilities/cli.py index 54e92d55491af..f9d3375a6c6d8 100644 --- a/src/pytorch_lightning/utilities/cli.py +++ b/src/pytorch_lightning/utilities/cli.py @@ -614,10 +614,8 @@ def _instantiate_trainer(self, config: Dict[str, Any], callbacks: List[Callback] config[key] = [config[key]] config[key].extend(callbacks) if key in self.trainer_defaults: - if isinstance(self.trainer_defaults[key], list): - config[key].extend(self.trainer_defaults[key]) - else: - config[key].append(self.trainer_defaults[key]) + value = self.trainer_defaults[key] + config[key] += value if isinstance(value, list) else [value] if self.save_config_callback and not config.get("fast_dev_run", False): config_callback = self.save_config_callback( self._parser(self.subcommand), From 284d95cddc0cfc03e454fdbf80f279c7687efd87 Mon Sep 17 00:00:00 2001 From: otaj <6065855+otaj@users.noreply.github.com> Date: Thu, 30 Jun 2022 12:07:46 +0200 Subject: [PATCH 33/89] Add BaseModelCheckpoint class to inherit from (#13024) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Adrian Wälchli Co-authored-by: ananthsub Co-authored-by: Jirka Co-authored-by: Carlos Mocholí --- .gitignore | 2 +- .../common/checkpointing_expert.rst | 9 ++++- src/pytorch_lightning/CHANGELOG.md | 3 ++ src/pytorch_lightning/callbacks/__init__.py | 2 + src/pytorch_lightning/callbacks/checkpoint.py | 9 +++++ .../callbacks/fault_tolerance.py | 4 +- .../callbacks/model_checkpoint.py | 4 +- src/pytorch_lightning/loggers/logger.py | 6 +-- src/pytorch_lightning/loggers/neptune.py | 34 +++++++++------- src/pytorch_lightning/loggers/wandb.py | 40 ++++++++++++------- .../strategies/launchers/spawn.py | 8 +++- .../strategies/launchers/xla_spawn.py | 6 ++- .../trainer/connectors/callback_connector.py | 9 +++-- src/pytorch_lightning/trainer/trainer.py | 16 ++++---- 14 files changed, 99 insertions(+), 53 deletions(-) create mode 100644 src/pytorch_lightning/callbacks/checkpoint.py diff --git a/.gitignore b/.gitignore index eb56709276b25..47b9bfff92523 100644 --- a/.gitignore +++ b/.gitignore @@ -136,7 +136,7 @@ ENV/ Datasets/ mnist/ MNIST/ -legacy/checkpoints/ +tests/legacy/checkpoints/ *.gz *ubyte diff --git a/docs/source-pytorch/common/checkpointing_expert.rst b/docs/source-pytorch/common/checkpointing_expert.rst index c1859d60ecf52..c4a948a34cb9d 100644 --- a/docs/source-pytorch/common/checkpointing_expert.rst +++ b/docs/source-pytorch/common/checkpointing_expert.rst @@ -6,7 +6,12 @@ Checkpointing (expert) ###################### -TODO: I don't understand this... +********************************* +Writing your own Checkpoint class +********************************* + +We provide ``Checkpoint`` class, for easier subclassing. Users may want to subclass this class in case of writing custom ``ModelCheckpoint`` callback, so that the ``Trainer`` recognizes the custom class as a checkpointing callback. 
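As an illustration of the subclassing the new docs paragraph describes, a minimal custom checkpointing callback might look like the following sketch; the hook choice, argument names, and filename scheme are illustrative only and not prescribed by this patch:

```python
from pytorch_lightning.callbacks import Checkpoint


class PeriodicCheckpoint(Checkpoint):
    """Toy checkpointing callback that saves the full trainer state every N epochs."""

    def __init__(self, every_n_epochs: int = 1, dirpath: str = "checkpoints"):
        self.every_n_epochs = every_n_epochs
        self.dirpath = dirpath

    def on_train_epoch_end(self, trainer, pl_module):
        if (trainer.current_epoch + 1) % self.every_n_epochs == 0:
            trainer.save_checkpoint(f"{self.dirpath}/epoch={trainer.current_epoch}.ckpt")


# Usage: Trainer(callbacks=[PeriodicCheckpoint(every_n_epochs=2)])
```

Because it derives from ``Checkpoint`` rather than plain ``Callback``, the ``Trainer`` treats it as a checkpointing callback, which is the purpose of the base class introduced in this patch.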
+ *********************** Customize Checkpointing @@ -23,6 +28,8 @@ and :meth:`~pytorch_lightning.core.hooks.CheckpointHooks.on_load_checkpoint` met what's saved in the checkpoint. +TODO: I don't understand this... + ****************************** Built-in Checkpoint IO Plugins ****************************** diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index bd5f5baa258a4..e389fee222262 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -73,6 +73,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added breaking of lazy graph across training, validation, test and predict steps when training with habana accelerators to ensure better performance ([#12938](https://github.com/PyTorchLightning/pytorch-lightning/pull/12938)) +- Added `Checkpoint` class to inherit from ([#13024](https://github.com/PyTorchLightning/pytorch-lightning/pull/13024)) + + - Added CPU metric tracking to `DeviceStatsMonitor` ([#11795](https://github.com/PyTorchLightning/pytorch-lightning/pull/11795)) diff --git a/src/pytorch_lightning/callbacks/__init__.py b/src/pytorch_lightning/callbacks/__init__.py index 6e37b84ce204a..b3d2035f33496 100644 --- a/src/pytorch_lightning/callbacks/__init__.py +++ b/src/pytorch_lightning/callbacks/__init__.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from pytorch_lightning.callbacks.callback import Callback +from pytorch_lightning.callbacks.checkpoint import Checkpoint from pytorch_lightning.callbacks.device_stats_monitor import DeviceStatsMonitor from pytorch_lightning.callbacks.early_stopping import EarlyStopping from pytorch_lightning.callbacks.finetuning import BackboneFinetuning, BaseFinetuning @@ -32,6 +33,7 @@ "BackboneFinetuning", "BaseFinetuning", "Callback", + "Checkpoint", "DeviceStatsMonitor", "EarlyStopping", "GradientAccumulationScheduler", diff --git a/src/pytorch_lightning/callbacks/checkpoint.py b/src/pytorch_lightning/callbacks/checkpoint.py new file mode 100644 index 0000000000000..405f29876c6fc --- /dev/null +++ b/src/pytorch_lightning/callbacks/checkpoint.py @@ -0,0 +1,9 @@ +from pytorch_lightning.callbacks.callback import Callback + + +class Checkpoint(Callback): + r""" + This is the base class for model checkpointing. Expert users may want to subclass it in case of writing + custom :class:`~pytorch_lightning.callbacksCheckpoint` callback, so that + the trainer recognizes the custom class as a checkpointing callback. 
+ """ diff --git a/src/pytorch_lightning/callbacks/fault_tolerance.py b/src/pytorch_lightning/callbacks/fault_tolerance.py index 59b8d31f46506..9d04fc86b62ce 100644 --- a/src/pytorch_lightning/callbacks/fault_tolerance.py +++ b/src/pytorch_lightning/callbacks/fault_tolerance.py @@ -21,11 +21,11 @@ from typing import Any import pytorch_lightning as pl -from pytorch_lightning import Callback +from pytorch_lightning.callbacks import Checkpoint from pytorch_lightning.utilities.types import _PATH -class _FaultToleranceCheckpoint(Callback): +class _FaultToleranceCheckpoint(Checkpoint): """Used to save a fault-tolerance checkpoint on exception.""" FILE_EXTENSION = ".ckpt" diff --git a/src/pytorch_lightning/callbacks/model_checkpoint.py b/src/pytorch_lightning/callbacks/model_checkpoint.py index 8522bb49b7292..bb6d0a9a9b0b6 100644 --- a/src/pytorch_lightning/callbacks/model_checkpoint.py +++ b/src/pytorch_lightning/callbacks/model_checkpoint.py @@ -34,7 +34,7 @@ from torch import Tensor import pytorch_lightning as pl -from pytorch_lightning.callbacks.callback import Callback +from pytorch_lightning.callbacks import Checkpoint from pytorch_lightning.utilities.cloud_io import get_filesystem from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.logger import _name, _version @@ -46,7 +46,7 @@ warning_cache = WarningCache() -class ModelCheckpoint(Callback): +class ModelCheckpoint(Checkpoint): r""" Save the model periodically by monitoring a quantity. Every metric logged with :meth:`~pytorch_lightning.core.module.log` or :meth:`~pytorch_lightning.core.module.log_dict` in diff --git a/src/pytorch_lightning/loggers/logger.py b/src/pytorch_lightning/loggers/logger.py index c1eecb93fc8bf..d532aae413650 100644 --- a/src/pytorch_lightning/loggers/logger.py +++ b/src/pytorch_lightning/loggers/logger.py @@ -25,7 +25,7 @@ import numpy as np import pytorch_lightning as pl -from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint +from pytorch_lightning.callbacks import Checkpoint from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_only @@ -86,7 +86,7 @@ def __init__( else: self._agg_default_func = np.mean - def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None: + def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[Checkpoint]") -> None: """Called after model checkpoint callback saves a new checkpoint. 
Args: @@ -221,7 +221,7 @@ def __init__(self, logger_iterable: Iterable[Logger]): def __getitem__(self, index: int) -> Logger: return list(self._logger_iterable)[index] - def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None: + def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[Checkpoint]") -> None: for logger in self._logger_iterable: logger.after_save_checkpoint(checkpoint_callback) diff --git a/src/pytorch_lightning/loggers/neptune.py b/src/pytorch_lightning/loggers/neptune.py index 4d2f6897a21aa..44ae3f0f5bfdc 100644 --- a/src/pytorch_lightning/loggers/neptune.py +++ b/src/pytorch_lightning/loggers/neptune.py @@ -31,7 +31,7 @@ from torch import Tensor from pytorch_lightning import __version__ -from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint +from pytorch_lightning.callbacks import Checkpoint from pytorch_lightning.loggers.logger import Logger, rank_zero_experiment from pytorch_lightning.utilities.imports import _NEPTUNE_AVAILABLE, _NEPTUNE_GREATER_EQUAL_0_9 from pytorch_lightning.utilities.logger import _add_prefix, _convert_params, _sanitize_callable_params @@ -534,7 +534,7 @@ def log_model_summary(self, model, max_depth=-1): ) @rank_zero_only - def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None: + def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[Checkpoint]") -> None: """Automatically log checkpointed model. Called after model checkpoint callback saves a new checkpoint. Args: @@ -547,19 +547,20 @@ def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpo checkpoints_namespace = self._construct_path_with_prefix("model/checkpoints") # save last model - if checkpoint_callback.last_model_path: + if hasattr(checkpoint_callback, "last_model_path") and checkpoint_callback.last_model_path: model_last_name = self._get_full_model_name(checkpoint_callback.last_model_path, checkpoint_callback) file_names.add(model_last_name) self.run[f"{checkpoints_namespace}/{model_last_name}"].upload(checkpoint_callback.last_model_path) # save best k models - for key in checkpoint_callback.best_k_models.keys(): - model_name = self._get_full_model_name(key, checkpoint_callback) - file_names.add(model_name) - self.run[f"{checkpoints_namespace}/{model_name}"].upload(key) + if hasattr(checkpoint_callback, "best_k_models"): + for key in checkpoint_callback.best_k_models.keys(): + model_name = self._get_full_model_name(key, checkpoint_callback) + file_names.add(model_name) + self.run[f"{checkpoints_namespace}/{model_name}"].upload(key) # log best model path and checkpoint - if checkpoint_callback.best_model_path: + if hasattr(checkpoint_callback, "best_model_path") and checkpoint_callback.best_model_path: self.run[self._construct_path_with_prefix("model/best_model_path")] = checkpoint_callback.best_model_path model_name = self._get_full_model_name(checkpoint_callback.best_model_path, checkpoint_callback) @@ -575,19 +576,22 @@ def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpo del self.run[f"{checkpoints_namespace}/{file_to_drop}"] # log best model score - if checkpoint_callback.best_model_score: + if hasattr(checkpoint_callback, "best_model_score") and checkpoint_callback.best_model_score: self.run[self._construct_path_with_prefix("model/best_model_score")] = ( checkpoint_callback.best_model_score.cpu().detach().numpy() ) @staticmethod - def _get_full_model_name(model_path: str, checkpoint_callback: 
"ReferenceType[ModelCheckpoint]") -> str: + def _get_full_model_name(model_path: str, checkpoint_callback: "ReferenceType[Checkpoint]") -> str: """Returns model name which is string `model_path` appended to `checkpoint_callback.dirpath`.""" - expected_model_path = f"{checkpoint_callback.dirpath}{os.path.sep}" - if not model_path.startswith(expected_model_path): - raise ValueError(f"{model_path} was expected to start with {expected_model_path}.") - # Remove extension from filepath - filepath, _ = os.path.splitext(model_path[len(expected_model_path) :]) + if hasattr(checkpoint_callback, "dirpath"): + expected_model_path = f"{checkpoint_callback.dirpath}{os.path.sep}" + if not model_path.startswith(expected_model_path): + raise ValueError(f"{model_path} was expected to start with {expected_model_path}.") + # Remove extension from filepath + filepath, _ = os.path.splitext(model_path[len(expected_model_path) :]) + else: + filepath = model_path return filepath diff --git a/src/pytorch_lightning/loggers/wandb.py b/src/pytorch_lightning/loggers/wandb.py index 53103dfdfd154..88439cd9435db 100644 --- a/src/pytorch_lightning/loggers/wandb.py +++ b/src/pytorch_lightning/loggers/wandb.py @@ -23,7 +23,7 @@ import torch.nn as nn -from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint +from pytorch_lightning.callbacks import Checkpoint from pytorch_lightning.loggers.logger import Logger, rank_zero_experiment from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.imports import _WANDB_GREATER_EQUAL_0_10_22, _WANDB_GREATER_EQUAL_0_12_10 @@ -461,9 +461,14 @@ def version(self) -> Optional[str]: # don't create an experiment if we don't have one return self._experiment.id if self._experiment else self._id - def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None: + def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[Checkpoint]") -> None: # log checkpoints as artifacts - if self._log_model == "all" or self._log_model is True and checkpoint_callback.save_top_k == -1: + if ( + self._log_model == "all" + or self._log_model is True + and hasattr(checkpoint_callback, "save_top_k") + and checkpoint_callback.save_top_k == -1 + ): self._scan_and_log_checkpoints(checkpoint_callback) elif self._log_model is True: self._checkpoint_callback = checkpoint_callback @@ -474,25 +479,33 @@ def finalize(self, status: str) -> None: if self._checkpoint_callback: self._scan_and_log_checkpoints(self._checkpoint_callback) - def _scan_and_log_checkpoints(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None: + def _scan_and_log_checkpoints(self, checkpoint_callback: "ReferenceType[Checkpoint]") -> None: # get checkpoints to be saved with associated score - checkpoints = { - checkpoint_callback.last_model_path: checkpoint_callback.current_score, - checkpoint_callback.best_model_path: checkpoint_callback.best_model_score, - **checkpoint_callback.best_k_models, - } - checkpoints = sorted((Path(p).stat().st_mtime, p, s) for p, s in checkpoints.items() if Path(p).is_file()) + checkpoints = dict() + if hasattr(checkpoint_callback, "last_model_path") and hasattr(checkpoint_callback, "current_score"): + checkpoints[checkpoint_callback.last_model_path] = (checkpoint_callback.current_score, "latest") + + if hasattr(checkpoint_callback, "best_model_path") and hasattr(checkpoint_callback, "best_model_score"): + checkpoints[checkpoint_callback.best_model_path] = (checkpoint_callback.best_model_score, "best") 
+ + if hasattr(checkpoint_callback, "best_k_models"): + for key, value in checkpoint_callback.best_k_models.items(): + checkpoints[key] = (value, "best_k") + + checkpoints = sorted( + (Path(p).stat().st_mtime, p, s, tag) for p, (s, tag) in checkpoints.items() if Path(p).is_file() + ) checkpoints = [ c for c in checkpoints if c[1] not in self._logged_model_time.keys() or self._logged_model_time[c[1]] < c[0] ] # log iteratively all new checkpoints - for t, p, s in checkpoints: + for t, p, s, tag in checkpoints: metadata = ( { "score": s, "original_filename": Path(p).name, - "ModelCheckpoint": { + checkpoint_callback.__class__.__name__: { k: getattr(checkpoint_callback, k) for k in [ "monitor", @@ -511,7 +524,6 @@ def _scan_and_log_checkpoints(self, checkpoint_callback: "ReferenceType[ModelChe ) artifact = wandb.Artifact(name=f"model-{self.experiment.id}", type="model", metadata=metadata) artifact.add_file(p, name="model.ckpt") - aliases = ["latest", "best"] if p == checkpoint_callback.best_model_path else ["latest"] - self.experiment.log_artifact(artifact, aliases=aliases) + self.experiment.log_artifact(artifact, aliases=[tag]) # remember logged models - timestamp needed in case filename didn't change (lastkckpt or custom name) self._logged_model_time[p] = t diff --git a/src/pytorch_lightning/strategies/launchers/spawn.py b/src/pytorch_lightning/strategies/launchers/spawn.py index 6af2688e47419..d94909b778a83 100644 --- a/src/pytorch_lightning/strategies/launchers/spawn.py +++ b/src/pytorch_lightning/strategies/launchers/spawn.py @@ -109,7 +109,7 @@ def _wrapping_function( def _recover_results_in_main_process(self, spawn_output: "_SpawnOutput", trainer: "pl.Trainer") -> None: # transfer back the best path to the trainer - if trainer.checkpoint_callback: + if trainer.checkpoint_callback and hasattr(trainer.checkpoint_callback, "best_model_path"): trainer.checkpoint_callback.best_model_path = str(spawn_output.best_model_path) # TODO: pass also best score @@ -131,7 +131,11 @@ def _recover_results_in_main_process(self, spawn_output: "_SpawnOutput", trainer def _collect_rank_zero_results(self, trainer: "pl.Trainer", results: Any) -> Optional["_SpawnOutput"]: rank_zero_debug("Finalizing the DDP spawn environment.") checkpoint_callback = trainer.checkpoint_callback - best_model_path = checkpoint_callback.best_model_path if checkpoint_callback else None + best_model_path = ( + checkpoint_callback.best_model_path + if checkpoint_callback and hasattr(checkpoint_callback, "best_model_path") + else None + ) # requires to compute the state_dict on all processes in case Metrics are present state_dict = trainer.lightning_module.state_dict() diff --git a/src/pytorch_lightning/strategies/launchers/xla_spawn.py b/src/pytorch_lightning/strategies/launchers/xla_spawn.py index b3e1bf3465203..13c948577ca5b 100644 --- a/src/pytorch_lightning/strategies/launchers/xla_spawn.py +++ b/src/pytorch_lightning/strategies/launchers/xla_spawn.py @@ -115,7 +115,11 @@ def _wrapping_function( def _collect_rank_zero_results(self, trainer: "pl.Trainer", results: Any) -> Optional["_SpawnOutput"]: rank_zero_debug("Finalizing the TPU spawn environment.") checkpoint_callback = trainer.checkpoint_callback - best_model_path = checkpoint_callback.best_model_path if checkpoint_callback else None + best_model_path = ( + checkpoint_callback.best_model_path + if checkpoint_callback and hasattr(checkpoint_callback, "best_model_path") + else None + ) # requires to compute the state_dict on all processes in case Metrics are present state_dict 
= trainer.lightning_module.state_dict() diff --git a/src/pytorch_lightning/trainer/connectors/callback_connector.py b/src/pytorch_lightning/trainer/connectors/callback_connector.py index eddc2e2a84716..83881905beeb1 100644 --- a/src/pytorch_lightning/trainer/connectors/callback_connector.py +++ b/src/pytorch_lightning/trainer/connectors/callback_connector.py @@ -19,6 +19,7 @@ from pytorch_lightning.callbacks import ( Callback, + Checkpoint, GradientAccumulationScheduler, ModelCheckpoint, ModelSummary, @@ -232,18 +233,18 @@ def _attach_model_callbacks(self) -> None: @staticmethod def _reorder_callbacks(callbacks: List[Callback]) -> List[Callback]: - """Moves all ModelCheckpoint callbacks to the end of the list. The sequential order within the group of + """Moves all Checkpoint callbacks to the end of the list. The sequential order within the group of checkpoint callbacks is preserved, as well as the order of all other callbacks. Args: callbacks: A list of callbacks. Return: - A new list in which the last elements are ModelCheckpoints if there were any present in the + A new list in which the last elements are Checkpoint if there were any present in the input. """ - checkpoints = [c for c in callbacks if isinstance(c, ModelCheckpoint)] - not_checkpoints = [c for c in callbacks if not isinstance(c, ModelCheckpoint)] + checkpoints = [c for c in callbacks if isinstance(c, Checkpoint)] + not_checkpoints = [c for c in callbacks if not isinstance(c, Checkpoint)] return not_checkpoints + checkpoints diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index e823ff7e08eb0..7201ef53501c0 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -44,7 +44,7 @@ MPSAccelerator, TPUAccelerator, ) -from pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint, ProgressBarBase +from pytorch_lightning.callbacks import Callback, Checkpoint, EarlyStopping, ProgressBarBase from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter from pytorch_lightning.core.datamodule import LightningDataModule from pytorch_lightning.core.optimizer import LightningOptimizer @@ -1406,7 +1406,7 @@ def __set_ckpt_path(self, ckpt_path: Optional[str], model_provided: bool, model_ f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured.' ) - if not self.checkpoint_callback.best_model_path: + if hasattr(self.checkpoint_callback, "best_model_path") and not self.checkpoint_callback.best_model_path: if self.fast_dev_run: raise MisconfigurationException( f'You cannot execute `.{fn}(ckpt_path="best")` with `fast_dev_run=True`.' @@ -1416,11 +1416,11 @@ def __set_ckpt_path(self, ckpt_path: Optional[str], model_provided: bool, model_ f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured to save the best model.' 
) # load best weights - ckpt_path = self.checkpoint_callback.best_model_path + ckpt_path = getattr(self.checkpoint_callback, "best_model_path", None) if ckpt_path == "last": - candidates = [ft.ckpt_path for ft in ft_checkpoints] + [ - cb.last_model_path for cb in self.checkpoint_callbacks + candidates = [getattr(ft, "ckpt_path", None) for ft in ft_checkpoints] + [ + getattr(cb, "last_model_path", None) for cb in self.checkpoint_callbacks ] candidates_fs = {path: get_filesystem(path) for path in candidates if path} candidates_ts = {path: fs.modified(path) for path, fs in candidates_fs.items() if fs.exists(path)} @@ -2308,17 +2308,17 @@ def prediction_writer_callbacks(self) -> List[BasePredictionWriter]: return [cb for cb in self.callbacks if isinstance(cb, BasePredictionWriter)] @property - def checkpoint_callback(self) -> Optional[ModelCheckpoint]: + def checkpoint_callback(self) -> Optional[Checkpoint]: """The first :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callback in the Trainer.callbacks list, or ``None`` if it doesn't exist.""" callbacks = self.checkpoint_callbacks return callbacks[0] if len(callbacks) > 0 else None @property - def checkpoint_callbacks(self) -> List[ModelCheckpoint]: + def checkpoint_callbacks(self) -> List[Checkpoint]: """A list of all instances of :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` found in the Trainer.callbacks list.""" - return [c for c in self.callbacks if isinstance(c, ModelCheckpoint)] + return [c for c in self.callbacks if isinstance(c, Checkpoint)] @property def progress_bar_callback(self) -> Optional[ProgressBarBase]: From f9a305573d942ab04badc40a5bc26ce108b10628 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Thu, 30 Jun 2022 15:02:57 +0200 Subject: [PATCH 34/89] CI: abstract and make full pkg check (#13460) * cut actions * sequence * ver --- .github/actions/pkg-check/action.yml | 35 ++++++++ .github/actions/pkg-install/action.yml | 33 +++++++ .github/workflows/ci_pkg-install.yml | 120 ++++++++++++++----------- .github/workflows/ci_schema.yml | 4 +- src/lightning/__setup__.py | 2 +- 5 files changed, 140 insertions(+), 54 deletions(-) create mode 100644 .github/actions/pkg-check/action.yml create mode 100644 .github/actions/pkg-install/action.yml diff --git a/.github/actions/pkg-check/action.yml b/.github/actions/pkg-check/action.yml new file mode 100644 index 0000000000000..dc6031a1b769c --- /dev/null +++ b/.github/actions/pkg-check/action.yml @@ -0,0 +1,35 @@ +name: Create and check package +description: building, checking the package + +inputs: + pkg-name: + description: package name inside lightning.* + required: true + +runs: + using: "composite" + steps: + + - name: install dev. 
env + run: pip install "twine==4.0.1" setuptools wheel flake8 + shell: bash + + - name: Create package + env: + PACKAGE_NAME: ${{ inputs.pkg-name }} + run: | + python setup.py check --metadata --strict + flake8 src/lightning/ --ignore E402,F401,E501,W391,E303 + python setup.py sdist bdist_wheel + shell: bash + + - name: Check package + run: | + ls -l dist/ + twine check dist/* + # python setup.py clean + shell: bash + + - name: copy/export pkg + run: cp dist/* pypi/ + shell: bash diff --git a/.github/actions/pkg-install/action.yml b/.github/actions/pkg-install/action.yml new file mode 100644 index 0000000000000..b5253cd1779c2 --- /dev/null +++ b/.github/actions/pkg-install/action.yml @@ -0,0 +1,33 @@ +name: Install and check package +description: installing and validationg the package + +inputs: + pkg-name: + description: package name for import + required: true + pip-flags: + description: additional pil install flags + required: false + default: "" + +runs: + using: "composite" + steps: + + - name: Install | Uninstall package - archive + working-directory: ./dist + run: | + pip install *.tar.gz ${{ inputs.pip-flags }} + pip list | grep lightning + python -c "import ${{ inputs.pkg-name }} ; print(${{ inputs.pkg-name }}.__version__)" + pip uninstall -y ${{ inputs.pkg-name }} + shell: bash + + - name: Install | Uninstall package - wheel + working-directory: ./dist + run: | + pip install *.whl ${{ inputs.pip-flags }} + pip list | grep lightning + python -c "import ${{ inputs.pkg-name }} ; print(${{ inputs.pkg-name }}.__version__)" + pip uninstall -y ${{ inputs.pkg-name }} + shell: bash diff --git a/.github/workflows/ci_pkg-install.yml b/.github/workflows/ci_pkg-install.yml index a5c7ab5bd5a70..aaf1d5aefe2f9 100644 --- a/.github/workflows/ci_pkg-install.yml +++ b/.github/workflows/ci_pkg-install.yml @@ -19,17 +19,24 @@ defaults: jobs: - install: + init-temp: + runs-on: ubuntu-20.04 + steps: + - run: mkdir pypi && touch pypi/.placeholder + - uses: actions/upload-artifact@v3 + with: + name: ci-packages-${{ github.sha }} + path: pypi + + install-standalone: + needs: init-temp runs-on: ${{ matrix.os }} strategy: - fail-fast: false - # max-parallel: 6 + fail-fast: true + max-parallel: 1 matrix: os: [ubuntu-20.04, macOS-10.15, windows-2019] - pkg: ["", "app", "pytorch"] # todo: add following use-case "lightning" after resowing pulling sources - # in fact, lest make this se depending jobs, at fist build app and pytorch, and save these packages as artefact - # then, in the following job pull artefacts and install these packages and build and test lightning - # todo: in such case abstract this pkg testing as actions to reduce variation in testing + pkg: ["app", "pytorch"] python-version: [3.7] # , 3.9 steps: @@ -38,51 +45,60 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Prepare env - run: | - pip install "twine==4.0.1" setuptools wheel flake8 - - - name: Create package - env: - PACKAGE_NAME: ${{ matrix.pkg }} - run: | - python setup.py check --metadata --strict - flake8 src/lightning/ --ignore E402,F401,E501,W391,E303 - python setup.py sdist bdist_wheel - - - name: Check package - #env: - # PACKAGE_NAME: ${{ matrix.pkg }} - run: | - ls -l dist/ - twine check dist/* - python setup.py clean - - # ToDo - #- name: Setup Windows - # if: runner.os == 'windows' - # run: | - # # this is just a hotfix because of Win cannot install it directly - # pip install -r requirements.txt --find-links https://download.pytorch.org/whl/cpu/torch_stable.html - - - name: Package name + - uses: 
actions/download-artifact@v3 + with: + name: ci-packages-${{ github.sha }} + path: pypi + - run: ls -lh pypi/ + + - uses: ./.github/actions/pkg-check + with: + pkg-name: ${{ matrix.pkg }} + + - uses: actions/upload-artifact@v3 + with: + name: ci-packages-${{ github.sha }} + path: pypi + + - name: Determine package name + if: ${{ inputs.pkg-import == '' }} working-directory: ./dist run: python -c "import glob ; ls = glob.glob('*.tar.gz') ; name = '_'.join(ls[0].split('-')[:-1]) ; print(f'PKG_NAME={name}')" >> $GITHUB_ENV - - name: Install | Uninstall package - archive - working-directory: ./dist - run: | - # install as archive - pip install *.tar.gz - pip list | grep lightning - python -c "import ${PKG_NAME} ; print(${PKG_NAME}.__version__)" - pip uninstall -y ${PKG_NAME} - - - name: Install | Uninstall package - wheel - working-directory: ./dist - run: | - # install as wheel - pip install *.whl - pip list | grep lightning - python -c "import ${PKG_NAME} ; print(${PKG_NAME}.__version__)" - pip uninstall -y ${PKG_NAME} + - uses: ./.github/actions/pkg-install + with: + pkg-name: ${{ env.PKG_NAME }} + + install-meta: + needs: install-standalone + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + # max-parallel: 1 + matrix: + os: [ubuntu-20.04, macOS-10.15, windows-2019] + pkg: ["", "lightning"] + python-version: [3.7] # , 3.9 + + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - run: mkdir -p pypi + - uses: actions/download-artifact@v3 + if: ${{ matrix.pkg != '' }} + with: + name: ci-packages-${{ github.sha }} + path: pypi + - run: ls -lh pypi/ + + - uses: ./.github/actions/pkg-check + with: + pkg-name: ${{ matrix.pkg }} + + - uses: ./.github/actions/pkg-install + with: + pkg-name: "lightning" + pip-flags: "-U --pre --find-links ../pypi/" diff --git a/.github/workflows/ci_schema.yml b/.github/workflows/ci_schema.yml index 03b230124085d..2e62157dfbd74 100644 --- a/.github/workflows/ci_schema.yml +++ b/.github/workflows/ci_schema.yml @@ -16,7 +16,9 @@ jobs: pip install "check-jsonschema>=0.10" - name: GH Workflows - run: check-jsonschema .github/workflows/*.yml --builtin-schema "github-workflows" + run: | + check-jsonschema .github/workflows/*.yml --builtin-schema "github-workflows" + check-jsonschema .github/actions/*/*.yml --builtin-schema "github-actions" - name: Azure Pipelines env: diff --git a/src/lightning/__setup__.py b/src/lightning/__setup__.py index 3b76307bfd39f..f549419bf7cae 100644 --- a/src/lightning/__setup__.py +++ b/src/lightning/__setup__.py @@ -53,7 +53,7 @@ def _setup_args(**kwargs: Any) -> Dict[str, Any]: if kwargs["pkg_name"] == "lightning": _include_pkgs = ["lightning", "lightning.*"] # todo: generate this list automatically with parsing feature pkg versions - _requires = ["pytorch-lightning==1.6.*", "lightning-app==0.5.*"] + _requires = ["pytorch-lightning>=1.6.*", "lightning-app>=0.5.*"] else: _include_pkgs = ["*"] _requires = [ From 153ffca455f4d77a70c3f75564f4f86ac56a406a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wang=20Ran=20=28=E6=B1=AA=E7=84=B6=29?= Date: Fri, 1 Jul 2022 01:31:45 +0800 Subject: [PATCH 35/89] Fix typo in `_block_parallel_sync_behavior` docstring (#13451) Typo --- src/pytorch_lightning/loops/utilities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pytorch_lightning/loops/utilities.py b/src/pytorch_lightning/loops/utilities.py index 02d9cc2c42552..d0631f5953e2f 100644 --- a/src/pytorch_lightning/loops/utilities.py +++ 
b/src/pytorch_lightning/loops/utilities.py @@ -153,7 +153,7 @@ def _build_training_step_kwargs( @contextmanager def _block_parallel_sync_behavior(strategy: Strategy, block: bool = True) -> Generator[None, None, None]: """Blocks synchronization in :class:`~pytorch_lightning.strategies.parallel.ParallelStrategy`. This is useful - for example when when accumulating gradients to reduce communication when it is not needed. + for example when accumulating gradients to reduce communication when it is not needed. Args: strategy: the strategy instance to use. From c3c450f7ec6022eebb1e062cd5b11520213d0a9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wang=20Ran=20=28=E6=B1=AA=E7=84=B6=29?= Date: Fri, 1 Jul 2022 01:32:02 +0800 Subject: [PATCH 36/89] Fix typo in `Loop.replace` docstring (#13452) Typo --- src/pytorch_lightning/loops/loop.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pytorch_lightning/loops/loop.py b/src/pytorch_lightning/loops/loop.py index a032676c247d8..e6d5aa8d366fc 100644 --- a/src/pytorch_lightning/loops/loop.py +++ b/src/pytorch_lightning/loops/loop.py @@ -116,7 +116,7 @@ def connect(self, **kwargs: "Loop") -> None: def replace(self, **loops: Union["Loop", Type["Loop"]]) -> None: """Optionally replace one or multiple of this loop's sub-loops. - This methods takes care of instantiating the class (if necessary) with all existing arguments, connecting all + This method takes care of instantiating the class (if necessary) with all existing arguments, connecting all sub-loops of the old loop to the new instance, setting the ``Trainer`` reference, and connecting the new loop to the parent. From 88749832ec908977b23033d2ea084eedb249b120 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wang=20Ran=20=28=E6=B1=AA=E7=84=B6=29?= Date: Fri, 1 Jul 2022 01:32:17 +0800 Subject: [PATCH 37/89] Typo in tuner/lr_finder.py (#13453) Typo --- src/pytorch_lightning/tuner/lr_finder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pytorch_lightning/tuner/lr_finder.py b/src/pytorch_lightning/tuner/lr_finder.py index 9d63c8b95256b..ad15707d079f1 100644 --- a/src/pytorch_lightning/tuner/lr_finder.py +++ b/src/pytorch_lightning/tuner/lr_finder.py @@ -279,8 +279,8 @@ def __lr_finder_restore_params(trainer: "pl.Trainer", params: Dict[str, Any]) -> class _LRCallback(Callback): - """Special callback used by the learning rate finder. This callbacks log the learning rate before each batch - and log the corresponding loss after each batch. + """Special callback used by the learning rate finder. This callback logs the learning rate before each batch + and logs the corresponding loss after each batch. Args: num_training: number of iterations done by the learning rate finder From 63c611f55f64aa7ae20a29292d2895e2b0665f07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wang=20Ran=20=28=E6=B1=AA=E7=84=B6=29?= Date: Fri, 1 Jul 2022 01:32:31 +0800 Subject: [PATCH 38/89] Remove unused argument `model` (#13454) Remove unused argument `model` in the doc of `verify_loop_configurations`. 
--- src/pytorch_lightning/trainer/configuration_validator.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index 78ac390608649..c1ca692d031b8 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -27,8 +27,7 @@ def verify_loop_configurations(trainer: "pl.Trainer") -> None: Checks that the model is configured correctly before the run is started. Args: - trainer: Lightning Trainer - model: The model to check the configuration. + trainer: Lightning Trainer. Its `lightning_module` (the model) to check the configuration. """ model = trainer.lightning_module From 57d5659a9e064fc0587efb5578b61cfd003f7a1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wang=20Ran=20=28=E6=B1=AA=E7=84=B6=29?= Date: Fri, 1 Jul 2022 01:32:43 +0800 Subject: [PATCH 39/89] Typo in trainer/supporters.py (#13455) Typo --- src/pytorch_lightning/trainer/supporters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pytorch_lightning/trainer/supporters.py b/src/pytorch_lightning/trainer/supporters.py index 6d3ec88b0be6a..101af14fe2d64 100644 --- a/src/pytorch_lightning/trainer/supporters.py +++ b/src/pytorch_lightning/trainer/supporters.py @@ -110,7 +110,7 @@ def _agg_memory(self, how: str): @dataclass class SharedCycleIteratorState: - """A state shared between all CylceIterators in a CombinedLoader. + """A state shared between all CycleIterators in a CombinedLoader. With a shared state, the iterators can decide to terminate based on the state of all others. If the mode is *max_size_cycle*, all iterators need to have finished before the combined loading is considered finished, and From 09b2d519d3ed7c297afab71a21186e0bc61d1437 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wang=20Ran=20=28=E6=B1=AA=E7=84=B6=29?= Date: Fri, 1 Jul 2022 01:32:53 +0800 Subject: [PATCH 40/89] More clear docs for `LightningDataModule` (#13464) * More clear docs for `LightningDataModule` More clear docs for the methods `add_argparse_args` and `from_argparse_args` of the class `LightningDataModule`. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- src/pytorch_lightning/core/datamodule.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/pytorch_lightning/core/datamodule.py b/src/pytorch_lightning/core/datamodule.py index a2b4e8e0f1309..60a010ff7c3b9 100644 --- a/src/pytorch_lightning/core/datamodule.py +++ b/src/pytorch_lightning/core/datamodule.py @@ -65,7 +65,13 @@ def __init__(self) -> None: @classmethod def add_argparse_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser: - """Extends existing argparse by default `LightningDataModule` attributes.""" + """Extends existing argparse by default `LightningDataModule` attributes. 
+ + Example:: + + parser = ArgumentParser(add_help=False) + parser = LightningDataModule.add_argparse_args(parser) + """ return add_argparse_args(cls, parent_parser, **kwargs) @classmethod @@ -80,8 +86,6 @@ def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs): Example:: - parser = ArgumentParser(add_help=False) - parser = LightningDataModule.add_argparse_args(parser) module = LightningDataModule.from_argparse_args(args) """ return from_argparse_args(cls, args, **kwargs) From 7fecd51ed06f9b5c2dadb945bd52c15b80ad29a4 Mon Sep 17 00:00:00 2001 From: Gautier Dagan Date: Thu, 30 Jun 2022 19:05:48 +0100 Subject: [PATCH 41/89] fix mypy typing errors in lightning/trainer/optimizers.py (#13470) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- pyproject.toml | 1 - src/pytorch_lightning/trainer/optimizers.py | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ddc903d6af9d7..dc9db77d6dabd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,7 +86,6 @@ module = [ "pytorch_lightning.trainer.connectors.callback_connector", "pytorch_lightning.trainer.connectors.data_connector", "pytorch_lightning.trainer.data_loading", - "pytorch_lightning.trainer.optimizers", "pytorch_lightning.trainer.supporters", "pytorch_lightning.trainer.trainer", "pytorch_lightning.tuner.batch_size_scaling", diff --git a/src/pytorch_lightning/trainer/optimizers.py b/src/pytorch_lightning/trainer/optimizers.py index 1cb3430f2e488..8e25fb5ac60f7 100644 --- a/src/pytorch_lightning/trainer/optimizers.py +++ b/src/pytorch_lightning/trainer/optimizers.py @@ -37,9 +37,10 @@ def init_optimizers(self, model: Optional["pl.LightningModule"]) -> Tuple[List, "`TrainerOptimizersMixin.init_optimizers` was deprecated in v1.6 and will be removed in v1.8." ) pl_module = self.lightning_module or model + assert isinstance(pl_module, pl.LightningModule) return _init_optimizers_and_lr_schedulers(pl_module) - def convert_to_lightning_optimizers(self): + def convert_to_lightning_optimizers(self) -> None: r""" .. deprecated:: v1.6 `TrainerOptimizersMixin.convert_to_lightning_optimizers` was deprecated in v1.6 and will be removed in v1.8. 
@@ -59,6 +60,6 @@ def _convert_to_lightning_optimizer(optimizer: Optimizer) -> LightningOptimizer: break return optimizer # type: ignore [return-value] - self.strategy._cached_lightning_optimizers = { # type: ignore [assignment] + self.strategy._cached_lightning_optimizers = { idx: _convert_to_lightning_optimizer(opt) for idx, opt in enumerate(self.optimizers) } From 6c9d49039ab16f58aae35d0c3e2eae48fb5633ca Mon Sep 17 00:00:00 2001 From: Sherin Thomas Date: Fri, 1 Jul 2022 02:13:04 +0530 Subject: [PATCH 42/89] adding LAI test (#13321) * tests * ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Jirka Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: mansy --- .github/workflows/ci-app_block.yml | 4 + .github/workflows/ci-pytorch_dockers.yml | 1 + .github/workflows/ci-pytorch_test-conda.yml | 1 + .github/workflows/ci-pytorch_test-full.yml | 1 + .github/workflows/ci-pytorch_test-slow.yml | 1 + tests/tests_app/__init__.py | 4 + tests/tests_app/cli/__init__.py | 0 tests/tests_app/cli/test_cli.py | 86 ++ tests/tests_app/cli/test_cloud_cli.py | 223 +++++ tests/tests_app/cli/test_cmd_init.py | 99 ++ tests/tests_app/cli/test_cmd_install.py | 337 +++++++ tests/tests_app/cli/test_cmd_pl_init.py | 111 +++ tests/tests_app/cli/test_cmd_react_ui_init.py | 59 ++ tests/tests_app/cli/test_run_app.py | 92 ++ tests/tests_app/components/__init__.py | 0 .../tests_app/components/python/scripts/a.py | 1 + .../tests_app/components/python/scripts/b.py | 3 + .../tests_app/components/python/scripts/c.py | 4 + .../components/python/test_python.py | 72 ++ .../__init__.py | 17 + .../components/sample_package_repo/setup.py | 46 + .../tests_app/components/serve/test_gradio.py | 30 + .../serve/test_model_inference_api.py | 78 ++ .../test_install_external_component.py | 50 + tests/tests_app/conftest.py | 83 ++ tests/tests_app/core/__init__.py | 0 .../tests_app/core/lightning_app/__init__.py | 0 .../lightning_app/test_configure_layout.py | 220 +++++ tests/tests_app/core/scripts/.lightning | 1 + tests/tests_app/core/scripts/app_metadata.py | 61 ++ tests/tests_app/core/scripts/empty.py | 0 tests/tests_app/core/scripts/example_1.py | 1 + tests/tests_app/core/scripts/example_2.py | 1 + tests/tests_app/core/scripts/lightning_cli.py | 62 ++ .../core/scripts/lightning_overrides.py | 55 ++ .../core/scripts/lightning_trainer.py | 74 ++ tests/tests_app/core/scripts/registry.py | 103 ++ .../core/scripts/script_with_error.py | 13 + tests/tests_app/core/scripts/two_apps.py | 10 + tests/tests_app/core/test_constants.py | 9 + tests/tests_app/core/test_lightning_api.py | 373 ++++++++ tests/tests_app/core/test_lightning_app.py | 886 ++++++++++++++++++ tests/tests_app/core/test_lightning_flow.py | 637 +++++++++++++ tests/tests_app/core/test_lightning_work.py | 283 ++++++ tests/tests_app/core/test_queues.py | 153 +++ tests/tests_app/frontend/__init__.py | 0 tests/tests_app/frontend/test_stream_lit.py | 94 ++ tests/tests_app/frontend/test_web.py | 77 ++ tests/tests_app/runners/__init__.py | 0 tests/tests_app/runners/test_cloud.py | 289 ++++++ tests/tests_app/runners/test_multiprocess.py | 83 ++ tests/tests_app/runners/test_runtime.py | 45 + tests/tests_app/runners/test_singleprocess.py | 18 + tests/tests_app/source_code/test_copytree.py | 107 +++ tests/tests_app/source_code/test_local.py | 390 ++++++++ 
tests/tests_app/source_code/test_tar.py | 125 +++ tests/tests_app/source_code/test_uploader.py | 48 + tests/tests_app/storage/__init__.py | 0 tests/tests_app/storage/test_copier.py | 133 +++ tests/tests_app/storage/test_drive.py | 232 +++++ tests/tests_app/storage/test_orchestrator.py | 77 ++ tests/tests_app/storage/test_path.py | 680 ++++++++++++++ tests/tests_app/storage/test_payload.py | 148 +++ tests/tests_app/structures/__init__.py | 0 tests/tests_app/structures/test_structures.py | 442 +++++++++ tests/tests_app/utilities/__init__.py | 0 .../tests_app/utilities/packaging/__init__.py | 0 .../packaging/projects/Dockerfile.cpu | 1 + .../packaging/projects/dock/__init__.py | 0 .../utilities/packaging/projects/dock/app.py | 12 + .../packaging/projects/dock/compo/__init__.py | 0 .../projects/dock/compo/a/__init__.py | 0 .../packaging/projects/dock/compo/a/a.py | 14 + .../projects/dock/compo/b/__init__.py | 0 .../packaging/projects/dock/compo/b/b.py | 10 + .../packaging/projects/dockerfile/__init__.py | 0 .../packaging/projects/dockerfile/app.py | 11 + .../dockerfile/comp_dockerfile/__init__.py | 0 .../dockerfile/comp_dockerfile/a/Dockerfile | 1 + .../dockerfile/comp_dockerfile/a/__init__.py | 0 .../dockerfile/comp_dockerfile/a/a.py | 6 + .../dockerfile/comp_dockerfile/b/__init__.py | 0 .../dockerfile/comp_dockerfile/b/b.py | 10 + .../packaging/projects/no_req/__init__.py | 0 .../packaging/projects/no_req/app.py | 12 + .../projects/no_req/comp/__init__.py | 0 .../projects/no_req/comp/a/__init__.py | 0 .../packaging/projects/no_req/comp/a/a.py | 8 + .../projects/no_req/comp/b/__init__.py | 0 .../packaging/projects/no_req/comp/b/b.py | 10 + .../packaging/projects/req/__init__.py | 0 .../utilities/packaging/projects/req/app.py | 12 + .../projects/req/comp_req/__init__.py | 0 .../projects/req/comp_req/a/__init__.py | 0 .../packaging/projects/req/comp_req/a/a.py | 8 + .../projects/req/comp_req/a/requirements.txt | 3 + .../projects/req/comp_req/b/__init__.py | 0 .../packaging/projects/req/comp_req/b/b.py | 10 + .../packaging/projects/requirements.txt | 1 + .../utilities/packaging/test_app_config.py | 61 ++ .../utilities/packaging/test_build_spec.py | 79 ++ .../utilities/packaging/test_cloud_compute.py | 22 + .../utilities/packaging/test_docker.py | 68 ++ .../packaging/test_lightning_utils.py | 37 + tests/tests_app/utilities/test_app_helpers.py | 107 +++ tests/tests_app/utilities/test_apply_func.py | 264 ++++++ tests/tests_app/utilities/test_cli_helpers.py | 30 + tests/tests_app/utilities/test_component.py | 78 ++ .../utilities/test_dependency_caching.py | 15 + tests/tests_app/utilities/test_git.py | 29 + tests/tests_app/utilities/test_imports.py | 49 + .../tests_app/utilities/test_introspection.py | 60 ++ tests/tests_app/utilities/test_load_app.py | 84 ++ tests/tests_app/utilities/test_login.py | 142 +++ tests/tests_app/utilities/test_network.py | 5 + tests/tests_app/utilities/test_proxies.py | 689 ++++++++++++++ tests/tests_app/utilities/test_state.py | 282 ++++++ tests/tests_app/utilities/test_tracer.py | 27 + tests/tests_app/utilities/test_tree.py | 171 ++++ 119 files changed, 9761 insertions(+) create mode 100644 tests/tests_app/__init__.py create mode 100644 tests/tests_app/cli/__init__.py create mode 100644 tests/tests_app/cli/test_cli.py create mode 100644 tests/tests_app/cli/test_cloud_cli.py create mode 100644 tests/tests_app/cli/test_cmd_init.py create mode 100644 tests/tests_app/cli/test_cmd_install.py create mode 100644 tests/tests_app/cli/test_cmd_pl_init.py create mode 100644 
tests/tests_app/cli/test_cmd_react_ui_init.py create mode 100644 tests/tests_app/cli/test_run_app.py create mode 100644 tests/tests_app/components/__init__.py create mode 100644 tests/tests_app/components/python/scripts/a.py create mode 100644 tests/tests_app/components/python/scripts/b.py create mode 100644 tests/tests_app/components/python/scripts/c.py create mode 100644 tests/tests_app/components/python/test_python.py create mode 100644 tests/tests_app/components/sample_package_repo/external_lightning_component_package/__init__.py create mode 100644 tests/tests_app/components/sample_package_repo/setup.py create mode 100644 tests/tests_app/components/serve/test_gradio.py create mode 100644 tests/tests_app/components/serve/test_model_inference_api.py create mode 100644 tests/tests_app/components/test_install_external_component.py create mode 100644 tests/tests_app/conftest.py create mode 100644 tests/tests_app/core/__init__.py create mode 100644 tests/tests_app/core/lightning_app/__init__.py create mode 100644 tests/tests_app/core/lightning_app/test_configure_layout.py create mode 100644 tests/tests_app/core/scripts/.lightning create mode 100644 tests/tests_app/core/scripts/app_metadata.py create mode 100644 tests/tests_app/core/scripts/empty.py create mode 100644 tests/tests_app/core/scripts/example_1.py create mode 100644 tests/tests_app/core/scripts/example_2.py create mode 100644 tests/tests_app/core/scripts/lightning_cli.py create mode 100644 tests/tests_app/core/scripts/lightning_overrides.py create mode 100644 tests/tests_app/core/scripts/lightning_trainer.py create mode 100644 tests/tests_app/core/scripts/registry.py create mode 100644 tests/tests_app/core/scripts/script_with_error.py create mode 100644 tests/tests_app/core/scripts/two_apps.py create mode 100644 tests/tests_app/core/test_constants.py create mode 100644 tests/tests_app/core/test_lightning_api.py create mode 100644 tests/tests_app/core/test_lightning_app.py create mode 100644 tests/tests_app/core/test_lightning_flow.py create mode 100644 tests/tests_app/core/test_lightning_work.py create mode 100644 tests/tests_app/core/test_queues.py create mode 100644 tests/tests_app/frontend/__init__.py create mode 100644 tests/tests_app/frontend/test_stream_lit.py create mode 100644 tests/tests_app/frontend/test_web.py create mode 100644 tests/tests_app/runners/__init__.py create mode 100644 tests/tests_app/runners/test_cloud.py create mode 100644 tests/tests_app/runners/test_multiprocess.py create mode 100644 tests/tests_app/runners/test_runtime.py create mode 100644 tests/tests_app/runners/test_singleprocess.py create mode 100644 tests/tests_app/source_code/test_copytree.py create mode 100644 tests/tests_app/source_code/test_local.py create mode 100644 tests/tests_app/source_code/test_tar.py create mode 100644 tests/tests_app/source_code/test_uploader.py create mode 100644 tests/tests_app/storage/__init__.py create mode 100644 tests/tests_app/storage/test_copier.py create mode 100644 tests/tests_app/storage/test_drive.py create mode 100644 tests/tests_app/storage/test_orchestrator.py create mode 100644 tests/tests_app/storage/test_path.py create mode 100644 tests/tests_app/storage/test_payload.py create mode 100644 tests/tests_app/structures/__init__.py create mode 100644 tests/tests_app/structures/test_structures.py create mode 100644 tests/tests_app/utilities/__init__.py create mode 100644 tests/tests_app/utilities/packaging/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/Dockerfile.cpu create 
mode 100644 tests/tests_app/utilities/packaging/projects/dock/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/dock/app.py create mode 100644 tests/tests_app/utilities/packaging/projects/dock/compo/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/dock/compo/a/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/dock/compo/a/a.py create mode 100644 tests/tests_app/utilities/packaging/projects/dock/compo/b/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/dock/compo/b/b.py create mode 100644 tests/tests_app/utilities/packaging/projects/dockerfile/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/dockerfile/app.py create mode 100644 tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/Dockerfile create mode 100644 tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/a.py create mode 100644 tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/b/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/b/b.py create mode 100644 tests/tests_app/utilities/packaging/projects/no_req/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/no_req/app.py create mode 100644 tests/tests_app/utilities/packaging/projects/no_req/comp/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/no_req/comp/a/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/no_req/comp/a/a.py create mode 100644 tests/tests_app/utilities/packaging/projects/no_req/comp/b/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/no_req/comp/b/b.py create mode 100644 tests/tests_app/utilities/packaging/projects/req/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/req/app.py create mode 100644 tests/tests_app/utilities/packaging/projects/req/comp_req/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/req/comp_req/a/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/req/comp_req/a/a.py create mode 100644 tests/tests_app/utilities/packaging/projects/req/comp_req/a/requirements.txt create mode 100644 tests/tests_app/utilities/packaging/projects/req/comp_req/b/__init__.py create mode 100644 tests/tests_app/utilities/packaging/projects/req/comp_req/b/b.py create mode 100644 tests/tests_app/utilities/packaging/projects/requirements.txt create mode 100644 tests/tests_app/utilities/packaging/test_app_config.py create mode 100644 tests/tests_app/utilities/packaging/test_build_spec.py create mode 100644 tests/tests_app/utilities/packaging/test_cloud_compute.py create mode 100644 tests/tests_app/utilities/packaging/test_docker.py create mode 100644 tests/tests_app/utilities/packaging/test_lightning_utils.py create mode 100644 tests/tests_app/utilities/test_app_helpers.py create mode 100644 tests/tests_app/utilities/test_apply_func.py create mode 100644 tests/tests_app/utilities/test_cli_helpers.py create mode 100644 tests/tests_app/utilities/test_component.py create mode 100644 tests/tests_app/utilities/test_dependency_caching.py create mode 100644 tests/tests_app/utilities/test_git.py create mode 100644 tests/tests_app/utilities/test_imports.py create 
mode 100644 tests/tests_app/utilities/test_introspection.py create mode 100644 tests/tests_app/utilities/test_load_app.py create mode 100644 tests/tests_app/utilities/test_login.py create mode 100644 tests/tests_app/utilities/test_network.py create mode 100644 tests/tests_app/utilities/test_proxies.py create mode 100644 tests/tests_app/utilities/test_state.py create mode 100644 tests/tests_app/utilities/test_tracer.py create mode 100644 tests/tests_app/utilities/test_tree.py diff --git a/.github/workflows/ci-app_block.yml b/.github/workflows/ci-app_block.yml index 865957b8a55c6..1561959f3bf31 100644 --- a/.github/workflows/ci-app_block.yml +++ b/.github/workflows/ci-app_block.yml @@ -26,3 +26,7 @@ jobs: - name: Block edits in docs/source-app if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'docs/source-app') run: exit 1 + + - name: Block edits in tests/tests_app + if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'tests/tests_app') + run: exit 1 diff --git a/.github/workflows/ci-pytorch_dockers.yml b/.github/workflows/ci-pytorch_dockers.yml index d3fe13da71991..6365d576c466b 100644 --- a/.github/workflows/ci-pytorch_dockers.yml +++ b/.github/workflows/ci-pytorch_dockers.yml @@ -9,6 +9,7 @@ on: # Trigger the workflow on push or pull request, but only for the master bran branches: [master, "release/*"] paths: - "!src/lightning_app/**" # todo: implement job skip + - "!tests/tests_app/**" # todo: implement job skip - "dockers/**" - "!dockers/README.md" - "requirements/*" diff --git a/.github/workflows/ci-pytorch_test-conda.yml b/.github/workflows/ci-pytorch_test-conda.yml index d0913ee0c2549..ff2783a4695a1 100644 --- a/.github/workflows/ci-pytorch_test-conda.yml +++ b/.github/workflows/ci-pytorch_test-conda.yml @@ -8,6 +8,7 @@ on: # Trigger the workflow on push or pull request, but only for the master bra branches: [master, "release/*"] paths-ignore: - "src/lightning_app/**" # todo: implement job skip + - "tests/tests_app/**" # todo: implement job skip concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} diff --git a/.github/workflows/ci-pytorch_test-full.yml b/.github/workflows/ci-pytorch_test-full.yml index d857cacf5fac0..37bc42a33335c 100644 --- a/.github/workflows/ci-pytorch_test-full.yml +++ b/.github/workflows/ci-pytorch_test-full.yml @@ -9,6 +9,7 @@ on: # Trigger the workflow on push or pull request, but only for the master bra types: [opened, reopened, ready_for_review, synchronize] paths-ignore: - "src/lightning_app/**" # todo: implement job skip + - "tests/tests_app/**" # todo: implement job skip concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} diff --git a/.github/workflows/ci-pytorch_test-slow.yml b/.github/workflows/ci-pytorch_test-slow.yml index c907a2c3d6410..5c6a95e707008 100644 --- a/.github/workflows/ci-pytorch_test-slow.yml +++ b/.github/workflows/ci-pytorch_test-slow.yml @@ -9,6 +9,7 @@ on: # Trigger the workflow on push or pull request, but only for the master bra types: [opened, reopened, ready_for_review, synchronize] paths-ignore: - "src/lightning_app/**" # todo: implement job skip + - "tests/tests_app/**" # todo: implement job skip concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} diff --git a/tests/tests_app/__init__.py b/tests/tests_app/__init__.py new file mode 100644 index 0000000000000..e1a00cdd0988b --- /dev/null +++ b/tests/tests_app/__init__.py @@ -0,0 +1,4 @@ +import os + +_TESTS_ROOT = os.path.dirname(__file__) 
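+# repository root of the checkout, two directory levels above tests/tests_app/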
+_PROJECT_ROOT = os.path.dirname(os.path.dirname(_TESTS_ROOT)) diff --git a/tests/tests_app/cli/__init__.py b/tests/tests_app/cli/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/cli/test_cli.py b/tests/tests_app/cli/test_cli.py new file mode 100644 index 0000000000000..2626116990340 --- /dev/null +++ b/tests/tests_app/cli/test_cli.py @@ -0,0 +1,86 @@ +import os +from unittest import mock + +import pytest +from click.testing import CliRunner +from lightning_cloud.openapi import Externalv1LightningappInstance + +from lightning_app.cli.lightning_cli import get_app_url, login, logout, main, run +from lightning_app.runners.runtime_type import RuntimeType + + +@pytest.mark.parametrize( + "runtime_type, extra_args, lightning_cloud_url, expected_url", + [ + ( + RuntimeType.CLOUD, + (Externalv1LightningappInstance(id="test-app-id"),), + "https://b975913c4b22eca5f0f9e8eff4c4b1c315340a0d.staging.lightning.ai", + "https://b975913c4b22eca5f0f9e8eff4c4b1c315340a0d.staging.lightning.ai/me/apps/test-app-id", + ), + ( + RuntimeType.CLOUD, + (Externalv1LightningappInstance(id="test-app-id"),), + "http://localhost:9800", + "http://localhost:9800/me/apps/test-app-id", + ), + (RuntimeType.SINGLEPROCESS, tuple(), "", "http://127.0.0.1:7501/view"), + (RuntimeType.SINGLEPROCESS, tuple(), "http://localhost:9800", "http://127.0.0.1:7501/view"), + (RuntimeType.MULTIPROCESS, tuple(), "", "http://127.0.0.1:7501/view"), + (RuntimeType.MULTIPROCESS, tuple(), "http://localhost:9800", "http://127.0.0.1:7501/view"), + ], +) +def test_start_target_url(runtime_type, extra_args, lightning_cloud_url, expected_url): + with mock.patch( + "lightning_app.cli.lightning_cli.get_lightning_cloud_url", mock.MagicMock(return_value=lightning_cloud_url) + ): + assert get_app_url(runtime_type, *extra_args) == expected_url + + +@pytest.mark.parametrize("command", [main, run]) +def test_commands(command): + runner = CliRunner() + result = runner.invoke(command) + assert result.exit_code == 0 + + +def test_main_lightning_cli_help(): + """Validate the Lightning CLI.""" + res = os.popen("python -m lightning_app --help").read() + assert "login " in res + assert "logout " in res + assert "run " in res + + res = os.popen("python -m lightning_app run --help").read() + assert "app " in res + + # hidden run commands should not appear in the help text + assert "server" not in res + assert "flow" not in res + assert "work" not in res + assert "frontend" not in res + + +@mock.patch("lightning_app.utilities.login.Auth._run_server") +@mock.patch("lightning_app.utilities.login.Auth.clear") +def test_cli_login(clear: mock.MagicMock, run_server: mock.MagicMock): + runner = CliRunner() + runner.invoke(login) + + clear.assert_called_once_with() + run_server.assert_called_once() + + +@mock.patch("pathlib.Path.unlink") +@mock.patch("pathlib.Path.exists") +@pytest.mark.parametrize("creds", [True, False]) +def test_cli_logout(exists: mock.MagicMock, unlink: mock.MagicMock, creds: bool): + exists.return_value = creds + runner = CliRunner() + runner.invoke(logout) + + exists.assert_called_once_with() + if creds: + unlink.assert_called_once_with() + else: + unlink.assert_not_called() diff --git a/tests/tests_app/cli/test_cloud_cli.py b/tests/tests_app/cli/test_cloud_cli.py new file mode 100644 index 0000000000000..fc50a49365dd4 --- /dev/null +++ b/tests/tests_app/cli/test_cloud_cli.py @@ -0,0 +1,223 @@ +import enum +import logging +import os +from dataclasses import dataclass +from functools import partial +from 
unittest import mock +from unittest.mock import ANY, call, MagicMock + +import pytest +from click.testing import CliRunner +from lightning_cloud.openapi import ( + V1LightningappV2, + V1ListLightningappInstancesResponse, + V1ListLightningappsV2Response, + V1ListMembershipsResponse, + V1Membership, +) +from lightning_cloud.openapi.rest import ApiException +from tests_app import _PROJECT_ROOT + +import lightning_app.runners.backends.cloud as cloud_backend +from lightning_app.cli.lightning_cli import run_app +from lightning_app.runners import cloud +from lightning_app.runners.cloud import CloudRuntime + +_FILE_PATH = os.path.join(_PROJECT_ROOT, "tests", "tests_app", "core", "scripts", "app_metadata.py") + + +@dataclass +class AppMetadata: + id: str + + +@dataclass +class FakeResponse: + lightningapps = [AppMetadata(id="my_app")] + + +class FakeLightningClient: + def __init__(self, response, api_client=None): + self._response = response + + def lightningapp_instance_service_list_lightningapp_instances(self, *args, **kwargs): + return V1ListLightningappInstancesResponse(lightningapps=[]) + + def lightningapp_service_delete_lightningapp(self, id: str = None): + assert id == "my_app" + + def projects_service_list_memberships(self): + return V1ListMembershipsResponse(memberships=[V1Membership(name="test-project", project_id="test-project-id")]) + + +class CloudRuntimePatch(CloudRuntime): + def __init__(self, *args, **kwargs): + super_init = super().__init__ + if hasattr(super_init, "__wrapped__"): + super_init.__wrapped__(self, *args, **kwargs) + else: + super_init(*args, **kwargs) + + +class V1LightningappInstanceState(enum.Enum): + FAILED = "failed" + SUCCESS = "success" + + +@dataclass +class FailedStatus: + phase = V1LightningappInstanceState.FAILED + + +@dataclass +class SuccessStatus: + phase = V1LightningappInstanceState.SUCCESS + + +@dataclass +class RuntimeErrorResponse: + id = "my_app" + source_upload_url = "something" + status = FailedStatus() + + +@dataclass +class RuntimeErrorResponse2: + id = "my_app" + source_upload_url = "" + status = SuccessStatus() + + +@dataclass +class SuccessResponse: + id = "my_app" + source_upload_url = "something" + status = SuccessStatus() + + +@dataclass +class ExceptionResponse: + status = FailedStatus() + + +class FakeLightningClientCreate(FakeLightningClient): + def __init__(self, *args, create_response, **kwargs): + super().__init__(*args, **kwargs) + self.create_response = create_response + + def lightningapp_v2_service_list_lightningapps_v2(self, *args, **kwargs): + return V1ListLightningappsV2Response(lightningapps=[V1LightningappV2(id="my_app", name="app")]) + + def lightningapp_v2_service_create_lightningapp_release(self, project_id, app_id, body): + assert project_id == "test-project-id" + return self.create_response + + def lightningapp_v2_service_create_lightningapp_release_instance(self, project_id, app_id, release_id, body): + assert project_id == "test-project-id" + return self.create_response + + +@mock.patch("lightning_app.core.queues.QueuingSystem", MagicMock()) +@mock.patch("lightning_app.runners.runtime_type.CloudRuntime", CloudRuntimePatch) +@pytest.mark.parametrize("create_response", [RuntimeErrorResponse(), RuntimeErrorResponse2()]) +def test_start_app(create_response, monkeypatch): + + monkeypatch.setattr(cloud, "V1LightningappInstanceState", MagicMock()) + monkeypatch.setattr(cloud, "Body8", MagicMock()) + monkeypatch.setattr(cloud, "V1Flowserver", MagicMock()) + monkeypatch.setattr(cloud, "V1LightningappInstanceSpec", 
MagicMock()) + monkeypatch.setattr( + cloud_backend, + "LightningClient", + partial(FakeLightningClientCreate, response=FakeResponse(), create_response=create_response), + ) + monkeypatch.setattr(cloud, "LocalSourceCodeDir", MagicMock()) + monkeypatch.setattr(cloud, "_prepare_lightning_wheels_and_requirements", MagicMock()) + + runner = CliRunner() + + def run(): + result = runner.invoke(run_app, [_FILE_PATH, "--cloud", "--open-ui=False"], catch_exceptions=False) + assert result.exit_code == 0 + + if isinstance(create_response, RuntimeErrorResponse): + cloud.V1LightningappInstanceState.FAILED = V1LightningappInstanceState.FAILED + with pytest.raises(RuntimeError, match="Failed to create the application"): + run() + elif isinstance(create_response, RuntimeErrorResponse2): + with pytest.raises(RuntimeError, match="The source upload url is empty."): + run() + elif isinstance(create_response, RuntimeErrorResponse2): + with pytest.raises(RuntimeError, match="The source upload url is empty."): + run() + else: + run() + mocks_calls = cloud.LocalSourceCodeDir._mock_mock_calls + assert len(mocks_calls) == 5 + assert str(mocks_calls[0].kwargs["path"]) == os.path.dirname(_FILE_PATH) + mocks_calls[1].assert_called_once() + mocks_calls[2].assert_called_once(url="url") + + assert cloud.V1Flowserver._mock_call_args_list == [call(name="root.flow_b")] + + cloud.V1LightningappInstanceSpec._mock_call_args.assert_called_once( + app_entrypoint_file=_FILE_PATH, + enable_app_server=True, + works=ANY, + flow_servers=ANY, + ) + + cloud.Body8.assert_called_once() + + +class FakeLightningClientException(FakeLightningClient): + def __init__(self, *args, message, api_client=None, **kwargs): + super().__init__(*args, api_client=api_client, **kwargs) + self.message = message + + def lightningapp_v2_service_list_lightningapps_v2(self, *args, **kwargs): + class HttpHeaderDict(dict): + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.reason = "" + self.status = 500 + self.data = kwargs["data"] + + def getheaders(self): + return {} + + raise ApiException( + http_resp=HttpHeaderDict( + data=self.message, + reason="", + status=500, + ) + ) + + +@mock.patch("lightning_app.utilities.network.create_swagger_client", MagicMock()) +@mock.patch("lightning_app.runners.runtime_type.CloudRuntime", CloudRuntimePatch) +@pytest.mark.parametrize( + "message", + [ + "Cannot create a new app, you have reached the maximum number (10) of apps. 
Either increase your quota or delete some of the existing apps" # noqa E501 + ], +) +def test_start_app_exception(message, monkeypatch, caplog): + + monkeypatch.setattr(cloud, "V1LightningappInstanceState", MagicMock()) + monkeypatch.setattr(cloud, "Body8", MagicMock()) + monkeypatch.setattr(cloud, "V1Flowserver", MagicMock()) + monkeypatch.setattr(cloud, "V1LightningappInstanceSpec", MagicMock()) + monkeypatch.setattr(cloud, "LocalSourceCodeDir", MagicMock()) + monkeypatch.setattr(cloud, "_prepare_lightning_wheels_and_requirements", MagicMock()) + monkeypatch.setattr(cloud, "logger", logging.getLogger()) + + runner = CliRunner() + + fake_grid_rest_client = partial(FakeLightningClientException, response=FakeResponse(), message=message) + with caplog.at_level(logging.ERROR): + with mock.patch("lightning_app.runners.backends.cloud.LightningClient", fake_grid_rest_client): + result = runner.invoke(run_app, [_FILE_PATH, "--cloud", "--open-ui=False"], catch_exceptions=False) + assert result.exit_code == 1 + assert caplog.messages == [message] diff --git a/tests/tests_app/cli/test_cmd_init.py b/tests/tests_app/cli/test_cmd_init.py new file mode 100644 index 0000000000000..4ceaecf5e55ec --- /dev/null +++ b/tests/tests_app/cli/test_cmd_init.py @@ -0,0 +1,99 @@ +import os +import re +import shutil +import subprocess + +import pytest + +from lightning_app.cli import cmd_init + + +def test_validate_init_name(): + + # test that a good name works (mix chars) + value = cmd_init._capture_valid_app_component_name("abc1-cde") + assert value == "abc1-cde" + + # test that a good name works (letters only) + value = cmd_init._capture_valid_app_component_name("abc-cde") + assert value == "abc-cde" + + # assert bad input + with pytest.raises(SystemExit) as e: + value = cmd_init._capture_valid_app_component_name("abc-cde#") + + assert "Error: your Lightning app name" in str(e.value) + + +@pytest.mark.skip(reason="need app fast_dev_run to work via CLI") +def test_make_app_template(): + template_name = "app-test-template" + template_name_folder = re.sub("-", "_", template_name) + + # remove the template if there + template_dir = os.path.join(os.getcwd(), template_name) + try: + shutil.rmtree(template_dir) + except Exception as e: # noqa + pass + + # create template + subprocess.check_output(f"lightning init app {template_name}", shell=True) + + # make sure app is not in the env + env_output = subprocess.check_output("pip freeze", shell=True) + assert template_name not in str(env_output) + + # install the app + env_output = subprocess.check_output( + f"cd {template_name} && pip install -r requirements.txt && pip install -e .", shell=True + ) + env_output = subprocess.check_output("pip freeze", shell=True) + assert template_name in str(env_output) + + app_dir = os.path.join(template_dir, f"{template_name_folder}/app.py") + output = subprocess.check_output(f"lightning run app {app_dir} --fast_dev_run") # noqa + # TODO: verify output + + # clean up the template dir + try: + shutil.rmtree(template_dir) + except Exception as e: # noqa + pass + + +@pytest.mark.skip(reason="need component fast_dev_run to work via CLI") +def test_make_component_template(): + template_name = "component-test-template" + template_name_folder = re.sub("-", "_", template_name) + + # remove the template if there + template_dir = os.path.join(os.getcwd(), template_name) + try: + shutil.rmtree(template_dir) + except Exception as e: # noqa + pass + + # create template + subprocess.check_output(f"lightning init component {template_name}", shell=True) 
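+    # NOTE (assumption): this test expects the `lightning` CLI to be installed and on PATH in the
+    # test environment; `lightning init component` scaffolds the template into the current
+    # working directory.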
+ + # make sure app is not in the env + env_output = subprocess.check_output("pip freeze", shell=True) + assert template_name not in str(env_output) + + # install the app + env_output = subprocess.check_output( + f"cd {template_name} && pip install -r requirements.txt && pip install -e .", shell=True + ) + env_output = subprocess.check_output("pip freeze", shell=True) + assert template_name in str(env_output) + + app_dir = os.path.join(template_dir, f"{template_name_folder}/app.py") + output = subprocess.check_output(f"lightning run app {app_dir} --fast_dev_run") # noqa + # TODO: verify output + + # clean up the template dir + try: + shutil.rmtree(template_dir) + except Exception as e: # noqa + pass diff --git a/tests/tests_app/cli/test_cmd_install.py b/tests/tests_app/cli/test_cmd_install.py new file mode 100644 index 0000000000000..52a36dd2324f6 --- /dev/null +++ b/tests/tests_app/cli/test_cmd_install.py @@ -0,0 +1,337 @@ +import os +import subprocess +from pathlib import Path +from unittest import mock + +import pytest +from click.testing import CliRunner + +from lightning_app.cli import cmd_install, lightning_cli +from lightning_app.cli.cmd_install import _install_app +from lightning_app.testing.helpers import RunIf + + +@mock.patch("lightning_app.cli.cmd_install.subprocess", mock.MagicMock()) +def test_valid_org_app_name(): + runner = CliRunner() + + # assert a bad app name should fail + fake_app = "fakeuser/impossible/name" + result = runner.invoke(lightning_cli.install_app, [fake_app]) + assert "app name format must have organization/app-name" in result.output + + # assert a good name (but unavailable name) should work + fake_app = "fakeuser/ALKKLJAUHREKJ21234KLAKJDLF" + result = runner.invoke(lightning_cli.install_app, [fake_app]) + assert f"app: '{fake_app}' is not available on ⚡ Lightning AI ⚡" in result.output + assert result.exit_code + + # assert a good (and availablea name) works + real_app = "lightning/install-app" + result = runner.invoke(lightning_cli.install_app, [real_app]) + assert "Press enter to continue:" in result.output + + +@pytest.mark.skip(reason="need to figure out how to authorize git clone from the private repo") +def test_valid_unpublished_app_name(): + runner = CliRunner() + + # assert warning of non official app given + real_app = "https://github.com/Lightning-AI/install-app" + try: + subprocess.check_output(f"lightning install app {real_app}", shell=True, stderr=subprocess.STDOUT) + # this condition should never be hit + assert False + except subprocess.CalledProcessError as e: + assert "WARNING" in str(e.output) + + # assert aborted install + result = runner.invoke(lightning_cli.install_app, [real_app], input="q") + assert "Installation aborted!" 
in result.output + + # assert a bad app name should fail + fake_app = "https://github.com/Lightning-AI/install-appdd" + result = runner.invoke(lightning_cli.install_app, [fake_app, "--yes"]) + assert "Looks like the github url was not found" in result.output + + # assert a good (and availablea name) works + result = runner.invoke(lightning_cli.install_app, [real_app]) + assert "Press enter to continue:" in result.output + + +@pytest.mark.skip(reason="need to figure out how to authorize git clone from the private repo") +def test_app_install(tmpdir): + """Tests unpublished app install.""" + + cwd = os.getcwd() + os.chdir(tmpdir) + + real_app = "https://github.com/Lightning-AI/install-app" + test_app_pip_name = "install-app" + + # install app and verify it's in the env + subprocess.check_output(f"lightning install app {real_app} --yes", shell=True) + new_env_output = subprocess.check_output("pip freeze", shell=True) + assert test_app_pip_name in str(new_env_output), f"{test_app_pip_name} should be in the env" + + os.chdir(cwd) + + +@mock.patch("lightning_app.cli.cmd_install.subprocess", mock.MagicMock()) +def test_valid_org_component_name(): + runner = CliRunner() + + # assert a bad name should fail + fake_component = "fakeuser/impossible/name" + result = runner.invoke(lightning_cli.install_component, [fake_component]) + assert "component name format must have organization/component-name" in result.output + + # assert a good name (but unavailable name) should work + fake_component = "fakeuser/ALKKLJAUHREKJ21234KLAKJDLF" + result = runner.invoke(lightning_cli.install_component, [fake_component]) + assert f"component: '{fake_component}' is not available on ⚡ Lightning AI ⚡" in result.output + + # assert a good (and availablea name) works + fake_component = "lightning/lit-slack-messenger" + result = runner.invoke(lightning_cli.install_component, [fake_component]) + assert "Press enter to continue:" in result.output + + +def test_unpublished_component_url_parsing(): + runner = CliRunner() + + # assert a bad name should fail (no git@) + fake_component = "https://github.com/Lightning-AI/LAI-slack-messenger" + result = runner.invoke(lightning_cli.install_component, [fake_component]) + assert "Error, your github url must be in the following format" in result.output + + # assert a good (and availablea name) works + sha = "14f333456ffb6758bd19458e6fa0bf12cf5575e1" + real_component = f"git+https://github.com/Lightning-AI/LAI-slack-messenger.git@{sha}" + result = runner.invoke(lightning_cli.install_component, [real_component]) + assert "Press enter to continue:" in result.output + + +@pytest.mark.skip(reason="need to figure out how to authorize pip install from the private repo") +@pytest.mark.parametrize( + "real_component, test_component_pip_name", + [ + ("lightning/lit-slack-messenger", "lit-slack"), + ( + "git+https://github.com/Lightning-AI/LAI-slack-messenger.git@14f333456ffb6758bd19458e6fa0bf12cf5575e1", + "lit-slack", + ), + ], +) +def test_component_install(real_component, test_component_pip_name): + """Tests both published and unpublished component installs.""" + # uninstall component just in case and verify it's not in the pip output + env_output = subprocess.check_output(f"pip uninstall {test_component_pip_name} --yes && pip freeze", shell=True) + assert test_component_pip_name not in str(env_output), f"{test_component_pip_name} should not be in the env" + + # install component and verify it's in the env + new_env_output = subprocess.check_output( + f"lightning install component 
{real_component} --yes && pip freeze", shell=True + ) + assert test_component_pip_name in str(new_env_output), f"{test_component_pip_name} should be in the env" + + # clean up for test + subprocess.run(f"pip uninstall {test_component_pip_name} --yes", shell=True) + env_output = subprocess.check_output("pip freeze", shell=True) + assert test_component_pip_name not in str( + env_output + ), f"{test_component_pip_name} should not be in the env after cleanup" + + +def test_prompt_actions(): + # TODO: each of these installs must check that a package is installed in the environment correctly + app_to_use = "lightning/install-app" + + runner = CliRunner() + + # assert that the user can cancel the command with any letter other than y + result = runner.invoke(lightning_cli.install_app, [app_to_use], input="b") + assert "Installation aborted!" in result.output + + # assert that the install happens with --yes + # result = runner.invoke(lightning_cli.install_app, [app_to_use, "--yes"]) + # assert result.exit_code == 0 + + # assert that the install happens with y + # result = runner.invoke(lightning_cli.install_app, [app_to_use], input='y') + # assert result.exit_code == 0 + + # # assert that the install happens with yes + # result = runner.invoke(lightning_cli.install_app, [app_to_use], input='yes') + # assert result.exit_code == 0 + + # assert that the install happens with pressing enter + # result = runner.invoke(lightning_cli.install_app, [app_to_use]) + + # TODO: how to check the output when the user types ctrl+c? + # result = runner.invoke(lightning_cli.install_app, [app_to_use], input='') + + +@mock.patch("lightning_app.cli.cmd_install.subprocess", mock.MagicMock()) +def test_version_arg_component(tmpdir, monkeypatch): + monkeypatch.chdir(tmpdir) + runner = CliRunner() + + # Version does not exist + component_name = "lightning/lit-slack-messenger" + version_arg = "NOT-EXIST" + result = runner.invoke(lightning_cli.install_component, [component_name, f"--version={version_arg}"]) + assert f"component: 'Version {version_arg} for {component_name}' is not" in str(result.exception) + assert result.exit_code == 1 + + # Version exists + # This somwehow fail in test but not when you actually run it + version_arg = "0.0.1" + runner = CliRunner() + result = runner.invoke(lightning_cli.install_component, [component_name, f"--version={version_arg}", "--yes"]) + assert result.exit_code == 0 + + +@mock.patch("lightning_app.cli.cmd_install.subprocess", mock.MagicMock()) +@mock.patch("lightning_app.cli.cmd_install.os.chdir", mock.MagicMock()) +def test_version_arg_app(tmpdir): + + # Version does not exist + app_name = "lightning/hackernews-app" + version_arg = "NOT-EXIST" + runner = CliRunner() + result = runner.invoke(lightning_cli.install_app, [app_name, f"--version={version_arg}"]) + assert f"app: 'Version {version_arg} for {app_name}' is not" in str(result.exception) + assert result.exit_code == 1 + + # Version exists + version_arg = "0.0.1" + runner = CliRunner() + result = runner.invoke(lightning_cli.install_app, [app_name, f"--version={version_arg}", "--yes"]) + assert result.exit_code == 0 + + +def test_proper_url_parsing(): + + name = "lightning/install-app" + + # make sure org/app-name name is correct + org, app = cmd_install._validate_name(name, resource_type="app", example="lightning/lit-slack-component") + assert org == "lightning" + assert app == "install-app" + + # resolve registry (orgs can have a private registry through their environment variables) + registry_url = 
cmd_install._resolve_app_registry() + assert registry_url == "https://api.sheety.co/e559626ba514c7ba80caae1e38a8d4f4/lightningAppRegistry/apps" + + # load the component resource + component_entry = cmd_install._resolve_resource(registry_url, name=name, version_arg="latest", resource_type="app") + + source_url, git_url, folder_name, git_sha = cmd_install._show_install_app_prompt( + component_entry, app, org, True, resource_type="app" + ) + assert folder_name == "install-app" + # FixMe: this need to be updated after release with updated org rename + assert source_url == "https://github.com/PyTorchLightning/install-app" + assert git_url.find("@") > 10 # TODO: this will be removed once the apps repos will be public + assert "#ref" not in git_url + assert git_sha + + +@RunIf(skip_windows=True) +def test_install_app_shows_error(tmpdir): + + app_folder_dir = Path(tmpdir / "some_random_directory").absolute() + app_folder_dir.mkdir() + + with pytest.raises(SystemExit, match=f"Folder {str(app_folder_dir)} exists, please delete it and try again."): + _install_app(source_url=mock.ANY, git_url=mock.ANY, folder_name=str(app_folder_dir), overwrite=False) + + +# def test_env_creation(tmpdir): +# cwd = os.getcwd() +# os.chdir(tmpdir) + +# # install app +# cmd_install.app("lightning/install-app", True, cwd=tmpdir) + +# # assert app folder is installed with venv +# assert "python" in set(os.listdir(os.path.join(tmpdir, "install-app/bin"))) + +# # assert the deps are in the env +# env_output = subprocess.check_output("source bin/activate && pip freeze", shell=True) +# non_env_output = subprocess.check_output("pip freeze", shell=True) + +# # assert envs are not the same +# assert env_output != non_env_output + +# # assert the reqs are in the env created and NOT in the non env +# reqs = open(os.path.join(tmpdir, "install-app/requirements.txt")).read() +# assert reqs in str(env_output) and reqs not in str(non_env_output) + +# # setup.py installs numpy +# assert "numpy" in str(env_output) + +# # run the python script to make sure the file works (in a folder) +# app_file = os.path.join(tmpdir, "install-app/src/app.py") +# app_output = subprocess.check_output(f"source bin/activate && python {app_file}", shell=True) +# assert "b'printed a\\ndeps loaded\\n'" == str(app_output) + +# # run the python script to make sure the file works (in root) +# app_file = os.path.join(tmpdir, "install-app/app_b.py") +# app_output = subprocess.check_output(f"source bin/activate && python {app_file}", shell=True) +# assert "b'printed a\\n'" == str(app_output) + +# # reset dir +# os.chdir(cwd) + + +def test_public_app_registry(): + registry = cmd_install._resolve_app_registry() + assert registry == "https://api.sheety.co/e559626ba514c7ba80caae1e38a8d4f4/lightningAppRegistry/apps" + + +@mock.patch.dict(os.environ, {"LIGHTNING_APP_REGISTRY": "https://TODO/other_non_PL_registry"}) +def test_private_app_registry(): + registry = cmd_install._resolve_app_registry() + assert registry == "https://TODO/other_non_PL_registry" + + +def test_public_component_registry(): + registry = cmd_install._resolve_component_registry() + assert registry == "https://api.sheety.co/e559626ba514c7ba80caae1e38a8d4f4/lightningAppRegistry/components" + + +@mock.patch.dict(os.environ, {"LIGHTNING_COMPONENT_REGISTRY": "https://TODO/other_non_PL_registry"}) +def test_private_component_registry(): + registry = cmd_install._resolve_component_registry() + assert registry == "https://TODO/other_non_PL_registry" + + +@mock.patch("lightning_app.cli.cmd_install.subprocess") 
+@mock.patch("lightning_app.cli.cmd_install.os.chdir", mock.MagicMock()) +@pytest.mark.parametrize( + "source_url, git_url, git_sha", + [ + ( + "https://github.com/PyTorchLightning/lightning-quick-start", + "https://@github.com/PyTorchLightning/lightning-quick-start", + None, + ), + ( + "https://github.com/PyTorchLightning/lightning-quick-start", + "https://@github.com/PyTorchLightning/lightning-quick-start", + "git_sha", + ), + ], +) +def test_install_app_process(subprocess_mock, source_url, git_url, git_sha, tmpdir): + app_folder_dir = Path(tmpdir / "some_random_directory").absolute() + app_folder_dir.mkdir() + + _install_app(source_url, git_url, folder_name=str(app_folder_dir), overwrite=True, git_sha=git_sha) + assert subprocess_mock.check_output.call_args_list[0].args == (["git", "clone", git_url],) + if git_sha: + assert subprocess_mock.check_output.call_args_list[1].args == (["git", "checkout", git_sha],) + assert subprocess_mock.call.call_args_list[0].args == ("pip install -r requirements.txt",) + assert subprocess_mock.call.call_args_list[1].args == ("pip install -e .",) diff --git a/tests/tests_app/cli/test_cmd_pl_init.py b/tests/tests_app/cli/test_cmd_pl_init.py new file mode 100644 index 0000000000000..1d5548b051323 --- /dev/null +++ b/tests/tests_app/cli/test_cmd_pl_init.py @@ -0,0 +1,111 @@ +import os +from unittest import mock + +import pytest +from click.testing import CliRunner + +from lightning_app.cli import lightning_cli +from lightning_app.cli.cmd_pl_init import download_frontend, pl_app + + +def test_pl_app_input_paths_do_not_exist(tmp_path): + """Test that the CLI prints an error message if the code directory or the script path does not exist.""" + runner = CliRunner() + + source_dir = tmp_path / "code" + script_file = tmp_path / "code" / "script.py" + + result = runner.invoke(lightning_cli.init_pl_app, (str(source_dir), str(script_file))) + assert result.exit_code == 1 + assert "The given source directory does not exist:" in result.output + + source_dir.mkdir(parents=True) + + result = runner.invoke(lightning_cli.init_pl_app, (str(source_dir), str(script_file))) + assert result.exit_code == 1 + assert "The given script path does not exist:" in result.output + + script_file_as_folder = tmp_path / "code" / "folder" + script_file_as_folder.mkdir(parents=True) + result = runner.invoke(lightning_cli.init_pl_app, (str(source_dir), str(script_file_as_folder))) + assert result.exit_code == 1 + assert "The given script path must be a file, you passed:" in result.output + + +def test_pl_app_script_path_not_subpath(tmp_path): + """Test that the CLI prints an error message if the provided script path is not a subpath of the source dir.""" + runner = CliRunner() + + source_dir = tmp_path / "code" + script_file = tmp_path / "not_code" / "script.py" + + source_dir.mkdir(parents=True) + script_file.parent.mkdir(parents=True) + script_file.touch() + + result = runner.invoke(lightning_cli.init_pl_app, (str(source_dir), str(script_file)), catch_exceptions=False) + assert result.exit_code == 1 + assert "The given script path must be a subpath of the source directory." 
in result.output + + +def test_pl_app_destination_app_already_exists(tmp_path, monkeypatch): + """Test that the CLI prints an error message if an app with the same name already exists.""" + runner = CliRunner() + monkeypatch.chdir(tmp_path) + + source_dir = tmp_path / "code" + script_file = source_dir / "script.py" + source_dir.mkdir(parents=True) + script_file.parent.mkdir(parents=True, exist_ok=True) + script_file.touch() + + # monkeypatch.chdir(tmp_path) + app_folder = tmp_path / "existing-app" + app_folder.mkdir(parents=True) + + result = runner.invoke(lightning_cli.init_pl_app, (str(source_dir), str(script_file), "--name", "existing-app")) + assert result.exit_code == 1 + assert "There is already an app with the name existing-app in the current working directory" in result.output + + +def test_pl_app_incorrect_number_of_arguments(tmp_path): + """Test that the CLI prints an error message if more than two input arguments for the source are provided.""" + runner = CliRunner() + result = runner.invoke(lightning_cli.init_pl_app, ("one", "two", "three")) + assert result.exit_code == 1 + assert "Incorrect number of arguments. You passed (one, two, three) but only either one argument" in result.output + + +def test_pl_app_download_frontend(tmp_path): + build_dir = tmp_path / "app" / "ui" / "build" + download_frontend(build_dir) + contents = os.listdir(build_dir) + assert "index.html" in contents + assert "static" in contents + + +@pytest.mark.parametrize( + "cwd, source_dir, script_path", + [ + ("./", "./", "train.py"), + ("./", "./code", "./code/train.py"), + ], +) +@mock.patch("lightning_app.cli.cmd_pl_init.project_file_from_template") +@mock.patch("lightning_app.cli.cmd_pl_init.download_frontend") +def test_pl_app_relative_paths(_, __, cwd, source_dir, script_path, tmp_path, monkeypatch): + source_dir = tmp_path / source_dir + source_dir.mkdir(parents=True, exist_ok=True) + script_path = tmp_path / script_path + script_path.parent.mkdir(parents=True, exist_ok=True) + script_path.touch() + cwd = tmp_path / cwd + monkeypatch.chdir(cwd) + + pl_app(source_dir=str(source_dir), script_path=str(script_path), name="app-name", overwrite=False) + assert (cwd / "app-name").is_dir() + + expected_source_files = set(os.listdir(source_dir)) + if cwd == source_dir: + expected_source_files.remove("app-name") + assert set(os.listdir(cwd / "app-name" / "source")) == expected_source_files diff --git a/tests/tests_app/cli/test_cmd_react_ui_init.py b/tests/tests_app/cli/test_cmd_react_ui_init.py new file mode 100644 index 0000000000000..7ad248084bc0e --- /dev/null +++ b/tests/tests_app/cli/test_cmd_react_ui_init.py @@ -0,0 +1,59 @@ +import os +import subprocess + +import pytest + +import lightning_app as la +from lightning_app.cli import cmd_init, cmd_react_ui_init +from lightning_app.testing.helpers import RunIf + + +@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") is None, reason="not running in GH actions.") +@pytest.mark.skip(reason="need to figure out how to mock not having npm") +def test_missing_npm(): + with pytest.raises(SystemExit, match="This machine is missing 'npm'"): + cmd_react_ui_init._check_react_prerequisites() + + +@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") is None, reason="not running in GH actions.") +@pytest.mark.skip(reason="need to figure out how to mock not having node") +def test_missing_nodejs(): + with pytest.raises(SystemExit, match="This machine is missing 'node'"): + cmd_react_ui_init._check_react_prerequisites() + + +@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") is None, 
reason="not running in GH actions") +@pytest.mark.skip(reason="need to figure out how to mock not having yarn") +def test_missing_yarn(): + with pytest.raises(SystemExit, match="This machine is missing 'yarn'"): + cmd_react_ui_init._check_react_prerequisites() + + +@RunIf(skip_windows=True) +def test_copy_and_setup_react_ui(tmpdir): + dest_dir = os.path.join(tmpdir, "react-ui") + subprocess.Popen(["python", "-m", "lightning", "init", "react-ui", "--dest_dir", dest_dir]).wait() + + # make sure package is minimal + files = sorted(f for f in os.listdir(dest_dir) if f != "__pycache__") + assert len(files) == 3, "should only be 3 objects: readme.md, example_app.py and ui dir" + + # make sure index.html has the vite app placeholder + index_content = open(dest_dir + "/ui/dist/index.html").read() + assert "Vite App" in index_content + + # read the compiled js file + js_file = [x for x in os.listdir(os.path.join(dest_dir, "ui", "dist", "assets")) if ".js" in x] + js_file = os.path.join(dest_dir, f"ui/dist/assets/{js_file[0]}") + index_content = open(js_file).read() + + # if this is in the compiled file, the compilation worked and the app will work + assert "Total number of prints in your terminal:" in index_content, "react app was not compiled properly" + assert "LightningState.subscribe" in index_content, "react app was not compiled properly" + + +@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") is None, reason="not running in GH actions") +def test_correct_num_react_template_files(): + template_dir = os.path.join(la.__path__[0], "cli/react-ui-template") + files = cmd_init._ls_recursively(template_dir) + assert len(files) == 15, "react-ui template files must be minimal... do not add nice to haves" diff --git a/tests/tests_app/cli/test_run_app.py b/tests/tests_app/cli/test_run_app.py new file mode 100644 index 0000000000000..152bd4b7417ac --- /dev/null +++ b/tests/tests_app/cli/test_run_app.py @@ -0,0 +1,92 @@ +import logging +import os +from pathlib import Path +from unittest import mock + +import pytest +from click.testing import CliRunner +from tests_app import _PROJECT_ROOT + +from lightning_app import LightningApp +from lightning_app.cli.lightning_cli import _run_app, run_app +from lightning_app.runners.runtime_type import RuntimeType +from lightning_app.testing.helpers import RunIf +from lightning_app.utilities.app_helpers import convert_print_to_logger_info + + +@RunIf(skip_linux=True) +@mock.patch("click.launch") +@pytest.mark.parametrize("open_ui", (True, False)) +def test_lightning_run_app(lauch_mock: mock.MagicMock, open_ui, caplog, monkeypatch): + """This test validates the command is runned properly and the LightningApp method is being executed.""" + + monkeypatch.setattr("lightning_app._logger", logging.getLogger()) + + original_method = LightningApp._run + + @convert_print_to_logger_info + def _lightning_app_run_and_logging(self, *args, **kwargs): + original_method(self, *args, **kwargs) + print("1" if open_ui else "0") + print(self) + + with caplog.at_level(logging.INFO): + with mock.patch("lightning_app.LightningApp._run", _lightning_app_run_and_logging): + + runner = CliRunner() + result = runner.invoke( + run_app, + [ + os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/app_metadata.py"), + "--blocking", + "False", + "--open-ui", + str(open_ui), + ], + catch_exceptions=False, + ) + # capture logs. 
+ if open_ui: + lauch_mock.assert_called_with("http://127.0.0.1:7501/view") + else: + lauch_mock.assert_not_called() + assert result.exit_code == 0 + assert len(caplog.messages) == 2 + assert bool(int(caplog.messages[0])) is open_ui + + +@mock.patch.dict(os.environ, {"LIGHTNING_CLOUD_URL": "https://beta.lightning.ai"}) +@mock.patch("lightning_app.cli.lightning_cli.dispatch") +@pytest.mark.parametrize("open_ui", (True, False)) +def test_lightning_run_app_cloud(mock_dispatch: mock.MagicMock, open_ui, caplog, monkeypatch): + """This test validates the command has ran properly when --cloud argument is passed. + + It tests it by checking if the click.launch is called with the right url if --open-ui was true and also checks the + call to `dispatch` for the right arguments + """ + monkeypatch.setattr("lightning_app.runners.cloud.logger", logging.getLogger()) + + with caplog.at_level(logging.INFO): + _run_app( + file=os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/app_metadata.py"), + cloud=True, + without_server=False, + name="", + blocking=False, + open_ui=open_ui, + no_cache=True, + env=("FOO=bar",), + ) + # capture logs. + # TODO(yurij): refactor the test, check if the actual HTTP request is being sent and that the proper admin + # page is being opened + mock_dispatch.assert_called_with( + Path(os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/app_metadata.py")), + RuntimeType.CLOUD, + start_server=True, + blocking=False, + on_before_run=mock.ANY, + name="", + no_cache=True, + env_vars={"FOO": "bar"}, + ) diff --git a/tests/tests_app/components/__init__.py b/tests/tests_app/components/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/components/python/scripts/a.py b/tests/tests_app/components/python/scripts/a.py new file mode 100644 index 0000000000000..414be73ce4a51 --- /dev/null +++ b/tests/tests_app/components/python/scripts/a.py @@ -0,0 +1 @@ +print("Hello World !") diff --git a/tests/tests_app/components/python/scripts/b.py b/tests/tests_app/components/python/scripts/b.py new file mode 100644 index 0000000000000..53254da11906b --- /dev/null +++ b/tests/tests_app/components/python/scripts/b.py @@ -0,0 +1,3 @@ +import sys + +print(sys.argv) diff --git a/tests/tests_app/components/python/scripts/c.py b/tests/tests_app/components/python/scripts/c.py new file mode 100644 index 0000000000000..eb56e6de70a61 --- /dev/null +++ b/tests/tests_app/components/python/scripts/c.py @@ -0,0 +1,4 @@ +import os + +if __name__ == "__main__": + assert int(os.environ["VARIABLE"]) == 0 diff --git a/tests/tests_app/components/python/test_python.py b/tests/tests_app/components/python/test_python.py new file mode 100644 index 0000000000000..283f449092d06 --- /dev/null +++ b/tests/tests_app/components/python/test_python.py @@ -0,0 +1,72 @@ +import os + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.components.python import PopenPythonScript, TracerPythonScript +from lightning_app.testing.helpers import RunIf +from lightning_app.testing.testing import run_work_isolated + +COMPONENTS_SCRIPTS_FOLDER = str(os.path.join(_PROJECT_ROOT, "tests/tests_app/components/python/scripts/")) + + +def test_non_existing_python_script(): + match = "tests/components/python/scripts/0.py" + with pytest.raises(FileNotFoundError, match=match): + python_script = PopenPythonScript(match) + run_work_isolated(python_script) + assert not python_script.has_started + + with pytest.raises(FileNotFoundError, match=match): + python_script = 
TracerPythonScript(match) + run_work_isolated(python_script) + assert not python_script.has_started + + +def test_simple_python_script(): + python_script = PopenPythonScript(COMPONENTS_SCRIPTS_FOLDER + "a.py") + run_work_isolated(python_script) + assert python_script.has_succeeded + + python_script = TracerPythonScript(COMPONENTS_SCRIPTS_FOLDER + "a.py") + run_work_isolated(python_script) + assert python_script.has_succeeded + + +def test_simple_popen_python_script_with_kwargs(): + python_script = PopenPythonScript( + COMPONENTS_SCRIPTS_FOLDER + "b.py", + script_args="--arg_0=hello --arg_1=world", + ) + run_work_isolated(python_script) + assert python_script.has_succeeded + + +@RunIf(skip_windows=True) +def test_popen_python_script_failure(): + python_script = PopenPythonScript( + COMPONENTS_SCRIPTS_FOLDER + "c.py", + env={"VARIABLE": "1"}, + raise_exception=False, + ) + run_work_isolated(python_script) + assert python_script.has_failed + assert python_script.status.message == "1" + + +def test_tracer_python_script_with_kwargs(): + python_script = TracerPythonScript( + COMPONENTS_SCRIPTS_FOLDER + "b.py", + script_args="--arg_0=hello --arg_1=world", + raise_exception=False, + ) + run_work_isolated(python_script) + assert python_script.has_succeeded + + python_script = TracerPythonScript( + COMPONENTS_SCRIPTS_FOLDER + "c.py", + env={"VARIABLE": "1"}, + raise_exception=False, + ) + run_work_isolated(python_script) + assert python_script.has_failed diff --git a/tests/tests_app/components/sample_package_repo/external_lightning_component_package/__init__.py b/tests/tests_app/components/sample_package_repo/external_lightning_component_package/__init__.py new file mode 100644 index 0000000000000..30058360921b0 --- /dev/null +++ b/tests/tests_app/components/sample_package_repo/external_lightning_component_package/__init__.py @@ -0,0 +1,17 @@ +from lightning_app import LightningFlow, LightningWork + + +class MyCustomLightningWork(LightningWork): + @staticmethod + def special_method(): + return "Hi, I'm an external lightning work component and can be added to any lightning project." + + +class MyCustomLightningFlow(LightningFlow): + @staticmethod + def special_method(): + return "Hi, I'm an external lightning flow component and can be added to any lightning project." 
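+
+# NOTE: `exported_lightning_components` below is what this sample package advertises through the
+# "lightning_app.external_components" entry point declared in its setup.py, which is how
+# `install_external_component` discovers these classes once the package is installed.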
+ + +def exported_lightning_components(): + return [MyCustomLightningWork, MyCustomLightningFlow] diff --git a/tests/tests_app/components/sample_package_repo/setup.py b/tests/tests_app/components/sample_package_repo/setup.py new file mode 100644 index 0000000000000..780088422383b --- /dev/null +++ b/tests/tests_app/components/sample_package_repo/setup.py @@ -0,0 +1,46 @@ +import json +import os + +from setuptools import find_packages, setup +from setuptools.command.install import install + +LIGHTNING_COMPONENT_INFO = { + "package": "external_lightning_component_package", + "version": "0.0.1", + "entry_point": "myorg.lightning_modules", +} + + +class PostInstallCommand(install): + def run(self): + install.run(self) + os.system(f"echo Installed lightning component package: {json.dumps(json.dumps(LIGHTNING_COMPONENT_INFO))}") + + +setup( + name=LIGHTNING_COMPONENT_INFO["package"], + version=LIGHTNING_COMPONENT_INFO["version"], + description="example of an external lightning package that contains lightning components", + author="manskx", + author_email="mansy@grid.ai", + url="grid.ai", + download_url="https://github.com/Lightning-AI/lightning", + license="TBD", + packages=find_packages(exclude=["tests", "docs"]), + long_description="example of an external lightning package that contains lightning components", + long_description_content_type="text/markdown", + include_package_data=True, + zip_safe=False, + keywords=["deep learning", "pytorch", "AI"], + python_requires=">=3.6", + entry_points={ + "lightning_app.external_components": [ + f"{LIGHTNING_COMPONENT_INFO['entry_point']}= " + f"{LIGHTNING_COMPONENT_INFO['package']}:exported_lightning_components", + ], + }, + cmdclass={ + "install": PostInstallCommand, + }, + setup_requires=["wheel"], +) diff --git a/tests/tests_app/components/serve/test_gradio.py b/tests/tests_app/components/serve/test_gradio.py new file mode 100644 index 0000000000000..8dcdeec70a341 --- /dev/null +++ b/tests/tests_app/components/serve/test_gradio.py @@ -0,0 +1,30 @@ +import os +from unittest import mock +from unittest.mock import ANY + + +@mock.patch.dict(os.environ, {"LIGHTING_TESTING": "1"}) +@mock.patch("lightning_app.components.serve.gradio.gradio") +def test_serve_gradio(gradio_mock): + + from lightning_app.components.serve.gradio import ServeGradio + + class MyGradioServe(ServeGradio): + + inputs = gradio_mock.inputs.Image(type="pil") + outputs = gradio_mock.outputs.Image(type="pil") + examples = [["./examples/app_components/serve/gradio/beyonce.png"]] + + def build_model(self): + super().build_model() + return "model" + + def predict(self, *args, **kwargs): + super().predict(*args, **kwargs) + return "prediction" + + comp = MyGradioServe() + comp.run() + assert comp.model == "model" + assert comp.predict() == "prediction" + gradio_mock.Interface.assert_called_once_with(fn=ANY, inputs=ANY, outputs=ANY, examples=ANY) diff --git a/tests/tests_app/components/serve/test_model_inference_api.py b/tests/tests_app/components/serve/test_model_inference_api.py new file mode 100644 index 0000000000000..17ed09aa2eea8 --- /dev/null +++ b/tests/tests_app/components/serve/test_model_inference_api.py @@ -0,0 +1,78 @@ +import base64 +import multiprocessing as mp +import os +from unittest.mock import ANY, MagicMock + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.components.serve import serve +from lightning_app.utilities.imports import _is_numpy_available, _is_torch_available +from lightning_app.utilities.network import _configure_session, 
find_free_network_port + +if _is_numpy_available(): + import numpy as np + +if _is_torch_available(): + import torch + + +class ImageServer(serve.ModelInferenceAPI): + def build_model(self): + return lambda x: x + + def predict(self, image): + image = self.model(image) + return torch.from_numpy(np.asarray(image)) + + +def target_fn(port, workers): + image_server = ImageServer(input="image", output="image", port=port, workers=workers) + image_server.run() + + +@pytest.mark.skipif(not (_is_torch_available() and _is_numpy_available()), reason="Missing torch and numpy") +@pytest.mark.parametrize("workers", [0]) +def test_model_inference_api(workers): + + port = find_free_network_port() + process = mp.Process(target=target_fn, args=(port, workers)) + process.start() + + image_path = os.path.join(_PROJECT_ROOT, "docs/source-app/_static/images/logo.png") + with open(image_path, "rb") as f: + imgstr = base64.b64encode(f.read()).decode("UTF-8") + + session = _configure_session() + res = session.post(f"http://127.0.0.1:{port}/predict", params={"data": imgstr}) + process.terminate() + # TODO: Investigate why this doesn't match exactly `imgstr`. + assert res.json() + + +class EmptyServer(serve.ModelInferenceAPI): + def build_model(self): + return lambda x: x + + def serialize(self, x): + return super().serialize(x) + + def deserialize(self, x): + return super().deserialize(x) + + def predict(self, x): + return super().predict(x) + + +def test_model_inference_api_mock(monkeypatch): + + monkeypatch.setattr(serve, "uvicorn", MagicMock()) + comp = EmptyServer() + comp.run() + serve.uvicorn.run.assert_called_once_with(app=ANY, host=comp.host, port=comp.port, log_level="error") + + with pytest.raises(Exception, match="Only input in"): + EmptyServer(input="something") + + with pytest.raises(Exception, match="Only output in"): + EmptyServer(output="something") diff --git a/tests/tests_app/components/test_install_external_component.py b/tests/tests_app/components/test_install_external_component.py new file mode 100644 index 0000000000000..300ced3c30918 --- /dev/null +++ b/tests/tests_app/components/test_install_external_component.py @@ -0,0 +1,50 @@ +import os +import shutil +import subprocess + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.utilities.install_components import _pip_uninstall_component_package, install_external_component + +_PACKAGE_PATH = os.path.join(_PROJECT_ROOT, "tests", "tests_app", "components", "sample_package_repo") +_EXTERNAL_COMPONENT_PACKAGE = "external_lightning_component_package" +_COMPONENT_PACKAGE_TAR_PATH = os.path.join(_PACKAGE_PATH, "dist", f"{_EXTERNAL_COMPONENT_PACKAGE}-0.0.1.tar.gz") + + +@pytest.fixture(scope="function", autouse=True) +def cleanup_installation(): + _pip_uninstall_component_package(_EXTERNAL_COMPONENT_PACKAGE.replace("_", "-")) + shutil.rmtree(os.path.join(_PROJECT_ROOT, "lightning", "components", "myorg"), ignore_errors=True) + yield + _pip_uninstall_component_package(_EXTERNAL_COMPONENT_PACKAGE.replace("_", "-")) + shutil.rmtree(os.path.join(_PACKAGE_PATH, "dist"), ignore_errors=True) + shutil.rmtree(os.path.join(_PACKAGE_PATH, f"{_EXTERNAL_COMPONENT_PACKAGE}.egg-info"), ignore_errors=True) + shutil.rmtree(os.path.join(_PROJECT_ROOT, "lightning", "components", "myorg"), ignore_errors=True) + + +@pytest.mark.usefixtures("cleanup_installation") +def test_install_external_component(): + with subprocess.Popen( + ["python", "setup.py", "sdist"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=_PACKAGE_PATH, + ) as proc: + 
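+        # build a source distribution (sdist) of the sample package so the tarball asserted below
+        # exists and can be passed to `install_external_component`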
proc.wait() + + assert os.path.exists(_COMPONENT_PACKAGE_TAR_PATH) + + install_external_component(_COMPONENT_PACKAGE_TAR_PATH) + + # TODO (tchaton) Enable once stable. + # from lightning_app.components.myorg.lightning_modules import MyCustomLightningFlow, MyCustomLightningWork + + # assert ( + # MyCustomLightningWork.special_method() + # == "Hi, I'm an external lightning work component and can be added to any lightning project." + # ) + # assert ( + # MyCustomLightningFlow.special_method() + # == "Hi, I'm an external lightning flow component and can be added to any lightning project." + # ) diff --git a/tests/tests_app/conftest.py b/tests/tests_app/conftest.py new file mode 100644 index 0000000000000..af0071322bd1d --- /dev/null +++ b/tests/tests_app/conftest.py @@ -0,0 +1,83 @@ +import os +import shutil +from datetime import datetime +from pathlib import Path +from subprocess import Popen + +import psutil +import py +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.storage.path import storage_root_dir +from lightning_app.utilities.component import _set_context +from lightning_app.utilities.packaging.app_config import _APP_CONFIG_FILENAME +from lightning_app.utilities.state import AppState + +GITHUB_APP_URLS = { + "template_react_ui": "https://github.com/Lightning-AI/lightning-template-react.git", +} + + +def pytest_sessionstart(*_): + """Pytest hook that get called after the Session object has been created and before performing collection and + entering the run test loop.""" + for name, url in GITHUB_APP_URLS.items(): + if not os.path.exists(os.path.join(_PROJECT_ROOT, "examples", name)): + Popen( + ["git", "clone", url, name], + cwd=os.path.join( + _PROJECT_ROOT, + "examples", + ), + ).wait(timeout=90) + else: + Popen(["git", "pull", "main"], cwd=os.path.join(_PROJECT_ROOT, "examples", name)).wait(timeout=90) + + +def pytest_sessionfinish(session, exitstatus): + """Pytest hook that get called after whole test run finished, right before returning the exit status to the + system.""" + # kill all the processes and threads created by parent + # TODO this isn't great. We should have each tests doing it's own cleanup + current_process = psutil.Process() + for child in current_process.children(recursive=True): + params = child.as_dict() or {} + cmd_lines = params.get("cmdline", []) + # we shouldn't kill the resource tracker from multiprocessing. 
If we do, + # `atexit` will throw as it uses resource tracker to try to clean up + if cmd_lines and "resource_tracker" in cmd_lines[-1]: + continue + child.kill() + + +@pytest.fixture(scope="function", autouse=True) +def cleanup(): + from lightning_app.utilities.app_helpers import _LightningAppRef + + yield + _LightningAppRef._app_instance = None + shutil.rmtree("./storage", ignore_errors=True) + shutil.rmtree(storage_root_dir(), ignore_errors=True) + shutil.rmtree("./.shared", ignore_errors=True) + if os.path.isfile(_APP_CONFIG_FILENAME): + os.remove(_APP_CONFIG_FILENAME) + _set_context(None) + + +@pytest.fixture(scope="function", autouse=True) +def clear_app_state_state_variables(): + """Resets global variables in order to prevent interference between tests.""" + yield + import lightning_app.utilities.state + + lightning_app.utilities.state._STATE = None + lightning_app.utilities.state._LAST_STATE = None + AppState._MY_AFFILIATION = () + + +@pytest.fixture +def another_tmpdir(tmp_path: Path) -> py.path.local: + random_dir = datetime.now().strftime("%m-%d-%Y-%H-%M-%S") + tmp_path = os.path.join(tmp_path, random_dir) + return py.path.local(tmp_path) diff --git a/tests/tests_app/core/__init__.py b/tests/tests_app/core/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/core/lightning_app/__init__.py b/tests/tests_app/core/lightning_app/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/core/lightning_app/test_configure_layout.py b/tests/tests_app/core/lightning_app/test_configure_layout.py new file mode 100644 index 0000000000000..9323a3503e839 --- /dev/null +++ b/tests/tests_app/core/lightning_app/test_configure_layout.py @@ -0,0 +1,220 @@ +from re import escape +from unittest import mock +from unittest.mock import Mock + +import pytest + +from lightning_app import LightningApp, LightningFlow +from lightning_app.frontend.stream_lit import StreamlitFrontend +from lightning_app.frontend.web import StaticWebFrontend +from lightning_app.runners import MultiProcessRuntime +from lightning_app.testing.helpers import EmptyFlow + + +@pytest.mark.parametrize("return_val", (1, None, set(), "string")) +def test_invalid_layout(return_val): + class Root(EmptyFlow): + def configure_layout(self): + return return_val + + root = Root() + with pytest.raises(TypeError, match=escape("The return value of configure_layout() in `Root`")): + LightningApp(root) + + +def test_invalid_layout_missing_content_key(): + class Root(EmptyFlow): + def configure_layout(self): + return [dict(name="one")] + + root = Root() + with pytest.raises( + ValueError, match=escape("A dictionary returned by `Root.configure_layout()` is missing a key 'content'.") + ): + LightningApp(root) + + +def test_invalid_layout_unsupported_content_value(): + class Root(EmptyFlow): + def configure_layout(self): + return [dict(name="one", content=[1, 2, 3])] + + root = Root() + + with pytest.raises( + ValueError, + match=escape("A dictionary returned by `Root.configure_layout()"), + ): + LightningApp(root) + + +class StreamlitFrontendFlow(LightningFlow): + def __init__(self): + super().__init__() + self.counter = 0 + + def run(self): + if self.counter > 2: + self._exit() + self.counter += 1 + + def configure_layout(self): + frontend = StreamlitFrontend(render_fn=_render_streamlit_fn) + frontend.start_server = Mock() + frontend.stop_server = Mock() + return frontend + + +def _render_streamlit_fn(): + pass + + +class StaticWebFrontendFlow(LightningFlow): + def 
__init__(self): + super().__init__() + self.counter = 0 + + def run(self): + if self.counter > 2: + self._exit() + self.counter += 1 + + def configure_layout(self): + frontend = StaticWebFrontend(serve_dir="a/b/c") + frontend.start_server = Mock() + frontend.stop_server = Mock() + return frontend + + +@pytest.mark.parametrize("flow", (StaticWebFrontendFlow(), StreamlitFrontendFlow())) +@mock.patch("lightning_app.runners.multiprocess.find_free_network_port") +def test_layout_leaf_node(find_ports_mock, flow): + find_ports_mock.side_effect = lambda: 100 + app = LightningApp(flow) + assert flow._layout == {} + # we copy the dict here because after we dispatch the dict will get update with new instances + # as the layout gets updated during the loop. + frontends = app.frontends.copy() + MultiProcessRuntime(app).dispatch() + assert flow.counter == 3 + + # The target url is available for the frontend after we started the servers in dispatch + assert flow._layout == dict(target="http://localhost:100/root") + assert app.frontends[flow.name].flow is flow + + # we start the servers for the frontends that we collected at the time of app instantiation + frontends[flow.name].start_server.assert_called_once() + + # leaf layout nodes can't be changed, they stay the same from when they first got configured + assert app.frontends[flow.name] == frontends[flow.name] + + +def test_default_content_layout(): + class SimpleFlow(EmptyFlow): + def configure_layout(self): + frontend = StaticWebFrontend(serve_dir="a/b/c") + frontend.start_server = Mock() + return frontend + + class TestContentComponent(EmptyFlow): + def __init__(self): + super().__init__() + self.component0 = SimpleFlow() + self.component1 = SimpleFlow() + self.component2 = SimpleFlow() + + root = TestContentComponent() + LightningApp(root) + assert root._layout == [ + dict(name="component0", content="root.component0"), + dict(name="component1", content="root.component1"), + dict(name="component2", content="root.component2"), + ] + + +def test_url_content_layout(): + class TestContentComponent(EmptyFlow): + def __init__(self): + super().__init__() + self.component0 = EmptyFlow() + self.component1 = EmptyFlow() + + def configure_layout(self): + return [ + dict(name="one", content=self.component0), + dict(name="url", content="https://lightning.ai"), + dict(name="two", content=self.component1), + ] + + root = TestContentComponent() + LightningApp(root) + assert root._layout == [ + dict(name="one", content="root.component0"), + dict(name="url", content="https://lightning.ai", target="https://lightning.ai"), + dict(name="two", content="root.component1"), + ] + + +def test_single_content_layout(): + """Test that returning a single dict also works (does not have to be returned in a list).""" + + class TestContentComponent(EmptyFlow): + def __init__(self): + super().__init__() + self.component0 = EmptyFlow() + self.component1 = EmptyFlow() + + def configure_layout(self): + return dict(name="single", content=self.component1) + + root = TestContentComponent() + LightningApp(root) + assert root._layout == [dict(name="single", content="root.component1")] + + +class DynamicContentComponent(EmptyFlow): + def __init__(self): + super().__init__() + self.component0 = EmptyFlow() + self.component1 = EmptyFlow() + self.counter = 0 + self.configure_layout_called = 0 + + def run(self): + self.run_assertion() + self.counter += 1 + if self.counter == 3: + self._exit() + + def configure_layout(self): + self.configure_layout_called += 1 + tabs = [ + dict(name="one", 
content=self.component0), + dict(name=f"{self.counter}", content=self.component1), + ] + # reverse the order of the two tabs every time the counter is odd + if self.counter % 2 != 0: + tabs = tabs[::-1] + return tabs + + def run_assertion(self): + """Assert that the layout changes as the counter changes its value.""" + layout_even = [ + dict(name="one", content="root.component0"), + dict(name=f"{self.counter}", content="root.component1"), + ] + layout_odd = layout_even[::-1] + assert ( + self.counter % 2 == 0 + and self._layout == layout_even + or self.counter % 2 == 1 + and self._layout == layout_odd + ) + + +def test_dynamic_content_layout_update(): + """Test that the `configure_layout()` gets called as part of the loop and can return new layouts.""" + flow = DynamicContentComponent() + app = LightningApp(flow) + MultiProcessRuntime(app).dispatch() + assert flow.configure_layout_called == 5 diff --git a/tests/tests_app/core/scripts/.lightning b/tests/tests_app/core/scripts/.lightning new file mode 100644 index 0000000000000..cde49e685cc26 --- /dev/null +++ b/tests/tests_app/core/scripts/.lightning @@ -0,0 +1 @@ +name: test-app-name diff --git a/tests/tests_app/core/scripts/app_metadata.py b/tests/tests_app/core/scripts/app_metadata.py new file mode 100644 index 0000000000000..bf6c025ceda0a --- /dev/null +++ b/tests/tests_app/core/scripts/app_metadata.py @@ -0,0 +1,61 @@ +from lightning_app.core.app import LightningApp +from lightning_app.core.flow import LightningFlow +from lightning_app.core.work import LightningWork +from lightning_app.frontend.web import StaticWebFrontend +from lightning_app.utilities.packaging.cloud_compute import CloudCompute + + +class WorkA(LightningWork): + def __init__(self): + """WorkA.""" + super().__init__() + + def run(self): + pass + + +class WorkB(LightningWork): + def __init__(self): + """WorkB.""" + super().__init__(cloud_compute=CloudCompute("gpu")) + + def run(self): + pass + + +class FlowA(LightningFlow): + def __init__(self): + """FlowA Component.""" + super().__init__() + self.work_a = WorkA() + + def run(self): + pass + + +class FlowB(LightningFlow): + def __init__(self): + """FlowB.""" + super().__init__() + self.work_b = WorkB() + + def run(self): + pass + + def configure_layout(self): + return StaticWebFrontend(serve_dir=".") + + +class RootFlow(LightningFlow): + def __init__(self): + """RootFlow.""" + super().__init__() + self.flow_a_1 = FlowA() + self.flow_a_2 = FlowA() + self.flow_b = FlowB() + + def run(self): + self._exit() + + +app = LightningApp(RootFlow()) diff --git a/tests/tests_app/core/scripts/empty.py b/tests/tests_app/core/scripts/empty.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/core/scripts/example_1.py b/tests/tests_app/core/scripts/example_1.py new file mode 100644 index 0000000000000..486a0566ff80d --- /dev/null +++ b/tests/tests_app/core/scripts/example_1.py @@ -0,0 +1 @@ +from numbers import Rational # noqa F401 diff --git a/tests/tests_app/core/scripts/example_2.py b/tests/tests_app/core/scripts/example_2.py new file mode 100644 index 0000000000000..3555ee3e0fdcf --- /dev/null +++ b/tests/tests_app/core/scripts/example_2.py @@ -0,0 +1 @@ +from lightning_app import LightningApp # noqa F401 diff --git a/tests/tests_app/core/scripts/lightning_cli.py b/tests/tests_app/core/scripts/lightning_cli.py new file mode 100644 index 0000000000000..e6f2e7b3b0198 --- /dev/null +++ b/tests/tests_app/core/scripts/lightning_cli.py @@ -0,0 +1,62 @@ +from lightning_app.utilities.imports import 
_is_pytorch_lightning_available, _is_torch_available + +if _is_torch_available(): + import torch + from torch.utils.data import DataLoader, Dataset + +if _is_pytorch_lightning_available(): + from pytorch_lightning import LightningDataModule, LightningModule + from pytorch_lightning.utilities import cli + + +if __name__ == "__main__": + + class RandomDataset(Dataset): + def __init__(self, size, length): + self.len = length + self.data = torch.randn(length, size) + + def __getitem__(self, index): + return self.data[index] + + def __len__(self): + return self.len + + class BoringDataModule(LightningDataModule): + def train_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def val_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def test_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def predict_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + class BoringModel(LightningModule): + def __init__(self): + super().__init__() + self.layer = torch.nn.Linear(32, 2) + + def forward(self, x): + return self.layer(x) + + def training_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("train_loss", loss) + return {"loss": loss} + + def validation_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("valid_loss", loss) + + def test_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("test_loss", loss) + + def configure_optimizers(self): + return torch.optim.SGD(self.layer.parameters(), lr=0.1) + + cli.LightningCLI(BoringModel, BoringDataModule) diff --git a/tests/tests_app/core/scripts/lightning_overrides.py b/tests/tests_app/core/scripts/lightning_overrides.py new file mode 100644 index 0000000000000..6139d970289d0 --- /dev/null +++ b/tests/tests_app/core/scripts/lightning_overrides.py @@ -0,0 +1,55 @@ +from lightning_app.utilities.imports import _is_pytorch_lightning_available, _is_torch_available + +if _is_torch_available(): + from torch.utils.data import Dataset + +if _is_pytorch_lightning_available(): + from torchmetrics import Metric + + from pytorch_lightning import LightningDataModule, LightningModule, Trainer + from pytorch_lightning.accelerators.accelerator import Accelerator + from pytorch_lightning.callbacks import Callback + from pytorch_lightning.lite import LightningLite + from pytorch_lightning.loggers import LightningLoggerBase + from pytorch_lightning.loops.base import Loop + from pytorch_lightning.plugins import PrecisionPlugin + from pytorch_lightning.profiler.base import BaseProfiler + + +if __name__ == "__main__": + + class RandomDataset(Dataset): + pass + + class BoringDataModule(LightningDataModule): + pass + + class BoringModel(LightningModule): + pass + + class BoringTrainer(Trainer): + pass + + class BoringPrecisionPlugin(PrecisionPlugin): + pass + + class BoringAccelerator(Accelerator): + pass + + class BoringCallback(Callback): + pass + + class BoringLightningLoggerBase(LightningLoggerBase): + pass + + class BoringLoop(Loop): + pass + + class BoringMetric(Metric): + pass + + class BoringLightningLite(LightningLite): + pass + + class BoringBaseProfiler(BaseProfiler): + pass diff --git a/tests/tests_app/core/scripts/lightning_trainer.py b/tests/tests_app/core/scripts/lightning_trainer.py new file mode 100644 index 0000000000000..9e241767a8693 --- /dev/null +++ b/tests/tests_app/core/scripts/lightning_trainer.py @@ -0,0 +1,74 @@ +import argparse + +from lightning_app.utilities.imports import 
_is_pytorch_lightning_available, _is_torch_available + +if _is_torch_available(): + import torch + from torch.utils.data import DataLoader, Dataset + +if _is_pytorch_lightning_available(): + import pytorch_lightning as pl + from pytorch_lightning import LightningDataModule, LightningModule + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--max_epochs", type=int, default=10) + args = parser.parse_args() + + class RandomDataset(Dataset): + def __init__(self, size, length): + self.len = length + self.data = torch.randn(length, size) + + def __getitem__(self, index): + return self.data[index] + + def __len__(self): + return self.len + + class BoringDataModule(LightningDataModule): + def train_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def val_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def test_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def predict_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + class BoringModel(LightningModule): + def __init__(self): + super().__init__() + self.layer = torch.nn.Linear(32, 2) + + def forward(self, x): + return self.layer(x) + + def training_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("train_loss", loss) + return {"loss": loss} + + def validation_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("valid_loss", loss) + + def test_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("test_loss", loss) + + def configure_optimizers(self): + return torch.optim.SGD(self.layer.parameters(), lr=0.1) + + model = BoringModel() + datamodule = BoringDataModule() + trainer = pl.Trainer(**vars(args)) + trainer.fit(model, datamodule) + + +if __name__ == "__main__": + main() diff --git a/tests/tests_app/core/scripts/registry.py b/tests/tests_app/core/scripts/registry.py new file mode 100644 index 0000000000000..35d6921756f36 --- /dev/null +++ b/tests/tests_app/core/scripts/registry.py @@ -0,0 +1,103 @@ +from lightning_app.utilities.imports import _is_pytorch_lightning_available + +if _is_pytorch_lightning_available(): + import torch + from torch.utils.data import DataLoader, Dataset + + from pytorch_lightning import LightningDataModule, LightningModule + from pytorch_lightning.utilities.cli import LightningCLI + + class RandomDataset(Dataset): + def __init__(self, size, length): + self.len = length + self.data = torch.randn(length, size) + + def __getitem__(self, index): + return self.data[index] + + def __len__(self): + return self.len + + class BoringDataModule(LightningDataModule): + def __init__(self, root_folder: str = "./", batch_size: int = 32): + super().__init__() + + def train_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def val_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def test_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def predict_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + class BoringDataModule2(LightningDataModule): + def __init__(self, root_folder: str = "./", batch_size: int = 32, num_workers: int = 6): + super().__init__() + + def train_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def val_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def test_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + def 
predict_dataloader(self): + return DataLoader(RandomDataset(32, 64), batch_size=2) + + class BoringModel(LightningModule): + def __init__(self, hidden_size: int = 16): + super().__init__() + self.layer = torch.nn.Linear(32, 2) + + def forward(self, x): + return self.layer(x) + + def training_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("train_loss", loss) + return {"loss": loss} + + def validation_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("valid_loss", loss) + + def test_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("test_loss", loss) + + def configure_optimizers(self): + return torch.optim.SGD(self.layer.parameters(), lr=0.1) + + class BoringModel2(LightningModule): + def __init__(self, hidden_size: int = 16, batch_norm: bool = False): + super().__init__() + self.layer = torch.nn.Linear(32, 2) + + def forward(self, x): + return self.layer(x) + + def training_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("train_loss", loss) + return {"loss": loss} + + def validation_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("valid_loss", loss) + + def test_step(self, batch, batch_idx): + loss = self(batch).sum() + self.log("test_loss", loss) + + def configure_optimizers(self): + return torch.optim.SGD(self.layer.parameters(), lr=0.1) + + +if __name__ == "__main__": + LightningCLI() diff --git a/tests/tests_app/core/scripts/script_with_error.py b/tests/tests_app/core/scripts/script_with_error.py new file mode 100644 index 0000000000000..b962e082ac306 --- /dev/null +++ b/tests/tests_app/core/scripts/script_with_error.py @@ -0,0 +1,13 @@ +from lightning_app import LightningApp, LightningFlow + + +class EmptyFlow(LightningFlow): + def run(self): + pass + + +if __name__ == "__main__": + # trigger a Python exception `IndexError: list index out of range` before we can load the app + _ = [1, 2, 3][4] + + app = LightningApp(EmptyFlow()) diff --git a/tests/tests_app/core/scripts/two_apps.py b/tests/tests_app/core/scripts/two_apps.py new file mode 100644 index 0000000000000..9b9eb6e4cfe5d --- /dev/null +++ b/tests/tests_app/core/scripts/two_apps.py @@ -0,0 +1,10 @@ +from lightning_app import LightningApp, LightningFlow + + +class EmptyFlow(LightningFlow): + def run(self): + pass + + +app_1 = LightningApp(EmptyFlow()) +app_2 = LightningApp(EmptyFlow()) diff --git a/tests/tests_app/core/test_constants.py b/tests/tests_app/core/test_constants.py new file mode 100644 index 0000000000000..72a9ccf7f5e55 --- /dev/null +++ b/tests/tests_app/core/test_constants.py @@ -0,0 +1,9 @@ +import os +from unittest import mock + +from lightning_app.core.constants import get_lightning_cloud_url + + +@mock.patch.dict(os.environ, {"LIGHTNING_CLOUD_URL": "https://beta.lightning.ai"}) +def test_defaults(): + assert get_lightning_cloud_url() == "https://beta.lightning.ai" diff --git a/tests/tests_app/core/test_lightning_api.py b/tests/tests_app/core/test_lightning_api.py new file mode 100644 index 0000000000000..81ba6fe0ba179 --- /dev/null +++ b/tests/tests_app/core/test_lightning_api.py @@ -0,0 +1,373 @@ +import logging +import multiprocessing as mp +import os +from copy import deepcopy +from unittest import mock + +import pytest +from deepdiff import DeepDiff, Delta +from httpx import AsyncClient + +from lightning_app import LightningApp, LightningFlow, LightningWork +from lightning_app.core import api +from lightning_app.core.api import fastapi_service, global_app_state_store, start_server, UIRefresher +from 
lightning_app.runners import MultiProcessRuntime, SingleProcessRuntime +from lightning_app.storage.drive import Drive +from lightning_app.testing.helpers import MockQueue +from lightning_app.utilities.component import _set_frontend_context, _set_work_context +from lightning_app.utilities.enum import AppStage +from lightning_app.utilities.load_app import extract_metadata_from_app +from lightning_app.utilities.redis import check_if_redis_running +from lightning_app.utilities.state import AppState, headers_for + + +class WorkA(LightningWork): + def __init__(self): + super().__init__(parallel=True) + self.var_a = 0 + self.drive = Drive("lit://test_app_state_api") + + def run(self): + state = AppState() + assert state._my_affiliation == ("work_a",) + # this would download and push data to the REST API. + assert state.var_a == 0 + assert isinstance(state.drive, Drive) + assert state.drive.component_name == "root.work_a" + + with open("test_app_state_api.txt", "w") as f: + f.write("here") + state.drive.put("test_app_state_api.txt") + state.var_a = -1 + + +class _A(LightningFlow): + def __init__(self): + super().__init__() + self.work_a = WorkA() + + def run(self): + if self.work_a.var_a == -1: + self._exit() + self.work_a.run() + + +# TODO: Resolve singleprocess - idea: explore frame calls recursively. +@pytest.mark.parametrize("runtime_cls", [MultiProcessRuntime]) +def test_app_state_api(runtime_cls): + """This test validates the AppState can properly broadcast changes from work within its own process.""" + app = LightningApp(_A()) + runtime_cls(app, start_server=True).dispatch() + assert app.root.work_a.var_a == -1 + _set_work_context() + assert app.root.work_a.drive.list(".") == ["test_app_state_api.txt"] + _set_frontend_context() + assert app.root.work_a.drive.list(".") == ["test_app_state_api.txt"] + os.remove("test_app_state_api.txt") + + +class A2(LightningFlow): + def __init__(self): + super().__init__() + self.var_a = 0 + self.a = _A() + + def update_state(self): + state = AppState() + # this would download and push data to the REST API. + assert state.a.work_a.var_a == 0 + assert state.var_a == 0 + state.var_a = -1 + + def run(self): + if self.var_a == 0: + self.update_state() + elif self.var_a == -1: + self._exit() + + +# TODO: Find why this test is flaky. 
+@pytest.mark.skipif(True, reason="flaky test.") +@pytest.mark.parametrize("runtime_cls", [SingleProcessRuntime]) +def test_app_state_api_with_flows(runtime_cls, tmpdir): + """This test validates the AppState can properly broadcast changes from flows.""" + app = LightningApp(A2(), debug=True) + runtime_cls(app, start_server=True).dispatch() + assert app.root.var_a == -1 + + +class FlowA(LightningFlow): + def __init__(self): + super().__init__() + self.counter = 0 + + def run(self): + self.counter += 1 + if self.counter >= 3: + self._exit() + + +class AppStageTestingApp(LightningApp): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.counter_running = 0 + self.counter_stopped = 0 + self.counter = 0 + + def _change_stage(self, enum): + previous_state = deepcopy(self.state) + current_state = self.state + current_state["app_state"]["stage"] = enum.value + deep_diff = DeepDiff(previous_state, current_state) + self.api_delta_queue.put(Delta(deep_diff)) + + def maybe_apply_changes(self): + if self.counter_stopped == 1 and self.counter_running == 1: + if self.counter == 0: + self._change_stage(AppStage.RUNNING) + self.counter += 1 + if self.counter == 3: + self._change_stage(AppStage.STOPPING) + + # emulate pending from the UI. + elif self.stage == AppStage.BLOCKING: + self._change_stage(AppStage.RUNNING) + self.counter_running += 1 + + elif self.root.counter == 2: + self._change_stage(AppStage.RESTARTING) + self.counter_stopped += 1 + + super().maybe_apply_changes() + + +# FIXME: This test doesn't assert anything +@pytest.mark.skipif(True, reason="TODO: Resolve flaky test.") +@pytest.mark.parametrize("runtime_cls", [SingleProcessRuntime, MultiProcessRuntime]) +def test_app_stage_from_frontend(runtime_cls): + """This test validates that delta from the `api_delta_queue` manipulating the ['app_state']['stage'] would + start and stop the app.""" + app = AppStageTestingApp(FlowA(), debug=True) + app.stage = AppStage.BLOCKING + runtime_cls(app, start_server=True).dispatch() + + +def test_update_publish_state_and_maybe_refresh_ui(): + """This test checks that the method properly: + + - receives the state from the `publish_state_queue` and populates the app_state_store + - receives a notification to refresh the UI and makes a GET Request (streamlit). 
+ """ + + app = AppStageTestingApp(FlowA(), debug=True) + publish_state_queue = MockQueue("publish_state_queue") + + publish_state_queue.put(app.state_with_changes) + + thread = UIRefresher(publish_state_queue) + thread.run_once() + + assert global_app_state_store.get_app_state("1234") == app.state_with_changes + global_app_state_store.remove("1234") + global_app_state_store.add("1234") + + +@pytest.mark.parametrize("x_lightning_type", ["DEFAULT", "STREAMLIT"]) +@pytest.mark.anyio +async def test_start_server(x_lightning_type): + """This test relies on FastAPI TestClient and validates that the REST API properly provides: + + - the state on GET /api/v1/state + - push a delta when making a POST request to /api/v1/state + """ + + class InfiniteQueue(MockQueue): + def get(self, timeout: int = 0): + return self._queue[0] + + app = AppStageTestingApp(FlowA(), debug=True) + app.stage = AppStage.BLOCKING + publish_state_queue = InfiniteQueue("publish_state_queue") + change_state_queue = MockQueue("change_state_queue") + has_started_queue = MockQueue("has_started_queue") + state = app.state_with_changes + publish_state_queue.put(state) + spec = extract_metadata_from_app(app) + ui_refresher = start_server( + publish_state_queue, change_state_queue, has_started_queue=has_started_queue, uvicorn_run=False, spec=spec + ) + headers = headers_for({"type": x_lightning_type}) + + async with AsyncClient(app=fastapi_service, base_url="http://test") as client: + + with pytest.raises(Exception, match="X-Lightning-Session-UUID"): + await client.get("/api/v1/spec") + + with pytest.raises(Exception, match="X-Lightning-Session-ID"): + await client.get("/api/v1/spec", headers={"X-Lightning-Session-UUID": headers["X-Lightning-Session-UUID"]}) + + response = await client.get("/api/v1/spec", headers=headers) + assert response.json() == spec + + with pytest.raises(Exception, match="X-Lightning-Session-UUID"): + await client.get("/api/v1/state") + + with pytest.raises(Exception, match="X-Lightning-Session-ID"): + await client.get("/api/v1/state", headers={"X-Lightning-Session-UUID": headers["X-Lightning-Session-UUID"]}) + + response = await client.get("/api/v1/state", headers=headers) + assert response.json() == state + assert response.status_code == 200 + + new_state = deepcopy(state) + new_state["vars"]["counter"] += 1 + + with pytest.raises(Exception, match="X-Lightning-Session-UUID"): + await client.post("/api/v1/state") + + with pytest.raises(Exception, match="X-Lightning-Session-ID"): + await client.post( + "/api/v1/state", headers={"X-Lightning-Session-UUID": headers["X-Lightning-Session-UUID"]} + ) + + response = await client.post("/api/v1/state", json={"stage": "running"}, headers=headers) + assert change_state_queue._queue[0].to_dict() == { + "values_changed": {"root['app_state']['stage']": {"new_value": "running"}} + } + assert response.status_code == 200 + + response = await client.post("/api/v1/state", json={"state": new_state}, headers=headers) + assert change_state_queue._queue[1].to_dict() == { + "values_changed": {"root['vars']['counter']": {"new_value": 1}} + } + assert response.status_code == 200 + + response = await client.post( + "/api/v1/delta", + json={ + "delta": { + "values_changed": {"root['flows']['video_search']['vars']['should_process']": {"new_value": True}} + } + }, + headers=headers, + ) + assert change_state_queue._queue[2].to_dict() == { + "values_changed": {"root['flows']['video_search']['vars']['should_process']": {"new_value": True}} + } + assert response.status_code == 200 + + # used 
to clean the app_state_store to following test. + global_app_state_store.remove("1234") + global_app_state_store.add("1234") + + del client + ui_refresher.join(0) + + +@pytest.mark.parametrize( + "path, expected_status_code", + ( + ("/api/v1", 404), + ("/api/v1/asdf", 404), + ("/api/asdf", 404), + ("/api", 404), + ), +) +@pytest.mark.anyio +async def test_state_api_routes(path, expected_status_code): + async with AsyncClient(app=fastapi_service, base_url="http://test") as client: + response = await client.get(path) + assert response.status_code == expected_status_code + + +@pytest.mark.skipif(not check_if_redis_running(), reason="redis not running") +@pytest.mark.anyio +async def test_health_endpoint_success(): + global_app_state_store.store = {} + global_app_state_store.add("1234") + async with AsyncClient(app=fastapi_service, base_url="http://test") as client: + # will respond 503 if redis is not running + response = await client.get("/healthz") + assert response.status_code == 500 + assert response.json() == {"status": "failure", "reason": "State is empty {}"} + global_app_state_store.set_app_state("1234", {"state": None}) + response = await client.get("/healthz") + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + global_app_state_store.remove("1234") + global_app_state_store.store = {} + global_app_state_store.add("1234") + + +@pytest.mark.skipif( + check_if_redis_running(), reason="this is testing the failure condition " "for which the redis should not run" +) +@pytest.mark.anyio +async def test_health_endpoint_failure(): + async with AsyncClient(app=fastapi_service, base_url="http://test") as client: + # will respond 503 if redis is not running + response = await client.get("/healthz") + assert response.status_code == 500 + + +@pytest.mark.parametrize( + "path, expected_status_code", + ( + ("/", 200), + ("/asdf", 200), + ("/view/component_a", 200), + ("/admin", 200), + ), +) +@pytest.mark.anyio +async def test_frontend_routes(path, expected_status_code): + async with AsyncClient(app=fastapi_service, base_url="http://test") as client: + response = await client.get(path) + assert response.status_code == expected_status_code + + +def test_start_server_started(): + """This test ensures has_started_queue receives a signal when the REST API has started.""" + api_publish_state_queue = mp.Queue() + api_delta_queue = mp.Queue() + has_started_queue = mp.Queue() + kwargs = dict( + api_publish_state_queue=api_publish_state_queue, + api_delta_queue=api_delta_queue, + has_started_queue=has_started_queue, + port=1111, + ) + + server_proc = mp.Process(target=start_server, kwargs=kwargs) + server_proc.start() + # requires to wait for the UI to be clicked on. + + # wait for server to be ready + assert has_started_queue.get() == "SERVER_HAS_STARTED" + server_proc.kill() + + +@mock.patch("uvicorn.run") +@mock.patch("lightning_app.core.api.UIRefresher") +@pytest.mark.parametrize("host", ["http://0.0.0.1", "0.0.0.1"]) +def test_start_server_info_message(ui_refresher, uvicorn_run, caplog, monkeypatch, host): + api_publish_state_queue = MockQueue() + api_delta_queue = MockQueue() + has_started_queue = MockQueue() + kwargs = dict( + host=host, + port=1111, + api_publish_state_queue=api_publish_state_queue, + api_delta_queue=api_delta_queue, + has_started_queue=has_started_queue, + ) + + monkeypatch.setattr(api, "logger", logging.getLogger()) + + with caplog.at_level(logging.INFO): + start_server(**kwargs) + + assert "Your app has started. 
View it in your browser: http://0.0.0.1:1111/view" in caplog.text + + ui_refresher.assert_called_once() + uvicorn_run.assert_called_once_with(host="0.0.0.1", port=1111, log_level="error", app=mock.ANY) diff --git a/tests/tests_app/core/test_lightning_app.py b/tests/tests_app/core/test_lightning_app.py new file mode 100644 index 0000000000000..f55e7cb84b66a --- /dev/null +++ b/tests/tests_app/core/test_lightning_app.py @@ -0,0 +1,886 @@ +import os +import pickle +from time import sleep +from unittest import mock +from unittest.mock import ANY + +import pytest +from deepdiff import Delta +from tests_app import _PROJECT_ROOT + +from lightning_app import LightningApp, LightningFlow, LightningWork # F401 +from lightning_app.core.constants import ( + FLOW_DURATION_SAMPLES, + FLOW_DURATION_THRESHOLD, + REDIS_QUEUES_READ_DEFAULT_TIMEOUT, + STATE_UPDATE_TIMEOUT, +) +from lightning_app.core.queues import BaseQueue, MultiProcessQueue, RedisQueue, SingleProcessQueue +from lightning_app.frontend import StreamlitFrontend +from lightning_app.runners import MultiProcessRuntime, SingleProcessRuntime +from lightning_app.storage.path import storage_root_dir +from lightning_app.testing.helpers import RunIf +from lightning_app.testing.testing import LightningTestApp +from lightning_app.utilities.app_helpers import affiliation +from lightning_app.utilities.enum import AppStage, WorkStageStatus, WorkStopReasons +from lightning_app.utilities.redis import check_if_redis_running +from lightning_app.utilities.warnings import LightningFlowWarning + + +class B1(LightningFlow): + def __init__(self): + super().__init__() + + def run(self): + pass + + +class A1(LightningFlow): + def __init__(self): + super().__init__() + self.b = B1() + + def run(self): + pass + + +class Work(LightningWork): + def __init__(self, cache_calls: bool = True): + super().__init__(cache_calls=cache_calls) + self.counter = 0 + self.has_finished = False + + def run(self): + self.counter += 1 + if self.cache_calls: + self.has_finished = True + elif self.counter >= 3: + self.has_finished = True + + +class SimpleFlow(LightningFlow): + def __init__(self): + super().__init__() + self.work_a = Work(cache_calls=True) + self.work_b = Work(cache_calls=False) + + def run(self): + self.work_a.run() + self.work_b.run() + if self.work_a.has_finished and self.work_b.has_finished: + self._exit() + + +@pytest.mark.skip +@pytest.mark.parametrize("component_cls", [SimpleFlow]) +@pytest.mark.parametrize("runtime_cls", [SingleProcessRuntime]) +def test_simple_app(component_cls, runtime_cls, tmpdir): + comp = component_cls() + app = LightningApp(comp, debug=True) + assert app.root == comp + expected = { + "app_state": ANY, + "vars": {"_layout": ANY, "_paths": {}}, + "calls": {}, + "flows": {}, + "works": { + "work_b": { + "vars": {"has_finished": False, "counter": 0, "_urls": {}, "_paths": {}}, + "calls": {}, + "changes": {}, + }, + "work_a": { + "vars": {"has_finished": False, "counter": 0, "_urls": {}, "_paths": {}}, + "calls": {}, + "changes": {}, + }, + }, + "changes": {}, + } + assert app.state == expected + runtime_cls(app, start_server=False).dispatch() + + assert comp.work_a.has_finished + assert comp.work_b.has_finished + # possible the `work_a` takes for ever to + # start and `work_b` has already completed multiple iterations. 
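+    # work_a was created with cache_calls=True, so its successful run is cached and executes only once,
+    # while work_b (cache_calls=False) keeps running until its own counter reaches 3.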
+ assert comp.work_a.counter == 1 + assert comp.work_b.counter >= 3 + + +class WorkCounter(LightningWork): + def __init__(self): + super().__init__() + self.c = 0 + + def run(self): + self.c = 1 + + +class E(LightningFlow): + def __init__(self): + super().__init__() + self.w_e = WorkCounter() + + def run(self): + self.w_e.run() + + +class D(LightningFlow): + def __init__(self): + super().__init__() + self.w_d = WorkCounter() + self.e = E() + + def run(self): + self.w_d.run() + self.e.run() + + +class C(LightningFlow): + def __init__(self): + super().__init__() + self.w_c = WorkCounter() + self.d = D() + + def run(self): + self.w_c.run() + self.d.run() + + +class B(LightningFlow): + def __init__(self): + super().__init__() + self.w_b = WorkCounter() + self.c = C() + + def run(self): + self.w_b.run() + self.c.run() + + +class A(LightningFlow): + def __init__(self): + super().__init__() + self.w_a = WorkCounter() + self.b = B() + + def run(self): + self.w_a.run() + self.b.run() + if self.b.c.d.e.w_e.c == 1: + self._exit() + + +def test_nested_component_names(): + root = A() + assert root.name == "root" + assert root.w_a.name == "root.w_a" + assert root.b.name == "root.b" + assert root.b.w_b.name == "root.b.w_b" + assert root.b.c.name == "root.b.c" + assert root.b.c.w_c.name == "root.b.c.w_c" + assert root.b.c.d.name == "root.b.c.d" + assert root.b.c.d.e.name == "root.b.c.d.e" + assert root.b.c.d.e.w_e.name == "root.b.c.d.e.w_e" + assert root.b.c.d.w_d.name == "root.b.c.d.w_d" + + +def test_get_component_by_name(): + app = LightningApp(A()) + assert app.get_component_by_name("root") is app.root + assert app.get_component_by_name("root.b") is app.root.b + assert app.get_component_by_name("root.w_a") is app.root.w_a + assert app.get_component_by_name("root.b.w_b") is app.root.b.w_b + assert app.get_component_by_name("root.b.c.d.e") is app.root.b.c.d.e + + +def test_get_component_by_name_raises(): + app = LightningApp(A()) + + for name in ("", "ro", "roott"): + with pytest.raises(ValueError, match=f"Invalid component name {name}."): + app.get_component_by_name(name) + + with pytest.raises(AttributeError, match="Component 'root' has no child component with name ''"): + app.get_component_by_name("root.") + + with pytest.raises(AttributeError, match="Component 'root' has no child component with name 'x'"): + app.get_component_by_name("root.x") + + with pytest.raises(AttributeError, match="Component 'root.b' has no child component with name 'x'"): + app.get_component_by_name("root.b.x") + + with pytest.raises(AttributeError, match="Component 'root.b.w_b' has no child component with name 'c'"): + app.get_component_by_name("root.b.w_b.c") + + +@pytest.mark.parametrize("runtime_cls", [SingleProcessRuntime, MultiProcessRuntime]) +def test_nested_component(runtime_cls): + app = LightningApp(A(), debug=True) + runtime_cls(app, start_server=False).dispatch() + assert app.root.w_a.c == 1 + assert app.root.b.w_b.c == 1 + assert app.root.b.c.w_c.c == 1 + assert app.root.b.c.d.w_d.c == 1 + assert app.root.b.c.d.e.w_e.c == 1 + + +class WorkCC(LightningWork): + def run(self): + pass + + +class CC(LightningFlow): + def __init__(self): + super().__init__() + self.work_cc = WorkCC() + + def run(self): + pass + + +class BB(LightningFlow): + def __init__(self): + super().__init__() + self.c1 = CC() + self.c2 = CC() + + def run(self): + pass + + +class AA(LightningFlow): + def __init__(self): + super().__init__() + self.b = BB() + + def run(self): + pass + + +def test_component_affiliation(): + app = LightningApp(AA()) 
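+    # affiliation() returns the component's name path relative to the root as a tuple,
+    # so the root itself maps to an empty tuple.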
+    a_affiliation = affiliation(app.root)
+    assert a_affiliation == ()
+    b_affiliation = affiliation(app.root.b)
+    assert b_affiliation == ("b",)
+    c1_affiliation = affiliation(app.root.b.c1)
+    assert c1_affiliation == ("b", "c1")
+    c2_affiliation = affiliation(app.root.b.c2)
+    assert c2_affiliation == ("b", "c2")
+    work_cc_affiliation = affiliation(app.root.b.c2.work_cc)
+    assert work_cc_affiliation == ("b", "c2", "work_cc")
+
+
+class Work4(LightningWork):
+    def __init__(self):
+        super().__init__(parallel=True)
+        self.var_a = 0
+        self.has_finished = False
+
+    def run(self):
+        self.var_a = 1
+        sleep(2)
+        # This would never be reached, as the app would exit before
+        self.has_finished = True
+
+
+class A4(LightningFlow):
+    def __init__(self):
+        super().__init__()
+        self.work = Work4()
+
+    def run(self):
+        self.work.run()
+        if self.work.var_a == 1:
+            self._exit()
+
+
+@pytest.mark.parametrize("runtime_cls", [MultiProcessRuntime])
+def test_setattr_multiprocessing(runtime_cls, tmpdir):
+    app = LightningApp(A4())
+    runtime_cls(app, start_server=False).dispatch()
+    assert app.root.work.var_a == 1
+    assert not app.root.work.has_finished
+
+
+class CounterFlow(LightningFlow):
+    def __init__(self):
+        super().__init__()
+        self.counter = 0
+
+    def run(self):
+        self.counter += 1
+
+
+class SimpleApp2(LightningApp):
+    def run_once(self):
+        if self.root.counter == 5:
+            self.stage = AppStage.RESTARTING
+        return super().run_once()
+
+    def _apply_restarting(self):
+        super()._apply_restarting()
+        assert self.stage == AppStage.BLOCKING
+        return True
+
+
+@pytest.mark.parametrize("runtime_cls", [SingleProcessRuntime, MultiProcessRuntime])
+def test_app_restarting_move_to_blocking(runtime_cls, tmpdir):
+    """Validates that sending restarting moves the app back to blocking."""
+    app = SimpleApp2(CounterFlow(), debug=True)
+    runtime_cls(app, start_server=False).dispatch()
+
+
+class FlowWithFrontend(LightningFlow):
+    def run(self):
+        pass
+
+    def configure_layout(self):
+        return StreamlitFrontend(render_fn=lambda _: None)
+
+
+class AppWithFrontend(LightningApp):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.run_once_call_count = 0
+
+    def run_once(self):
+        # by the time run_once gets called the first time, the target_url for the frontend should be set
+        # and be present in both the LightningApp.state and the LightningApp._original_state
+        assert self.state["vars"]["_layout"]["target"].startswith("http://localhost")
+        assert self._original_state["vars"]["_layout"]["target"].startswith("http://localhost")
+        assert self.run_once_call_count or self.state == self._original_state
+
+        self.run_once_call_count += 1
+        if self.run_once_call_count == 3:
+            return True, 0.0
+        return super().run_once()
+
+
+@mock.patch("lightning_app.frontend.stream_lit.StreamlitFrontend.start_server")
+@mock.patch("lightning_app.frontend.stream_lit.StreamlitFrontend.stop_server")
+def test_app_starts_with_complete_state_copy(_, __):
+    """Test that the LightningApp captures the initial state in a separate copy when _run() gets called."""
+    app = AppWithFrontend(FlowWithFrontend(), debug=True)
+    MultiProcessRuntime(app, start_server=False).dispatch()
+    assert app.run_once_call_count == 3
+
+
+class EmptyFlow(LightningFlow):
+    def __init__(self):
+        super().__init__()
+        self.counter = 0
+
+    def run(self):
+        pass
+
+
+@pytest.mark.parametrize(
+    "queue_type_cls, default_timeout",
+    [
+        (SingleProcessQueue, STATE_UPDATE_TIMEOUT),
+        (MultiProcessQueue, STATE_UPDATE_TIMEOUT),
+        pytest.param(
+            RedisQueue,
+            REDIS_QUEUES_READ_DEFAULT_TIMEOUT,
+            marks=pytest.mark.skipif(not check_if_redis_running(), reason="Redis is not running"),
+        ),
+    ],
+)
+@pytest.mark.parametrize(
+    "sleep_time, expect",
+    [
+        (1, 0),
+        (0, 100),
+    ],
+)
+def test_lightning_app_aggregation_speed(default_timeout, queue_type_cls: BaseQueue, sleep_time, expect):
+
+    """This test validates that `_collect_deltas_from_ui_and_work_queues` can aggregate multiple deltas together
+    within a time window."""
+
+    class SlowQueue(queue_type_cls):
+        def get(self, timeout):
+            out = super().get(timeout)
+            sleep(sleep_time)
+            return out
+
+    app = LightningApp(EmptyFlow())
+
+    app.api_delta_queue = SlowQueue("api_delta_queue", default_timeout)
+    if queue_type_cls is RedisQueue:
+        app.api_delta_queue.clear()
+
+    def make_delta(i):
+        return Delta({"values_changed": {"root['vars']['counter']": {"new_value": i}}})
+
+    # flood the queue with mocked deltas
+    for i in range(expect + 10):
+        app.api_delta_queue.put(make_delta(i))
+
+    # Wait for a bit because multiprocessing.Queue doesn't run in the same thread and takes some time for writes
+    sleep(0.001)
+
+    delta = app._collect_deltas_from_ui_and_work_queues()[-1]
+    generated = delta.to_dict()["values_changed"]["root['vars']['counter']"]["new_value"]
+    if sleep_time:
+        assert generated == expect
+    else:
+        # validate that the flow aggregated more than `expect` deltas.
+        assert generated > expect
+
+
+class SimpleFlow(LightningFlow):
+    def __init__(self):
+        super().__init__()
+        self.counter = 0
+
+    def run(self):
+        self.counter = 1
+
+
+def test_maybe_apply_changes_from_flow():
+    """This test validates the app `_has_updated` is set to True only if the state was changed in the flow."""
+
+    app = LightningApp(SimpleFlow())
+    assert not app._has_updated
+    app.maybe_apply_changes()
+    app.root.run()
+    app.maybe_apply_changes()
+    assert app._has_updated
+    app._has_updated = False
+    app.maybe_apply_changes()
+    assert not app._has_updated
+
+
+class SimpleWork(LightningWork):
+    def __init__(self):
+        super().__init__(cache_calls=False, parallel=True)
+        self.counter = 0
+
+    def run(self):
+        self.counter += 1
+
+
+class FlowA(LightningFlow):
+    def __init__(self):
+        super().__init__()
+        self.work_a = SimpleWork()
+        self.work_b = SimpleWork()
+
+    def run(self):
+        if self.work_a.counter == self.work_b.counter == 0:
+            self.work_a.run()
+            self.work_b.run()
+
+
+class SuccessException(Exception):
+    pass
+
+
+class CheckpointLightningApp(LightningApp):
+    def _dump_checkpoint(self):
+        super()._dump_checkpoint()
+        raise SuccessException
+
+
+@pytest.mark.parametrize("runtime_cls", [MultiProcessRuntime])
+def test_snapshotting(runtime_cls, tmpdir):
+    try:
+        app = CheckpointLightningApp(FlowA())
+        app.checkpointing = True
+        runtime_cls(app, start_server=False).dispatch()
+    except SuccessException:
+        pass
+    checkpoint_dir = os.path.join(storage_root_dir(), "checkpoints")
+    checkpoints = os.listdir(checkpoint_dir)
+    assert len(checkpoints) == 1
+    with open(os.path.join(checkpoint_dir, checkpoints[0]), "rb") as f:
+        state = pickle.load(f)
+        assert state["works"]["work_a"]["vars"]["counter"] == 1
+        assert state["works"]["work_b"]["vars"]["counter"] == 1
+
+
+class CounterWork(LightningWork):
+    def __init__(self, parallel: bool, cache_calls: bool):
+        super().__init__(parallel=parallel, cache_calls=cache_calls)
+        self.counter = 0
+
+    def run(self, counter=0):
+        self.counter += 1
+
+
+class WaitForAllFlow(LightningFlow):
+    def __init__(self, use_same_args):
+        super().__init__()
+        counter = 0
+        self.use_same_args = use_same_args
+        for parallel in [False, True]:
+            for cache_calls in [False, True]:
+                work = CounterWork(parallel=parallel, cache_calls=cache_calls)
+                setattr(self, f"work_{counter}", work)
+                counter += 1
+        self.c = 0
+
+    def run(self):
+        next_c = self.c + 1
+        for work in self.experimental_iterate(self.works(), run_once=False):
+            if work.num_successes < (next_c):
+                if not self.use_same_args:
+                    work.run(self.c)
+                else:
+                    work.run(None)
+
+        expected = 1 if self.use_same_args else next_c
+
+        if not all([w.num_successes == (expected if w.cache_calls else next_c) for w in self.works()]):
+            return
+
+        self.c += 1
+        assert [w.counter for w in self.works()] == [self.c, expected, self.c, expected]
+        if self.c > 3:
+            self._exit()
+
+
+# TODO (tchaton) Resolve this test.
+@pytest.mark.skipif(True, reason="flaky test which never terminates")
+@pytest.mark.parametrize("runtime_cls", [MultiProcessRuntime])
+@pytest.mark.parametrize("use_same_args", [False, True])
+def test_state_wait_for_all_all_works(tmpdir, runtime_cls, use_same_args):
+    app = LightningApp(WaitForAllFlow(use_same_args))
+    runtime_cls(app, start_server=False).dispatch()
+
+
+class CheckpointCounter(LightningWork):
+    def __init__(self):
+        super().__init__(cache_calls=False)
+        self.counter = 0
+
+    def run(self):
+        self.counter += 1
+
+
+class CheckpointFlow(LightningFlow):
+    def __init__(self, work: LightningWork, depth=0):
+        super().__init__()
+        self.depth = depth
+        if depth == 0:
+            self.counter = 0
+
+        if depth >= 10:
+            self.work = work
+        else:
+            self.flow = CheckpointFlow(work, depth + 1)
+
+    def run(self):
+        if hasattr(self, "counter"):
+            self.counter += 1
+            if self.counter > 5:
+                self._exit()
+        if self.depth >= 10:
+            self.work.run()
+        else:
+            self.flow.run()
+
+
+def test_lightning_app_checkpointing_with_nested_flows():
+    work = CheckpointCounter()
+    app = LightningApp(CheckpointFlow(work))
+    app.checkpointing = True
+    SingleProcessRuntime(app, start_server=False).dispatch()
+
+    assert app.root.counter == 6
+    assert app.root.flow.flow.flow.flow.flow.flow.flow.flow.flow.flow.work.counter == 5
+
+    work = CheckpointCounter()
+    app = LightningApp(CheckpointFlow(work))
+    assert app.root.counter == 0
+    assert app.root.flow.flow.flow.flow.flow.flow.flow.flow.flow.flow.work.counter == 0
+
+    app.load_state_dict_from_checkpoint_dir(app.checkpoint_dir)
+    # The counter was incremented to 6 after the latest checkpoint was created.
+    assert app.root.counter == 5
+    assert app.root.flow.flow.flow.flow.flow.flow.flow.flow.flow.flow.work.counter == 5
+
+
+def test_load_state_dict_from_checkpoint_dir(tmpdir):
+    work = CheckpointCounter()
+    app = LightningApp(CheckpointFlow(work))
+
+    checkpoints = []
+    num_checkpoints = 11
+    # generate 11 checkpoints.
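+    # the counter is incremented after each dump, so checkpoint `i` captures `counter == i` (0..10).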
+ for _ in range(num_checkpoints): + checkpoints.append(app._dump_checkpoint()) + app.root.counter += 1 + + app.load_state_dict_from_checkpoint_dir(app.checkpoint_dir) + assert app.root.counter == (num_checkpoints - 1) + + for version in range(num_checkpoints): + app.load_state_dict_from_checkpoint_dir(app.checkpoint_dir, version=version) + assert app.root.counter == version + + with pytest.raises(FileNotFoundError, match="The provided directory"): + app.load_state_dict_from_checkpoint_dir("./random_folder/") + + with pytest.raises(Exception, match="No checkpoints where found"): + app.load_state_dict_from_checkpoint_dir(str(os.path.join(_PROJECT_ROOT, "tests/tests_app/"))) + + # delete 2 checkpoints + os.remove(os.path.join(checkpoints[4])) + os.remove(os.path.join(checkpoints[7])) + + app.load_state_dict_from_checkpoint_dir(app.checkpoint_dir) + assert app.root.counter == (num_checkpoints - 1) + + app.load_state_dict_from_checkpoint_dir(app.checkpoint_dir, version=5) + checkpoint_path = app._dump_checkpoint() + + assert os.path.basename(checkpoint_path).startswith("v_11") + + +class PicklableObject: + pass + + +class PickleableReturnWork(LightningWork): + def __init__(self): + super().__init__() + + def run(self): + return PicklableObject() + + +class PickleableReturnFlow(LightningFlow): + def __init__(self): + super().__init__() + self.work = PickleableReturnWork() + + def run(self): + self.work.run() + + +def test_pickleable_return_from_work(): + """Test that any object that is pickleable can be returned from the run method in LightningWork.""" + with pytest.raises(SystemExit, match="1"): + app = LightningApp(PickleableReturnFlow()) + MultiProcessRuntime(app, start_server=False).dispatch() + + +class WorkDD(LightningWork): + def __init__(self): + super().__init__(parallel=True) + self.total = 10 + self.counter = 1 + + def run(self): + should_wait = self.counter == 1 + start_counter = self.total - self.counter + for _ in range(start_counter): + if should_wait: + sleep(0.5) + self.counter += 1 + + +class FlowCC(LightningFlow): + def __init__(self): + super().__init__() + self.work = WorkDD() + + def run(self): + self.work.run() + if self.work.counter == 10: + self._exit() + + +class FaultToleranceLightningTestApp(LightningTestApp): + def on_after_run_once(self): + if self.root.work.status.reason == WorkStopReasons.SIGTERM_SIGNAL_HANDLER: + assert self.root.work.counter < 10 + self.restart_work("root.work") + elif self.root.work.counter == 2: + self.kill_work("root.work") + return True, 0.0 + return super().on_after_run_once() + + +# TODO (tchaton) Resolve this test with Resumable App. 
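+# The LightningTestApp above restarts `root.work` whenever it was stopped by a SIGTERM signal
+# and kills it for good once its counter reaches 2, which is what the test below asserts.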
+@RunIf(skip_windows=True) +def test_fault_tolerance_work(): + app = FaultToleranceLightningTestApp(FlowCC()) + MultiProcessRuntime(app, start_server=False).dispatch() + assert app.root.work.counter == 2 + + +class ProtectedAttributesWork(LightningWork): + def __init__(self): + super().__init__() + # a public attribute, this should show up in the state + self.done = False + # a protected and a private attribute, these should NOT show up in the state + self._protected = 1 + self.__private = 2 + + def run(self): + self.done = True + self._protected = 10 + self.__private = 20 + + +class ProtectedAttributesFlow(LightningFlow): + def __init__(self): + super().__init__() + # a public attribute, this should show up in the state + self.done = False + # a protected and a private attribute, these should NOT show up in the state + self._protected = 1 + self.__private = 2 + + self.protected_work = ProtectedAttributesWork() + + def run(self): + flow_variables = self.state_vars["vars"] + assert "done" in flow_variables + assert "_protected" not in flow_variables + assert "__private" not in flow_variables + self.done = True + + self.protected_work.run() + if self.protected_work.done: + work_variables = self.protected_work.state_vars["vars"] + assert "done" in work_variables + assert "_protected" not in work_variables + assert "__private" not in work_variables + + # TODO: getattr and setattr access outside the Work should raise an error in the future + _ = self.protected_work._protected + self.protected_work._protected = 1 + + if self.done and self.protected_work.done: + self._exit() + + +def test_protected_attributes_not_in_state(): + flow = ProtectedAttributesFlow() + MultiProcessRuntime(LightningApp(flow)).dispatch() + + +class WorkExit(LightningWork): + def __init__(self): + super().__init__() + + def run(self): + pass + + +class FlowExit(LightningFlow): + def __init__(self): + super().__init__() + self.work = WorkExit() + + def run(self): + self.work.run() + self._exit() + + +def test_lightning_app_exit(): + app = LightningApp(FlowExit()) + MultiProcessRuntime(app).dispatch() + assert app.root.work.status.stage == WorkStageStatus.STOPPED + + +class CounterWork2(LightningWork): + def __init__(self): + super().__init__(parallel=True) + self.counter = 0 + + def run(self): + self.counter += 1 + + +class FlowStop(LightningFlow): + def __init__(self): + super().__init__() + self.w = CounterWork2() + + def run(self): + if self.w.status.stage == WorkStageStatus.STOPPED: + self._exit() + if self.w.counter == 1: + self.w.stop() + self.w.run() + + +@RunIf(skip_windows=True) +def test_lightning_stop(): + app = LightningApp(FlowStop()) + MultiProcessRuntime(app, start_server=False).dispatch() + + +class SleepyFlow(LightningFlow): + def __init__(self, sleep_interval, *args, **kwargs): + super().__init__(*args, **kwargs) + self.counter = 0 + self.sleep_interval = sleep_interval + + def run(self): + if self.counter == 2 * FLOW_DURATION_SAMPLES: + self._exit() + sleep(self.sleep_interval) + self.counter += 1 + + +class SleepyWork(LightningWork): + def __init__(self, sleep_interval, *args, **kwargs): + super().__init__(*args, **kwargs) + self.sleep_interval = sleep_interval + + def run(self): + sleep(self.sleep_interval) + + +class SleepyFlowWithWork(LightningFlow): + def __init__(self, sleep_interval, work_sleep_interval, parallel, *args, **kwargs): + super().__init__(*args, **kwargs) + self.counter = 0 + self.sleep_interval = sleep_interval + self.work = SleepyWork(work_sleep_interval, parallel=parallel) + + def 
run(self): + if self.counter == 2 * FLOW_DURATION_SAMPLES: + self._exit() + self.work.run() + sleep(self.sleep_interval) + self.counter += 1 + + +def test_slow_flow(): + app0 = LightningApp(SleepyFlow(sleep_interval=0.5 * FLOW_DURATION_THRESHOLD)) + + MultiProcessRuntime(app0).dispatch() + + app1 = LightningApp(SleepyFlow(sleep_interval=2 * FLOW_DURATION_THRESHOLD)) + + with pytest.warns(LightningFlowWarning): + MultiProcessRuntime(app1).dispatch() + + app0 = LightningApp( + SleepyFlowWithWork( + sleep_interval=0.5 * FLOW_DURATION_THRESHOLD, + work_sleep_interval=2 * FLOW_DURATION_THRESHOLD, + parallel=False, + ) + ) + + MultiProcessRuntime(app0).dispatch() + + app1 = LightningApp( + SleepyFlowWithWork( + sleep_interval=0.5 * FLOW_DURATION_THRESHOLD, work_sleep_interval=2 * FLOW_DURATION_THRESHOLD, parallel=True + ) + ) + + MultiProcessRuntime(app1).dispatch() diff --git a/tests/tests_app/core/test_lightning_flow.py b/tests/tests_app/core/test_lightning_flow.py new file mode 100644 index 0000000000000..26841e057621b --- /dev/null +++ b/tests/tests_app/core/test_lightning_flow.py @@ -0,0 +1,637 @@ +import os +import pickle +from collections import Counter +from copy import deepcopy +from dataclasses import dataclass +from time import time +from unittest.mock import ANY + +import pytest +from deepdiff import DeepDiff, Delta + +from lightning_app import LightningApp +from lightning_app.core.flow import LightningFlow +from lightning_app.core.work import LightningWork +from lightning_app.runners import MultiProcessRuntime, SingleProcessRuntime +from lightning_app.storage import Path +from lightning_app.storage.path import storage_root_dir +from lightning_app.testing.helpers import EmptyFlow, EmptyWork +from lightning_app.utilities.app_helpers import _delta_to_appstate_delta, _LightningAppRef +from lightning_app.utilities.exceptions import ExitAppException + + +def test_empty_component(): + class A(LightningFlow): + def run(self): + pass + + empty_component = A() + assert empty_component.state == { + "vars": {"_layout": ANY, "_paths": {}}, + "calls": {}, + "flows": {}, + "structures": {}, + "changes": {}, + "works": {}, + } + + +@dataclass +class CustomDataclass: + x: int = 1 + y: tuple = (3, 2, 1) + + +@pytest.mark.parametrize( + "attribute", + ( + {3, 2, 1}, + lambda _: 5, + CustomDataclass(), + ), +) +@pytest.mark.parametrize("cls", (LightningWork, LightningFlow)) +def test_unsupported_attribute_types(cls, attribute): + class Component(cls): + def __init__(self): + super().__init__() + self.x = attribute + + def run(self): + pass + + with pytest.raises(AttributeError, match="Only JSON-serializable attributes are currently supported"): + Component() + + +@pytest.mark.parametrize( + "name,value", + [ + ("x", 1), + ("f", EmptyFlow()), + ("w", EmptyWork()), + ], +) +def test_unsupported_attribute_declaration_outside_init_or_run(name, value): + """Test that LightningFlow attributes (with a few exceptions) are not allowed to be declared outside + __init__.""" + flow = EmptyFlow() + with pytest.raises(AttributeError, match=f"Cannot set attributes that were not defined in __init__: {name}"): + setattr(flow, name, value) + assert not hasattr(flow, name) + assert name not in flow.state["vars"] + assert name not in flow._works + assert name not in flow._flows + + # no error for protected attributes, since they don't contribute to the state + setattr(flow, "_" + name, value) + assert hasattr(flow, "_" + name) + + +@pytest.mark.parametrize( + "name,value", + [ + ("x", 1), + ("f", EmptyFlow()), + ("w", 
EmptyWork()),
+    ],
+)
+@pytest.mark.parametrize("defined", [False, True])
+def test_unsupported_attribute_declaration_inside_run(defined, name, value):
+    """Test that a LightningFlow can set LightningFlow or LightningWork attributes inside its run method, but
+    everything else needs to be defined in the __init__ method."""
+
+    class Flow(LightningFlow):
+        def __init__(self):
+            super().__init__()
+            if defined:
+                setattr(self, name, None)
+
+        def run(self):
+            if not defined and not isinstance(value, (LightningFlow, LightningWork)):
+                with pytest.raises(
+                    AttributeError, match=f"Cannot set attributes that were not defined in __init__: {name}"
+                ):
+                    setattr(self, name, value)
+                assert name not in self.state["vars"]
+                assert name not in self._works
+                assert name not in self._flows
+            else:
+                setattr(self, name, value)
+                if isinstance(value, LightningFlow):
+                    assert name in self._flows
+                elif isinstance(value, LightningWork):
+                    assert name in self._works
+                else:
+                    assert name in self.state["vars"]
+
+    flow = Flow()
+    flow.run()
+
+
+@pytest.mark.parametrize("value", [EmptyFlow(), EmptyWork()])
+def test_name_gets_removed_from_state_when_defined_as_flow_works(value):
+    """Test that LightningFlow attributes are removed from the state."""
+
+    class EmptyFlow(LightningFlow):
+        def __init__(self):
+            super().__init__()
+            self.value = None
+
+        def run(self):
+            self.value = value
+
+    flow = EmptyFlow()
+    flow.run()
+    if isinstance(value, LightningFlow):
+        assert "value" not in flow.state["vars"]
+        assert "value" in flow._flows
+    else:
+        assert "value" not in flow.state["vars"]
+        assert "value" in flow._works
+
+
+@pytest.mark.parametrize(
+    "name,value",
+    [
+        ("_name", "name"),
+        ("_changes", {"change": 1}),
+    ],
+)
+def test_supported_attribute_declaration_outside_init(name, value):
+    """Test the custom LightningFlow setattr implementation for the few reserved attributes that are allowed to be
+    set from outside __init__."""
+    flow = EmptyFlow()
+    setattr(flow, name, value)
+    assert getattr(flow, name) == value
+
+
+def test_supported_attribute_declaration_inside_init():
+    """Test that the custom LightningFlow setattr can identify the __init__ call in the stack frames above."""
+
+    class Flow(EmptyFlow):
+        def __init__(self):
+            super().__init__()
+            self.directly_in_init = "init"
+            self.method_under_init()
+
+        def method_under_init(self):
+            self.attribute = "test"
+            self.subflow = EmptyFlow()
+
+    flow = Flow()
+    assert flow.directly_in_init == "init"
+    assert flow.state["vars"]["directly_in_init"] == "init"
+    assert flow.attribute == "test"
+    assert flow.state["vars"]["attribute"] == "test"
+    assert isinstance(flow.subflow, EmptyFlow)
+    assert flow.state["flows"]["subflow"] == flow.subflow.state
+
+
+def test_setattr_outside_run_context():
+    """Test that it is allowed to update attributes outside `run` as long as the attribute is already declared."""
+
+    class Flow(EmptyFlow):
+        def __init__(self):
+            super().__init__()
+            self.attribute = ""
+
+        def outside_run(self):
+            # setting is allowed outside `run` because the attribute was already declared in __init__
+            self.attribute = "allowed"
+            return super().configure_layout()
+
+    flow = Flow()
+    flow.outside_run()
+    assert flow.attribute == "allowed"
+    assert flow.state["vars"]["attribute"] == "allowed"
+
+
+def _run_state_transformation(tmpdir, attribute, update_fn, inplace=False):
+    """This helper function defines a flow, assigns an attribute and performs a transformation on the state."""
+
+    class StateTransformationTest(LightningFlow):
+        def __init__(self):
+            super().__init__()
+            self.x = attribute
self.finished = False + + def run(self): + if self.finished: + self._exit() + + x = update_fn(self.x) + if not inplace: + self.x = x + self.finished = True + + flow = StateTransformationTest() + assert flow.x == attribute + app = LightningApp(flow) + SingleProcessRuntime(app, start_server=False).dispatch() + return app.state["vars"]["x"] + + +@pytest.mark.parametrize( + "attribute,update_fn,expected", + ( + (1, lambda x: x + 1, 2), + (0.5, lambda x: x + 0.5, 1.0), + (True, lambda x: not x, False), + ("cocofruit", lambda x: x + "s", "cocofruits"), + (dict(a=1, b=2), lambda x: dict(a=1, b=3), dict(a=1, b=3)), + ([1, 2], lambda x: [1, 2, 3], [1, 2, 3]), + ((4, 5), lambda x: (4, 5, 6), (4, 5, 6)), + ), +) +def test_attribute_state_change(attribute, update_fn, expected, tmpdir): + """Test that state changes get recored on all supported data types.""" + assert _run_state_transformation(tmpdir, attribute, update_fn, inplace=False) == expected + + +def test_inplace_attribute_state_change(tmpdir): + """Test that in-place modifications on containers get captured as a state change.""" + # inplace modification of a nested dict + def transform(x): + x["b"]["c"] += 1 + + value = dict(a=1, b=dict(c=2)) + expected = dict(a=1, b=dict(c=3)) + assert _run_state_transformation(tmpdir, value, transform, inplace=True) == expected + + # inplace modification of nested list + def transform(x): + x[2].append(3.0) + + value = ["a", 1, [2.0]] + expected = ["a", 1, [2.0, 3.0]] + assert _run_state_transformation(tmpdir, value, transform, inplace=True) == expected + + # inplace modification of a custom dict + def transform(x): + x.update("baa") + + value = Counter("abab") + expected = Counter(a=4, b=3) + assert _run_state_transformation(tmpdir, value, transform, inplace=True) == expected + + +def test_lightning_flow_and_work(): + class Work(LightningWork): + def __init__(self, cache_calls: bool = True, port=None): + super().__init__(cache_calls=cache_calls, port=port) + self.counter = 0 + + def run(self, *args, **kwargs): + self.counter += 1 + + class Flow_A(LightningFlow): + def __init__(self): + super().__init__() + self.counter = 0 + self.work_a = Work(cache_calls=True, port=8000) + self.work_b = Work(cache_calls=False, port=8001) + + def run(self): + if self.counter < 5: + self.work_a.run() + self.work_b.run() + self.counter += 1 + else: + self._exit() + + flow_a = Flow_A() + assert flow_a.named_works() == [("work_a", flow_a.work_a), ("work_b", flow_a.work_b)] + assert flow_a.works() == [flow_a.work_a, flow_a.work_b] + state = { + "vars": {"counter": 0, "_layout": ANY, "_paths": {}}, + "calls": {}, + "flows": {}, + "structures": {}, + "works": { + "work_b": { + "vars": { + "counter": 0, + "_url": "", + "_future_url": "", + "_port": 8001, + "_host": "127.0.0.1", + "_paths": {}, + "_restarting": False, + "_internal_ip": "", + }, + "calls": {"latest_call_hash": None}, + "changes": {}, + }, + "work_a": { + "vars": { + "counter": 0, + "_url": "", + "_future_url": "", + "_port": 8000, + "_host": "127.0.0.1", + "_paths": {}, + "_restarting": False, + "_internal_ip": "", + }, + "calls": {"latest_call_hash": None}, + "changes": {}, + }, + }, + "changes": {}, + } + assert flow_a.state == state + try: + while True: + flow_a.run() + except ExitAppException: + pass + + state = { + "vars": {"counter": 5, "_layout": ANY, "_paths": {}}, + "calls": {}, + "flows": {}, + "structures": {}, + "works": { + "work_b": { + "vars": { + "counter": 5, + "_url": "", + "_future_url": "", + "_port": 8001, + "_host": "127.0.0.1", + "_paths": {}, + 
"_restarting": False, + "_internal_ip": "", + }, + "calls": {"latest_call_hash": None}, + "changes": {}, + }, + "work_a": { + "vars": { + "counter": 1, + "_url": "", + "_future_url": "", + "_port": 8000, + "_host": "127.0.0.1", + "_paths": {}, + "_restarting": False, + "_internal_ip": "", + }, + "calls": { + "latest_call_hash": None, + "run:fe3fa0f34fc1317e152e5afb023332995392071046f1ea51c34c7c9766e3676c": { + "name": "run", + "call_hash": "run:fe3fa0f34fc1317e152e5afb023332995392071046f1ea51c34c7c9766e3676c", + "ret": None, + }, + }, + "changes": {}, + }, + }, + "changes": {}, + } + assert flow_a.state == state + + +def test_populate_changes(): + class WorkA(LightningWork): + def __init__(self): + super().__init__() + self.counter = 0 + + def run(self): + pass + + class A(LightningFlow): + def __init__(self): + super().__init__() + self.work = WorkA() + + def run(self): + pass + + flow_a = A() + flow_state = flow_a.state + work_state = flow_a.work.state + flow_a.work.counter = 1 + work_state_2 = flow_a.work.state + delta = Delta(DeepDiff(work_state, work_state_2)) + delta = _delta_to_appstate_delta(flow_a, flow_a.work, delta) + new_flow_state = LightningApp.populate_changes(flow_state, flow_state + delta) + flow_a.set_state(new_flow_state) + assert flow_a.work.counter == 1 + assert new_flow_state["works"]["work"]["changes"] == {"counter": {"from": 0, "to": 1}} + assert flow_a.work._changes == {"counter": {"from": 0, "to": 1}} + + +def test_populate_changes_status_removed(): + """Regression test for https://github.com/Lightning-AI/lightning/issues/342.""" + last_state = { + "vars": {}, + "calls": {}, + "flows": {}, + "works": { + "work": { + "vars": {}, + "calls": { + "latest_call_hash": "run:fe3f", + "run:fe3f": { + "statuses": [ + {"stage": "requesting", "message": None, "reason": None, "timestamp": 1}, + {"stage": "starting", "message": None, "reason": None, "timestamp": 2}, + {"stage": "requesting", "message": None, "reason": None, "timestamp": 3}, + ], + }, + }, + "changes": {}, + }, + }, + "changes": {}, + } + new_state = deepcopy(last_state) + call = new_state["works"]["work"]["calls"]["run:fe3f"] + call["statuses"] = call["statuses"][:-1] # pretend that a status was removed from the list + new_state_before = deepcopy(new_state) + new_state = LightningApp.populate_changes(last_state, new_state) + assert new_state == new_state_before + + +class CFlow(LightningFlow): + def __init__(self, run_once): + super().__init__() + self.looping = 0 + self.tracker = 0 + self.restarting = False + self.run_once = run_once + + def run(self): + for idx in self.experimental_iterate(range(0, 10), run_once=self.run_once): + if not self.restarting and (idx + 1) == 5: + _LightningAppRef.get_current()._dump_checkpoint() + self._exit() + self.tracker += 1 + self.looping += 1 + if self.looping == 2: + self._exit() + + +@pytest.mark.parametrize("runtime_cls", [SingleProcessRuntime]) +@pytest.mark.parametrize("run_once", [False, True]) +def test_lightning_flow_iterate(tmpdir, runtime_cls, run_once): + app = LightningApp(CFlow(run_once)) + runtime_cls(app, start_server=False).dispatch() + assert app.root.looping == 0 + assert app.root.tracker == 4 + call_hash = list(v for v in app.root._calls if "experimental_iterate" in v)[0] + iterate_call = app.root._calls[call_hash] + assert iterate_call["counter"] == 4 + assert not iterate_call["has_finished"] + + checkpoint_dir = os.path.join(storage_root_dir(), "checkpoints") + app = LightningApp(CFlow(run_once)) + app.load_state_dict_from_checkpoint_dir(checkpoint_dir) 
+    app.root.restarting = True
+    assert app.root.looping == 0
+    assert app.root.tracker == 4
+    runtime_cls(app, start_server=False).dispatch()
+    assert app.root.looping == 2
+    assert app.root.tracker == (10 if run_once else 20)
+    iterate_call = app.root._calls[call_hash]
+    assert iterate_call["has_finished"]
+
+
+class FlowCounter(LightningFlow):
+    def __init__(self):
+        super().__init__()
+        self.counter = 0
+
+    def run(self):
+        if self.counter >= 3:
+            self._exit()
+        self.counter += 1
+
+
+@pytest.mark.parametrize("runtime_cls", [SingleProcessRuntime, MultiProcessRuntime])
+def test_lightning_flow_counter(runtime_cls, tmpdir):
+
+    app = LightningApp(FlowCounter())
+    app.checkpointing = True
+    runtime_cls(app, start_server=False).dispatch()
+    assert app.root.counter == 3
+
+    checkpoint_dir = os.path.join(storage_root_dir(), "checkpoints")
+    checkpoints = os.listdir(checkpoint_dir)
+    assert len(checkpoints) == 4
+    for checkpoint in checkpoints:
+        checkpoint_path = os.path.join(checkpoint_dir, checkpoint)
+        with open(checkpoint_path, "rb") as f:
+            app = LightningApp(FlowCounter())
+            app.set_state(pickle.load(f))
+            runtime_cls(app, start_server=False).dispatch()
+            assert app.root.counter == 3
+
+
+def test_flow_iterate_method():
+    class Flow(LightningFlow):
+        def run(self):
+            pass
+
+    flow = Flow()
+    with pytest.raises(TypeError, match="An iterable should be provided"):
+        next(flow.experimental_iterate(1))
+
+
+def test_flow_path_assignment():
+    """Test that paths in the lit format lit:// get converted to a proper lightning_app.storage.Path object."""
+
+    class Flow(LightningFlow):
+        def __init__(self):
+            super().__init__()
+            self.no_path = "a/b/c"
+            self.path = Path("lit://x/y/z")
+            self.lit_path = "lit://x/y/z"
+
+    flow = Flow()
+    assert isinstance(flow.no_path, str)
+    assert isinstance(flow.path, Path)
+    assert isinstance(flow.lit_path, Path)
+    assert flow.path == flow.lit_path
+
+
+def test_flow_state_change_with_path():
+    """Test that type changes to a Path attribute are properly reflected within the state."""
+
+    class Flow(LightningFlow):
+        def __init__(self):
+            super().__init__()
+            self.none_to_path = None
+            self.path_to_none = Path()
+            self.path_to_path = Path()
+
+        def run(self):
+            self.none_to_path = "lit://none/to/path"
+            self.path_to_none = None
+            self.path_to_path = "lit://path/to/path"
+            self._exit()
+
+    flow = Flow()
+    MultiProcessRuntime(LightningApp(flow)).dispatch()
+    assert flow.none_to_path == Path("lit://none/to/path")
+    assert flow.path_to_none is None
+    assert flow.path_to_path == Path("lit://path/to/path")
+
+    assert "path_to_none" not in flow._paths
+    assert "path_to_none" in flow._state
+    assert flow._paths["none_to_path"] == Path("lit://none/to/path").to_dict()
+    assert flow._paths["path_to_path"] == Path("lit://path/to/path").to_dict()
+    assert flow.state["vars"]["none_to_path"] == Path("lit://none/to/path")
+    assert flow.state["vars"]["path_to_none"] is None
+    assert flow.state["vars"]["path_to_path"] == Path("lit://path/to/path")
+
+
+class FlowSchedule(LightningFlow):
+    def __init__(self):
+        super().__init__()
+        self._last_time = None
+
+    def run(self):
+        if self.schedule("* * * * * 0,5,10,15,20,25,30,35,40,45,50,55"):
+            if self._last_time is None:
+                self._last_time = False
+            elif not self._last_time:
+                self._last_time = time()
+            else:
+                # TODO (tchaton) Optimize flow execution.
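+                # the schedule above fires every 5 seconds, so two consecutive triggers
+                # are expected to be roughly 5 seconds apart.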
+ assert 4.0 < abs(time() - self._last_time) < 6.0 + self._exit() + + +def test_scheduling_api(): + + app = LightningApp(FlowSchedule()) + MultiProcessRuntime(app).dispatch() + + +def test_lightning_flow(): + class Flow(LightningFlow): + def run(self): + if self.schedule("midnight"): + pass + if self.schedule("hourly"): + pass + if self.schedule("@hourly"): + pass + if self.schedule("daily"): + pass + if self.schedule("weekly"): + pass + if self.schedule("monthly"): + pass + if self.schedule("yearly"): + pass + if self.schedule("annually"): + pass + assert len(self._calls["scheduling"]) == 8 + + Flow().run() diff --git a/tests/tests_app/core/test_lightning_work.py b/tests/tests_app/core/test_lightning_work.py new file mode 100644 index 0000000000000..913fdf04c3299 --- /dev/null +++ b/tests/tests_app/core/test_lightning_work.py @@ -0,0 +1,283 @@ +from queue import Empty +from unittest.mock import Mock + +import pytest + +from lightning_app import LightningApp +from lightning_app.core.flow import LightningFlow +from lightning_app.core.work import LightningWork, LightningWorkException +from lightning_app.runners import MultiProcessRuntime +from lightning_app.storage import Path +from lightning_app.storage.requests import GetRequest +from lightning_app.testing.helpers import EmptyFlow, EmptyWork, MockQueue +from lightning_app.utilities.enum import WorkStageStatus +from lightning_app.utilities.proxies import ProxyWorkRun, WorkRunner + + +def test_simple_lightning_work(): + class Work_A(LightningWork): + def __init__(self): + super().__init__() + self.started = False + + with pytest.raises(TypeError, match="Work_A"): + Work_A() + + class Work_B(Work_A): + def run(self, *args, **kwargs): + self.started = True + + work_b = Work_B() + work_b.run() + assert work_b.started + + class Work_C(LightningWork): + def __init__(self): + super().__init__() + self.work_b = Work_B() + + def run(self, *args, **kwargs): + pass + + with pytest.raises(LightningWorkException, match="isn't allowed to take any children such as"): + Work_C() + + class Work_C(LightningWork): + def __init__(self): + super().__init__() + self.flow = LightningFlow() + + def run(self, *args, **kwargs): + pass + + with pytest.raises(LightningWorkException, match="LightningFlow"): + Work_C() + + +def test_forgot_to_call_init(): + """This test validates the error message for user registering state without calling __init__ is + comprehensible.""" + + class W(LightningWork): + def __init__(self): + self.var_a = None + + def run(self): + pass + + with pytest.raises(AttributeError, match="Did you forget to call"): + W() + + +@pytest.mark.parametrize( + "name,value", + [ + ("x", 1), + ("f", EmptyFlow()), + ("w", EmptyWork()), + ("run", lambda _: _), + ], +) +def test_unsupported_attribute_declaration_outside_init(name, value): + """Test that LightningWork attributes (with a few exceptions) are not allowed to be set outside __init__.""" + flow = EmptyFlow() + with pytest.raises(AttributeError, match=f"Cannot set attributes that were not defined in __init__: {name}"): + setattr(flow, name, value) + assert name == "run" or not hasattr(flow, name) + + +@pytest.mark.parametrize( + "name,value", + [ + ("_name", "name"), + ("_changes", {"change": 1}), + ("run", ProxyWorkRun(work_run=Mock(), work_name="any", work=Mock(), caller_queue=Mock())), + ], +) +def test_supported_attribute_declaration_outside_init(name, value): + """Test the custom LightningWork setattr implementation for the few reserved attributes that are allowed to be + set from outside 
__init__."""
+    flow = EmptyWork()
+    setattr(flow, name, value)
+    assert getattr(flow, name) == value
+
+
+def test_supported_attribute_declaration_inside_init():
+    """Test that the custom LightningWork setattr can identify the __init__ call in the stack frames above."""
+
+    class Work(EmptyWork):
+        def __init__(self):
+            super().__init__()
+            self.directly_in_init = "init"
+            self.method_under_init()
+
+        def method_under_init(self):
+            self.attribute = "test"
+
+    work = Work()
+    assert work.directly_in_init == "init"
+    assert work.attribute == "test"
+
+
+@pytest.mark.parametrize("replacement", [EmptyFlow(), EmptyWork(), None])
+def test_fixing_flows_and_works(replacement):
+    class FlowFixed(LightningFlow):
+        def run(self):
+            self.empty_flow = EmptyFlow()
+            self.empty_flow = replacement
+
+    with pytest.raises(AttributeError, match="Cannot set attributes as"):
+        FlowFixed().run()
+
+
+@pytest.mark.parametrize("raise_exception", [False, True])
+@pytest.mark.parametrize("enable_exception", [False, True])
+def test_lightning_status(enable_exception, raise_exception):
+    class Work(EmptyWork):
+        def __init__(self, raise_exception, enable_exception=True):
+            super().__init__(raise_exception=raise_exception)
+            self.enable_exception = enable_exception
+            self.dummy_path = Path("test")
+
+        def run(self):
+            if self.enable_exception:
+                raise Exception("Custom Exception")
+
+    class BlockingQueue(MockQueue):
+        """A Mock for the file copier queues that keeps blocking until we want to end the thread."""
+
+        keep_blocking = True
+
+        def get(self, timeout: int = 0):
+            while BlockingQueue.keep_blocking:
+                pass
+            # A dummy request so the Copier gets something to process without an error
+            return GetRequest(source="src", name="dummy_path", path="test", hash="123", destination="dst")
+
+    work = Work(raise_exception, enable_exception=enable_exception)
+    work._name = "root.w"
+    assert work.status.stage == WorkStageStatus.NOT_STARTED
+    caller_queue = MockQueue("caller_queue")
+    delta_queue = MockQueue("delta_queue")
+    readiness_queue = MockQueue("readiness_queue")
+    error_queue = MockQueue("error_queue")
+    request_queue = MockQueue("request_queue")
+    response_queue = MockQueue("response_queue")
+    copy_request_queue = BlockingQueue("copy_request_queue")
+    copy_response_queue = BlockingQueue("copy_response_queue")
+    call_hash = "run:fe3fa0f34fc1317e152e5afb023332995392071046f1ea51c34c7c9766e3676c"
+    work._calls[call_hash] = {
+        "args": (),
+        "kwargs": {},
+        "call_hash": call_hash,
+        "run_started_counter": 1,
+        "statuses": [],
+    }
+    caller_queue.put(
+        {
+            "args": (),
+            "kwargs": {},
+            "call_hash": call_hash,
+            "state": work.state,
+        }
+    )
+    work_runner = WorkRunner(
+        work,
+        work.name,
+        caller_queue,
+        delta_queue,
+        readiness_queue,
+        error_queue,
+        request_queue,
+        response_queue,
+        copy_request_queue,
+        copy_response_queue,
+    )
+    try:
+        work_runner()
+    except (Exception, Empty):
+        pass
+
+    res = delta_queue._queue[0].delta.to_dict()["iterable_item_added"]
+    res_end = delta_queue._queue[1].delta.to_dict()["iterable_item_added"]
+    if enable_exception:
+        exception_cls = Exception if raise_exception else Empty
+        assert isinstance(error_queue._queue[0], exception_cls)
+        assert res[f"root['calls']['{call_hash}']['statuses'][0]"]["stage"] == "failed"
+        assert res[f"root['calls']['{call_hash}']['statuses'][0]"]["message"] == "Custom Exception"
+    else:
+        assert res[f"root['calls']['{call_hash}']['statuses'][0]"]["stage"] == "running"
+        assert res_end[f"root['calls']['{call_hash}']['statuses'][1]"]["stage"] == "succeeded"
+
+    # Stop 
blocking and let the thread join + BlockingQueue.keep_blocking = False + work_runner.copier.join() + + +def test_lightning_work_url(): + class ExposedWork(LightningWork): + def run(self): + pass + + work = ExposedWork(port=8000) + work._name = "root.work" + assert work.state["vars"]["_url"] == "" + + +def test_work_path_assignment(): + """Test that paths in the lit format lit:// get converted to a proper lightning_app.storage.Path object.""" + + class Work(LightningWork): + def __init__(self): + super().__init__() + self.no_path = "a/b/c" + self.path = Path("lit://x/y/z") + self.lit_path = "lit://x/y/z" + + def run(self): + pass + + work = Work() + assert isinstance(work.no_path, str) + assert isinstance(work.path, Path) + assert isinstance(work.lit_path, Path) + assert work.path == work.lit_path + + +def test_work_state_change_with_path(): + """Test that type changes to a Path attribute are properly reflected within the state.""" + + class Work(LightningFlow): + def __init__(self): + super().__init__() + self.none_to_path = None + self.path_to_none = Path() + self.path_to_path = Path() + + def run(self): + self.none_to_path = "lit://none/to/path" + self.path_to_none = None + self.path_to_path = "lit://path/to/path" + + class Flow(LightningFlow): + def __init__(self): + super().__init__() + self.work = Work() + + def run(self): + self.work.run() + self._exit() + + flow = Flow() + MultiProcessRuntime(LightningApp(flow)).dispatch() + assert flow.work.none_to_path == Path("lit://none/to/path") + assert flow.work.path_to_none is None + assert flow.work.path_to_path == Path("lit://path/to/path") + + assert "path_to_none" not in flow.work._paths + assert "path_to_none" in flow.work._state + assert flow.work._paths["none_to_path"] == Path("lit://none/to/path").to_dict() + assert flow.work._paths["path_to_path"] == Path("lit://path/to/path").to_dict() + assert flow.work.state["vars"]["none_to_path"] == Path("lit://none/to/path") + assert flow.work.state["vars"]["path_to_none"] is None + assert flow.work.state["vars"]["path_to_path"] == Path("lit://path/to/path") diff --git a/tests/tests_app/core/test_queues.py b/tests/tests_app/core/test_queues.py new file mode 100644 index 0000000000000..72de9d951ac60 --- /dev/null +++ b/tests/tests_app/core/test_queues.py @@ -0,0 +1,153 @@ +import pickle +import queue +import time +from unittest import mock + +import pytest + +from lightning_app.core import queues +from lightning_app.core.queues import QueuingSystem, READINESS_QUEUE_CONSTANT, RedisQueue +from lightning_app.utilities.imports import _is_redis_available +from lightning_app.utilities.redis import check_if_redis_running + + +@pytest.mark.skipif(not check_if_redis_running(), reason="Redis is not running") +@pytest.mark.parametrize("queue_type", list(QueuingSystem.__members__.values())) +def test_queue_api(queue_type, monkeypatch): + """Test the Queue API. 
+
+    This test runs all the Queue implementations, but we monkeypatch the Redis Queues to avoid external interaction.
+    """
+
+    blpop_out = (b"entry-id", pickle.dumps("test_entry"))
+
+    monkeypatch.setattr(queues.redis.Redis, "blpop", lambda *args, **kwargs: blpop_out)
+    monkeypatch.setattr(queues.redis.Redis, "rpush", lambda *args, **kwargs: None)
+    monkeypatch.setattr(queues.redis.Redis, "set", lambda *args, **kwargs: None)
+    monkeypatch.setattr(queues.redis.Redis, "get", lambda *args, **kwargs: None)
+    queue = queue_type.get_readiness_queue()
+    assert queue.name == READINESS_QUEUE_CONSTANT
+    assert isinstance(queue, queues.BaseQueue)
+    queue.put("test_entry")
+    assert queue.get() == "test_entry"
+
+
+@pytest.mark.skipif(not check_if_redis_running(), reason="Redis is not running")
+def test_redis_queue():
+    queue_id = int(time.time())
+    queue1 = QueuingSystem.REDIS.get_readiness_queue(queue_id=str(queue_id))
+    queue2 = QueuingSystem.REDIS.get_readiness_queue(queue_id=str(queue_id + 1))
+    queue1.put("test_entry1")
+    queue2.put("test_entry2")
+    assert queue1.get() == "test_entry1"
+    assert queue2.get() == "test_entry2"
+    with pytest.raises(queue.Empty):
+        queue2.get(timeout=1)
+    queue1.put("test_entry1")
+    assert queue1.length() == 1
+    queue1.clear()
+    with pytest.raises(queue.Empty):
+        queue1.get(timeout=1)
+
+
+@pytest.mark.skipif(not check_if_redis_running(), reason="Redis is not running")
+def test_redis_ping_success():
+    redis_queue = QueuingSystem.REDIS.get_readiness_queue()
+    assert redis_queue.ping()
+
+    redis_queue = RedisQueue(name="test_queue", default_timeout=1)
+    assert redis_queue.ping()
+
+
+@pytest.mark.skipif(not _is_redis_available(), reason="redis is required for this test.")
+@pytest.mark.skipif(check_if_redis_running(), reason="This is testing the failure case when redis is not running")
+def test_redis_ping_failure():
+    redis_queue = RedisQueue(name="test_queue", default_timeout=1)
+    assert not redis_queue.ping()
+
+
+@pytest.mark.skipif(not _is_redis_available(), reason="redis isn't installed.")
+def test_redis_credential(monkeypatch):
+    monkeypatch.setattr(queues, "REDIS_HOST", "test-host")
+    monkeypatch.setattr(queues, "REDIS_PORT", "test-port")
+    monkeypatch.setattr(queues, "REDIS_PASSWORD", "test-password")
+    redis_queue = QueuingSystem.REDIS.get_readiness_queue()
+    assert redis_queue.redis.connection_pool.connection_kwargs["host"] == "test-host"
+    assert redis_queue.redis.connection_pool.connection_kwargs["port"] == "test-port"
+    assert redis_queue.redis.connection_pool.connection_kwargs["password"] == "test-password"
+
+
+@pytest.mark.skipif(not _is_redis_available(), reason="redis isn't installed.")
+@mock.patch("lightning_app.core.queues.redis.Redis")
+def test_redis_queue_read_timeout(redis_mock):
+    redis_mock.return_value.blpop.return_value = (b"READINESS_QUEUE", pickle.dumps("test_entry"))
+    redis_queue = QueuingSystem.REDIS.get_readiness_queue()
+
+    # default timeout
+    assert redis_queue.get(timeout=0) == "test_entry"
+    assert redis_mock.return_value.blpop.call_args_list[0] == mock.call(["READINESS_QUEUE"], timeout=0.005)
+
+    # custom timeout
+    assert redis_queue.get(timeout=2) == "test_entry"
+    assert redis_mock.return_value.blpop.call_args_list[1] == mock.call(["READINESS_QUEUE"], timeout=2)
+
+    # blocking timeout
+    assert redis_queue.get() == "test_entry"
+    assert redis_mock.return_value.blpop.call_args_list[2] == mock.call(["READINESS_QUEUE"], timeout=0)
+
+
+@pytest.mark.parametrize(
+    "queue_type, queue_process_mock",
+    [(QueuingSystem.SINGLEPROCESS, 
queues.queue), (QueuingSystem.MULTIPROCESS, queues.multiprocessing)], +) +def test_process_queue_read_timeout(queue_type, queue_process_mock, monkeypatch): + + queue_mocked = mock.MagicMock() + monkeypatch.setattr(queue_process_mock, "Queue", queue_mocked) + my_queue = queue_type.get_readiness_queue() + + # default timeout + my_queue.get(timeout=0) + assert queue_mocked.return_value.get.call_args_list[0] == mock.call(timeout=0.001, block=False) + + # custom timeout + my_queue.get(timeout=2) + assert queue_mocked.return_value.get.call_args_list[1] == mock.call(timeout=2, block=False) + + # blocking timeout + my_queue.get() + assert queue_mocked.return_value.get.call_args_list[2] == mock.call(timeout=None, block=True) + + +@pytest.mark.skipif(not check_if_redis_running(), reason="Redis is not running") +@mock.patch("lightning_app.core.queues.REDIS_WARNING_QUEUE_SIZE", 2) +def test_redis_queue_warning(): + my_queue = QueuingSystem.REDIS.get_api_delta_queue(queue_id="test_redis_queue_warning") + my_queue.clear() + with pytest.warns(UserWarning, match="is larger than the"): + my_queue.put(None) + my_queue.put(None) + my_queue.put(None) + + +@pytest.mark.skipif(not check_if_redis_running(), reason="Redis is not running") +@mock.patch("lightning_app.core.queues.redis.Redis") +def test_redis_raises_error_if_failing(redis_mock): + import redis + + my_queue = QueuingSystem.REDIS.get_api_delta_queue(queue_id="test_redis_queue_warning") + redis_mock.return_value.rpush.side_effect = redis.exceptions.ConnectionError("EROOOR") + redis_mock.return_value.llen.side_effect = redis.exceptions.ConnectionError("EROOOR") + + with pytest.raises(ConnectionError, match="Your app failed because it couldn't connect to Redis."): + redis_mock.return_value.blpop.side_effect = redis.exceptions.ConnectionError("EROOOR") + my_queue.get() + + with pytest.raises(ConnectionError, match="Your app failed because it couldn't connect to Redis."): + redis_mock.return_value.rpush.side_effect = redis.exceptions.ConnectionError("EROOOR") + redis_mock.return_value.llen.return_value = 1 + my_queue.put(1) + + with pytest.raises(ConnectionError, match="Your app failed because it couldn't connect to Redis."): + redis_mock.return_value.llen.side_effect = redis.exceptions.ConnectionError("EROOOR") + my_queue.length() diff --git a/tests/tests_app/frontend/__init__.py b/tests/tests_app/frontend/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/frontend/test_stream_lit.py b/tests/tests_app/frontend/test_stream_lit.py new file mode 100644 index 0000000000000..cd3a394a89d75 --- /dev/null +++ b/tests/tests_app/frontend/test_stream_lit.py @@ -0,0 +1,94 @@ +import os +import runpy +import sys +from unittest import mock +from unittest.mock import ANY, Mock + +import pytest + +from lightning_app import LightningFlow +from lightning_app.frontend.stream_lit import StreamlitFrontend +from lightning_app.utilities.state import AppState + + +def test_stop_server_not_running(): + frontend = StreamlitFrontend(render_fn=Mock()) + with pytest.raises(RuntimeError, match="Server is not running."): + frontend.stop_server() + + +def _noop_render_fn(_): + pass + + +class MockFlow(LightningFlow): + @property + def name(self): + return "root.my.flow" + + def run(self): + pass + + +@mock.patch("lightning_app.frontend.stream_lit.subprocess") +def test_streamlit_frontend_start_stop_server(subprocess_mock): + """Test that `StreamlitFrontend.start_server()` invokes subprocess.Popen with the right parameters.""" + frontend = 
StreamlitFrontend(render_fn=_noop_render_fn) + frontend.flow = MockFlow() + frontend.start_server(host="hostname", port=1111) + subprocess_mock.Popen.assert_called_once() + + env_variables = subprocess_mock.method_calls[0].kwargs["env"] + call_args = subprocess_mock.method_calls[0].args[0] + assert call_args == [ + sys.executable, + "-m", + "streamlit", + "run", + ANY, + "--server.address", + "hostname", + "--server.port", + "1111", + "--server.baseUrlPath", + "root.my.flow", + "--server.headless", + "true", + ] + + assert env_variables["LIGHTNING_FLOW_NAME"] == "root.my.flow" + assert env_variables["LIGHTNING_RENDER_FUNCTION"] == "_noop_render_fn" + assert env_variables["LIGHTNING_RENDER_MODULE_FILE"] == __file__ + + assert "LIGHTNING_FLOW_NAME" not in os.environ + assert "LIGHTNING_RENDER_FUNCTION" not in os.environ + assert "LIGHTNING_RENDER_MODULE_FILE" not in os.environ + + frontend.stop_server() + subprocess_mock.Popen().kill.assert_called_once() + + +def _streamlit_call_me(state): + assert isinstance(state, AppState) + + +@mock.patch.dict( + os.environ, + { + "LIGHTNING_FLOW_NAME": "root", + "LIGHTNING_RENDER_FUNCTION": "_streamlit_call_me", + "LIGHTNING_RENDER_MODULE_FILE": __file__, + }, +) +def test_streamlit_wrapper_calls_render_fn(*_): + runpy.run_module("lightning_app.frontend.streamlit_base") + # TODO: find a way to assert that _streamlit_call_me got called + + +def test_method_exception(): + class A: + def render_fn(self): + pass + + with pytest.raises(TypeError, match="being a method"): + StreamlitFrontend(render_fn=A().render_fn) diff --git a/tests/tests_app/frontend/test_web.py b/tests/tests_app/frontend/test_web.py new file mode 100644 index 0000000000000..f7f97cda0fc9b --- /dev/null +++ b/tests/tests_app/frontend/test_web.py @@ -0,0 +1,77 @@ +import os +from unittest import mock +from unittest.mock import ANY, MagicMock + +import pytest + +import lightning_app +from lightning_app import LightningFlow +from lightning_app.frontend.web import healthz, StaticWebFrontend +from lightning_app.storage.path import storage_root_dir + + +def test_stop_server_not_running(): + frontend = StaticWebFrontend(serve_dir=".") + with pytest.raises(RuntimeError, match="Server is not running."): + frontend.stop_server() + + +class MockFlow(LightningFlow): + @property + def name(self): + return "root.my.flow" + + def run(self): + pass + + +@mock.patch("lightning_app.frontend.web.mp.Process") +def test_start_stop_server_through_frontend(process_mock): + frontend = StaticWebFrontend(serve_dir=".") + frontend.flow = MockFlow() + frontend.start_server("localhost", 5000) + log_file_root = storage_root_dir() + process_mock.assert_called_once_with( + target=lightning_app.frontend.web.start_server, + kwargs={ + "host": "localhost", + "port": 5000, + "serve_dir": ".", + "path": "/root.my.flow", + "log_file": os.path.join(log_file_root, "frontend", "logs.log"), + }, + ) + process_mock().start.assert_called_once() + frontend.stop_server() + process_mock().kill.assert_called_once() + + +@mock.patch("lightning_app.frontend.web.uvicorn") +def test_start_server_through_function(uvicorn_mock, tmpdir, monkeypatch): + FastAPIMock = MagicMock() + FastAPIMock.mount = MagicMock() + FastAPIGetDecoratorMock = MagicMock() + FastAPIMock.get.return_value = FastAPIGetDecoratorMock + monkeypatch.setattr(lightning_app.frontend.web, "FastAPI", MagicMock(return_value=FastAPIMock)) + + lightning_app.frontend.web.start_server(serve_dir=tmpdir, host="myhost", port=1000, path="/test-flow") + 
uvicorn_mock.run.assert_called_once_with(app=ANY, host="myhost", port=1000, log_config=ANY) + FastAPIMock.mount.assert_called_once_with("/test-flow", ANY, name="static") + FastAPIMock.get.assert_called_once_with("/test-flow/healthz", status_code=200) + + FastAPIGetDecoratorMock.assert_called_once_with(healthz) + + # path has default value "/" + FastAPIMock.mount = MagicMock() + lightning_app.frontend.web.start_server(serve_dir=tmpdir, host="myhost", port=1000) + FastAPIMock.mount.assert_called_once_with("/", ANY, name="static") + + +def test_healthz(): + assert healthz() == {"status": "ok"} + + +@mock.patch("lightning_app.frontend.web.uvicorn") +def test_start_server_find_free_port(uvicorn_mock, tmpdir): + lightning_app.frontend.web.start_server(serve_dir=tmpdir, host="myhost") + assert uvicorn_mock.run.call_args_list[0].kwargs["port"] > 0 diff --git a/tests/tests_app/runners/__init__.py b/tests/tests_app/runners/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/runners/test_cloud.py b/tests/tests_app/runners/test_cloud.py new file mode 100644 index 0000000000000..f38383d87e148 --- /dev/null +++ b/tests/tests_app/runners/test_cloud.py @@ -0,0 +1,289 @@ +import logging +from pathlib import Path +from unittest import mock +from unittest.mock import MagicMock + +import pytest +from lightning_cloud.openapi import ( + Body8, + Gridv1ImageSpec, + V1BuildSpec, + V1DependencyFileInfo, + V1LightningappInstanceState, + V1LightningworkSpec, + V1ListLightningappInstancesResponse, + V1ListMembershipsResponse, + V1Membership, + V1NetworkConfig, + V1PackageManager, + V1PythonDependencyInfo, + V1UserRequestedComputeConfig, + V1Work, +) + +from lightning_app import LightningApp, LightningWork +from lightning_app.runners import backends, cloud +from lightning_app.utilities.cloud import _get_project +from lightning_app.utilities.dependency_caching import get_hash + + +class MyWork(LightningWork): + def run(self): + print("my run") + + +class TestAppCreationClient: + """Testing the calls made using GridRestClient to create the app.""" + + @mock.patch("lightning_app.runners.backends.cloud.LightningClient", mock.MagicMock()) + def test_requirements_file(self, monkeypatch): + mock_client = mock.MagicMock() + mock_client.projects_service_list_memberships.return_value = V1ListMembershipsResponse( + memberships=[V1Membership(name="test-project", project_id="test-project-id")] + ) + mock_client.lightningapp_instance_service_list_lightningapp_instances.return_value = ( + V1ListLightningappInstancesResponse(lightningapps=[]) + ) + cloud_backend = mock.MagicMock() + cloud_backend.client = mock_client + monkeypatch.setattr(backends, "CloudBackend", mock.MagicMock(return_value=cloud_backend)) + monkeypatch.setattr(cloud, "LocalSourceCodeDir", mock.MagicMock()) + monkeypatch.setattr(cloud, "_prepare_lightning_wheels_and_requirements", mock.MagicMock()) + app = mock.MagicMock() + app.flows = [] + app.frontend = {} + cloud_runtime = cloud.CloudRuntime(app=app, entrypoint_file="entrypoint.py") + cloud_runtime._check_uploaded_folder = mock.MagicMock() + + # without requirements file + # setting is_file to False so requirements.txt existence check will return False + monkeypatch.setattr(Path, "is_file", lambda *args, **kwargs: False) + monkeypatch.setattr(cloud, "Path", Path) + cloud_runtime.dispatch() + body = Body8( + app_entrypoint_file=mock.ANY, + enable_app_server=True, + flow_servers=[], + image_spec=None, + works=[], + local_source=True, + dependency_cache_key=mock.ANY, + ) 
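+        # Without a requirements.txt, the release is expected to be created with
+        # image_spec=None and an empty list of works, as asserted below.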
+ cloud_runtime.backend.client.lightningapp_v2_service_create_lightningapp_release.assert_called_once_with( + "test-project-id", mock.ANY, body + ) + + # with requirements file + # setting is_file to True so requirements.txt existence check will return True + monkeypatch.setattr(Path, "is_file", lambda *args, **kwargs: True) + monkeypatch.setattr(cloud, "Path", Path) + cloud_runtime.dispatch(no_cache=True) + body.image_spec = Gridv1ImageSpec( + dependency_file_info=V1DependencyFileInfo( + package_manager=V1PackageManager.PIP, + path="requirements.txt", + ), + ) + cloud_runtime.backend.client.lightningapp_v2_service_create_lightningapp_release.assert_called_with( + "test-project-id", mock.ANY, body + ) + + @mock.patch("lightning_app.runners.backends.cloud.LightningClient", mock.MagicMock()) + def test_no_cache(self, monkeypatch): + mock_client = mock.MagicMock() + mock_client.projects_service_list_memberships.return_value = V1ListMembershipsResponse( + memberships=[V1Membership(name="test-project", project_id="test-project-id")] + ) + mock_client.lightningapp_instance_service_list_lightningapp_instances.return_value = ( + V1ListLightningappInstancesResponse(lightningapps=[]) + ) + cloud_backend = mock.MagicMock() + cloud_backend.client = mock_client + monkeypatch.setattr(backends, "CloudBackend", mock.MagicMock(return_value=cloud_backend)) + monkeypatch.setattr(cloud, "LocalSourceCodeDir", mock.MagicMock()) + monkeypatch.setattr(cloud, "_prepare_lightning_wheels_and_requirements", mock.MagicMock()) + monkeypatch.setattr(cloud, "get_hash", lambda *args, **kwargs: "dummy-hash") + app = mock.MagicMock() + app.flows = [] + app.frontend = {} + cloud_runtime = cloud.CloudRuntime(app=app, entrypoint_file="entrypoint.py") + cloud_runtime._check_uploaded_folder = mock.MagicMock() + + # requirements.txt check should return True so the no_cache flag is the only one that matters + # testing with no-cache False + monkeypatch.setattr(Path, "is_file", lambda self: True if "requirements.txt" in str(self) else False) + monkeypatch.setattr(cloud, "Path", Path) + cloud_runtime.dispatch(no_cache=False) + ( + func_name, + args, + kwargs, + ) = cloud_runtime.backend.client.lightningapp_v2_service_create_lightningapp_release.mock_calls[0] + body = args[2] + assert body.dependency_cache_key == "dummy-hash" + + # testing with no-cache True + mock_client.reset_mock() + monkeypatch.setattr(cloud, "Path", Path) + cloud_runtime.dispatch(no_cache=True) + ( + func_name, + args, + kwargs, + ) = cloud_runtime.backend.client.lightningapp_v2_service_create_lightningapp_release.mock_calls[0] + body = args[2] + assert body.dependency_cache_key is None + + @mock.patch("lightning_app.runners.backends.cloud.LightningClient", mock.MagicMock()) + @pytest.mark.parametrize("lightningapps", [[], [MagicMock()]]) + def test_call_with_work_app(self, lightningapps, monkeypatch, tmpdir): + source_code_root_dir = Path(tmpdir / "src").absolute() + source_code_root_dir.mkdir() + Path(source_code_root_dir / ".lightning").write_text("name: myapp") + requirements_file = Path(source_code_root_dir / "requirements.txt") + Path(requirements_file).touch() + + mock_client = mock.MagicMock() + if lightningapps: + lightningapps[0].status.phase = V1LightningappInstanceState.STOPPED + mock_client.lightningapp_instance_service_list_lightningapp_instances.return_value = ( + V1ListLightningappInstancesResponse(lightningapps=lightningapps) + ) + lightning_app_instance = MagicMock() + mock_client.lightningapp_v2_service_create_lightningapp_release = 
MagicMock(return_value=lightning_app_instance) + mock_client.lightningapp_v2_service_create_lightningapp_release_instance = MagicMock( + return_value=lightning_app_instance + ) + existing_instance = MagicMock() + existing_instance.status.phase = V1LightningappInstanceState.STOPPED + mock_client.lightningapp_service_get_lightningapp = MagicMock(return_value=existing_instance) + cloud_backend = mock.MagicMock() + cloud_backend.client = mock_client + monkeypatch.setattr(backends, "CloudBackend", mock.MagicMock(return_value=cloud_backend)) + monkeypatch.setattr(cloud, "LocalSourceCodeDir", mock.MagicMock()) + monkeypatch.setattr(cloud, "_prepare_lightning_wheels_and_requirements", mock.MagicMock()) + app = mock.MagicMock() + flow = mock.MagicMock() + + work = MyWork() + monkeypatch.setattr(work, "_name", "test-work") + monkeypatch.setattr(work._cloud_build_config, "build_commands", lambda: ["echo 'start'"]) + monkeypatch.setattr(work._cloud_build_config, "requirements", ["torch==1.0.0", "numpy==1.0.0"]) + monkeypatch.setattr(work._cloud_build_config, "image", "random_base_public_image") + monkeypatch.setattr(work._cloud_compute, "disk_size", 0) + monkeypatch.setattr(work._cloud_compute, "preemptible", False) + monkeypatch.setattr(work, "_port", 8080) + + flow.works = lambda recurse: [work] + app.flows = [flow] + cloud_runtime = cloud.CloudRuntime(app=app, entrypoint_file=(source_code_root_dir / "entrypoint.py")) + monkeypatch.setattr( + "lightning_app.runners.cloud._get_project", + lambda x: V1Membership(name="test-project", project_id="test-project-id"), + ) + cloud_runtime.dispatch() + + if lightningapps: + expected_body = Body8( + description=None, + local_source=True, + app_entrypoint_file="entrypoint.py", + enable_app_server=True, + flow_servers=[], + dependency_cache_key=get_hash(requirements_file), + image_spec=Gridv1ImageSpec( + dependency_file_info=V1DependencyFileInfo( + package_manager=V1PackageManager.PIP, path="requirements.txt" + ) + ), + works=[ + V1Work( + name="test-work", + spec=V1LightningworkSpec( + build_spec=V1BuildSpec( + commands=["echo 'start'"], + python_dependencies=V1PythonDependencyInfo( + package_manager=V1PackageManager.PIP, packages="torch==1.0.0\nnumpy==1.0.0" + ), + image="random_base_public_image", + ), + user_requested_compute_config=V1UserRequestedComputeConfig( + name="default", count=1, disk_size=0, preemptible=False, shm_size=0 + ), + network_config=[V1NetworkConfig(name=mock.ANY, host=None, port=8080)], + ), + ) + ], + ) + mock_client.lightningapp_v2_service_create_lightningapp_release.assert_called_once_with( + "test-project-id", mock.ANY, expected_body + ) + + # running dispatch with disabled dependency cache + mock_client.reset_mock() + monkeypatch.setattr(cloud, "DISABLE_DEPENDENCY_CACHE", True) + expected_body.dependency_cache_key = None + cloud_runtime.dispatch() + mock_client.lightningapp_v2_service_create_lightningapp_release.assert_called_once_with( + "test-project-id", mock.ANY, expected_body + ) + else: + mock_client.lightningapp_v2_service_create_lightningapp_release_instance.assert_called_once_with( + "test-project-id", mock.ANY, mock.ANY, mock.ANY + ) + + +@mock.patch("lightning_app.core.queues.QueuingSystem", MagicMock()) +@mock.patch("lightning_app.runners.backends.cloud.LightningClient", MagicMock()) +def test_get_project(monkeypatch): + mock_client = mock.MagicMock() + monkeypatch.setattr(cloud, "CloudBackend", mock.MagicMock(return_value=mock_client)) + app = mock.MagicMock(spec=LightningApp) + cloud.CloudRuntime(app=app, 
entrypoint_file="entrypoint.py") + + # No valid projects + mock_client.projects_service_list_memberships.return_value = V1ListMembershipsResponse(memberships=[]) + + with pytest.raises(ValueError, match="No valid projects found"): + _get_project(mock_client) + + # One valid project + mock_client.projects_service_list_memberships.return_value = V1ListMembershipsResponse( + memberships=[V1Membership(name="test-project", project_id="test-project-id")] + ) + ret = _get_project(mock_client) + assert ret.project_id == "test-project-id" + + # Multiple valid projects + mock_client.projects_service_list_memberships.return_value = V1ListMembershipsResponse( + memberships=[ + V1Membership(name="test-project1", project_id="test-project-id1"), + V1Membership(name="test-project2", project_id="test-project-id2"), + ] + ) + with pytest.warns(UserWarning, match="Defaulting to the project test-project1"): + ret = _get_project(mock_client) + assert ret.project_id == "test-project-id1" + + +@mock.patch("lightning_app.core.queues.QueuingSystem", MagicMock()) +@mock.patch("lightning_app.runners.backends.cloud.LightningClient", MagicMock()) +def test_check_uploaded_folder(monkeypatch, tmpdir, caplog): + + monkeypatch.setattr(cloud, "logger", logging.getLogger()) + + app = MagicMock() + repo = MagicMock() + backend = cloud.CloudRuntime(app) + with caplog.at_level(logging.WARN): + backend._check_uploaded_folder(Path(tmpdir), repo) + assert caplog.messages == [] + + mock = MagicMock() + mock.st_size = 5 * 1000 * 1000 + repo.files = [str(Path("./a.png"))] + monkeypatch.setattr(Path, "stat", MagicMock(return_value=mock)) + + with caplog.at_level(logging.WARN): + backend._check_uploaded_folder(Path("."), repo) + assert caplog.messages[0].startswith("Your application folder . is more than 2 MB. 
Found 5.0 MB") diff --git a/tests/tests_app/runners/test_multiprocess.py b/tests/tests_app/runners/test_multiprocess.py new file mode 100644 index 0000000000000..0693e38a35f77 --- /dev/null +++ b/tests/tests_app/runners/test_multiprocess.py @@ -0,0 +1,83 @@ +from unittest import mock +from unittest.mock import Mock + +from lightning_app import LightningApp, LightningFlow, LightningWork +from lightning_app.frontend import StaticWebFrontend, StreamlitFrontend +from lightning_app.runners import MultiProcessRuntime +from lightning_app.utilities.component import _get_context + + +def _streamlit_render_fn(): + pass + + +class StreamlitFlow(LightningFlow): + def run(self): + self._exit() + + def configure_layout(self): + frontend = StreamlitFrontend(render_fn=_streamlit_render_fn) + frontend.start_server = Mock() + frontend.stop_server = Mock() + return frontend + + +class WebFlow(LightningFlow): + def run(self): + self._exit() + + def configure_layout(self): + frontend = StaticWebFrontend(serve_dir="a/b/c") + frontend.start_server = Mock() + frontend.stop_server = Mock() + return frontend + + +class StartFrontendServersTestFlow(LightningFlow): + def __init__(self): + super().__init__() + self.flow0 = StreamlitFlow() + self.flow1 = WebFlow() + + def run(self): + self._exit() + + +@mock.patch("lightning_app.runners.multiprocess.find_free_network_port") +def test_multiprocess_starts_frontend_servers(*_): + """Test that the MultiProcessRuntime starts the servers for the frontends in each LightningFlow.""" + root = StartFrontendServersTestFlow() + app = LightningApp(root) + MultiProcessRuntime(app).dispatch() + + app.frontends[root.flow0.name].start_server.assert_called_once() + app.frontends[root.flow1.name].start_server.assert_called_once() + + app.frontends[root.flow0.name].stop_server.assert_called_once() + app.frontends[root.flow1.name].stop_server.assert_called_once() + + +class ContextWork(LightningWork): + def __init__(self): + super().__init__() + + def run(self): + assert _get_context().value == "work" + + +class ContxtFlow(LightningFlow): + def __init__(self): + super().__init__() + self.work = ContextWork() + assert _get_context() is None + + def run(self): + assert _get_context().value == "flow" + self.work.run() + assert _get_context().value == "flow" + self._exit() + + +def test_multiprocess_runtime_sets_context(): + """Test that the runtime sets the global variable COMPONENT_CONTEXT in Flow and Work.""" + MultiProcessRuntime(LightningApp(ContxtFlow())).dispatch() diff --git a/tests/tests_app/runners/test_runtime.py b/tests/tests_app/runners/test_runtime.py new file mode 100644 index 0000000000000..c79ef1207cae9 --- /dev/null +++ b/tests/tests_app/runners/test_runtime.py @@ -0,0 +1,45 @@ +import os +import signal +from unittest import mock + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.runners import cloud +from lightning_app.runners.runtime import dispatch +from lightning_app.runners.runtime_type import RuntimeType + + +@pytest.mark.parametrize( + "runtime_type", + [ + RuntimeType.SINGLEPROCESS, + RuntimeType.MULTIPROCESS, + RuntimeType.CLOUD, + ], +) +@mock.patch("lightning_app.core.queues.QueuingSystem", mock.MagicMock()) +@mock.patch("lightning_app.runners.backends.cloud.LightningClient", mock.MagicMock()) +def test_dispatch(runtime_type, monkeypatch): + """This test ensures the runtime dispatch method gets called when using dispatch.""" + + monkeypatch.setattr(cloud, "CloudBackend", mock.MagicMock()) + + with pytest.raises(FileNotFoundError, 
match="doesnt_exists.py"): + dispatch( + entrypoint_file=os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/doesnt_exists.py"), + runtime_type=runtime_type, + start_server=False, + ) + + runtime = runtime_type.get_runtime() + dispath_method_path = f"{runtime.__module__}.{runtime.__name__}.dispatch" + + with mock.patch(dispath_method_path) as dispatch_mock_fn: + dispatch( + entrypoint_file=os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/app_metadata.py"), + runtime_type=runtime_type, + start_server=False, + ) + dispatch_mock_fn.assert_called_once() + assert signal.getsignal(signal.SIGINT) is signal.default_int_handler diff --git a/tests/tests_app/runners/test_singleprocess.py b/tests/tests_app/runners/test_singleprocess.py new file mode 100644 index 0000000000000..3b2ad69185077 --- /dev/null +++ b/tests/tests_app/runners/test_singleprocess.py @@ -0,0 +1,18 @@ +from lightning_app import LightningFlow +from lightning_app.core.app import LightningApp +from lightning_app.runners import SingleProcessRuntime + + +class Flow(LightningFlow): + def run(self): + raise KeyboardInterrupt + + +def on_before_run(): + pass + + +def test_single_process_runtime(tmpdir): + + app = LightningApp(Flow()) + SingleProcessRuntime(app, start_server=False).dispatch(on_before_run=on_before_run) diff --git a/tests/tests_app/source_code/test_copytree.py b/tests/tests_app/source_code/test_copytree.py new file mode 100644 index 0000000000000..f109df094e670 --- /dev/null +++ b/tests/tests_app/source_code/test_copytree.py @@ -0,0 +1,107 @@ +import os + +from lightning_app.source_code.copytree import _read_lightningignore, copytree + + +def test_read_lightningignore(tmpdir): + """_read_lightningignore() removes comments from ignore files.""" + test_path = tmpdir.join(".lightningignore") + expected = "test" + not_expected = "# comment" + with open(test_path, "a") as f: + f.write(not_expected) + f.write(expected) + + result = _read_lightningignore(test_path) + assert not_expected not in result + assert expected not in result + + +def test_read_lightningignore_excludes_empty_lines(tmpdir): + """_read_lightningignore() excludes empty lines.""" + test_path = tmpdir.join(".lightningignore") + gitignore = """ + + foo + + bar + + + + """ + test_path.write(gitignore) + + # results exclude all empty lines + result = _read_lightningignore(test_path) + assert len(result) == 2 + + +def test_copytree_ignoring_files(tmp_path_factory): + # lightningignore for ignoring txt file in dir2, the whole dir1 and .zip file everywhere + test_dir = tmp_path_factory.mktemp("lightningignore-test") + source = test_dir / "source" + source.mkdir() + + # lightningignore at root + source.joinpath(".lightningignore").write_text("dir1/*.txt\ndir0\n*.zip") + + # not creating the destination directory + dest = test_dir / "dest" + + # # setting up test files and nested lightningignore in dir4 + source.joinpath("dir3").mkdir() + source.joinpath("dir3").joinpath(".lightningignore").write_text("*.pt") + source.joinpath("dir3").joinpath("model.pt").write_text("") + source.joinpath("dir3").joinpath("model.non-pt").write_text("") + + source.joinpath("dir0").mkdir() # dir0 is ignored + source.joinpath("dir0/file1").write_text("") # ignored because the parent dir is ignored + source.joinpath("dir1").mkdir() + source.joinpath("dir1/file.tar.gz").write_text("") + source.joinpath("dir1/file.txt").write_text("") # .txt in dir1 is ignored + source.joinpath("dir2").mkdir() + source.joinpath("dir2/file.txt").write_text("") + 
source.joinpath("dir2/file.zip").write_text("")  # .zip everywhere is ignored
+
+    files_copied = copytree(source, dest)
+    relative_names = set()
+    for file in files_copied:
+        relative_names.add(file.split("source")[1].strip("/").strip("\\"))
+
+    if os.name == "nt":
+        assert {
+            ".lightningignore",
+            "dir2\\file.txt",
+            "dir3\\.lightningignore",
+            "dir3\\model.non-pt",
+            "dir1\\file.tar.gz",
+        } == relative_names
+    else:
+        assert {
+            ".lightningignore",
+            "dir2/file.txt",
+            "dir3/.lightningignore",
+            "dir3/model.non-pt",
+            "dir1/file.tar.gz",
+        } == relative_names
+
+    first_level_dirs = [directory for directory in dest.iterdir()]
+    assert len(first_level_dirs) == 4  # .lightningignore, dir2, dir1 and dir3
+    assert {".lightningignore", "dir2", "dir1", "dir3"} == {d.name for d in first_level_dirs}
+
+    for d in first_level_dirs:
+        if d.name == "dir1":
+            assert "file.txt" not in [file.name for file in d.iterdir()]
+            assert "file.tar.gz" in [file.name for file in d.iterdir()]
+            assert len([file.name for file in d.iterdir()]) == 1
+
+        if d.name == "dir2":
+            assert "file.zip" not in [file.name for file in d.iterdir()]
+            assert "file.txt" in [file.name for file in d.iterdir()]
+            assert len([file.name for file in d.iterdir()]) == 1
+
+        if d.name == "dir3":
+            assert "model.pt" not in [file.name for file in d.iterdir()]
+            assert "model.non-pt" in [file.name for file in d.iterdir()]
+            assert ".lightningignore" in [file.name for file in d.iterdir()]
+            assert len([file.name for file in d.iterdir()]) == 2
diff --git a/tests/tests_app/source_code/test_local.py b/tests/tests_app/source_code/test_local.py
new file mode 100644
index 0000000000000..7cc8ae8d2a5d0
--- /dev/null
+++ b/tests/tests_app/source_code/test_local.py
@@ -0,0 +1,390 @@
+import tarfile
+import uuid
+from pathlib import Path
+
+from lightning_app.source_code import LocalSourceCodeDir
+
+
+def test_repository_checksum(tmp_path):
+    """LocalRepository.checksum() generates a hash of local dir."""
+    repository = LocalSourceCodeDir(path=Path(tmp_path))
+
+    test_path = tmp_path / "test.txt"
+    version_a = str(uuid.uuid4())
+    test_path.write_text(version_a)
+    checksum_a = repository.version
+
+    # file contents don't change; checksum is the same
+    repository = LocalSourceCodeDir(path=Path(tmp_path))
+    test_path.write_text(version_a)
+    checksum_b = repository.version
+    assert checksum_a == checksum_b
+
+    # file contents change; checksum is different
+    repository = LocalSourceCodeDir(path=Path(tmp_path))
+    test_path.write_text(str(uuid.uuid4()))
+    checksum_c = repository.version
+
+    assert checksum_a != checksum_c
+
+
+def test_repository_package(tmp_path, monkeypatch):
+    """LocalRepository.package() creates package from local dir."""
+    cache_path = Path(tmp_path)
+    source_path = cache_path / "nested"
+    source_path.mkdir(parents=True, exist_ok=True)
+    (source_path / "test.txt").write_text("test")
+
+    # set cache location to temp dir
+    monkeypatch.setattr(LocalSourceCodeDir, "cache_location", cache_path)
+
+    repository = LocalSourceCodeDir(path=source_path)
+    repository.package()
+
+    # test that package is created
+    for file in cache_path.glob("**/*"):
+        if file.is_file() and file.name.endswith(".tar.gz"):
+            assert file.name == f"{repository.version}.tar.gz"
+
+
+def test_repository_lightningignore(tmp_path):
+    """LocalRepository.version uses the assumed checksum correctly."""
+    # write .lightningignore file
+    lightningignore = """
+    # ignore files in this dir
+    ignore/
+
+    """
+    (tmp_path / ".lightningignore").write_text(lightningignore)
+
+    # write some 
data to file and check version + (tmp_path / "test.txt").write_text(str(uuid.uuid4())) + + # create repo object + repository = LocalSourceCodeDir(path=Path(tmp_path)) + checksum_a = repository.version + + # write file that needs to be ignored + (tmp_path / "ignore").mkdir() + (tmp_path / "ignore/test.txt").write_text(str(uuid.uuid4())) + + # check that version remains the same + repository = LocalSourceCodeDir(path=Path(tmp_path)) + checksum_b = repository.version + + assert checksum_a == checksum_b + + +def test_repository_filters_with_absolute_relative_path(tmp_path): + """.lightningignore parsing parses paths starting with / correctly.""" + lightningignore = """ + /ignore_file/test.txt + + /ignore_dir + """ + (tmp_path / ".lightningignore").write_text(lightningignore) + + # write some data to file and check version + (tmp_path / "test.txt").write_text(str(uuid.uuid4())) + + # create repo object + repository = LocalSourceCodeDir(path=Path(tmp_path)) + checksum_a = repository.version + + # only two files in hash + assert len(repository._non_ignored_files) == 2 + + # write file that needs to be ignored + (tmp_path / "ignore_file").mkdir() + (tmp_path / "ignore_dir").mkdir() + (tmp_path / "ignore_file/test.txt").write_text(str(uuid.uuid4())) + (tmp_path / "ignore_dir/test.txt").write_text(str(uuid.uuid4())) + + # check that version remains the same + repository = LocalSourceCodeDir(path=Path(tmp_path)) + checksum_b = repository.version + + # still only two files in hash + assert len(repository._non_ignored_files) == 2 + + assert checksum_a == checksum_b + + +def test_repository_lightningignore_supports_different_patterns(tmp_path): + """.lightningignore parsing supports different patterns.""" + # write .lightningignore file + # default github python .gitignore + lightningignore = """ + # ignore files in this dir + ignore/ + + # Byte-compiled / optimized / DLL files + __pycache__/ + *.py[cod] + *$py.class + + # C extensions + *.so + + # Distribution / packaging + .Python + build/ + develop-eggs/ + dist/ + downloads/ + eggs/ + .eggs/ + lib/ + lib64/ + parts/ + sdist/ + var/ + wheels/ + *.egg-info/ + .installed.cfg + *.egg + MANIFEST + + # PyInstaller + # Usually these files are written by a python script from a template + # before PyInstaller builds the exe, so as to inject date/other infos into it. 
+ *.manifest + *.spec + + # Installer logs + pip-log.txt + pip-delete-this-directory.txt + + # Unit test / coverage reports + htmlcov/ + .tox/ + .coverage + .coverage.* + .cache + nosetests.xml + coverage.xml + *.cover + .hypothesis/ + .pytest_cache/ + + # Translations + *.mo + *.pot + + # Django stuff: + *.log + local_settings.py + db.sqlite3 + + # Flask stuff: + instance/ + .webassets-cache + + # Scrapy stuff: + .scrapy + + # Sphinx documentation + docs/_build/ + + # PyBuilder + target/ + + # Jupyter Notebook + .ipynb_checkpoints + + # pyenv + .python-version + + # celery beat schedule file + celerybeat-schedule + + # SageMath parsed files + *.sage.py + + # Environments + .env + .env.docker + .venv + env/ + venv/ + ENV/ + env.bak/ + venv.bak/ + + # Spyder project settings + .spyderproject + .spyproject + + # Rope project settings + .ropeproject + + # mkdocs documentation + /site + + # mypy + .mypy_cache/ + + # VS Code files + .vscode/ + + # UI files + node_modules/ + + # Data files + models/ + models/* + !grid/openapi/models + postgresql_data/ + redis_data/ + + # Secrets folders + secrets/ + + # Built UI + ui/ + + # Ignores Grid Runner + vendor/ + ignore_test.py + + # Ignore cov report + *.xml + + """ + (tmp_path / ".lightningignore").write_text(lightningignore) + + # write some data to file and check version + (tmp_path / "test.txt").write_text(str(uuid.uuid4())) + + # create repo object + repository = LocalSourceCodeDir(path=Path(tmp_path)) + checksum_a = repository.version + + # write file that needs to be ignored + (tmp_path / "ignore").mkdir() + (tmp_path / "ignore/test.txt").write_text(str(uuid.uuid4())) + + # check that version remains the same + repository = LocalSourceCodeDir(path=Path(tmp_path)) + checksum_b = repository.version + + assert checksum_a == checksum_b + + +def test_repository_lightningignore_unpackage(tmp_path, monkeypatch): + """.lightningignore behaves similarly to the gitignore standard.""" + + lorem_ipsum = "Lorem ipsum dolor sit amet, consectetur adipiscing elit." 
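+    # lorem_ipsum is only filler content; the tree built below mixes files matching
+    # each ignore rule with near-miss names (e.g. build.py vs. build/) to check that
+    # the patterns behave like their .gitignore counterparts.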
+ + cache_path = tmp_path / "cache" + monkeypatch.setattr(LocalSourceCodeDir, "cache_location", cache_path) + + source_path = tmp_path / "source" + source_path.mkdir() + + # set cache location to temp dir + + lightningignore = """ + # Ignore on all levels + *.pyc + *__pycache__/ + build/ + .env + # Ignore wildcard on one level + ./*.txt + /*.md + ./one-level/*.txt + /one-level/*.md + # Ignore only relative + ./downloads + /relative_downloads + # nested + /nested//level/ + /nested/level/ + """ + (source_path / ".lightningignore").write_text(lightningignore) + + # Dir structure + (source_path / "include.py").write_text(lorem_ipsum) + (source_path / "exclude.pyc").write_text(lorem_ipsum) + (source_path / "__pycache__").mkdir() + (source_path / "__pycache__" / "exclude.py").write_text( + lorem_ipsum + ) # Even tho it's .py it's in excluded __pycache__ directory + (source_path / "__pycache__" / "exclude.pyc").write_text( + lorem_ipsum + ) # Even tho it's .py it's in excluded __pycache__ directory + (source_path / "build.py").write_text(lorem_ipsum) # Common prefix with excluded build but it's not it + (source_path / "builds").mkdir() # Common prefix with excluded build but it's not excluded + (source_path / "builds" / "include.py").write_text(lorem_ipsum) + (source_path / "builds" / "__pycache__").mkdir() # Recursively excluded + (source_path / "builds" / "__pycache__" / "exclude.py").write_text(lorem_ipsum) + (source_path / "build").mkdir() # Recursively excluded + (source_path / "build" / "exclude.db").write_text(lorem_ipsum) + (source_path / ".env").write_text(lorem_ipsum) # No issues with handling hidden (.dot) files + (source_path / "downloads").mkdir() # exclude + (source_path / "downloads" / "something.jpeg").write_text(lorem_ipsum) + (source_path / "relative_downloads").mkdir() # exclude + (source_path / "relative_downloads" / "something.jpeg").write_text(lorem_ipsum) + (source_path / "include").mkdir() # include + (source_path / "include" / "exclude.pyc").write_text(lorem_ipsum) # exclude because of *.pyc rule + (source_path / "include" / "include.py").write_text(lorem_ipsum) # include + (source_path / "include" / "downloads").mkdir() # include because it was excluded only relative to root + (source_path / "include" / "downloads" / "something.jpeg").write_text(lorem_ipsum) + (source_path / "include" / "relative_downloads").mkdir() # include because it was excluded only relative to root + (source_path / "include" / "relative_downloads" / "something.jpeg").write_text(lorem_ipsum) + (source_path / "exclude.txt").write_text(lorem_ipsum) + (source_path / "exclude.md").write_text(lorem_ipsum) + (source_path / "one-level").mkdir() + (source_path / "one-level" / "exclude.txt").write_text(lorem_ipsum) + (source_path / "one-level" / "exclude.md").write_text(lorem_ipsum) + (source_path / "one-level" / "include.py").write_text(lorem_ipsum) + (source_path / "nested").mkdir() + (source_path / "nested" / "include.py").write_text(lorem_ipsum) + (source_path / "nested" / "level").mkdir() + (source_path / "nested" / "level" / "exclude.py").write_text(lorem_ipsum) + + # create repo object + repository = LocalSourceCodeDir(path=source_path) + repository.package() + + unpackage_path = tmp_path / "unpackage" + + with tarfile.open(repository.package_path) as f: + f.extractall(unpackage_path) + + assert (unpackage_path / "include.py").exists() + assert not (unpackage_path / "exclude.pyc").exists() # Excluded by *.pyc + assert not (unpackage_path / "__pycache__").exists() + assert not ( + unpackage_path / 
"__pycache__" / "exclude.py" + ).exists() # Even tho it's .py it's in excluded __pycache__ directory + assert not ( + unpackage_path / "__pycache__" / "exclude.pyc" + ).exists() # Even tho it's .py it's in excluded __pycache__ directory + assert (unpackage_path / "build.py").exists() # Common prefix with excluded build but it's not it + assert (unpackage_path / "builds" / "include.py").exists() + assert not (unpackage_path / "builds" / "__pycache__").exists() # Recursively excluded + assert not (unpackage_path / "builds" / "__pycache__" / "exclude.py").exists() + assert not (unpackage_path / "build").exists() # Recursively excluded + assert not (unpackage_path / "build" / "exclude.db").exists() + assert not (unpackage_path / ".env").exists() # No issues with handling hidden (.dot) files + assert not (unpackage_path / "downloads").mkdir() # exclude + assert not (unpackage_path / "downloads" / "something.jpeg").exists() + assert not (unpackage_path / "relative_downloads").mkdir() # exclude + assert not (unpackage_path / "relative_downloads" / "something.jpeg").exists() + assert not (unpackage_path / "include" / "exclude.pyc").exists() # exclude because of *.pyc rule + assert (unpackage_path / "include" / "include.py").exists() # include + assert ( + unpackage_path / "include" / "downloads" / "something.jpeg" + ).exists() # include because it was excluded only relative to root + assert ( + unpackage_path / "include" / "relative_downloads" / "something.jpeg" + ).exists() # include because it was excluded only relative to root + assert not (unpackage_path / "exclude.txt").exists() + assert not (unpackage_path / "exclude.md").exists() + assert not (unpackage_path / "one-level" / "exclude.txt").exists() + assert not (unpackage_path / "one-level" / "exclude.md").exists() + assert (unpackage_path / "one-level" / "include.py").exists() + assert (unpackage_path / "nested" / "include.py").exists() + assert not (unpackage_path / "nested" / "level" / "exclude.py").exists() diff --git a/tests/tests_app/source_code/test_tar.py b/tests/tests_app/source_code/test_tar.py new file mode 100644 index 0000000000000..1936390b2e91a --- /dev/null +++ b/tests/tests_app/source_code/test_tar.py @@ -0,0 +1,125 @@ +import math +import os +import tarfile +from pathlib import Path + +import pytest + +from lightning_app.source_code.tar import get_dir_size_and_count, get_split_size, MAX_SPLIT_COUNT, tar_path + + +def _create_files(basedir: Path): + source_dir = basedir / "source" + inner_dir = source_dir / "dir" + os.makedirs(inner_dir) + with open(source_dir / "f1", "w") as fp: + fp.write("f1") + + with open(inner_dir / "f2", "w") as fp: + fp.write("f2") + return source_dir, inner_dir + + +def test_max_upload_parts(): + import click + + with pytest.raises(click.ClickException): + barely_over = MAX_SPLIT_COUNT * 2**31 + 1 + get_split_size(barely_over) + + +def test_almost_max_upload_parts(): + barely_under = MAX_SPLIT_COUNT * 2**31 - 1 + assert get_split_size(barely_under) == math.ceil(barely_under / MAX_SPLIT_COUNT) + + +@pytest.mark.parametrize("size", (1024 * 512, 1024 * 1024 * 5)) +def test_get_dir_size_and_count(tmpdir: Path, size): + data = os.urandom(size) + with open(os.path.join(tmpdir, "a"), "wb") as f: + f.write(data) + with open(os.path.join(tmpdir, "b"), "wb") as f: + f.write(data) + assert get_dir_size_and_count(tmpdir, "a") == (size, 1) + + +def test_tar_path(tmpdir: Path): + source_dir, inner_dir = _create_files(tmpdir) + + # Test directory + target_file = tmpdir / "target.tar.gz" + results = 
tar_path(source_path=source_dir, target_file=target_file) + assert results.before_size > 0 + assert results.after_size > 0 + + verify_dir = tmpdir / "verify" + os.makedirs(verify_dir) + with tarfile.open(target_file) as tar: + tar.extractall(verify_dir) + + assert (verify_dir / "f1").exists() + assert (verify_dir / "dir" / "f2").exists() + + # Test single file + f2_path = inner_dir / "f2" + + target_file = tmpdir / "target_file.tar.gz" + results = tar_path(source_path=f2_path, target_file=target_file) + assert results.before_size > 0 + assert results.after_size > 0 + + verify_dir = tmpdir / "verify_file" + os.makedirs(verify_dir) + with tarfile.open(target_file) as tar: + tar.extractall(verify_dir) + + assert (verify_dir / "f2").exists() + + # Test single file (local) + current_path = os.getcwd() + try: + os.chdir(inner_dir) + + f2_path = "f2" + + target_file = tmpdir / "target_file_local.tar.gz" + results = tar_path(source_path=f2_path, target_file=target_file) + assert results.before_size > 0 + assert results.after_size > 0 + + verify_dir = tmpdir / "verify_file_local" + os.makedirs(verify_dir) + with tarfile.open(target_file) as tar: + tar.extractall(verify_dir) + + assert (verify_dir / "f2").exists() + finally: + os.chdir(current_path) + + +def test_get_split_size(): + split_size = get_split_size(minimum_split_size=1024 * 1000 * 10, max_split_count=10000, total_size=200000000001) + + # We shouldn't go over the max split count + assert math.ceil(200000000001 / split_size) <= 10000 + + split_size = get_split_size( + minimum_split_size=1024 * 1000 * 10, max_split_count=10000, total_size=1024 * 500 * 1000 * 10 + ) + + assert split_size == 1024 * 1000 * 10 + + +def test_tar_path_no_compression(tmpdir): + source_dir, _ = _create_files(tmpdir) + + target_file = tmpdir / "target.tar.gz" + tar_path(source_path=source_dir, target_file=target_file, compression=False) + + verify_dir = tmpdir / "verify" + os.makedirs(verify_dir) + with tarfile.open(target_file) as target_tar: + target_tar.extractall(verify_dir) + + assert (verify_dir / "f1").exists() + assert (verify_dir / "dir" / "f2").exists() diff --git a/tests/tests_app/source_code/test_uploader.py b/tests/tests_app/source_code/test_uploader.py new file mode 100644 index 0000000000000..82789e83e37a9 --- /dev/null +++ b/tests/tests_app/source_code/test_uploader.py @@ -0,0 +1,48 @@ +from unittest import mock +from unittest.mock import ANY, MagicMock + +import pytest + +from lightning_app.source_code import uploader + +# keeping as global var so individual tests can access/modify it +response = {"response": MagicMock(headers={"ETag": "test-etag"})} + + +class MockedRequestSession(MagicMock): + def put(self, url, data): + assert url == "https://test-url" + assert data == "test-data" + return response["response"] + + def mount(self, prefix, adapter): + assert prefix == "https://" + assert adapter.max_retries.total == 10 + + +@mock.patch("builtins.open", mock.mock_open(read_data="test-data")) +@mock.patch("lightning_app.source_code.uploader.requests.Session", MockedRequestSession) +def test_file_uploader(): + file_uploader = uploader.FileUploader( + presigned_url="https://test-url", source_file="test.txt", total_size=100, name="test.txt" + ) + file_uploader.progress = MagicMock() + + file_uploader.upload() + + file_uploader.progress.add_task.assert_called_once_with("upload", filename="test.txt", total=100) + file_uploader.progress.start.assert_called_once() + file_uploader.progress.update.assert_called_once_with(ANY, advance=9) + + 
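+# The mocked session above returns a response carrying an "ETag" header; the test
+# below removes that header to check that FileUploader.upload() raises instead of
+# silently accepting an incomplete S3 response.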
+@mock.patch("builtins.open", mock.mock_open(read_data="test-data")) +@mock.patch("lightning_app.source_code.uploader.requests.Session", MockedRequestSession) +def test_file_uploader_failing_when_no_etag(): + response["response"] = MagicMock(headers={}) + file_uploader = uploader.FileUploader( + presigned_url="https://test-url", source_file="test.txt", total_size=100, name="test.txt" + ) + file_uploader.progress = MagicMock() + + with pytest.raises(ValueError, match="Unexpected response from S3, response"): + file_uploader.upload() diff --git a/tests/tests_app/storage/__init__.py b/tests/tests_app/storage/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/storage/test_copier.py b/tests/tests_app/storage/test_copier.py new file mode 100644 index 0000000000000..cb23fb5d93f5e --- /dev/null +++ b/tests/tests_app/storage/test_copier.py @@ -0,0 +1,133 @@ +import os +import pathlib +from unittest import mock +from unittest.mock import Mock + +import pytest + +import lightning_app +from lightning_app.storage.copier import Copier, copy_files +from lightning_app.storage.path import ExistsRequest, GetRequest, Path +from lightning_app.testing.helpers import MockQueue + + +class MockPatch: + @staticmethod + def _handle_get_request(work, request): + return Path._handle_get_request(work, request) + + @staticmethod + def _handle_exists_request(work, request): + return Path._handle_exists_request(work, request) + + +@mock.patch("lightning_app.storage.copier.filesystem") +def test_copier_copies_all_files(fs_mock, tmpdir): + """Test that the Copier calls the copy with the information provided in the request.""" + copy_request_queue = MockQueue() + copy_response_queue = MockQueue() + work = mock.Mock() + work.name = MockPatch() + work._paths = {"file": dict(source="src", path="file", hash="123", destination="dest", name="name")} + with mock.patch.dict(os.environ, {"SHARED_MOUNT_DIRECTORY": str(tmpdir / ".shared")}): + copier = Copier(work, copy_request_queue=copy_request_queue, copy_response_queue=copy_response_queue) + request = GetRequest(source="src", path="file", hash="123", destination="dest", name="name") + copy_request_queue.put(request) + copier.run_once() + fs_mock().put.assert_called_once_with("file", tmpdir / ".shared" / "123") + + +def test_copier_handles_exception(monkeypatch): + """Test that the Copier captures exceptions from the file copy and forwards them through the queue without + raising it.""" + copy_request_queue = MockQueue() + copy_response_queue = MockQueue() + fs = mock.Mock() + fs.exists.return_value = False + fs.put = mock.Mock(side_effect=OSError("Something went wrong")) + monkeypatch.setattr(lightning_app.storage.copier, "filesystem", mock.Mock(return_value=fs)) + + work = mock.Mock() + work.name = MockPatch() + work._paths = {"file": dict(source="src", path="file", hash="123", destination="dest", name="name")} + copier = Copier(work, copy_request_queue=copy_request_queue, copy_response_queue=copy_response_queue) + request = GetRequest(source="src", path="file", hash="123", destination="dest", name="name") + copy_request_queue.put(request) + copier.run_once() + response = copy_response_queue.get() + assert type(response.exception) == OSError + assert response.exception.args[0] == "Something went wrong" + + +def test_copier_existence_check(tmpdir): + """Test that the Copier responds to an existence check request.""" + copy_request_queue = MockQueue() + copy_response_queue = MockQueue() + + work = mock.Mock() + work.name = MockPatch() + 
work._paths = {
+        "file": dict(source="src", path=str(tmpdir / "notexists"), hash="123", destination="dest", name="name")
+    }
+
+    copier = Copier(work, copy_request_queue=copy_request_queue, copy_response_queue=copy_response_queue)
+
+    # A Path that does NOT exist
+    request = ExistsRequest(source="src", path=str(tmpdir / "notexists"), destination="dest", name="name", hash="123")
+    copy_request_queue.put(request)
+    copier.run_once()
+    response = copy_response_queue.get()
+    assert response.exists is False
+
+    # A Path that DOES exist
+    request = ExistsRequest(source="src", path=str(tmpdir), destination="dest", name="name", hash="123")
+    copy_request_queue.put(request)
+    copier.run_once()
+    response = copy_response_queue.get()
+    assert response.exists is True
+
+
+def test_copy_files(tmpdir):
+    """Test that the `copy_files` utility can handle both files and folders when the destination does not
+    exist."""
+    # copy from a src that does not exist
+    src = pathlib.Path(tmpdir, "dir1")
+    dst = pathlib.Path(tmpdir, "dir2")
+    with pytest.raises(FileNotFoundError):
+        copy_files(src, dst)
+
+    # copy to a dst dir that does not exist
+    src.mkdir()
+    (src / "empty.txt").touch()
+    assert not dst.exists()
+    copy_files(src, dst)
+    assert dst.is_dir()
+
+    # copy to a destination dir that already exists (no error should be raised)
+    copy_files(src, dst)
+    assert dst.is_dir()
+
+    # copy file to a dst that does not exist
+    src = pathlib.Path(tmpdir, "dir3", "src-file.txt")
+    dst = pathlib.Path(tmpdir, "dir4", "dst-file.txt")
+    src.parent.mkdir(parents=True)
+    src.touch()
+    assert not dst.exists()
+    copy_files(src, dst)
+    assert dst.is_file()
+
+
+def test_copy_files_with_exception(tmpdir):
+    """Test that the `copy_files` utility properly raises exceptions from within the ThreadPoolExecutor."""
+    fs_mock = Mock()
+    fs_mock().put = Mock(side_effect=ValueError("error from thread"))
+
+    src = pathlib.Path(tmpdir, "src")
+    src.mkdir()
+    assert src.is_dir()
+    pathlib.Path(src, "file.txt").touch()
+    dst = pathlib.Path(tmpdir, "dest")
+
+    with mock.patch("lightning_app.storage.copier.filesystem", fs_mock):
+        with pytest.raises(ValueError, match="error from thread"):
+            copy_files(src, dst)
diff --git a/tests/tests_app/storage/test_drive.py b/tests/tests_app/storage/test_drive.py
new file mode 100644
index 0000000000000..3d9db44c10e13
--- /dev/null
+++ b/tests/tests_app/storage/test_drive.py
@@ -0,0 +1,232 @@
+import os
+from copy import deepcopy
+from time import sleep
+
+import pytest
+
+from lightning_app import LightningFlow, LightningWork
+from lightning_app.core.app import LightningApp
+from lightning_app.runners import MultiProcessRuntime
+from lightning_app.storage.drive import _maybe_create_drive, Drive
+from lightning_app.utilities.component import _set_flow_context
+
+
+class SyncWorkA(LightningWork):
+    def __init__(self, tmpdir):
+        super().__init__()
+        self.tmpdir = tmpdir
+
+    def run(self, drive: Drive):
+        with open(f"{self.tmpdir}/a.txt", "w") as f:
+            f.write("example")
+
+        drive.root_folder = self.tmpdir
+        drive.put("a.txt")
+        os.remove(f"{self.tmpdir}/a.txt")
+
+
+class SyncWorkB(LightningWork):
+    def run(self, drive: Drive):
+        assert not os.path.exists("a.txt")
+        drive.get("a.txt")
+        assert os.path.exists("a.txt")
+
+
+class SyncFlow(LightningFlow):
+    def __init__(self, tmpdir):
+        super().__init__()
+        self.log_dir = Drive("lit://log_dir")
+        self.work_a = SyncWorkA(str(tmpdir))
+        self.work_b = SyncWorkB()
+
+    def run(self):
+        self.work_a.run(self.log_dir)
+        self.work_b.run(self.log_dir)
+        
self._exit() + + +def test_synchronization_drive(tmpdir): + if os.path.exists("a.txt"): + os.remove("a.txt") + app = LightningApp(SyncFlow(tmpdir)) + MultiProcessRuntime(app, start_server=False).dispatch() + os.remove("a.txt") + + +class Work(LightningWork): + def __init__(self): + super().__init__(parallel=True) + self.drive = None + self.counter = 0 + + def run(self, *args, **kwargs): + if self.counter == 0: + self.drive = Drive("lit://this_drive_id") + sleep(10) + with open("a.txt", "w") as f: + f.write("example") + + self.drive.put("a.txt") + else: + assert self.drive + assert self.drive.list(".") == ["a.txt"] + self.drive.delete("a.txt") + assert self.drive.list(".") == [] + self.counter += 1 + + +class Work2(LightningWork): + def __init__(self): + super().__init__(parallel=True) + + def run(self, drive: Drive, **kwargs): + assert drive.list(".") == [] + drive.get("a.txt", timeout=60) + assert drive.list(".") == ["a.txt"] + assert drive.list(".", component_name=self.name) == [] + + +class Flow(LightningFlow): + def __init__(self): + super().__init__() + self.work = Work() + self.work2 = Work2() + + def run(self): + self.work.run("0") + if self.work.drive: + self.work2.run(self.work.drive, something="hello") + if self.work2.has_succeeded: + self.work.run("1") + if self.work.counter == 2: + self._exit() + + +def test_drive_transferring_files(): + app = LightningApp(Flow()) + MultiProcessRuntime(app, start_server=False).dispatch() + os.remove("a.txt") + + +def test_drive(): + with pytest.raises(Exception, match="The Drive id needs to start with one of the following protocols"): + Drive("this_drive_id") + + with pytest.raises( + Exception, match="The id should be unique to identify your drive. Found `this_drive_id/something_else`." + ): + Drive("lit://this_drive_id/something_else") + + drive = Drive("lit://this_drive_id") + with pytest.raises(Exception, match="The component name needs to be known to put a path to the Drive."): + drive.put(".") + + with pytest.raises(Exception, match="The component name needs to be known to delete a path to the Drive."): + drive.delete(".") + + with open("a.txt", "w") as f: + f.write("example") + + os.makedirs("checkpoints") + with open("checkpoints/a.txt", "w") as f: + f.write("example") + + drive = Drive("lit://drive_1", allow_duplicates=False) + drive.component_name = "root.work_1" + assert drive.list(".") == [] + drive.put("a.txt") + assert drive.list(".") == ["a.txt"] + drive.component_name = "root.work_2" + with pytest.raises(Exception, match="The file a.txt can't be added as already found in the Drive."): + drive.put("a.txt") + drive.get("a.txt") + + drive = Drive("lit://drive_2", allow_duplicates=False) + drive.component_name = "root.work_1" + drive.put("checkpoints/a.txt") + drive.component_name = "root.work_2" + with pytest.raises(Exception, match="The file checkpoints/a.txt can't be added as already found in the Drive."): + drive.put("checkpoints/a.txt") + + drive = Drive("lit://drive_3", allow_duplicates=False) + drive.component_name = "root.work_1" + drive.put("checkpoints/") + drive.component_name = "root.work_2" + with pytest.raises(Exception, match="The file checkpoints/a.txt can't be added as already found in the Drive."): + drive.put("checkpoints/a.txt") + + drive = Drive("lit://drive_3", allow_duplicates=True) + drive.component_name = "root.work_1" + drive.put("checkpoints/") + drive.component_name = "root.work_2" + with pytest.raises( + Exception, match="The file checkpoints/a.txt doesn't exists in the component_name space root.work_2." 
+ ): + drive.delete("checkpoints/a.txt") + drive.put("checkpoints/a.txt") + drive.delete("checkpoints/a.txt") + + drive = Drive("lit://drive_3", allow_duplicates=True) + drive.component_name = "root.work_1" + drive.put("checkpoints/") + with pytest.raises(Exception, match="['root.work_1', 'root.work_2']"): + drive.get("checkpoints/") + drive.get("checkpoints/a.txt", component_name="root.work_1") + drive.get("checkpoints/a.txt", component_name="root.work_1", timeout=1) + + with pytest.raises(FileNotFoundError): + drive.get("checkpoints/b.txt", component_name="root.work_1") + with pytest.raises(Exception, match="The following checkpoints/b.txt wasn't found in 1 seconds"): + drive.get("checkpoints/b.txt", component_name="root.work_1", timeout=1) + drive.component_name = "root.work_2" + drive.put("checkpoints/") + drive.component_name = "root.work_3" + with pytest.raises(Exception, match="We found several matching files created by multiples components"): + drive.get("checkpoints/a.txt") + with pytest.raises(Exception, match="We found several matching files created by multiples components"): + drive.get("checkpoints/a.txt", timeout=1) + + drive = Drive("lit://drive_4", allow_duplicates=True) + drive.component_name = "root.work_1" + with pytest.raises(Exception, match="The following checkpoints/a.txt wasn't found in 1 seconds."): + drive.get("checkpoints/a.txt", timeout=1) + + drive = Drive("lit://test", allow_duplicates=True) + drive.component_name = "root.work1" + drive.put("checkpoints") + drive.get("checkpoints", overwrite=True) + with pytest.raises(FileExistsError, match="overwrite=True"): + drive.get("checkpoints") + + drive = Drive("lit://drive_5", allow_duplicates=True) + drive.component_name = "root.work" + _set_flow_context() + with pytest.raises(Exception, match="The flow isn't allowed to put files into a Drive."): + drive.put("a.txt") + with pytest.raises(Exception, match="The flow isn't allowed to list files from a Drive."): + drive.list("a.txt") + with pytest.raises(Exception, match="The flow isn't allowed to get files from a Drive."): + drive.get("a.txt") + + os.remove("checkpoints/a.txt") + os.rmdir("checkpoints") + os.remove("a.txt") + + +def test_maybe_create_drive(): + + drive = Drive("lit://drive_3", allow_duplicates=False) + drive.component_name = "root.work1" + new_drive = _maybe_create_drive(drive.component_name, drive.to_dict()) + assert new_drive.protocol == drive.protocol + assert new_drive.id == drive.id + assert new_drive.component_name == drive.component_name + + +def test_drive_deepcopy(): + + drive = Drive("lit://drive", allow_duplicates=True) + drive.component_name = "root.work1" + new_drive = deepcopy(drive) + assert new_drive.id == drive.id + assert new_drive.component_name == drive.component_name diff --git a/tests/tests_app/storage/test_orchestrator.py b/tests/tests_app/storage/test_orchestrator.py new file mode 100644 index 0000000000000..ff142a8708a33 --- /dev/null +++ b/tests/tests_app/storage/test_orchestrator.py @@ -0,0 +1,77 @@ +from unittest.mock import MagicMock + +from lightning_app.storage.orchestrator import StorageOrchestrator +from lightning_app.storage.path import GetRequest, GetResponse +from lightning_app.testing.helpers import MockQueue +from lightning_app.utilities.enum import WorkStageStatus + + +def test_orchestrator(): + """Simulate orchestration when Work B requests a file from Work A.""" + request_queues = {"work_a": MockQueue(), "work_b": MockQueue()} + response_queues = {"work_a": MockQueue(), "work_b": MockQueue()} + 
copy_request_queues = {"work_a": MockQueue(), "work_b": MockQueue()} + copy_response_queues = {"work_a": MockQueue(), "work_b": MockQueue()} + app = MagicMock() + work = MagicMock() + work.status.stage = WorkStageStatus.RUNNING + app.get_component_by_name = MagicMock(return_value=work) + + orchestrator = StorageOrchestrator( + app, + request_queues=request_queues, + response_queues=response_queues, + copy_request_queues=copy_request_queues, + copy_response_queues=copy_response_queues, + ) + + # test idle behavior when queues are empty + orchestrator.run_once("work_a") + orchestrator.run_once("work_b") + assert not orchestrator.waiting_for_response + + # simulate Work B sending a request for a file in Work A + request = GetRequest(source="work_a", path="/a/b/c.txt", hash="", destination="", name="") + request_queues["work_b"].put(request) + orchestrator.run_once("work_a") + assert not orchestrator.waiting_for_response + orchestrator.run_once("work_b") + + # orchestrator is now waiting for a response for copier in Work A + assert "work_b" in orchestrator.waiting_for_response + assert not request_queues["work_a"] + assert request in copy_request_queues["work_a"] + assert request.destination == "work_b" + + # simulate loop while waiting for new elements in the queues + orchestrator.run_once("work_a") + orchestrator.run_once("work_b") + + # simulate copier A confirms that the file is available on the shared volume + response = GetResponse(source="work_a", path="/a/b/c.txt", hash="", destination="work_b", name="") + copy_request_queues["work_a"].get() + copy_response_queues["work_a"].put(response) + + # orchestrator processes confirmation and confirms to the pending request from Work B + orchestrator.run_once("work_a") + assert not copy_response_queues["work_a"] + assert response in response_queues["work_b"] + assert not orchestrator.waiting_for_response + orchestrator.run_once("work_b") + + # simulate loop while waiting for new elements in the queues + orchestrator.run_once("work_a") + orchestrator.run_once("work_b") + assert not orchestrator.waiting_for_response + + # simulate Work B receiving the confirmation that the file was copied + response = response_queues["work_b"].get() + assert response.source == "work_a" + assert response.destination == "work_b" + assert response.exception is None + + # all queues should be empty + assert all(not queue for queue in request_queues.values()) + assert all(not queue for queue in response_queues.values()) + assert all(not queue for queue in copy_request_queues.values()) + assert all(not queue for queue in copy_response_queues.values()) diff --git a/tests/tests_app/storage/test_path.py b/tests/tests_app/storage/test_path.py new file mode 100644 index 0000000000000..4fc50d1d45dcd --- /dev/null +++ b/tests/tests_app/storage/test_path.py @@ -0,0 +1,680 @@ +import json +import os +import pathlib +import pickle +from re import escape +from time import sleep +from unittest import mock +from unittest.mock import Mock + +import pytest + +from lightning_app import LightningApp, LightningFlow, LightningWork +from lightning_app.runners import MultiProcessRuntime +from lightning_app.storage.path import artifacts_path, is_lit_path, Path, shared_storage_path, storage_root_dir +from lightning_app.storage.requests import ExistsResponse, GetResponse +from lightning_app.testing.helpers import EmptyWork, MockQueue, RunIf +from lightning_app.utilities.app_helpers import LightningJSONEncoder +from lightning_app.utilities.component import _context + + +def 
test_path_instantiation(): + assert Path() == pathlib.Path() + assert Path("a/b") == pathlib.Path("a/b") + assert Path("a", "b") == pathlib.Path("a", "b") + assert Path(pathlib.Path("a"), pathlib.Path("b")) == pathlib.Path("a/b") + assert Path(Path(Path("a/b"))) == pathlib.Path("a/b") + + path = Path() + assert path._origin is path._consumer is path._request_queue is path._response_queue is None + + folder = Path("x/y/z") + folder._origin = "origin" + folder._consumer = "consumer" + + # from parts where the first is a Lightning Path and the other(s) are string + file = Path(folder, "file.txt") + assert file._origin == "origin" + assert file._consumer == "consumer" + + # from parts that are instance of Path and have no origin + file = Path(folder, Path("file.txt")) + assert file._origin == "origin" + assert file._consumer == "consumer" + + # from parts that are instance of Path and have a different origin than the top folder + filename = Path("file.txt") + filename._origin = "different" + with pytest.raises(TypeError, match="Tried to instantiate a Lightning Path from multiple other Paths"): + Path(folder, filename) + + # from parts that are instance of Path and have the SAME origin as the top folder + filename = Path("file.txt") + filename._origin = "origin" + file = Path(folder, filename) + assert file._origin == "origin" + assert file._consumer == "consumer" + + +def test_path_instantiation_lit(): + assert Path("lit://") == storage_root_dir() + assert Path("lit://a/b") == pathlib.Path(storage_root_dir(), "a/b") + assert Path("lit://", "a", "b") == pathlib.Path(storage_root_dir(), "a", "b") + assert Path("lit://", pathlib.Path("a"), pathlib.Path("b")) == pathlib.Path(storage_root_dir(), "a/b") + assert Path(Path(Path("lit://a/b"))) == pathlib.Path(storage_root_dir(), "a", "b") + assert str(Path("lit://lit-path")) == os.path.join(storage_root_dir(), "lit-path") + + +def test_is_lit_path(): + assert not is_lit_path("lit") + assert not is_lit_path(Path("lit")) + assert is_lit_path("lit://") + assert is_lit_path(Path("lit://")) + assert is_lit_path("lit://a/b/c") + assert is_lit_path(Path("lit://a/b/c")) + assert is_lit_path(storage_root_dir()) + + +def test_path_copy(): + """Test that Path creates an exact copy when passing a Path instance to the constructor.""" + path = Path("x/y/z") + path._origin = "origin" + path._consumer = "consumer" + path._request_queue = Mock() + path._response_queue = Mock() + path_copy = Path(path) + assert path_copy._origin == path._origin + assert path_copy._consumer == path._consumer + assert path_copy._request_queue == path._request_queue + assert path_copy._response_queue == path._response_queue + + +def test_path_inheritance(): + """Test that the Lightning Path is a drop-in replacement for pathlib.Path without compromises.""" + file = Path("file.txt") + pathlibfile = pathlib.Path("file.txt") + assert file == pathlibfile + assert isinstance(file, Path) + assert isinstance(file, pathlib.Path) + + folder = Path("./x/y") + file = folder / "file.txt" + assert isinstance(file, Path) + + file.with_suffix(".png") + assert isinstance(file, Path) + + +def test_path_concatenation(): + """Test that path concatentaions keep the properties of the paths on the right-hand side of the join.""" + folder = Path("x/y/z") + folder._origin = "origin" + folder._consumer = "consumer" + other = Path("other") + + # test __truediv__ when Path is on the left-hand side + file = folder / other / "more" / "file.txt" + assert file._origin == "origin" + assert file._consumer == "consumer" + 
+    # test __rtruediv__ when Path is on the right-hand side
+    switched = pathlib.Path("/") / folder
+    assert isinstance(switched, Path)
+    assert file._origin == "origin"
+    assert file._consumer == "consumer"
+
+
+def test_path_with_replacement():
+    """Test that the ``Path.with_*`` modifiers keep the properties."""
+    folder = Path("x", "y", "z")
+    folder._origin = "origin"
+    folder._consumer = "consumer"
+
+    # with_name
+    file = folder.with_name("file.txt")
+    assert str(file) == os.path.join("x", "y", "file.txt")
+    assert file._origin == "origin"
+    assert file._consumer == "consumer"
+
+    # with_suffix
+    file = file.with_suffix(".png")
+    assert str(file) == os.path.join("x", "y", "file.png")
+    assert file._origin == "origin"
+    assert file._consumer == "consumer"
+
+    # relative_to
+    rel_path = folder.relative_to("x")
+    assert str(rel_path) == os.path.join("y", "z")
+    assert rel_path._origin == "origin"
+    assert rel_path._consumer == "consumer"
+
+
+@RunIf(min_python="3.9")
+def test_path_with_stem_replacement():
+    """Test that the ``Path.with_stem`` modifier keeps the properties.
+
+    This is only available in Python 3.9+.
+    """
+    file = Path("x", "y", "file.txt")
+    file._origin = "origin"
+    file._consumer = "consumer"
+    file = file.with_stem("text")
+    assert str(file) == os.path.join("x", "y", "text.txt")
+    assert file._origin == "origin"
+    assert file._consumer == "consumer"
+
+
+def test_path_parents():
+    """Test that the ``Path.parent`` and ``Path.parents`` properties return Paths that inherit the origin and
+    consumer attributes."""
+    path = Path("a", "b", "c", "d")
+    path._origin = "origin"
+    path._consumer = "consumer"
+
+    # .parent
+    assert isinstance(path.parent, Path)
+    assert str(path.parent) == os.path.join("a", "b", "c")
+    assert path.parent._origin == "origin"
+    assert path.parent._consumer == "consumer"
+
+    # .parents
+    assert path.parents == [Path("a", "b", "c"), Path("a", "b"), Path("a"), Path(".")]
+    assert all(parent._origin == "origin" for parent in path.parents)
+    assert all(parent._consumer == "consumer" for parent in path.parents)
+
+
+def test_path_hash():
+    """Test that the value of the Path hash is a function of the path name and the origin."""
+    # a path without origin has no hash
+    assert Path("one").hash is Path("two").hash is None
+
+    # identical paths with identical origins have the same hash
+    path1 = Path("one")
+    path2 = Path("one")
+    path1._origin = "origin1"
+    path1._consumer = "consumer1"
+    path2._origin = "origin1"
+    path2._consumer = "consumer2"
+    assert path1.hash == path2.hash
+
+    # identical paths with different origins have different hash
+    path2._origin = "origin2"
+    assert path1.hash != path2.hash
+
+    # different paths but same owner yield a different hash
+    path1 = Path("one")
+    path2 = Path("other")
+    path1._origin = "same"
+    path2._origin = "same"
+    assert path1.hash != path2.hash
+
+
+def test_path_pickleable():
+    path = Path("a/b/c.txt")
+    path._origin = "root.x.y.z"
+    path._consumer = "root.p.q.r"
+    path._request_queue = Mock()
+    path._response_queue = Mock()
+    loaded = pickle.loads(pickle.dumps(path))
+    assert isinstance(loaded, Path)
+    assert loaded == path
+    assert loaded._origin == path._origin
+    assert loaded._consumer == path._consumer
+    assert loaded._request_queue is None
+    assert loaded._response_queue is None
+
+
+def test_path_json_serializable():
+    path = Path("a/b/c.txt")
+    path._origin = "root.x.y.z"
+    path._consumer = "root.p.q.r"
+    path._request_queue = Mock()
+    path._response_queue = Mock()
+    json_dump = json.dumps(path, cls=LightningJSONEncoder)
+    assert "path" in json_dump
+    # the replacement of \ is needed for Windows paths
+    assert str(path).replace("\\", "\\\\") in json_dump
+    assert "origin_name" in json_dump
+    assert path._origin in json_dump
+    assert "consumer_name" in json_dump
+    assert path._consumer in json_dump
+
+
+def test_path_to_dict_from_dict():
+    path = Path("a/b/c.txt")
+    path._origin = "root.x.y.z"
+    path._consumer = "root.p.q.r"
+    path._request_queue = Mock()
+    path._response_queue = Mock()
+    path_dict = path.to_dict()
+    same_path = Path.from_dict(path_dict)
+    assert same_path == path
+    assert same_path._origin == path._origin
+    assert same_path._consumer == path._consumer
+    assert same_path._request_queue is None
+    assert same_path._response_queue is None
+    assert same_path._metadata == path._metadata
+
+
+def test_path_attach_work():
+    """Test that attaching a path to a LightningWork will make the Work either the origin or a consumer."""
+    path = Path()
+    assert path._origin is None
+    work1 = EmptyWork()
+    work2 = EmptyWork()
+    work3 = EmptyWork()
+    path._attach_work(work=work1)
+    assert path._origin is work1
+    # path already has an owner
+    path._attach_work(work=work2)
+    assert path._origin is work1
+    assert path._consumer is work2
+
+    # path gets a new consumer
+    path._attach_work(work=work3)
+    assert path._origin is work1
+    assert path._consumer is work3
+
+
+def test_path_attach_queues():
+    path = Path()
+    request_queue = Mock()
+    response_queue = Mock()
+    path._attach_queues(request_queue=request_queue, response_queue=response_queue)
+    assert path._request_queue is request_queue
+    assert path._response_queue is response_queue
+
+
+@pytest.mark.parametrize("cls", [LightningFlow, LightningWork])
+def test_path_in_flow_and_work(cls, tmpdir):
+    class PathComponent(cls):
+        def __init__(self):
+            super().__init__()
+            self.path_one = Path("a", "b")
+            self.path_one = Path("a", "b", "c")
+            self.path_two = Path(tmpdir) / "write.txt"
+
+        def run(self):
+            self.path_one = self.path_one / "d.txt"
+            assert self.path_one == Path("a", "b", "c", "d.txt")
+            with open(self.path_two, "w") as file:
+                file.write("Hello")
+
+    class RootFlow(LightningFlow):
+        def __init__(self):
+            super().__init__()
+            self.path_component = PathComponent()
+
+        def run(self):
+            self.path_component.run()
+
+    root = RootFlow()
+    _ = LightningApp(root)  # create an app to convert all paths that got attached
+
+    root.run()
+
+    assert root.path_component.path_one == Path("a", "b", "c", "d.txt")
+    assert root.path_component.path_one == pathlib.Path("a", "b", "c", "d.txt")
+    if isinstance(root.path_component, LightningWork):
+        assert root.path_component.path_one.origin_name == "root.path_component"
+        assert root.path_component.path_one.consumer_name == "root.path_component"
+    else:
+        assert root.path_component.path_one._origin is None
+        assert root.path_component.path_one._consumer is None
+    assert open(root.path_component.path_two).readlines() == ["Hello"]
+
+
+class SourceWork(LightningWork):
+    def __init__(self, tmpdir):
+        super().__init__(cache_calls=True)
+        self.path = Path(tmpdir, "src.txt")
+        assert self.path.origin_name == ""
+
+    def run(self):
+        with open(self.path, "w") as f:
+            f.write("Hello from SourceWork")
+
+
+class DestinationWork(LightningWork):
+    def __init__(self, source_path):
+        super().__init__(cache_calls=True)
+        assert source_path.origin_name == "root.src_work"
+        self.path = source_path
+        assert self.path.origin_name == "root.src_work"
+        self.other = Path("other")
+        assert self.other.origin_name == ""
+
+    
def run(self): + assert self.path.origin_name == "root.src_work" + assert self.other.origin_name == "root.dst_work" + # we are running locally, the file is already there (no transfer needed) + self.path.get(overwrite=True) + assert self.path.is_file() + assert self.path.read_text() == "Hello from SourceWork" + + +class SourceToDestFlow(LightningFlow): + def __init__(self, tmpdir): + super().__init__() + self.src_work = SourceWork(tmpdir) + self.dst_work = DestinationWork(self.src_work.path) + + def run(self): + self.src_work.run() + if self.src_work.has_succeeded: + self.dst_work.run() + if self.dst_work.has_succeeded: + self._exit() + + +def test_multiprocess_path_in_work_and_flow(tmpdir): + root = SourceToDestFlow(tmpdir) + app = LightningApp(root, debug=True) + MultiProcessRuntime(app, start_server=False).dispatch() + + +class DynamicSourceToDestFlow(LightningFlow): + def __init__(self, tmpdir): + super().__init__() + self.tmpdir = str(tmpdir) + + def run(self): + if not hasattr(self, "src_work"): + self.src_work = SourceWork(self.tmpdir) + self.src_work.run() + if self.src_work.has_succeeded: + if not hasattr(self, "dst_work"): + self.dst_work = DestinationWork(self.src_work.path) + self.dst_work.run() + if hasattr(self, "dst_work") and self.dst_work.has_succeeded: + self._exit() + + +# FIXME(alecmerdler): This test is failing... +def test_multiprocess_path_in_work_and_flow_dynamic(tmpdir): + root = DynamicSourceToDestFlow(tmpdir) + app = LightningApp(root) + MultiProcessRuntime(app).dispatch() + + +class RunPathFlow(LightningFlow): + def __init__(self): + super().__init__() + self.src_work = PathSourceWork() + self.run_work = RunPathWork(cache_calls=True) + + def run(self): + self.src_work.run() + assert self.src_work.src_path_0.origin_name == "root.src_work" + assert self.src_work.src_path_0.consumer_name == "root.src_work" + + # local_path is not attached to any Work + local_path_0 = Path("local", "file_0.txt") + local_path_1 = Path("local", "file_1.txt") + assert local_path_0.origin_name is None + assert local_path_0.consumer_name is None + + nested_local_path = (99, {"nested": local_path_1}) + nested_kwarg_path = ["x", (self.src_work.src_path_1,)] + + # TODO: support returning a path from run() + self.run_work.run( + self.src_work.src_path_0, + local_path_0, + nested_local_path, + kwarg_path=local_path_1, + nested_kwarg_path=nested_kwarg_path, + ) + sleep(1) + self._exit() + + +class PathSourceWork(EmptyWork): + def __init__(self): + super().__init__() + self.src_path_0 = Path("src", "file_0.txt") + self.src_path_1 = Path("src", "file_1.txt") + + +class RunPathWork(LightningWork): + def run(self, src_path_0, local_path_0, nested_local_path, kwarg_path=None, nested_kwarg_path=None): + all_paths = [] + + # src_path_0 has an origin which must be preserved, this work becomes consumer + assert str(src_path_0) == os.path.join("src", "file_0.txt") + assert src_path_0.origin_name == "root.src_work" + all_paths.append(src_path_0) + + # local_path_0 had no origin, this work becomes both the origin and the consumer + assert str(local_path_0) == os.path.join("local", "file_0.txt") + assert local_path_0.origin_name is None + assert local_path_0.consumer_name is None + all_paths.append(local_path_0) + + # nested_local_path is a nested container that contains a Path + assert str(nested_local_path[1]["nested"]) == os.path.join("local", "file_1.txt") + assert nested_local_path[1]["nested"].origin_name is None + assert nested_local_path[1]["nested"].consumer_name is None + 
all_paths.append(nested_local_path[1]["nested"])
+
+        # keyword arguments can also contain Paths
+        assert str(kwarg_path) == os.path.join("local", "file_1.txt")
+        assert kwarg_path.origin_name is None
+        assert kwarg_path.consumer_name is None
+        all_paths.append(kwarg_path)
+
+        assert str(nested_kwarg_path[1][0]) == os.path.join("src", "file_1.txt")
+        assert nested_kwarg_path[1][0].origin_name == "root.src_work"
+        all_paths.append(nested_kwarg_path[1][0])
+
+        assert all(p._request_queue == self._request_queue for p in all_paths)
+        assert all(p._response_queue == self._response_queue for p in all_paths)
+        assert all(p.consumer_name == self.name == "root.run_work" for p in all_paths)
+
+
+def test_path_as_argument_to_run_method():
+    """Test that Path objects can be passed as arguments to the run() method of a Work in various ways such that
+    the origin, consumer and queues get automatically attached."""
+    root = RunPathFlow()
+    app = LightningApp(root)
+    MultiProcessRuntime(app, start_server=False).dispatch()
+
+
+def test_path_get_errors(tmpdir):
+    with _context("work"):
+
+        with pytest.raises(
+            RuntimeError, match="Trying to get the file .* but the path is not attached to a LightningApp"
+        ):
+            Path().get()
+
+        with pytest.raises(
+            RuntimeError, match="Trying to get the file .* but the path is not attached to a LightningWork"
+        ):
+            path = Path()
+            path._attach_queues(Mock(), Mock())
+            path.get()
+
+        with pytest.raises(FileExistsError, match="The file or folder .* exists locally. Pass `overwrite=True"):
+            path = Path(tmpdir)
+            path._attach_queues(Mock(), Mock())
+            path._attach_work(Mock())
+            path.get()
+
+
+class SourceOverwriteWork(LightningWork):
+    def __init__(self, tmpdir):
+        super().__init__(raise_exception=True)
+        self.path = Path(tmpdir, "folder")
+
+    def run(self):
+        self.path.mkdir(parents=True, exist_ok=True)
+        (self.path / "file.txt").touch()
+        assert self.path.exists_local()
+
+
+class DestinationOverwriteWork(LightningWork):
+    def __init__(self, source_path):
+        super().__init__(raise_exception=True)
+        self.path = source_path
+
+    def run(self):
+        assert self.path.exists()
+        with mock.patch("lightning_app.storage.path.shutil") as shutil_mock:
+            self.path.get(overwrite=True)
+            shutil_mock.rmtree.assert_called_with(self.path)
+        assert self.path.exists()
+        assert (self.path / "file.txt").exists()
+
+
+class OverwriteFolderFlow(LightningFlow):
+    def __init__(self, tmpdir):
+        super().__init__()
+        self.src_work = SourceOverwriteWork(tmpdir)
+        self.dst_work = DestinationOverwriteWork(self.src_work.path)
+
+    def run(self):
+        self.src_work.run()
+        if self.src_work.has_succeeded:
+            self.dst_work.run()
+        if self.dst_work.has_succeeded:
+            self._exit()
+
+
+def test_path_get_overwrite(tmpdir):
+    """Test that .get(overwrite=True) overwrites the entire directory and replaces all files."""
+    root = OverwriteFolderFlow(tmpdir)
+    app = LightningApp(root, debug=True)
+    MultiProcessRuntime(app, start_server=False).dispatch()
+
+
+def test_path_get_error_in_flow_context():
+    with pytest.raises(RuntimeError, match=escape("`Path.get()` can only be called from within the `run()`")):
+        with _context("flow"):
+            Path().get()
+
+
+def test_path_response_with_exception(tmpdir):
+    request_queue = MockQueue()
+    response_queue = MockQueue()
+    path = Path(tmpdir / "file.txt")
+    path._attach_queues(request_queue, response_queue)
+    path._origin = "origin"
+    path._consumer = "consumer"
+
+    # simulate that a response will come with an exception raised
+    response_queue.put(
+        GetResponse(
+            source="origin",
+            path=str(tmpdir / "file.txt"),
+            hash=path.hash,
+            destination="consumer",
+            exception=OSError("Something went wrong"),
+            name="",
+        )
+    )
+
+    with pytest.raises(RuntimeError, match="An exception was raised while trying to transfer the contents at"):
+        with _context("work"):
+            path.get()
+
+
+def test_path_response_not_matching_request(tmpdir):
+    request_queue = MockQueue()
+    response_queue = MockQueue()
+    path = Path(tmpdir / "file.txt")
+    path._attach_queues(request_queue, response_queue)
+    path._origin = "origin"
+    path._consumer = "consumer"
+
+    # simulate a response that has a different owner than the request had
+    response = GetResponse(
+        source="other_origin", path=str(tmpdir / "file.txt"), hash=path.hash, destination="consumer", name=""
+    )
+
+    response_queue.put(response)
+    with pytest.raises(
+        RuntimeError, match="Tried to get the file .* but received a response for a request it did not send."
+    ):
+        path.get()
+
+    # simulate a response that has a different hash than the request had
+    assert not response_queue
+    response.path = str(path)
+    response.hash = "other_hash"
+    response_queue.put(response)
+    with pytest.raises(
+        RuntimeError, match="Tried to get the file .* but received a response for a request it did not send."
+    ):
+        path.get()
+
+
+def test_path_exists(tmpdir):
+    """Test that Path.exists() behaves as expected: first it should check if the file exists locally, and if
+    not, send a message to the orchestrator to eventually check the existence on the origin Work."""
+    # Local Path (no Work queues attached)
+    assert not Path("file").exists()
+    assert Path(tmpdir).exists()
+    with open(tmpdir / "file", "w"):
+        assert Path(tmpdir / "file").exists()
+
+    # A local path that exists
+    path = Path(tmpdir)
+    path.exists_remote = Mock()
+    path.exists_local = Mock(return_value=True)
+    assert path.exists() is True
+    path.exists_local.assert_called_once()
+    path.exists_remote.assert_not_called()  # don't check remotely
+
+    # A local path that does not exist, but has no Work attached
+    path = Path("not-exists.txt")
+    path.exists_local = Mock(return_value=False)
+    path.exists_remote = Mock()
+    assert not path.exists()
+    path.exists_local.assert_called_once()
+    path.exists_remote.assert_not_called()  # don't check remotely
+
+    # A local path that does not exist, but it exists remotely
+    path = Path("exists-remotely-only.txt")
+    path.exists_local = Mock(return_value=False)
+    path.exists_remote = Mock(return_value=True)
+    path._origin = "origin"
+    assert path.exists()
+    path.exists_local.assert_called_once()
+    path.exists_remote.assert_called_once()  # check remotely
+
+
+def test_path_exists_local(tmpdir):
+    assert not Path("file").exists_local()
+    assert Path(tmpdir).exists_local()
+    with open(tmpdir / "file", "w"):
+        assert Path(tmpdir / "file").exists_local()
+
+
+def test_path_exists_remote(tmpdir):
+    path = Path(tmpdir / "not-attached.txt")
+    with pytest.raises(RuntimeError, match="the path is not attached to a LightningWork"):
+        path.exists_remote()
+
+    # If Path does not exist locally, ask the orchestrator
+    request_queue = MockQueue()
+    response_queue = MockQueue()
+    path = Path(tmpdir / "not-exists.txt")
+    path._attach_queues(request_queue, response_queue)
+    path._origin = "origin"
+    path._consumer = "consumer"
+
+    # Put the response into the queue to simulate the orchestrator responding
+    response_queue.put(ExistsResponse(source=path.origin_name, path=str(path), name="", hash="123", exists=False))
+    assert not path.exists_remote()
+    assert request_queue.get()
+
+    
response_queue.put(ExistsResponse(source=path.origin_name, path=str(path), name="", hash="123", exists=True)) + assert path.exists_remote() + assert request_queue.get() + + +def test_artifacts_path(): + work = Mock() + work.name = "root.flow.work" + assert artifacts_path(work) == shared_storage_path() / "artifacts" / "root.flow.work" diff --git a/tests/tests_app/storage/test_payload.py b/tests/tests_app/storage/test_payload.py new file mode 100644 index 0000000000000..7a64750a01a92 --- /dev/null +++ b/tests/tests_app/storage/test_payload.py @@ -0,0 +1,148 @@ +import pathlib +import pickle +from copy import deepcopy +from unittest import mock +from unittest.mock import Mock + +import pytest + +from lightning_app import LightningApp, LightningFlow, LightningWork +from lightning_app.runners.multiprocess import MultiProcessRuntime +from lightning_app.storage.payload import GetRequest, Payload + + +def test_payload_copy(): + """Test that Payload creates an exact copy when passing a Payload instance to the constructor.""" + payload = Payload(None) + payload._origin = "origin" + payload._consumer = "consumer" + payload._request_queue = "MockQueue" + payload._response_queue = "MockQueue" + payload_copy = deepcopy(payload) + assert payload_copy._origin == payload._origin + assert payload_copy._consumer == payload._consumer + assert payload_copy._request_queue == payload._request_queue + assert payload_copy._response_queue == payload._response_queue + + +def test_payload_pickable(): + payload = Payload("MyObject") + payload._origin = "root.x.y.z" + payload._consumer = "root.p.q.r" + payload._name = "var_a" + loaded = pickle.loads(pickle.dumps(payload)) + + assert isinstance(loaded, Payload) + assert loaded._origin == payload._origin + assert loaded._consumer == payload._consumer + assert loaded._name == payload._name + assert loaded._request_queue is None + assert loaded._response_queue is None + + +def test_path_attach_queues(): + path = Payload(None) + request_queue = Mock() + response_queue = Mock() + path._attach_queues(request_queue=request_queue, response_queue=response_queue) + assert path._request_queue is request_queue + assert path._response_queue is response_queue + + +class Work(LightningWork): + def __init__(self): + super().__init__() + self.var_a = Payload(None) + + def run(self): + pass + + +def test_payload_in_init(): + with pytest.raises( + AttributeError, match="The Payload object should be set only within the run method of the work." 
+ ): + Work() + + +class WorkRun(LightningWork): + def __init__(self, tmpdir): + super().__init__() + self.var_a = None + self.tmpdir = tmpdir + + def run(self): + self.var_a = Payload("something") + assert self.var_a.name == "var_a" + assert self.var_a._origin == "root.a" + assert self.var_a.hash == "9bd514ad51fc33d895c50657acd0f0582301cf3e" + source_path = pathlib.Path(self.tmpdir, self.var_a.name) + assert not source_path.exists() + response = self.var_a._handle_get_request( + self, + GetRequest( + name="var_a", + hash=self.var_a.hash, + source="root.a", + path=str(source_path), + destination="root", + ), + ) + assert source_path.exists() + assert self.var_a.load(str(source_path)) == "something" + assert not response.exception + + +def test_payload_in_run(tmpdir): + work = WorkRun(str(tmpdir)) + work._name = "root.a" + work.run() + + +class Sender(LightningWork): + def __init__(self): + super().__init__(parallel=True) + self.value_all = None + self.value_b = None + self.value_c = None + + def run(self): + self.value_all = Payload(["A", "B", "C"]) + self.value_b = Payload("B") + self.value_c = Payload("C") + + +class WorkReceive(LightningWork): + def __init__(self, expected): + super().__init__(parallel=True) + self.expected = expected + + def run(self, generated): + assert generated.value == self.expected + + +class Flow(LightningFlow): + def __init__(self): + super().__init__() + self.sender = Sender() + self.receiver_all = WorkReceive(["A", "B", "C"]) + self.receiver_b = WorkReceive("B") + self.receiver_c = WorkReceive("C") + + def run(self): + self.sender.run() + if self.sender.value_all: + self.receiver_all.run(self.sender.value_all) + if self.sender.value_b: + self.receiver_b.run(self.sender.value_b) + if self.sender.value_c: + self.receiver_c.run(self.sender.value_c) + if self.receiver_all.has_succeeded and self.receiver_b.has_succeeded and self.receiver_c.has_succeeded: + self._exit() + + +def test_payload_works(tmpdir): + """This tests validates the payload api can be used to transfer return values from a work to another.""" + with mock.patch("lightning_app.storage.path.storage_root_dir", lambda: pathlib.Path(tmpdir)): + app = LightningApp(Flow(), debug=True) + MultiProcessRuntime(app, start_server=False).dispatch() diff --git a/tests/tests_app/structures/__init__.py b/tests/tests_app/structures/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/structures/test_structures.py b/tests/tests_app/structures/test_structures.py new file mode 100644 index 0000000000000..aaa7db18a5af2 --- /dev/null +++ b/tests/tests_app/structures/test_structures.py @@ -0,0 +1,442 @@ +import os +from copy import deepcopy + +import pytest + +from lightning_app import LightningApp, LightningFlow, LightningWork +from lightning_app.runners import MultiProcessRuntime, SingleProcessRuntime +from lightning_app.storage.payload import Payload +from lightning_app.structures import Dict, List +from lightning_app.testing.helpers import EmptyFlow +from lightning_app.utilities.enum import WorkStageStatus + + +def test_dict(): + class WorkA(LightningWork): + def __init__(self): + super().__init__(port=1) + self.c = 0 + + def run(self): + pass + + class A(LightningFlow): + def __init__(self): + super().__init__() + self.dict = Dict(**{"work_a": WorkA(), "work_b": WorkA(), "work_c": WorkA(), "work_d": WorkA()}) + + def run(self): + pass + + flow = A() + + # TODO: these assertions are wrong, the works are getting added under "flows" instead of "works" + # state + assert 
len(flow.state["structures"]["dict"]["works"]) == len(flow.dict) == 4 + assert list(flow.state["structures"]["dict"]["works"].keys()) == ["work_a", "work_b", "work_c", "work_d"] + assert all( + flow.state["structures"]["dict"]["works"][f"work_{k}"]["vars"] + == { + "c": 0, + "_url": "", + "_future_url": "", + "_port": 1, + "_host": "127.0.0.1", + "_paths": {}, + "_restarting": False, + "_internal_ip": "", + } + for k in ("a", "b", "c", "d") + ) + assert all( + flow.state["structures"]["dict"]["works"][f"work_{k}"]["calls"] == {"latest_call_hash": None} + for k in ("a", "b", "c", "d") + ) + assert all(flow.state["structures"]["dict"]["works"][f"work_{k}"]["changes"] == {} for k in ("a", "b", "c", "d")) + + # state_vars + assert len(flow.state_vars["structures"]["dict"]["works"]) == len(flow.dict) == 4 + assert list(flow.state_vars["structures"]["dict"]["works"].keys()) == ["work_a", "work_b", "work_c", "work_d"] + assert all( + flow.state_vars["structures"]["dict"]["works"][f"work_{k}"]["vars"] + == { + "c": 0, + "_url": "", + "_future_url": "", + "_port": 1, + "_host": "127.0.0.1", + "_paths": {}, + "_restarting": False, + "_internal_ip": "", + } + for k in ("a", "b", "c", "d") + ) + + # state_with_changes + assert len(flow.state_with_changes["structures"]["dict"]["works"]) == len(flow.dict) == 4 + assert list(flow.state_with_changes["structures"]["dict"]["works"].keys()) == [ + "work_a", + "work_b", + "work_c", + "work_d", + ] + assert all( + flow.state_with_changes["structures"]["dict"]["works"][f"work_{k}"]["vars"] + == { + "c": 0, + "_url": "", + "_future_url": "", + "_port": 1, + "_host": "127.0.0.1", + "_paths": {}, + "_restarting": False, + "_internal_ip": "", + } + for k in ("a", "b", "c", "d") + ) + assert all( + flow.state_with_changes["structures"]["dict"]["works"][f"work_{k}"]["calls"] == {"latest_call_hash": None} + for k in ("a", "b", "c", "d") + ) + assert all( + flow.state_with_changes["structures"]["dict"]["works"][f"work_{k}"]["changes"] == {} + for k in ("a", "b", "c", "d") + ) + + # set_state + state = deepcopy(flow.state) + state["structures"]["dict"]["works"]["work_b"]["vars"]["c"] = 1 + flow.set_state(state) + assert flow.dict["work_b"].c == 1 + + +def test_dict_name(): + d = Dict(a=EmptyFlow(), b=EmptyFlow()) + assert d.name == "root" + assert d["a"].name == "root.a" + assert d["b"].name == "root.b" + + class RootFlow(LightningFlow): + def __init__(self): + super().__init__() + self.dict = Dict(x=EmptyFlow(), y=EmptyFlow()) + + def run(self): + pass + + root = RootFlow() + assert root.name == "root" + assert root.dict.name == "root.dict" + assert root.dict["x"].name == "root.dict.x" + assert root.dict["y"].name == "root.dict.y" + + +def test_list(): + class WorkA(LightningWork): + def __init__(self): + super().__init__(port=1) + self.c = 0 + + def run(self): + pass + + class A(LightningFlow): + def __init__(self): + super().__init__() + self.list = List(WorkA(), WorkA(), WorkA(), WorkA()) + + def run(self): + pass + + flow = A() + + # TODO: these assertions are wrong, the works are getting added under "flows" instead of "works" + # state + assert len(flow.state["structures"]["list"]["works"]) == len(flow.list) == 4 + assert list(flow.state["structures"]["list"]["works"].keys()) == ["0", "1", "2", "3"] + assert all( + flow.state["structures"]["list"]["works"][str(i)]["vars"] + == { + "c": 0, + "_url": "", + "_future_url": "", + "_port": 1, + "_host": "127.0.0.1", + "_paths": {}, + "_restarting": False, + "_internal_ip": "", + } + for i in range(4) + ) + assert all( + 
flow.state["structures"]["list"]["works"][str(i)]["calls"] == {"latest_call_hash": None} for i in range(4) + ) + assert all(flow.state["structures"]["list"]["works"][str(i)]["changes"] == {} for i in range(4)) + + # state_vars + assert len(flow.state_vars["structures"]["list"]["works"]) == len(flow.list) == 4 + assert list(flow.state_vars["structures"]["list"]["works"].keys()) == ["0", "1", "2", "3"] + assert all( + flow.state_vars["structures"]["list"]["works"][str(i)]["vars"] + == { + "c": 0, + "_url": "", + "_future_url": "", + "_port": 1, + "_host": "127.0.0.1", + "_paths": {}, + "_restarting": False, + "_internal_ip": "", + } + for i in range(4) + ) + + # state_with_changes + assert len(flow.state_with_changes["structures"]["list"]["works"]) == len(flow.list) == 4 + assert list(flow.state_with_changes["structures"]["list"]["works"].keys()) == ["0", "1", "2", "3"] + assert all( + flow.state_with_changes["structures"]["list"]["works"][str(i)]["vars"] + == { + "c": 0, + "_url": "", + "_future_url": "", + "_port": 1, + "_host": "127.0.0.1", + "_paths": {}, + "_restarting": False, + "_internal_ip": "", + } + for i in range(4) + ) + assert all( + flow.state_with_changes["structures"]["list"]["works"][str(i)]["calls"] == {"latest_call_hash": None} + for i in range(4) + ) + assert all(flow.state_with_changes["structures"]["list"]["works"][str(i)]["changes"] == {} for i in range(4)) + + # set_state + state = deepcopy(flow.state) + state["structures"]["list"]["works"]["0"]["vars"]["c"] = 1 + flow.set_state(state) + assert flow.list[0].c == 1 + + +def test_list_name(): + lst = List(EmptyFlow(), EmptyFlow()) + assert lst.name == "root" + assert lst[0].name == "root.0" + assert lst[1].name == "root.1" + + class RootFlow(LightningFlow): + def __init__(self): + super().__init__() + self.list = List(EmptyFlow(), EmptyFlow()) + + def run(self): + pass + + root = RootFlow() + assert root.name == "root" + assert root.list.name == "root.list" + assert root.list[0].name == "root.list.0" + assert root.list[1].name == "root.list.1" + + +class CounterWork(LightningWork): + def __init__(self, cache_calls, parallel=False): + super().__init__(cache_calls=cache_calls, parallel=parallel) + self.counter = 0 + + def run(self): + self.counter += 1 + + +@pytest.mark.skipif(True, reason="tchaton: Resolve this test.") +@pytest.mark.parametrize("runtime_cls", [MultiProcessRuntime, SingleProcessRuntime]) +@pytest.mark.parametrize("run_once_iterable", [False, True]) +@pytest.mark.parametrize("cache_calls", [False, True]) +@pytest.mark.parametrize("use_list", [False, True]) +def test_structure_with_iterate_and_fault_tolerance(runtime_cls, run_once_iterable, cache_calls, use_list): + class DummyFlow(LightningFlow): + def __init__(self): + super().__init__() + self.counter = 0 + + def run(self): + pass + + class RootFlow(LightningFlow): + def __init__(self, use_list, run_once_iterable, cache_calls): + super().__init__() + self.looping = 0 + self.run_once_iterable = run_once_iterable + self.restarting = False + if use_list: + self.iter = List( + CounterWork(cache_calls), + CounterWork(cache_calls), + CounterWork(cache_calls), + CounterWork(cache_calls), + DummyFlow(), + ) + else: + self.iter = Dict( + **{ + "0": CounterWork(cache_calls), + "1": CounterWork(cache_calls), + "2": CounterWork(cache_calls), + "3": CounterWork(cache_calls), + "4": DummyFlow(), + } + ) + + def run(self): + for work_idx, work in self.experimental_iterate(enumerate(self.iter), run_once=self.run_once_iterable): + if not self.restarting and work_idx == 
1: + # gives time to the delta to be sent. + self._exit() + if isinstance(work, str) and isinstance(self.iter, Dict): + work = self.iter[work] + work.run() + if self.looping > 0: + self._exit() + self.looping += 1 + + app = LightningApp(RootFlow(use_list, run_once_iterable, cache_calls)) + runtime_cls(app, start_server=False).dispatch() + assert app.root.iter[0 if use_list else "0"].counter == 1 + assert app.root.iter[1 if use_list else "1"].counter == 0 + assert app.root.iter[2 if use_list else "2"].counter == 0 + assert app.root.iter[3 if use_list else "3"].counter == 0 + + app = LightningApp(RootFlow(use_list, run_once_iterable, cache_calls)) + app.root.restarting = True + runtime_cls(app, start_server=False).dispatch() + + if run_once_iterable: + expected_value = 1 + else: + expected_value = 1 if cache_calls else 2 + assert app.root.iter[0 if use_list else "0"].counter == expected_value + assert app.root.iter[1 if use_list else "1"].counter == expected_value + assert app.root.iter[2 if use_list else "2"].counter == expected_value + assert app.root.iter[3 if use_list else "3"].counter == expected_value + + +class CheckpointCounter(LightningWork): + def __init__(self): + super().__init__(cache_calls=False) + self.counter = 0 + + def run(self): + self.counter += 1 + + +class CheckpointFlow(LightningFlow): + def __init__(self, collection, depth=0, exit=11): + super().__init__() + self.depth = depth + self.exit = exit + if depth == 0: + self.counter = 0 + + if depth >= 4: + self.collection = collection + else: + self.flow = CheckpointFlow(collection, depth + 1) + + def run(self): + if hasattr(self, "counter"): + self.counter += 1 + if self.counter >= self.exit: + self._exit() + if self.depth >= 4: + self.collection.run() + else: + self.flow.run() + + +class SimpleCounterWork(LightningWork): + def __init__(self): + super().__init__() + self.counter = 0 + + def run(self): + self.counter += 1 + + +class FlowDict(LightningFlow): + def __init__(self): + super().__init__() + self.dict = Dict() + + def run(self): + if "w" not in self.dict: + self.dict["w"] = SimpleCounterWork() + + if self.dict["w"].status.stage == WorkStageStatus.SUCCEEDED: + self._exit() + + self.dict["w"].run() + + +def test_dict_with_queues(): + + app = LightningApp(FlowDict()) + MultiProcessRuntime(app, start_server=False).dispatch() + + +class FlowList(LightningFlow): + def __init__(self): + super().__init__() + self.list = List() + + def run(self): + if not len(self.list): + self.list.append(SimpleCounterWork()) + + if self.list[-1].status.stage == WorkStageStatus.SUCCEEDED: + self._exit() + + self.list[-1].run() + + +def test_list_with_queues(): + + app = LightningApp(FlowList()) + MultiProcessRuntime(app, start_server=False).dispatch() + + +class WorkS(LightningWork): + def __init__(self): + super().__init__() + self.payload = None + + def run(self): + self.payload = Payload(2) + + +class WorkD(LightningWork): + def run(self, payload): + assert payload.value == 2 + + +class FlowPayload(LightningFlow): + def __init__(self): + super().__init__() + self.src = WorkS() + self.dst = Dict(**{"0": WorkD(parallel=True), "1": WorkD(parallel=True)}) + + def run(self): + self.src.run() + if self.src.payload: + for work in self.dst.values(): + work.run(self.src.payload) + if all(w.has_succeeded for w in self.dst.values()): + self._exit() + + +def test_structures_with_payload(): + app = LightningApp(FlowPayload(), debug=True) + MultiProcessRuntime(app, start_server=False).dispatch() + os.remove("payload") diff --git 
a/tests/tests_app/utilities/__init__.py b/tests/tests_app/utilities/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/__init__.py b/tests/tests_app/utilities/packaging/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/Dockerfile.cpu b/tests/tests_app/utilities/packaging/projects/Dockerfile.cpu new file mode 100644 index 0000000000000..10a5d105d811e --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/Dockerfile.cpu @@ -0,0 +1 @@ +FROM pytorchlightning/pytorch_lightning:base-cpu-py3.7-torch1.8 diff --git a/tests/tests_app/utilities/packaging/projects/dock/__init__.py b/tests/tests_app/utilities/packaging/projects/dock/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/dock/app.py b/tests/tests_app/utilities/packaging/projects/dock/app.py new file mode 100644 index 0000000000000..ddf6d60abb1e3 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/dock/app.py @@ -0,0 +1,12 @@ +import os +import sys + +from lightning_app import LightningApp + +if __name__ == "__main__": + sys.path.append(os.path.dirname(__file__)) + + from compo.a.a import AA + from compo.b.b import BB + + app = LightningApp(BB(AA())) diff --git a/tests/tests_app/utilities/packaging/projects/dock/compo/__init__.py b/tests/tests_app/utilities/packaging/projects/dock/compo/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/dock/compo/a/__init__.py b/tests/tests_app/utilities/packaging/projects/dock/compo/a/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/dock/compo/a/a.py b/tests/tests_app/utilities/packaging/projects/dock/compo/a/a.py new file mode 100644 index 0000000000000..45d4bc0372724 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/dock/compo/a/a.py @@ -0,0 +1,14 @@ +import logging + +from lightning_app import LightningWork + +logger = logging.getLogger(__name__) + + +class AA(LightningWork): + def __init__(self): + super().__init__() + self.message = None + + def run(self): + self.message = "hello world!" 
diff --git a/tests/tests_app/utilities/packaging/projects/dock/compo/b/__init__.py b/tests/tests_app/utilities/packaging/projects/dock/compo/b/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/dock/compo/b/b.py b/tests/tests_app/utilities/packaging/projects/dock/compo/b/b.py new file mode 100644 index 0000000000000..9cc842cfe5d60 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/dock/compo/b/b.py @@ -0,0 +1,10 @@ +from lightning_app import LightningFlow + + +class BB(LightningFlow): + def __init__(self, work): + super().__init__() + self.work = work + + def run(self): + self._exit() diff --git a/tests/tests_app/utilities/packaging/projects/dockerfile/__init__.py b/tests/tests_app/utilities/packaging/projects/dockerfile/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/dockerfile/app.py b/tests/tests_app/utilities/packaging/projects/dockerfile/app.py new file mode 100644 index 0000000000000..605a018092e9e --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/dockerfile/app.py @@ -0,0 +1,11 @@ +import os +import sys + +from lightning_app import LightningApp + +if __name__ == "__main__": + sys.path.append(os.path.dirname(__file__)) + from comp_dockerfile.a.a import AAA + from comp_dockerfile.b.b import BBB + + app = LightningApp(BBB(AAA())) diff --git a/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/__init__.py b/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/Dockerfile b/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/Dockerfile new file mode 100644 index 0000000000000..58a2123aeecce --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/Dockerfile @@ -0,0 +1 @@ +FROM pytorchlightning/pytorch_lightning:base-cuda-py3.7-torch1.8 diff --git a/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/__init__.py b/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/a.py b/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/a.py new file mode 100644 index 0000000000000..e3b6b83a680b2 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/a/a.py @@ -0,0 +1,6 @@ +from lightning_app import LightningWork + + +class AAA(LightningWork): + def run(self): + pass diff --git a/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/b/__init__.py b/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/b/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/b/b.py b/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/b/b.py new file mode 100644 index 0000000000000..02c078885ef68 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/dockerfile/comp_dockerfile/b/b.py @@ -0,0 +1,10 @@ +from lightning_app import LightningFlow + + +class BBB(LightningFlow): + def __init__(self, work): + super().__init__() + self.work = work + + def run(self): + self._exit() diff 
--git a/tests/tests_app/utilities/packaging/projects/no_req/__init__.py b/tests/tests_app/utilities/packaging/projects/no_req/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/no_req/app.py b/tests/tests_app/utilities/packaging/projects/no_req/app.py new file mode 100644 index 0000000000000..1f15fe087949f --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/no_req/app.py @@ -0,0 +1,12 @@ +import os +import sys + +from lightning_app import LightningApp + +if __name__ == "__main__": + sys.path.append(os.path.dirname(__file__)) + + from comp.a.a import AA + from comp.b.b import BB + + app = LightningApp(BB(AA())) diff --git a/tests/tests_app/utilities/packaging/projects/no_req/comp/__init__.py b/tests/tests_app/utilities/packaging/projects/no_req/comp/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/no_req/comp/a/__init__.py b/tests/tests_app/utilities/packaging/projects/no_req/comp/a/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/no_req/comp/a/a.py b/tests/tests_app/utilities/packaging/projects/no_req/comp/a/a.py new file mode 100644 index 0000000000000..7d63cde1d8f93 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/no_req/comp/a/a.py @@ -0,0 +1,8 @@ +import pandas # noqa F401 + +from lightning_app import LightningWork + + +class AA(LightningWork): + def run(self): + pass diff --git a/tests/tests_app/utilities/packaging/projects/no_req/comp/b/__init__.py b/tests/tests_app/utilities/packaging/projects/no_req/comp/b/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/no_req/comp/b/b.py b/tests/tests_app/utilities/packaging/projects/no_req/comp/b/b.py new file mode 100644 index 0000000000000..9cc842cfe5d60 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/no_req/comp/b/b.py @@ -0,0 +1,10 @@ +from lightning_app import LightningFlow + + +class BB(LightningFlow): + def __init__(self, work): + super().__init__() + self.work = work + + def run(self): + self._exit() diff --git a/tests/tests_app/utilities/packaging/projects/req/__init__.py b/tests/tests_app/utilities/packaging/projects/req/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/req/app.py b/tests/tests_app/utilities/packaging/projects/req/app.py new file mode 100644 index 0000000000000..b131143d8ab75 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/req/app.py @@ -0,0 +1,12 @@ +import os +import sys + +from lightning_app import LightningApp + +if __name__ == "__main__": + sys.path.append(os.path.dirname(__file__)) + + from comp_req.a.a import A + from comp_req.b.b import B + + app = LightningApp(B(A())) diff --git a/tests/tests_app/utilities/packaging/projects/req/comp_req/__init__.py b/tests/tests_app/utilities/packaging/projects/req/comp_req/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/req/comp_req/a/__init__.py b/tests/tests_app/utilities/packaging/projects/req/comp_req/a/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/req/comp_req/a/a.py b/tests/tests_app/utilities/packaging/projects/req/comp_req/a/a.py new file mode 100644 index 
0000000000000..9f9b39e217994 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/req/comp_req/a/a.py @@ -0,0 +1,8 @@ +import pandas # noqa F401 + +from lightning_app import LightningWork + + +class A(LightningWork): + def run(self): + pass diff --git a/tests/tests_app/utilities/packaging/projects/req/comp_req/a/requirements.txt b/tests/tests_app/utilities/packaging/projects/req/comp_req/a/requirements.txt new file mode 100644 index 0000000000000..44ccabb86bcec --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/req/comp_req/a/requirements.txt @@ -0,0 +1,3 @@ +pandas +pytorch_lightning==1.5.9 +git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0 diff --git a/tests/tests_app/utilities/packaging/projects/req/comp_req/b/__init__.py b/tests/tests_app/utilities/packaging/projects/req/comp_req/b/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app/utilities/packaging/projects/req/comp_req/b/b.py b/tests/tests_app/utilities/packaging/projects/req/comp_req/b/b.py new file mode 100644 index 0000000000000..dc6042b4996df --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/req/comp_req/b/b.py @@ -0,0 +1,10 @@ +from lightning_app import LightningFlow + + +class B(LightningFlow): + def __init__(self, work): + super().__init__() + self.work = work + + def run(self): + self._exit() diff --git a/tests/tests_app/utilities/packaging/projects/requirements.txt b/tests/tests_app/utilities/packaging/projects/requirements.txt new file mode 100644 index 0000000000000..7bf0698bf1180 --- /dev/null +++ b/tests/tests_app/utilities/packaging/projects/requirements.txt @@ -0,0 +1 @@ +cloud-stars diff --git a/tests/tests_app/utilities/packaging/test_app_config.py b/tests/tests_app/utilities/packaging/test_app_config.py new file mode 100644 index 0000000000000..a4200f615c03e --- /dev/null +++ b/tests/tests_app/utilities/packaging/test_app_config.py @@ -0,0 +1,61 @@ +import os +import pathlib +from contextlib import contextmanager + +from lightning_app.utilities.packaging.app_config import AppConfig, find_config_file + + +@contextmanager +def cwd(path): + """Utility context manager for temporarily switching the current working directory (cwd).""" + old_pwd = os.getcwd() + os.chdir(path) + try: + yield + finally: + os.chdir(old_pwd) + + +def _make_empty_config_file(folder): + file = pathlib.Path(folder / ".lightning") + file.parent.mkdir(parents=True, exist_ok=True) + file.touch() + return file + + +def test_find_config_file(tmpdir): + with cwd(pathlib.Path("/")): + assert find_config_file() is None + + with cwd(pathlib.Path.home()): + assert find_config_file() is None + + _ = _make_empty_config_file(tmpdir) + config_file1 = _make_empty_config_file(tmpdir / "a" / "b") + + assert find_config_file(tmpdir) == pathlib.Path(tmpdir, ".lightning") + assert find_config_file(config_file1) == pathlib.Path(tmpdir, "a", "b", ".lightning") + assert find_config_file(pathlib.Path(tmpdir, "a")) == pathlib.Path(tmpdir, ".lightning") + + # the config must be a file, a folder of the same name gets ignored + fake_config_folder = pathlib.Path(tmpdir, "fake", ".lightning") + fake_config_folder.mkdir(parents=True) + assert find_config_file(tmpdir) == pathlib.Path(tmpdir, ".lightning") + + +def test_app_config_save_load(tmpdir): + config = AppConfig("my_app") + config.save_to_file(tmpdir / ".lightning") + loaded_config = AppConfig.load_from_file(tmpdir / ".lightning") + assert config == loaded_config + + config = AppConfig("my_app2") + config.save_to_dir(tmpdir) 
+ loaded_config = AppConfig.load_from_dir(tmpdir) + assert config == loaded_config + + +def test_app_config_default_name(): + """Test that the default name gets auto-generated.""" + config = AppConfig() + assert config.name diff --git a/tests/tests_app/utilities/packaging/test_build_spec.py b/tests/tests_app/utilities/packaging/test_build_spec.py new file mode 100644 index 0000000000000..c2bdb0a4c1de7 --- /dev/null +++ b/tests/tests_app/utilities/packaging/test_build_spec.py @@ -0,0 +1,79 @@ +import os +import sys + +from tests_app import _PROJECT_ROOT + +from lightning_app import BuildConfig +from lightning_app.testing import application_testing, LightningTestApp + +EXTRAS_ARGS = ["--blocking", "False", "--multiprocess", "--open-ui", "False"] + + +class NoRequirementsLightningTestApp(LightningTestApp): + def on_after_run_once(self): + assert self.root.work.local_build_config.requirements == [] + assert self.root.work.cloud_build_config.requirements == [] + return super().on_after_run_once() + + +def test_build_config_no_requirements(): + command_line = [os.path.join(_PROJECT_ROOT, "tests/utilities/packaging/projects/no_req/app.py")] + application_testing(NoRequirementsLightningTestApp, command_line + EXTRAS_ARGS) + sys.path = sys.path[:-1] + + +def test_build_config_requirements_provided(): + spec = BuildConfig(requirements=["dask", "./projects/req/comp_req/a/requirements.txt"]) + assert spec.requirements == [ + "dask", + "pandas", + "pytorch_lightning==1.5.9", + "git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0", + ] + assert spec == BuildConfig.from_dict(spec.to_dict()) + + +class BuildSpecTest(BuildConfig): + def build_commands(self): + return super().build_commands() + ["pip install redis"] + + +def test_build_config_invalid_requirements(): + spec = BuildSpecTest(requirements=["./projects/requirements.txt"]) + assert spec.requirements == ["cloud-stars"] + assert spec.build_commands() == ["pip install redis"] + + +def test_build_config_dockerfile_provided(): + spec = BuildConfig(dockerfile="./projects/Dockerfile.cpu") + assert not spec.requirements + assert "pytorchlightning/pytorch_lightning" in spec.dockerfile[0] + + +class DockerfileLightningTestApp(LightningTestApp): + def on_after_run_once(self): + print(self.root.work.local_build_config.dockerfile) + assert "pytorchlightning/pytorch_lightning" in self.root.work.local_build_config.dockerfile[0] + return super().on_after_run_once() + + +def test_build_config_dockerfile(): + command_line = [os.path.join(_PROJECT_ROOT, "tests/utilities/packaging/projects/dockerfile/app.py")] + application_testing(DockerfileLightningTestApp, command_line + EXTRAS_ARGS) + sys.path = sys.path[:-1] + + +class RequirementsLightningTestApp(LightningTestApp): + def on_after_run_once(self): + assert self.root.work.local_build_config.requirements == [ + "git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0", + "pandas", + "pytorch_lightning==1.5.9", + ] + return super().on_after_run_once() + + +def test_build_config_requirements(): + command_line = [os.path.join(_PROJECT_ROOT, "tests/utilities/packaging/projects/req/app.py")] + application_testing(RequirementsLightningTestApp, command_line + EXTRAS_ARGS) + sys.path = sys.path[:-1] diff --git a/tests/tests_app/utilities/packaging/test_cloud_compute.py b/tests/tests_app/utilities/packaging/test_cloud_compute.py new file mode 100644 index 0000000000000..0884a27e9159b --- /dev/null +++ b/tests/tests_app/utilities/packaging/test_cloud_compute.py @@ -0,0 +1,22 @@ +import pytest + +from lightning_app 
import CloudCompute + + +def test_cloud_compute_unsupported_features(): + with pytest.raises(ValueError, match="Clusters are't supported yet"): + CloudCompute("gpu", clusters="as") + with pytest.raises(ValueError, match="Setting a wait timeout isn't supported yet"): + CloudCompute("gpu", wait_timeout=1) + + +def test_cloud_compute_names(): + assert CloudCompute().name == "default" + assert CloudCompute("cpu-small").name == "cpu-small" + assert CloudCompute("coconut").name == "coconut" # the backend is responsible for validation of names + + +def test_cloud_compute_shared_memory(): + + cloud_compute = CloudCompute("gpu", shm_size=1100) + assert cloud_compute.shm_size == 1100 diff --git a/tests/tests_app/utilities/packaging/test_docker.py b/tests/tests_app/utilities/packaging/test_docker.py new file mode 100644 index 0000000000000..3aa9758c049b5 --- /dev/null +++ b/tests/tests_app/utilities/packaging/test_docker.py @@ -0,0 +1,68 @@ +import os +from time import sleep, time + +import pytest + +from lightning_app import LightningWork +from lightning_app.core.queues import QueuingSystem +from lightning_app.testing.helpers import RunIf +from lightning_app.utilities.imports import _is_docker_available +from lightning_app.utilities.load_app import load_app_from_file +from lightning_app.utilities.packaging.docker import DockerRunner +from lightning_app.utilities.redis import check_if_redis_running + + +@pytest.mark.skipif(True, reason="FIXME (tchaton)") +@pytest.mark.skipif(not _is_docker_available(), reason="docker is required for this test.") +@pytest.mark.skipif(not check_if_redis_running(), reason="redis is required for this test.") +@RunIf(skip_windows=True) +def test_docker_runner(): + """This test validates that the lightning run work is executable within a container and deltas are sent back + through the Redis caller_queue.""" + queues = QueuingSystem.REDIS + queue_id = f"test_docker_runner_{str(int(time()))}" + app_file = os.path.join(os.path.dirname(__file__), "projects/dock/app.py") + app = load_app_from_file(app_file) + work: LightningWork = app.root.work + + call_hash = "run:fe3fa0f34fc1317e152e5afb023332995392071046f1ea51c34c7c9766e3676c" + work._calls[call_hash] = { + "args": (), + "kwargs": {}, + "call_hash": call_hash, + "run_started_counter": 1, + "statuses": [], + } + + # The script_path needs to be relative to the container. + docker_runner = DockerRunner( + "/home/tests/utilities/packaging/projects/dock/app.py", work, queue_id, create_base=True + ) + docker_runner.run() + + caller_queue = queues.get_caller_queue(work_name=work.name, queue_id=queue_id) + caller_queue.put( + { + "args": (), + "kwargs": {}, + "call_hash": call_hash, + "state": work.state, + } + ) + delta_queue = queues.get_delta_queue(queue_id=queue_id) + delta_1 = delta_queue.get() + delta_2 = delta_queue.get() + delta_3 = delta_queue.get() + + def get_item(delta): + return delta.delta.to_dict()["iterable_item_added"] + + assert delta_1.id == "root.work" + assert delta_2.id == "root.work" + assert delta_3.id == "root.work" + assert get_item(delta_1)[f"root['calls']['{call_hash}']['statuses'][0]"]["stage"] == "starting" + assert delta_2.delta.to_dict()["type_changes"]["root['vars']['message']"]["new_value"] == "hello world!" 
+ assert get_item(delta_3)[f"root['calls']['{call_hash}']['statuses'][1]"]["stage"] == "succeeded" + sleep(1) + assert "Starting WorkRunner" in docker_runner.get_container_logs() + docker_runner.kill() diff --git a/tests/tests_app/utilities/packaging/test_lightning_utils.py b/tests/tests_app/utilities/packaging/test_lightning_utils.py new file mode 100644 index 0000000000000..0e4a370b401fc --- /dev/null +++ b/tests/tests_app/utilities/packaging/test_lightning_utils.py @@ -0,0 +1,37 @@ +import os + +import pytest + +from lightning import __about__ +from lightning_app.testing.helpers import RunIf +from lightning_app.utilities.packaging import lightning_utils +from lightning_app.utilities.packaging.lightning_utils import ( + _prepare_lightning_wheels_and_requirements, + _verify_lightning_version, +) + + +def test_prepare_lightning_wheels_and_requirement(tmpdir): + """This test ensures the lightning source gets packaged inside the lightning repo.""" + + cleanup_handle = _prepare_lightning_wheels_and_requirements(tmpdir) + tar_name = f"lightning-{__about__.__version__}.tar.gz" + assert sorted(os.listdir(tmpdir)) == [tar_name] + cleanup_handle() + assert os.listdir(tmpdir) == [] + + +@pytest.mark.skip(reason="TODO: Find a way to check for the latest version") +@RunIf(skip_windows=True) +def test_verify_lightning_version(monkeypatch): + monkeypatch.setattr(lightning_utils, "__version__", "0.0.1") + monkeypatch.setattr(lightning_utils, "_fetch_latest_version", lambda _: "0.0.2") + + # Not latest version + with pytest.raises(Exception, match="You need to use the latest version of Lightning"): + _verify_lightning_version() + + # Latest version + monkeypatch.setattr(lightning_utils, "__version__", "0.0.1") + monkeypatch.setattr(lightning_utils, "_fetch_latest_version", lambda _: "0.0.1") + _verify_lightning_version() diff --git a/tests/tests_app/utilities/test_app_helpers.py b/tests/tests_app/utilities/test_app_helpers.py new file mode 100644 index 0000000000000..44074859f68f5 --- /dev/null +++ b/tests/tests_app/utilities/test_app_helpers.py @@ -0,0 +1,107 @@ +from unittest import mock +from unittest.mock import Mock + +import pytest + +from lightning_app import LightningFlow, LightningWork +from lightning_app.utilities.app_helpers import ( + AppStatePlugin, + BaseStatePlugin, + InMemoryStateStore, + is_overridden, + StateStore, +) +from lightning_app.utilities.exceptions import LightningAppStateException + + +class Work(LightningWork): + def run(self): + pass + + +class Flow(LightningFlow): + def run(self): + pass + + +def test_is_overridden(): + flow = Flow() + work = Work() + + # edge cases + assert not is_overridden("whatever", None) + with pytest.raises(ValueError, match="Expected a parent"): + is_overridden("whatever", object()) + assert not is_overridden("whatever", flow) + assert not is_overridden("whatever", flow, parent=Flow) + + class TestFlow(LightningFlow): + def run(self): + pass + + def foo(self): + pass + + def bar(self): + return 1 + + with pytest.raises(ValueError, match="The parent should define the method"): + is_overridden("foo", TestFlow()) + + # normal usage + assert is_overridden("run", flow) + assert is_overridden("run", work) + + # `Mock` support + mock = Mock(spec=Flow, wraps=flow) + assert is_overridden("run", mock) + mock = Mock(spec=LightningWork, wraps=work) + assert is_overridden("run", mock) + + +def test_simple_app_store(): + + store = InMemoryStateStore() + user_id = "1234" + store.add(user_id) + state = {"data": user_id} + store.set_app_state(user_id, state) + 
store.set_served_state(user_id, state) + store.set_served_session_id(user_id, user_id) + assert store.get_app_state(user_id) == state + assert store.get_served_state(user_id) == state + assert store.get_served_session_id(user_id) == user_id + store.remove(user_id) + assert isinstance(store, StateStore) + + +@mock.patch("lightning_app.utilities.app_helpers.APP_STATE_MAX_SIZE_BYTES", 120) +def test_simple_app_store_warning(): + store = InMemoryStateStore() + user_id = "1234" + store.add(user_id) + state = {"data": "I'm a state that's larger than 120 bytes"} + with pytest.raises(LightningAppStateException, match="is larger than the"): + store.set_app_state(user_id, state) + + +def test_base_state_plugin(): + class DummyStatePlugin(BaseStatePlugin): + def should_update_app(self, deep_diff): + super().should_update_app(deep_diff) + + def get_context(self): + super().get_context() + + def render_non_authorized(self): + super().render_non_authorized() + + plugin = DummyStatePlugin() + plugin.should_update_app(None) + plugin.get_context() + plugin.render_non_authorized() + + plugin = AppStatePlugin() + plugin.should_update_app(None) + plugin.get_context() + plugin.render_non_authorized() diff --git a/tests/tests_app/utilities/test_apply_func.py b/tests/tests_app/utilities/test_apply_func.py new file mode 100644 index 0000000000000..509a55b1ae7d2 --- /dev/null +++ b/tests/tests_app/utilities/test_apply_func.py @@ -0,0 +1,264 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import dataclasses +import numbers +from collections import defaultdict, namedtuple, OrderedDict +from dataclasses import InitVar +from typing import Any, ClassVar, List, Optional + +import pytest + +from lightning_app.utilities.apply_func import apply_to_collection +from lightning_app.utilities.exceptions import MisconfigurationException +from lightning_app.utilities.imports import _is_numpy_available, _is_torch_available + +if _is_torch_available(): + import torch + +if _is_numpy_available(): + import numpy as np + + +@pytest.mark.skipif(not (_is_torch_available() and _is_numpy_available()), reason="Requires torch and numpy") +def test_recursive_application_to_collection(): + ntc = namedtuple("Foo", ["bar"]) + + @dataclasses.dataclass + class Feature: + input_ids: torch.Tensor + segment_ids: np.ndarray + + def __eq__(self, o: object) -> bool: + if not isinstance(o, Feature): + return NotImplemented + else: + return torch.equal(self.input_ids, o.input_ids) and np.equal(self.segment_ids, o.segment_ids).all() + + @dataclasses.dataclass + class ModelExample: + example_ids: List[str] + feature: Feature + label: torch.Tensor + some_constant: int = dataclasses.field(init=False) + + def __post_init__(self): + self.some_constant = 7 + + def __eq__(self, o: object) -> bool: + if not isinstance(o, ModelExample): + return NotImplemented + else: + return ( + self.example_ids == o.example_ids + and self.feature == o.feature + and torch.equal(self.label, o.label) + and self.some_constant == o.some_constant + ) + + @dataclasses.dataclass + class WithClassVar: + class_var: ClassVar[int] = 0 + dummy: Any + + def __eq__(self, o: object) -> bool: + if not isinstance(o, WithClassVar): + return NotImplemented + elif isinstance(self.dummy, torch.Tensor): + return torch.equal(self.dummy, o.dummy) + else: + return self.dummy == o.dummy + + @dataclasses.dataclass + class WithInitVar: + dummy: Any + override: InitVar[Optional[Any]] = None + + def __post_init__(self, override: Optional[Any]): + if override is not None: + self.dummy = override + + def __eq__(self, o: object) -> bool: + if not isinstance(o, WithInitVar): + return NotImplemented + elif isinstance(self.dummy, torch.Tensor): + return torch.equal(self.dummy, o.dummy) + else: + return self.dummy == o.dummy + + @dataclasses.dataclass + class WithClassAndInitVar: + class_var: ClassVar[torch.Tensor] = torch.tensor(0) + dummy: Any + override: InitVar[Optional[Any]] = torch.tensor(1) + + def __post_init__(self, override: Optional[Any]): + if override is not None: + self.dummy = override + + def __eq__(self, o: object) -> bool: + if not isinstance(o, WithClassAndInitVar): + return NotImplemented + elif isinstance(self.dummy, torch.Tensor): + return torch.equal(self.dummy, o.dummy) + else: + return self.dummy == o.dummy + + model_example = ModelExample( + example_ids=["i-1", "i-2", "i-3"], + feature=Feature(input_ids=torch.tensor([1.0, 2.0, 3.0]), segment_ids=np.array([4.0, 5.0, 6.0])), + label=torch.tensor([7.0, 8.0, 9.0]), + ) + + to_reduce = { + "a": torch.tensor([1.0]), # Tensor + "b": [torch.tensor([2.0])], # list + "c": (torch.tensor([100.0]),), # tuple + "d": ntc(bar=5.0), # named tuple + "e": np.array([10.0]), # numpy array + "f": "this_is_a_dummy_str", # string + "g": 12.0, # number + "h": Feature(input_ids=torch.tensor([1.0, 2.0, 3.0]), segment_ids=np.array([4.0, 5.0, 6.0])), # dataclass + "i": model_example, # nested dataclass + "j": WithClassVar(torch.arange(3)), # dataclass with class variable + "k": WithInitVar("this_gets_overridden", 
torch.tensor([2.0])), # dataclass with init-only variable + "l": WithClassAndInitVar(model_example, None), # nested dataclass with class and init-only variables + } + + model_example_result = ModelExample( + example_ids=["i-1", "i-2", "i-3"], + feature=Feature(input_ids=torch.tensor([2.0, 4.0, 6.0]), segment_ids=np.array([8.0, 10.0, 12.0])), + label=torch.tensor([14.0, 16.0, 18.0]), + ) + + expected_result = { + "a": torch.tensor([2.0]), + "b": [torch.tensor([4.0])], + "c": (torch.tensor([200.0]),), + "d": ntc(bar=torch.tensor([10.0])), + "e": np.array([20.0]), + "f": "this_is_a_dummy_str", + "g": 24.0, + "h": Feature(input_ids=torch.tensor([2.0, 4.0, 6.0]), segment_ids=np.array([8.0, 10.0, 12.0])), + "i": model_example_result, + "j": WithClassVar(torch.arange(0, 6, 2)), + "k": WithInitVar(torch.tensor([4.0])), + "l": WithClassAndInitVar(model_example_result, None), + } + + reduced = apply_to_collection(to_reduce, (torch.Tensor, numbers.Number, np.ndarray), lambda x: x * 2) + + assert isinstance(reduced, dict), "Type Consistency of dict not preserved" + assert all(x in reduced for x in to_reduce), "Not all entries of the dict were preserved" + assert all( + isinstance(reduced[k], type(expected_result[k])) for k in to_reduce + ), "At least one type was not correctly preserved" + + assert isinstance(reduced["a"], torch.Tensor), "Reduction Result of a Tensor should be a Tensor" + assert torch.equal(expected_result["a"], reduced["a"]), "Reduction of a tensor does not yield the expected value" + + assert isinstance(reduced["b"], list), "Reduction Result of a list should be a list" + assert all( + torch.equal(x, y) for x, y in zip(reduced["b"], expected_result["b"]) + ), "At least one value of list reduction did not come out as expected" + + assert isinstance(reduced["c"], tuple), "Reduction Result of a tuple should be a tuple" + assert all( + torch.equal(x, y) for x, y in zip(reduced["c"], expected_result["c"]) + ), "At least one value of tuple reduction did not come out as expected" + + assert isinstance(reduced["d"], ntc), "Type Consistency for named tuple not given" + assert isinstance( + reduced["d"].bar, numbers.Number + ), "Failure in type promotion while reducing fields of named tuples" + assert reduced["d"].bar == expected_result["d"].bar + + assert isinstance(reduced["e"], np.ndarray), "Type Promotion in reduction of numpy arrays failed" + assert reduced["e"] == expected_result["e"], "Reduction of numpy array did not yield the expected result" + + assert isinstance(reduced["f"], str), "A string should not be reduced" + assert reduced["f"] == expected_result["f"], "String not preserved during reduction" + + assert isinstance(reduced["g"], numbers.Number), "Reduction of a number should result in a number" + assert reduced["g"] == expected_result["g"], "Reduction of a number did not yield the desired result" + + def _assert_dataclass_reduction(actual, expected, dataclass_type: str = ""): + assert dataclasses.is_dataclass(actual) and not isinstance( + actual, type + ), f"Reduction of a {dataclass_type} dataclass should result in a dataclass" + for field in dataclasses.fields(actual): + if dataclasses.is_dataclass(field.type): + _assert_dataclass_reduction(getattr(actual, field.name), getattr(expected, field.name), "nested") + assert actual == expected, f"Reduction of a {dataclass_type} dataclass did not yield the desired result" + + _assert_dataclass_reduction(reduced["h"], expected_result["h"]) + + _assert_dataclass_reduction(reduced["i"], expected_result["i"]) + + dataclass_type = 
"ClassVar-containing" + _assert_dataclass_reduction(reduced["j"], expected_result["j"], dataclass_type) + assert WithClassVar.class_var == 0, f"Reduction of a {dataclass_type} dataclass should not change the class var" + + _assert_dataclass_reduction(reduced["k"], expected_result["k"], "InitVar-containing") + + dataclass_type = "Class-and-InitVar-containing" + _assert_dataclass_reduction(reduced["l"], expected_result["l"], dataclass_type) + assert torch.equal( + WithClassAndInitVar.class_var, torch.tensor(0) + ), f"Reduction of a {dataclass_type} dataclass should not change the class var" + + # mapping support + reduced = apply_to_collection({"a": 1, "b": 2}, int, lambda x: str(x)) + assert reduced == {"a": "1", "b": "2"} + reduced = apply_to_collection(OrderedDict([("b", 2), ("a", 1)]), int, lambda x: str(x)) + assert reduced == OrderedDict([("b", "2"), ("a", "1")]) + + # custom mappings + class _CustomCollection(dict): + def __init__(self, initial_dict): + super().__init__(initial_dict) + + to_reduce = _CustomCollection({"a": 1, "b": 2, "c": 3}) + reduced = apply_to_collection(to_reduce, int, lambda x: str(x)) + assert reduced == _CustomCollection({"a": "1", "b": "2", "c": "3"}) + + # defaultdict + to_reduce = defaultdict(int, {"a": 1, "b": 2, "c": 3}) + reduced = apply_to_collection(to_reduce, int, lambda x: str(x)) + assert reduced == defaultdict(int, {"a": "1", "b": "2", "c": "3"}) + + +def test_apply_to_collection_include_none(): + to_reduce = [1, 2, 3.4, 5.6, 7, (8, 9.1, {10: 10})] + + def fn(x): + if isinstance(x, float): + return x + + reduced = apply_to_collection(to_reduce, (int, float), fn) + assert reduced == [None, None, 3.4, 5.6, None, (None, 9.1, {10: None})] + + reduced = apply_to_collection(to_reduce, (int, float), fn, include_none=False) + assert reduced == [3.4, 5.6, (9.1, {})] + + +@pytest.mark.skipif(not _is_torch_available(), reason="Requires torch and numpy") +def test_apply_to_collection_frozen_dataclass(): + @dataclasses.dataclass(frozen=True) + class Foo: + input: torch.Tensor + + foo = Foo(torch.tensor(0)) + + with pytest.raises(MisconfigurationException, match="frozen dataclass was passed"): + apply_to_collection(foo, torch.Tensor, lambda t: t.to(torch.int)) diff --git a/tests/tests_app/utilities/test_cli_helpers.py b/tests/tests_app/utilities/test_cli_helpers.py new file mode 100644 index 0000000000000..575802da7b43d --- /dev/null +++ b/tests/tests_app/utilities/test_cli_helpers.py @@ -0,0 +1,30 @@ +import pytest + +from lightning_app.utilities.cli_helpers import _format_input_env_variables + + +def test_format_input_env_variables(): + with pytest.raises(Exception, match="Invalid format of environment variable"): + _format_input_env_variables(("invalid-env",)) + + with pytest.raises(Exception, match="Invalid format of environment variable"): + _format_input_env_variables(("=invalid",)) + + with pytest.raises(Exception, match="Invalid format of environment variable"): + _format_input_env_variables(("=invalid=",)) + + with pytest.raises(Exception, match="is duplicated. Please only include it once."): + _format_input_env_variables( + ( + "FOO=bar", + "FOO=bar", + ) + ) + + with pytest.raises( + Exception, + match="is not a valid name. 
It is only allowed to contain digits 0-9, letters A-Z", + ): + _format_input_env_variables(("*FOO#=bar",)) + + assert _format_input_env_variables(("FOO=bar", "BLA=bloz")) == {"FOO": "bar", "BLA": "bloz"} diff --git a/tests/tests_app/utilities/test_component.py b/tests/tests_app/utilities/test_component.py new file mode 100644 index 0000000000000..6f166fa034a20 --- /dev/null +++ b/tests/tests_app/utilities/test_component.py @@ -0,0 +1,78 @@ +import pytest + +from lightning_app.storage import Path +from lightning_app.testing.helpers import EmptyFlow, EmptyWork +from lightning_app.utilities.component import ( + _context, + _convert_paths_after_init, + _get_context, + _is_flow_context, + _is_work_context, + _set_context, + _set_work_context, +) +from lightning_app.utilities.enum import ComponentContext + + +def test_convert_paths_after_init(): + """Test that we can convert paths after the Flow/Work initialization, i.e., when the LightningApp is fully + instantiated.""" + + # TODO: Add a test case for the Lightning List and Dict containers + + class Flow1(EmptyFlow): + def __init__(self): + super().__init__() + self.path1 = Path("a") + self.path2 = Path("b") + + flow1 = Flow1() + assert flow1._paths == {} + _convert_paths_after_init(flow1) + assert flow1._paths == {"path1": Path("a").to_dict(), "path2": Path("b").to_dict()} + + class Work1(EmptyWork): + def __init__(self): + super().__init__() + self.path3 = Path("c") + + class Flow2(EmptyFlow): + def __init__(self): + super().__init__() + self.work1 = Work1() + self.path4 = Path("d") + + flow2 = Flow2() + assert flow2._paths == {} + assert flow2.work1._paths == {} + _convert_paths_after_init(flow2) + assert flow2._paths == {"path4": Path("d").to_dict()} + assert set(flow2.work1._paths.keys()) == {"path3"} + assert flow2.work1._paths["path3"]["origin_name"] == "root.work1" + assert flow2.work1._paths["path3"]["consumer_name"] == "root.work1" + + +@pytest.mark.parametrize("ctx", [c.value for c in ComponentContext]) +def test_context_context_manager(ctx): + with _context("flow"): + assert _get_context().value == "flow" + assert _get_context() is None + + +@pytest.mark.parametrize("ctx", [c.value for c in ComponentContext]) +def test_set_get_context(ctx): + assert _get_context() is None + _set_context(ctx) + assert _get_context().value == ctx + + +def test_is_context(): + _set_context("flow") + assert _is_flow_context() + + _set_work_context() + assert _is_work_context() + + _set_context(None) + assert not _is_flow_context() + assert not _is_work_context() diff --git a/tests/tests_app/utilities/test_dependency_caching.py b/tests/tests_app/utilities/test_dependency_caching.py new file mode 100644 index 0000000000000..dc52119003605 --- /dev/null +++ b/tests/tests_app/utilities/test_dependency_caching.py @@ -0,0 +1,15 @@ +from pathlib import Path + +from lightning_app.utilities.dependency_caching import get_hash + + +def test_get_hash(tmpdir): + req_path = tmpdir / "requirements.txt" + Path(req_path).touch() + + # empty requirements file + assert get_hash(req_path) == "3345524abf6bbe1809449224b5972c41790b6cf2" + + # requirements file with one dependency + req_path.write_text("lightning==1.0", encoding="utf-8") + assert get_hash(req_path) == "6177677a74b5d256e331cb9e390af58106e20220" diff --git a/tests/tests_app/utilities/test_git.py b/tests/tests_app/utilities/test_git.py new file mode 100644 index 0000000000000..efb59d05adc1c --- /dev/null +++ b/tests/tests_app/utilities/test_git.py @@ -0,0 +1,29 @@ +import sys + +from lightning_app.utilities.git 
import ( + check_github_repository, + check_if_remote_head_is_different, + execute_git_command, + get_dir_name, + get_git_relative_path, + has_uncommitted_files, +) + + +def test_execute_git_command(): + + res = execute_git_command(["pull"]) + assert res + + assert get_dir_name() == "lightning-app" + + assert check_github_repository() + + if sys.platform == "win32": + assert get_git_relative_path(__file__) == "tests\\tests_app\\utilities\\test_git.py" + else: + assert get_git_relative_path(__file__) == "tests/tests_app/utilities/test_git.py" + + # these commands can return either True or False depending on the dev environment. + check_if_remote_head_is_different() + has_uncommitted_files() diff --git a/tests/tests_app/utilities/test_imports.py b/tests/tests_app/utilities/test_imports.py new file mode 100644 index 0000000000000..265181bab1f2a --- /dev/null +++ b/tests/tests_app/utilities/test_imports.py @@ -0,0 +1,49 @@ +import os +from unittest import mock + +import pytest + +from lightning_app.utilities.imports import _module_available, requires + + +def test_module_available(): + """Test if the 3rd party libs are available.""" + assert _module_available("deepdiff") + assert _module_available("deepdiff.deephash") + assert not _module_available("torch.nn.asdf") + assert not _module_available("asdf") + assert not _module_available("asdf.bla.asdf") + + +@mock.patch.dict(os.environ, {"LIGHTING_TESTING": "0"}) +def test_requires(): + @requires("lightning_app") + def fn(): + pass + + fn() + + @requires("shouldnotexist") + def fn_raise(): + pass + + with pytest.raises(ModuleNotFoundError, match="Please run: pip install 'shouldnotexist'"): + fn_raise() + + class ClassRaise: + @requires("shouldnotexist") + def __init__(self): + pass + + with pytest.raises(ModuleNotFoundError, match="Please run: pip install 'shouldnotexist'"): + ClassRaise() + + +@mock.patch.dict(os.environ, {"LIGHTING_TESTING": "0"}) +def test_requires_multiple(): + @requires(["shouldnotexist1", "shouldnotexist2"]) + def fn_raise(): + pass + + with pytest.raises(ModuleNotFoundError, match="Please run: pip install 'shouldnotexist1' 'shouldnotexist2'"): + fn_raise() diff --git a/tests/tests_app/utilities/test_introspection.py b/tests/tests_app/utilities/test_introspection.py new file mode 100644 index 0000000000000..5d0c5a80d0155 --- /dev/null +++ b/tests/tests_app/utilities/test_introspection.py @@ -0,0 +1,60 @@ +import os +from numbers import Rational + +import pytest + +from lightning_app import LightningApp, LightningFlow +from lightning_app.utilities.imports import _is_pytorch_lightning_available +from lightning_app.utilities.introspection import Scanner + +if _is_pytorch_lightning_available(): + from pytorch_lightning import Trainer + from pytorch_lightning.utilities.cli import LightningCLI + +from tests_app import _PROJECT_ROOT + + +def test_introspection(): + """This test validates the scanner can find some class within the provided files.""" + + scanner = Scanner(str(os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/example_1.py"))) + assert scanner.has_class(Rational) + assert not scanner.has_class(LightningApp) + + scanner = Scanner(str(os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/example_2.py"))) + assert scanner.has_class(LightningApp) + assert not scanner.has_class(LightningFlow) + + +@pytest.mark.skipif(not _is_pytorch_lightning_available(), reason="pytorch_lightning isn't installed.") +def test_introspection_lightning(): + """This test validates the scanner can find some PyTorch Lightning class within the provided
files.""" + scanner = Scanner(str(os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/lightning_cli.py"))) + assert not scanner.has_class(Trainer) + assert scanner.has_class(LightningCLI) + + scanner = Scanner(str(os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/lightning_trainer.py"))) + assert scanner.has_class(Trainer) + assert not scanner.has_class(LightningCLI) + + +@pytest.mark.skipif(not _is_pytorch_lightning_available(), reason="pytorch_lightning isn't installed.") +def test_introspection_lightning_overrides(): + """This test validates the scanner can find all the subclasses from primitives classes from PyTorch Lightning + in the provided files.""" + scanner = Scanner(str(os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/lightning_cli.py"))) + scanner = Scanner(str(os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/lightning_overrides.py"))) + scan = scanner.scan() + assert sorted(scan.keys()) == [ + "Accelerator", + "BaseProfiler", + "Callback", + "LightningDataModule", + "LightningLite", + "LightningLoggerBase", + "LightningModule", + "Loop", + "Metric", + "PrecisionPlugin", + "Trainer", + ] diff --git a/tests/tests_app/utilities/test_load_app.py b/tests/tests_app/utilities/test_load_app.py new file mode 100644 index 0000000000000..09af874cbd9fc --- /dev/null +++ b/tests/tests_app/utilities/test_load_app.py @@ -0,0 +1,84 @@ +import os +from unittest.mock import ANY + +import pytest +import tests_app.core.scripts +from tests_app import _PROJECT_ROOT + +from lightning_app import LightningApp +from lightning_app.utilities.exceptions import MisconfigurationException +from lightning_app.utilities.load_app import extract_metadata_from_app, load_app_from_file + + +def test_load_app_from_file(): + app = load_app_from_file(os.path.join(_PROJECT_ROOT, "examples", "app_v0", "app.py")) + assert isinstance(app, LightningApp) + + test_script_dir = os.path.join(os.path.dirname(tests_app.core.__file__), "scripts") + with pytest.raises(MisconfigurationException, match="There should not be multiple apps instantiated within a file"): + load_app_from_file(os.path.join(test_script_dir, "two_apps.py")) + + with pytest.raises(MisconfigurationException, match="The provided file .* does not contain a LightningApp"): + load_app_from_file(os.path.join(test_script_dir, "empty.py")) + + with pytest.raises(SystemExit, match="1"): + load_app_from_file(os.path.join(test_script_dir, "script_with_error.py")) + + +def test_extract_metadata_from_component(): + test_script_dir = os.path.join(os.path.dirname(tests_app.core.__file__), "scripts") + app = load_app_from_file(os.path.join(test_script_dir, "app_metadata.py")) + metadata = extract_metadata_from_app(app) + assert metadata == [ + { + "affiliation": ["root"], + "cls_name": "RootFlow", + "module": "__main__", + "docstring": "RootFlow.", + }, + { + "affiliation": ["root", "flow_a_1"], + "cls_name": "FlowA", + "module": "__main__", + "docstring": "FlowA Component.", + }, + { + "affiliation": ["root", "flow_a_1", "work_a"], + "cls_name": "WorkA", + "module": "__main__", + "docstring": "WorkA.", + "local_build_config": {"__build_config__": ANY}, + "cloud_build_config": {"__build_config__": ANY}, + "cloud_compute": {"__cloud_compute__": ANY}, + }, + { + "affiliation": ["root", "flow_a_2"], + "cls_name": "FlowA", + "module": "__main__", + "docstring": "FlowA Component.", + }, + { + "affiliation": ["root", "flow_a_2", "work_a"], + "cls_name": "WorkA", + "module": "__main__", + "docstring": "WorkA.", + "local_build_config": 
{"__build_config__": ANY}, + "cloud_build_config": {"__build_config__": ANY}, + "cloud_compute": {"__cloud_compute__": ANY}, + }, + { + "affiliation": ["root", "flow_b"], + "cls_name": "FlowB", + "module": "__main__", + "docstring": "FlowB.", + }, + { + "affiliation": ["root", "flow_b", "work_b"], + "cls_name": "WorkB", + "module": "__main__", + "docstring": "WorkB.", + "local_build_config": {"__build_config__": ANY}, + "cloud_build_config": {"__build_config__": ANY}, + "cloud_compute": {"__cloud_compute__": ANY}, + }, + ] diff --git a/tests/tests_app/utilities/test_login.py b/tests/tests_app/utilities/test_login.py new file mode 100644 index 0000000000000..43b10519e20ee --- /dev/null +++ b/tests/tests_app/utilities/test_login.py @@ -0,0 +1,142 @@ +import os +from unittest import mock + +import pytest +import requests + +from lightning_app.utilities import login + +LIGHTNING_CLOUD_URL = "https://lightning.ai" + + +@pytest.fixture(autouse=True) +def before_each(): + login.Auth.clear() + + +class TestAuthentication: + def test_can_store_credentials(self): + auth = login.Auth() + auth.save(username="superman", user_id="kr-1234") + assert auth.secrets_file.exists() + + auth.clear() + assert not auth.secrets_file.exists() + + def test_e2e(self): + auth = login.Auth() + assert auth._with_env_var is False + auth.save(username="superman", user_id="kr-1234") + assert auth.secrets_file.exists() + + def test_get_auth_header_should_raise_error(self): + with pytest.raises(AttributeError): + login.Auth().auth_header + + def test_credentials_file_io(self): + auth = login.Auth() + assert not auth.secrets_file.exists() + assert auth.load() is False + auth.save(username="", user_id="123", api_key="123") + assert auth.secrets_file.exists() + assert auth.load() is True + + def test_auth_header(self): + # fake credentials + os.environ.setdefault("LIGHTNING_USER_ID", "7c8455e3-7c5f-4697-8a6d-105971d6b9bd") + os.environ.setdefault("LIGHTNING_API_KEY", "e63fae57-2b50-498b-bc46-d6204cbf330e") + auth = login.Auth() + assert "Basic" in auth.auth_header + assert ( + auth.auth_header + == "Basic N2M4NDU1ZTMtN2M1Zi00Njk3LThhNmQtMTA1OTcxZDZiOWJkOmU2M2ZhZTU3LTJiNTAtNDk4Yi1iYzQ2LWQ2MjA0Y2JmMzMwZQ==" # noqa E501 + ) + + +def test_authentication_with_invalid_environment_vars(): + # if api key is passed without user id + os.environ.setdefault("LIGHTNING_API_KEY", "123") + with pytest.raises(ValueError): + login.Auth() + + +@mock.patch("lightning_app.utilities.login.AuthServer.login_with_browser") +def test_authentication_with_environment_vars(browser_login: mock.MagicMock): + os.environ.setdefault("LIGHTNING_USER_ID", "abc") + os.environ.setdefault("LIGHTNING_API_KEY", "abc") + + auth = login.Auth() + assert auth.user_id == "abc" + assert auth.auth_header == "Basic YWJjOmFiYw==" + assert auth._with_env_var is True + assert auth.authenticate() == auth.auth_header + # should not run login flow when env vars are passed + browser_login.assert_not_called() + + +def test_get_auth_url(): + auth_url = login.AuthServer().get_auth_url(1234) + assert ( + auth_url == f"{LIGHTNING_CLOUD_URL}/sign-in?redirectTo=http%3A%2F%2Flocalhost%3A1234%2Flogin-complete" + ) # E501 + + +@mock.patch("lightning_app.utilities.login.find_free_network_port") +@mock.patch("uvicorn.Server.run") +@mock.patch("requests.head") +@mock.patch("click.launch") +def test_login_with_browser( + click_launch: mock.MagicMock, head: mock.MagicMock, run: mock.MagicMock, port: mock.MagicMock +): + port.return_value = 1234 + login.Auth()._run_server() + url = 
f"{LIGHTNING_CLOUD_URL}/sign-in?redirectTo=http%3A%2F%2Flocalhost%3A1234%2Flogin-complete" # E501 + head.assert_called_once_with(url) + click_launch.assert_called_once_with(url) + run.assert_called_once() + + +@mock.patch("lightning_app.utilities.login.find_free_network_port") +@mock.patch("uvicorn.Server.run") +@mock.patch("requests.head") +@mock.patch("click.launch") +def test_authenticate(click_launch: mock.MagicMock, head: mock.MagicMock, run: mock.MagicMock, port: mock.MagicMock): + port.return_value = 1234 + auth = login.Auth() + auth.user_id = "user_id" + auth.api_key = "api_key" + auth.authenticate() + url = f"{LIGHTNING_CLOUD_URL}/sign-in?redirectTo=http%3A%2F%2Flocalhost%3A1234%2Flogin-complete" # E501 + head.assert_called_with(url) + click_launch.assert_called_with(url) + run.assert_called() + assert auth.auth_header == "Basic dXNlcl9pZDphcGlfa2V5" + + auth.authenticate() + assert auth.auth_header == "Basic dXNlcl9pZDphcGlfa2V5" + + +@mock.patch("uvicorn.Server.run") +@mock.patch("requests.head") +def test_network_failure( + head: mock.MagicMock, + run: mock.MagicMock, +): + head.side_effect = requests.ConnectionError() + with pytest.raises(requests.ConnectionError): + login.Auth()._run_server() + run.assert_not_called() + + head.side_effect = requests.RequestException() + with pytest.raises(requests.RequestException): + login.Auth()._run_server() + run.assert_not_called() + + +def test_with_api_key_only(): + auth = login.Auth() + auth.save(user_id="7c8455e3-7c5f-4697-8a6d-105971d6b9bd", api_key="e63fae57-2b50-498b-bc46-d6204cbf330e") + assert ( + auth.authenticate() + == "Basic N2M4NDU1ZTMtN2M1Zi00Njk3LThhNmQtMTA1OTcxZDZiOWJkOmU2M2ZhZTU3LTJiNTAtNDk4Yi1iYzQ2LWQ2MjA0Y2JmMzMwZQ==" # noqa E501 + ) diff --git a/tests/tests_app/utilities/test_network.py b/tests/tests_app/utilities/test_network.py new file mode 100644 index 0000000000000..fd1665f0a1ede --- /dev/null +++ b/tests/tests_app/utilities/test_network.py @@ -0,0 +1,5 @@ +from lightning_app.utilities.network import find_free_network_port + + +def test_port(): + assert find_free_network_port() diff --git a/tests/tests_app/utilities/test_proxies.py b/tests/tests_app/utilities/test_proxies.py new file mode 100644 index 0000000000000..3331a5e69e42b --- /dev/null +++ b/tests/tests_app/utilities/test_proxies.py @@ -0,0 +1,689 @@ +import logging +import os +import pathlib +import time +import traceback +from copy import deepcopy +from queue import Empty +from unittest import mock +from unittest.mock import MagicMock, Mock + +import pytest +from deepdiff import DeepDiff, Delta + +from lightning_app import LightningApp, LightningFlow, LightningWork +from lightning_app.runners import MultiProcessRuntime +from lightning_app.storage import Path +from lightning_app.storage.path import artifacts_path +from lightning_app.storage.requests import GetRequest +from lightning_app.testing.helpers import EmptyFlow, MockQueue +from lightning_app.utilities.component import _convert_paths_after_init +from lightning_app.utilities.enum import WorkFailureReasons, WorkStageStatus +from lightning_app.utilities.exceptions import CacheMissException, ExitAppException +from lightning_app.utilities.proxies import ( + ComponentDelta, + LightningWorkSetAttrProxy, + persist_artifacts, + ProxyWorkRun, + WorkRunner, + WorkStateObserver, +) + +logger = logging.getLogger(__name__) + + +class Work(LightningWork): + def __init__(self, cache_calls=True, parallel=True): + super().__init__(cache_calls=cache_calls, parallel=parallel) + self.counter = 0 + + def run(self): + 
self.counter = 1 + return 1 + + +def test_lightning_work_setattr(): + """This test validates that the `LightningWorkSetAttrProxy` would push a delta to the `caller_queue` every time an + attribute from the work state is being changed.""" + + w = Work() + # prepare + w._name = "root.b" + # create queue + caller_queue = MockQueue("caller_queue") + + def proxy_setattr(): + w._setattr_replacement = LightningWorkSetAttrProxy(w._name, w, caller_queue, MagicMock()) + + proxy_setattr() + w.run() + assert len(caller_queue) == 1 + work_proxy_output = caller_queue._queue[0] + assert isinstance(work_proxy_output, ComponentDelta) + assert work_proxy_output.id == w._name + assert work_proxy_output.delta.to_dict() == {"values_changed": {"root['vars']['counter']": {"new_value": 1}}} + + +@pytest.mark.parametrize("parallel", [True, False]) +@pytest.mark.parametrize("cache_calls", [False, True]) +def test_work_runner(parallel, cache_calls): + """This test validates the `WorkRunner` runs the work.run method and properly populates the `delta_queue`, + `error_queue` and `readiness_queue`.""" + + class Work(LightningWork): + def __init__(self, cache_calls=True, parallel=True): + super().__init__(cache_calls=cache_calls, parallel=parallel) + self.counter = 0 + self.dummy_path = "lit://test" + + def run(self): + self.counter = 1 + + class Flow(LightningFlow): + def __init__(self): + super().__init__() + self.w = Work(cache_calls=cache_calls, parallel=parallel) + + def run(self): + pass + + class BlockingQueue(MockQueue): + """A Mock for the file copier queues that keeps blocking until we want to end the thread.""" + + keep_blocking = True + + def get(self, timeout: int = 0): + while BlockingQueue.keep_blocking: + pass + # A dummy request so the Copier gets something to process without an error + return GetRequest(source="src", name="dummy_path", path="test", hash="123", destination="dst") + + app = LightningApp(Flow()) + work = app.root.w + caller_queue = MockQueue("caller_queue") + delta_queue = MockQueue("delta_queue") + readiness_queue = MockQueue("readiness_queue") + error_queue = MockQueue("error_queue") + request_queue = MockQueue("request_queue") + response_queue = MockQueue("response_queue") + copy_request_queue = BlockingQueue("copy_request_queue") + copy_response_queue = BlockingQueue("copy_response_queue") + + call_hash = "run:fe3fa0f34fc1317e152e5afb023332995392071046f1ea51c34c7c9766e3676c" + work._calls[call_hash] = { + "args": (), + "kwargs": {}, + "call_hash": call_hash, + "run_started_counter": 1, + "statuses": [], + } + caller_queue.put( + { + "args": (), + "kwargs": {}, + "call_hash": call_hash, + "state": work.state, + } + ) + work_runner = WorkRunner( + work, + work.name, + caller_queue, + delta_queue, + readiness_queue, + error_queue, + request_queue, + response_queue, + copy_request_queue, + copy_response_queue, + ) + try: + work_runner() + except (Empty, Exception): + pass + + assert readiness_queue._queue[0] + if parallel: + assert isinstance(error_queue._queue[0], Exception) + else: + assert isinstance(error_queue._queue[0], Empty) + assert len(delta_queue._queue) == 3 + res = delta_queue._queue[0].delta.to_dict()["iterable_item_added"] + assert res[f"root['calls']['{call_hash}']['statuses'][0]"]["stage"] == "running" + assert delta_queue._queue[1].delta.to_dict() == { + "values_changed": {"root['vars']['counter']": {"new_value": 1}} + } + res = delta_queue._queue[2].delta.to_dict()["dictionary_item_added"] + assert res[f"root['calls']['{call_hash}']['ret']"] is None + + # Stop blocking and
let the thread join + BlockingQueue.keep_blocking = False + work_runner.copier.join() + + +def test_pathlike_as_argument_to_run_method_warns(tmpdir): + """Test that Lightning Produces a special warning for strings that look like paths.""" + # all these paths are not proper paths or don't have a file or folder that exists + no_warning_expected = ( + "looks/like/path", + pathlib.Path("looks/like/path"), + "i am not a path", + 1, + Path("lightning/path"), + ) + for path in no_warning_expected: + _pass_path_argument_to_work_and_test_warning(path=path, warning_expected=False) + + # warn if it looks like a folder and the folder exists + _pass_path_argument_to_work_and_test_warning(path=tmpdir, warning_expected=True) + + # warn if it looks like a string or pathlib Path and the file exists + file = pathlib.Path(tmpdir, "file_exists.txt") + file.write_text("test") + assert os.path.exists(file) + _pass_path_argument_to_work_and_test_warning(path=file, warning_expected=True) + _pass_path_argument_to_work_and_test_warning(path=str(file), warning_expected=True) + + # do not warn if the path is wrapped in Lightning Path (and the file exists) + file = Path(tmpdir, "file_exists.txt") + file.write_text("test") + assert os.path.exists(file) + _pass_path_argument_to_work_and_test_warning(path=file, warning_expected=False) + + +def _pass_path_argument_to_work_and_test_warning(path, warning_expected): + class WarnRunPathWork(LightningWork): + def run(self, *args, **kwargs): + pass + + class Flow(EmptyFlow): + def __init__(self): + super().__init__() + self.work = WarnRunPathWork() + + flow = Flow() + work = flow.work + proxy_run = ProxyWorkRun(work.run, "some", work, Mock()) + + warn_ctx = pytest.warns(UserWarning, match="You passed a the value") if warning_expected else pytest.warns(None) + with warn_ctx as record: + with pytest.raises(CacheMissException): + proxy_run(path) + + assert warning_expected or all("You passed a the value" not in str(msg.message) for msg in record) + + +class WorkTimeout(LightningWork): + def __init__(self): + super().__init__(parallel=True) + self.counter = 0 + + def run(self): + self.counter += 1 + + +class FlowTimeout(LightningFlow): + def __init__(self): + super().__init__() + self.counter = 0 + self.work = WorkTimeout() + + def run(self): + if not self.work.has_started: + self.work.run() + if self.work.has_timeout: + self._exit() + + +class WorkRunnerPatch(WorkRunner): + + counter = 0 + + def __call__(self): + call_hash = "run:fe3fa0f34fc1317e152e5afb023332995392071046f1ea51c34c7c9766e3676c" + while True: + try: + called = self.caller_queue.get() + self.work.set_state(called["state"]) + state = deepcopy(self.work.state) + self.work._calls[call_hash]["statuses"].append( + { + "stage": WorkStageStatus.FAILED, + "reason": WorkFailureReasons.TIMEOUT, + "timestamp": time.time(), + "message": None, + } + ) + self.delta_queue.put(ComponentDelta(id=self.work_name, delta=Delta(DeepDiff(state, self.work.state)))) + self.counter += 1 + except Exception as e: + logger.error(traceback.format_exc()) + self.error_queue.put(e) + raise ExitAppException + + +@mock.patch("lightning_app.runners.backends.mp_process.WorkRunner", WorkRunnerPatch) +def test_proxy_timeout(): + app = LightningApp(FlowTimeout(), debug=True) + MultiProcessRuntime(app, start_server=False).dispatch() + + call_hash = app.root.work._calls["latest_call_hash"] + assert len(app.root.work._calls[call_hash]["statuses"]) == 3 + assert app.root.work._calls[call_hash]["statuses"][0]["stage"] == "pending" + assert 
app.root.work._calls[call_hash]["statuses"][1]["stage"] == "failed" + assert app.root.work._calls[call_hash]["statuses"][2]["stage"] == "stopped" + + +@mock.patch("lightning_app.utilities.proxies.Copier") +def test_path_argument_to_transfer(*_): + """Test that any Lightning Path objects passed to the run method get transferred automatically (if they + exist).""" + + class TransferPathWork(LightningWork): + def run(self, *args, **kwargs): + raise ExitAppException + + work = TransferPathWork() + + path1 = Path("exists-locally.txt") + path2 = Path("exists-remotely.txt") + path3 = Path("exists-nowhere.txt") + + path1.get = Mock() + path2.get = Mock() + path3.get = Mock() + + path1.exists_remote = Mock(return_value=False) + path2.exists_remote = Mock(return_value=True) + path3.exists_remote = Mock(return_value=False) + + path1._origin = "origin" + path2._origin = "origin" + path3._origin = "origin" + + call = { + "args": (path1, path2), + "kwargs": {"path3": path3}, + "call_hash": "any", + "state": { + "vars": {"_paths": {}, "_urls": {}}, + "calls": { + "latest_call_hash": "any", + "any": { + "name": "run", + "call_hash": "any", + "use_args": False, + "statuses": [{"stage": "requesting", "message": None, "reason": None, "timestamp": 1}], + }, + }, + "changes": {}, + }, + } + + caller_queue = MockQueue() + caller_queue.put(call) + + runner = WorkRunner( + work=work, + work_name="name", + caller_queue=caller_queue, + delta_queue=MockQueue(), + readiness_queue=MockQueue(), + error_queue=MockQueue(), + request_queue=MockQueue(), + response_queue=MockQueue(), + copy_request_queue=MockQueue(), + copy_response_queue=MockQueue(), + ) + + try: + runner() + except ExitAppException: + pass + + path1.exists_remote.assert_called_once() + path1.get.assert_not_called() + + path2.exists_remote.assert_called_once() + path2.get.assert_called_once() + + path3.exists_remote.assert_called() + path3.get.assert_not_called() + + +@pytest.mark.parametrize( + "origin,exists_remote,expected_get", + [ + (None, False, False), + ("root.work", True, False), + ("root.work", False, False), + ("origin", True, True), + ], +) +@mock.patch("lightning_app.utilities.proxies.Copier") +def test_path_attributes_to_transfer(_, monkeypatch, origin, exists_remote, expected_get): + """Test that any Lightning Path objects passed to the run method get transferred automatically (if they + exist).""" + path_mock = Mock() + path_mock.origin_name = origin + path_mock.exists_remote = Mock(return_value=exists_remote) + + class TransferPathWork(LightningWork): + def __init__(self): + super().__init__() + self.path = Path("test-path.txt") + + def run(self): + raise ExitAppException + + def __getattr__(self, item): + if item == "path": + return path_mock + return super().__getattr__(item) + + class Flow(LightningFlow): + def __init__(self): + super().__init__() + self.work = TransferPathWork() + + def run(self): + self.work.run() + + flow = Flow() + _convert_paths_after_init(flow) + + call = { + "args": (), + "kwargs": {}, + "call_hash": "any", + "state": { + "vars": {"_paths": flow.work._paths, "_urls": {}}, + "calls": { + "latest_call_hash": "any", + "any": { + "name": "run", + "call_hash": "any", + "use_args": False, + "statuses": [{"stage": "requesting", "message": None, "reason": None, "timestamp": 1}], + }, + }, + "changes": {}, + }, + } + + caller_queue = MockQueue() + caller_queue.put(call) + + runner = WorkRunner( + work=flow.work, + work_name=flow.work.name, + caller_queue=caller_queue, + delta_queue=MockQueue(), + 
readiness_queue=MockQueue(), + error_queue=MockQueue(), + request_queue=MockQueue(), + response_queue=MockQueue(), + copy_request_queue=MockQueue(), + copy_response_queue=MockQueue(), + ) + + try: + runner() + except ExitAppException: + pass + + assert path_mock.get.call_count == expected_get + + +def test_proxy_work_run_paths_replace_origin_lightning_work_by_their_name(): + class Work(LightningWork): + def __init__(self): + super().__init__(parallel=True) + self.path = None + + def run(self, path): + assert isinstance(path._origin, str) + + class Flow(LightningFlow): + def __init__(self): + super().__init__() + self.w1 = Work() + self.w = Work() + + def run(self): + pass + + app = LightningApp(Flow()) + work = app.root.w + caller_queue = MockQueue("caller_queue") + app.root.w1.path = Path(__file__) + assert app.root.w1.path._origin == app.root.w1 + ProxyWorkRun(work.run, work.name, work, caller_queue)(path=app.root.w1.path) + assert caller_queue._queue[0]["kwargs"]["path"]._origin == app.root.w1.name + + +def test_persist_artifacts(tmp_path): + """Test that the `persist_artifacts` utility copies the artifacts that exist to the persistent storage.""" + + class ArtifactWork(LightningWork): + def __init__(self): + super().__init__() + self.file = None + self.folder = None + self.not_my_path = None + self.not_exists = None + + def run(self): + # single file + self.file = Path(tmp_path, "file.txt") + self.file.write_text("single file") + # folder with files + self.folder = Path(tmp_path, "folder") + self.folder.mkdir() + Path(tmp_path, "folder", "file1.txt").write_text("file 1") + Path(tmp_path, "folder", "file2.txt").write_text("file 2") + + # simulate a Path that was synced to this Work from another Work + self.not_my_path = Path(tmp_path, "external.txt") + self.not_my_path.touch() + self.not_my_path._origin = Mock() + + self.not_exists = Path(tmp_path, "not-exists") + + work = ArtifactWork() + work._name = "root.work" + + rel_tmpdir_path = Path(*tmp_path.parts[1:]) + + assert not os.path.exists(artifacts_path(work) / rel_tmpdir_path / "file.txt") + assert not os.path.exists(artifacts_path(work) / rel_tmpdir_path / "folder") + assert not os.path.exists(artifacts_path(work) / rel_tmpdir_path / "not-exists") + + work.run() + + with pytest.warns(UserWarning, match="1 artifacts could not be saved because they don't exist"): + persist_artifacts(work) + + assert os.path.exists(artifacts_path(work) / rel_tmpdir_path / "file.txt") + assert os.path.exists(artifacts_path(work) / rel_tmpdir_path / "folder") + assert not os.path.exists(artifacts_path(work) / rel_tmpdir_path / "not-exists") + assert not os.path.exists(artifacts_path(work) / rel_tmpdir_path / "external.txt") + + +def test_work_state_observer(): + """Tests that the WorkStateObserver sends deltas to the queue when state residuals remain that haven't been + handled by the setattr.""" + + class WorkWithoutSetattr(LightningWork): + def __init__(self): + super().__init__() + self.var = 1 + self.list = [] + self.dict = {"counter": 0} + + def run(self, use_setattr=False, use_containers=False): + if use_setattr: + self.var += 1 + if use_containers: + self.list.append(1) + self.dict["counter"] += 1 + + work = WorkWithoutSetattr() + delta_queue = MockQueue() + observer = WorkStateObserver(work, delta_queue) + setattr_proxy = LightningWorkSetAttrProxy( + work=work, + work_name="work_name", + delta_queue=delta_queue, + state_observer=observer, + ) + work._setattr_replacement = setattr_proxy + + ############################## + # 1. 
Simulate no state changes + ############################## + work.run(use_setattr=False, use_containers=False) + assert not delta_queue + + ############################ + # 2. Simulate a setattr call + ############################ + work.run(use_setattr=True, use_containers=False) + + # this is necessary only in this test where we siumulate the calls + work._calls.clear() + work._calls.update({"latest_call_hash": None}) + + delta = delta_queue.get().delta.to_dict() + assert delta["values_changed"] == {"root['vars']['var']": {"new_value": 2}} + assert len(observer._delta_memory) == 1 + + # The observer should not trigger any deltas being sent and only consume the delta memory + assert not delta_queue + observer.run_once() + assert not delta_queue + assert not observer._delta_memory + + ################################ + # 3. Simulate a container update + ################################ + work.run(use_setattr=False, use_containers=True) + assert not delta_queue + assert not observer._delta_memory + observer.run_once() + observer.run_once() # multiple runs should not affect how many deltas are sent unless there are changes + delta = delta_queue.get().delta.to_dict() + assert delta["values_changed"] == {"root['vars']['dict']['counter']": {"new_value": 1}} + assert delta["iterable_item_added"] == {"root['vars']['list'][0]": 1} + + ########################## + # 4. Simulate both updates + ########################## + work.run(use_setattr=True, use_containers=True) + + # this is necessary only in this test where we siumulate the calls + work._calls.clear() + work._calls.update({"latest_call_hash": None}) + + delta = delta_queue.get().delta.to_dict() + assert delta == {"values_changed": {"root['vars']['var']": {"new_value": 3}}} + assert not delta_queue + assert len(observer._delta_memory) == 1 + observer.run_once() + + delta = delta_queue.get().delta.to_dict() + assert delta["values_changed"] == {"root['vars']['dict']['counter']": {"new_value": 2}} + assert delta["iterable_item_added"] == {"root['vars']['list'][1]": 1} + + assert not delta_queue + assert not observer._delta_memory + + +class WorkState(LightningWork): + def __init__(self): + super().__init__(parallel=True) + self.vars = [] + self.counter = 0 + + def run(self, *args): + for counter in range(1, 11): + self.vars.append(counter) + self.counter = counter + + +class FlowState(LightningFlow): + def __init__(self): + super().__init__() + self.w = WorkState() + self.counter = 1 + + def run(self): + self.w.run() + if self.counter == 1: + if len(self.w.vars) == 10 and self.w.counter == 10: + self.w.vars = [] + self.w.counter = 0 + self.w.run("") + self.counter = 2 + elif self.counter == 2: + if len(self.w.vars) == 10 and self.w.counter == 10: + self._exit() + + +def test_state_observer(): + + app = LightningApp(FlowState()) + MultiProcessRuntime(app, start_server=False).dispatch() + + +@pytest.mark.parametrize( + "environment, expected_ip_addr", [({}, "127.0.0.1"), ({"LIGHTNING_NODE_IP": "10.10.10.5"}, "10.10.10.5")] +) +def test_work_runner_sets_internal_ip(environment, expected_ip_addr): + """Test that the WorkRunner updates the internal ip address as soon as the Work starts running.""" + + class Work(LightningWork): + def run(self): + pass + + work = Work() + work_runner = WorkRunner( + work, + work.name, + caller_queue=MockQueue("caller_queue"), + delta_queue=Mock(), + readiness_queue=Mock(), + error_queue=Mock(), + request_queue=Mock(), + response_queue=Mock(), + copy_request_queue=Mock(), + copy_response_queue=Mock(), + ) + + # Make a 
fake call + call_hash = "run:fe3fa0f34fc1317e152e5afb023332995392071046f1ea51c34c7c9766e3676c" + work._calls[call_hash] = { + "args": (), + "kwargs": {}, + "call_hash": call_hash, + "run_started_counter": 1, + "statuses": [], + } + work_runner.caller_queue.put( + { + "args": (), + "kwargs": {}, + "call_hash": call_hash, + "state": work.state, + } + ) + + with mock.patch.dict(os.environ, environment, clear=True): + work_runner.setup() + # The internal ip address only becomes available once the hardware is up / the work is running. + assert work.internal_ip == "" + try: + work_runner.run_once() + except Empty: + pass + assert work.internal_ip == expected_ip_addr diff --git a/tests/tests_app/utilities/test_state.py b/tests/tests_app/utilities/test_state.py new file mode 100644 index 0000000000000..e275817f680fc --- /dev/null +++ b/tests/tests_app/utilities/test_state.py @@ -0,0 +1,282 @@ +import os +from re import escape +from unittest import mock + +import pytest +import requests + +import lightning_app +from lightning_app import LightningApp, LightningFlow, LightningWork +from lightning_app.utilities.app_helpers import AppStatePlugin, BaseStatePlugin +from lightning_app.utilities.state import AppState + + +@mock.patch("lightning_app.utilities.state._configure_session", return_value=requests) +def test_app_state_not_connected(_): + + """Test an error message when a disconnected AppState tries to access attributes.""" + state = AppState() + with pytest.raises(AttributeError, match="Failed to connect and fetch the app state"): + _ = state.value + with pytest.raises(AttributeError, match="Failed to connect and fetch the app state"): + state.value = 1 + + +@pytest.mark.parametrize( + "my_affiliation,global_affiliation,expected", + [ + (None, (), ()), + ((), (), ()), + ((), ("a", "b"), ()), + (None, ("a", "b"), ("a", "b")), + ], +) +@mock.patch("lightning_app.utilities.state._configure_session", return_value=requests) +def test_app_state_affiliation(_, my_affiliation, global_affiliation, expected): + AppState._MY_AFFILIATION = global_affiliation + state = AppState(my_affiliation=my_affiliation) + assert state._my_affiliation == expected + AppState._MY_AFFILIATION = () + + +def test_app_state_state_access(): + """Test the many ways an AppState object can be accessed to set or get attributes on the state.""" + mocked_state = dict( + vars=dict(root_var="root"), + flows=dict( + child0=dict( + vars=dict(child_var=1), + flows=dict(), + works=dict(), + ) + ), + works=dict( + work0=dict( + vars=dict(work_var=2), + flows=dict(), + works=dict(), + ) + ), + ) + + state = AppState() + state._state = state._last_state = mocked_state + + assert state.root_var == "root" + assert isinstance(state.child0, AppState) + assert isinstance(state.work0, AppState) + assert state.child0.child_var == 1 + assert state.work0.work_var == 2 + + with pytest.raises(AttributeError, match="Failed to access 'non_existent_var' through `AppState`."): + _ = state.work0.non_existent_var + + with pytest.raises(AttributeError, match="Failed to access 'non_existent_var' through `AppState`."): + state.work0.non_existent_var = 22 + + # TODO: improve msg + with pytest.raises(AttributeError, match="You shouldn't set the flows"): + state.child0 = "child0" + + # TODO: verify with tchaton + with pytest.raises(AttributeError, match="You shouldn't set the works"): + state.work0 = "work0" + + +@mock.patch("lightning_app.utilities.state.AppState.send_delta") +def test_app_state_state_access_under_affiliation(*_): + """Test the access to attributes 
when the state is restricted under the given affiliation.""" + mocked_state = dict( + vars=dict(root_var="root"), + flows=dict( + child0=dict( + vars=dict(child0_var=0), + flows=dict( + child1=dict( + vars=dict(child1_var=1), + flows=dict( + child2=dict( + vars=dict(child2_var=2), + flows=dict(), + works=dict(), + ), + ), + works=dict(), + ), + ), + works=dict( + work1=dict( + vars=dict(work1_var=11), + ), + ), + ), + ), + works=dict(), + ) + + # root-level affiliation + state = AppState(my_affiliation=()) + state._store_state(mocked_state) + assert isinstance(state.child0, AppState) + assert state.child0.child0_var == 0 + assert state.child0.child1.child1_var == 1 + assert state.child0.child1.child2.child2_var == 2 + + # one child deep + state = AppState(my_affiliation=("child0",)) + state._store_state(mocked_state) + assert state._state == mocked_state["flows"]["child0"] + with pytest.raises(AttributeError, match="Failed to access 'child0' through `AppState`"): + _ = state.child0 + assert state.child0_var == 0 + assert state.child1.child1_var == 1 + assert state.child1.child2.child2_var == 2 + + # two flows deep + state = AppState(my_affiliation=("child0", "child1")) + state._store_state(mocked_state) + assert state._state == mocked_state["flows"]["child0"]["flows"]["child1"] + with pytest.raises(AttributeError, match="Failed to access 'child1' through `AppState`"): + _ = state.child1 + state.child1_var = 111 + assert state.child1_var == 111 + assert state.child2.child2_var == 2 + + # access to work + state = AppState(my_affiliation=("child0", "work1")) + state._store_state(mocked_state) + assert state._state == mocked_state["flows"]["child0"]["works"]["work1"] + with pytest.raises(AttributeError, match="Failed to access 'child1' through `AppState`"): + _ = state.child1 + assert state.work1_var == 11 + state.work1_var = 111 + assert state.work1_var == 111 + + # affiliation does not match state + state = AppState(my_affiliation=("child1", "child0")) + with pytest.raises( + ValueError, match=escape("Failed to extract the state under the affiliation '('child1', 'child0')'") + ): + state._store_state(mocked_state) + + +def test_app_state_repr(): + app_state = AppState() + assert repr(app_state) == "None" + + app_state = AppState() + app_state._store_state(dict(vars=dict(x=1, y=2))) + assert repr(app_state) == "{'vars': {'x': 1, 'y': 2}}" + + app_state = AppState() + app_state._store_state(dict(vars=dict(x=1, y=2))) + assert repr(app_state.y) == "2" + + app_state = AppState() + app_state._store_state(dict(vars={}, flows=dict(child=dict(vars=dict(child_var="child_val"))))) + assert repr(app_state.child) == "{'vars': {'child_var': 'child_val'}}" + + +def test_app_state_bool(): + app_state = AppState() + assert not bool(app_state) + + app_state = AppState() + app_state._store_state(dict(vars=dict(x=1, y=2))) + assert bool(app_state) + + +class _CustomAppStatePlugin(BaseStatePlugin): + def should_update_app(self, deep_diff): + pass + + def get_context(self): + pass + + def render_non_authorized(self): + pass + + +def test_attach_plugin(): + """Test how plugins get attached to the AppState and the default behavior when no plugin is specified.""" + app_state = AppState() + assert isinstance(app_state._plugin, AppStatePlugin) + + app_state = AppState(plugin=_CustomAppStatePlugin()) + assert isinstance(app_state._plugin, _CustomAppStatePlugin) + + +@mock.patch("lightning_app.utilities.state._configure_session", return_value=requests) +def test_app_state_connection_error(_): + """Test an error message 
when a connection to retrieve the state can't be established.""" + app_state = AppState() + with pytest.raises(AttributeError, match=r"Failed to connect and fetch the app state\. Is the app running?"): + app_state._request_state() + + with pytest.raises(AttributeError, match=r"Failed to connect and fetch the app state\. Is the app running?"): + app_state.var = 1 + + +class Work(LightningWork): + def __init__(self): + super().__init__() + self.counter = 0 + + def run(self): + self.counter += 1 + + +class Flow(LightningFlow): + def __init__(self): + super().__init__() + self.should_start = False + self.w = Work() + + def run(self): + if self.should_start: + self.w.run() + self._exit() + + +class MockResponse: + def __init__(self, state, status_code): + self._state = state + self.status_code = status_code + + def json(self): + return self._state + + +def test_get_send_request(monkeypatch): + + app = LightningApp(Flow()) + monkeypatch.setattr(lightning_app.utilities.state, "_configure_session", mock.MagicMock()) + + state = AppState(plugin=AppStatePlugin()) + state._session.get._mock_return_value = MockResponse(app.state_with_changes, 500) + state._request_state() + state._session.get._mock_return_value = MockResponse(app.state_with_changes, 200) + state._request_state() + assert state._my_affiliation == () + with pytest.raises(Exception, match="The response from"): + state._session.post._mock_return_value = MockResponse(app.state_with_changes, 500) + state.w.counter = 1 + state._session.post._mock_return_value = MockResponse(app.state_with_changes, 200) + state.w.counter = 1 + + +@mock.patch("lightning_app.utilities.state.APP_SERVER_HOST", "https://lightning-cloud.com") +@mock.patch.dict(os.environ, {"LIGHTNING_APP_STATE_URL": "https://lightning-cloud.com"}) +def test_app_state_with_env_var(**__): + state = AppState() + assert state._host == "https://lightning-cloud.com" + assert not state._port + assert state._url == "https://lightning-cloud.com" + + +@mock.patch.dict(os.environ, {}) +def test_app_state_with_no_env_var(**__): + state = AppState() + assert state._host == "http://127.0.0.1" + assert state._port == 7501 + assert state._url == "http://127.0.0.1:7501" diff --git a/tests/tests_app/utilities/test_tracer.py b/tests/tests_app/utilities/test_tracer.py new file mode 100644 index 0000000000000..995f0e3da6ccd --- /dev/null +++ b/tests/tests_app/utilities/test_tracer.py @@ -0,0 +1,27 @@ +import os +import sys + +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.helpers import RunIf +from lightning_app.utilities.tracer import Tracer + + +@RunIf(pytorch_lightning=True) +def test_tracer(): + from pytorch_lightning import Trainer + + def pre_fn(self, *args, **kwargs): + kwargs["fast_dev_run"] = True + return {}, args, kwargs + + def post_fn(self, ret): + return {}, ret + + tracer = Tracer() + tracer.add_traced(Trainer, "__init__", pre_fn=pre_fn, post_fn=post_fn) + traced_file = os.path.join(_PROJECT_ROOT, "tests/tests_app/core/scripts/lightning_trainer.py") + assert os.path.exists(traced_file) + # This is required to get the right sys.argv for `runpy``. 
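# ---------------------------------------------------------------------------
# Editor's sketch, not part of this patch: per the comment above, the tracer
# executes the traced file runpy-style, and a script run that way sees
# whatever is currently in sys.argv. A plain-Python equivalent of "run this
# file as if it were a CLI invocation" (the helper name is illustrative):
import runpy
import sys


def run_as_script(script_path: str, *cli_args: str) -> None:
    # Emulate `python script_path <args...>` for code that inspects sys.argv.
    sys.argv = [script_path, *cli_args]
    runpy.run_path(script_path, run_name="__main__")


# The test below performs the sys.argv half of this by hand before calling
# tracer.trace(traced_file).
# ---------------------------------------------------------------------------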
+ sys.argv = [traced_file] + tracer.trace(traced_file) diff --git a/tests/tests_app/utilities/test_tree.py b/tests/tests_app/utilities/test_tree.py new file mode 100644 index 0000000000000..6d69e40236440 --- /dev/null +++ b/tests/tests_app/utilities/test_tree.py @@ -0,0 +1,171 @@ +import pytest + +from lightning_app import LightningFlow, LightningWork +from lightning_app.testing.helpers import EmptyFlow, EmptyWork +from lightning_app.utilities.tree import breadth_first, depth_first + + +class LeafFlow(EmptyFlow): + pass + + +class LeafWork(EmptyWork): + pass + + +class SimpleFlowTree(EmptyFlow): + def __init__(self): + super().__init__() + self.simple_flow_left = LeafFlow() + self.simple_flow_right = LeafFlow() + + +class SimpleWorkTree(EmptyFlow): + def __init__(self): + super().__init__() + self.simple_work_left = LeafWork() + self.simple_work_right = LeafWork() + + +class MixedTree(EmptyFlow): + def __init__(self): + super().__init__() + self.mixed_left = SimpleFlowTree() + self.work_tree = SimpleWorkTree() + self.mixed_right = SimpleFlowTree() + + +@pytest.mark.parametrize( + "input_tree, types,expected_sequence", + [ + (LeafFlow(), (LightningFlow,), ["root"]), + (LeafWork(), (LightningFlow,), []), + ( + SimpleFlowTree(), + (LightningFlow,), + [ + "root", + "root.simple_flow_right", + "root.simple_flow_left", + ], + ), + (SimpleWorkTree(), (LightningFlow,), ["root"]), + ( + SimpleWorkTree(), + (LightningFlow, LightningWork), + [ + "root", + "root.simple_work_right", + "root.simple_work_left", + ], + ), + ( + MixedTree(), + (LightningFlow,), + [ + "root", + "root.work_tree", + "root.mixed_right", + "root.mixed_right.simple_flow_right", + "root.mixed_right.simple_flow_left", + "root.mixed_left", + "root.mixed_left.simple_flow_right", + "root.mixed_left.simple_flow_left", + ], + ), + ( + MixedTree(), + (LightningWork,), + [ + "root.work_tree.simple_work_right", + "root.work_tree.simple_work_left", + ], + ), + ( + MixedTree(), + (LightningFlow, LightningWork), + [ + "root", + "root.work_tree", + "root.work_tree.simple_work_right", + "root.work_tree.simple_work_left", + "root.mixed_right", + "root.mixed_right.simple_flow_right", + "root.mixed_right.simple_flow_left", + "root.mixed_left", + "root.mixed_left.simple_flow_right", + "root.mixed_left.simple_flow_left", + ], + ), + ], +) +def test_depth_first(input_tree, types, expected_sequence): + assert list(node.name for node in depth_first(input_tree, types=types)) == expected_sequence + + +@pytest.mark.parametrize( + "input_tree, types,expected_sequence", + [ + (LeafFlow(), (LightningFlow,), ["root"]), + (LeafWork(), (LightningFlow,), []), + ( + SimpleFlowTree(), + (LightningFlow,), + [ + "root", + "root.simple_flow_left", + "root.simple_flow_right", + ], + ), + (SimpleWorkTree(), (LightningFlow,), ["root"]), + ( + SimpleWorkTree(), + (LightningFlow, LightningWork), + [ + "root", + "root.simple_work_left", + "root.simple_work_right", + ], + ), + ( + MixedTree(), + (LightningFlow,), + [ + "root", + "root.mixed_left", + "root.mixed_right", + "root.work_tree", + "root.mixed_left.simple_flow_left", + "root.mixed_left.simple_flow_right", + "root.mixed_right.simple_flow_left", + "root.mixed_right.simple_flow_right", + ], + ), + ( + MixedTree(), + (LightningWork,), + [ + "root.work_tree.simple_work_left", + "root.work_tree.simple_work_right", + ], + ), + ( + MixedTree(), + (LightningFlow, LightningWork), + [ + "root", + "root.mixed_left", + "root.mixed_right", + "root.work_tree", + "root.mixed_left.simple_flow_left", + 
"root.mixed_left.simple_flow_right", + "root.mixed_right.simple_flow_left", + "root.mixed_right.simple_flow_right", + "root.work_tree.simple_work_left", + "root.work_tree.simple_work_right", + ], + ), + ], +) +def test_breadth_first(input_tree, types, expected_sequence): + assert list(node.name for node in breadth_first(input_tree, types=types)) == expected_sequence From 3275fba49b3892ac3e0f512267e2ac19ef84823e Mon Sep 17 00:00:00 2001 From: Mansy Date: Thu, 30 Jun 2022 22:45:15 +0200 Subject: [PATCH 43/89] Add lightning app examples (#13456) * add lightning app examples * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * rm init * restucture app examples * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * img Co-authored-by: mansy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jirka --- .github/workflows/ci-app_block.yml | 8 + .github/workflows/ci-pytorch_dockers.yml | 2 + .github/workflows/ci-pytorch_test-conda.yml | 2 + .github/workflows/ci-pytorch_test-full.yml | 2 + .github/workflows/ci-pytorch_test-slow.yml | 2 + examples/app_boring/.gitignore | 10 ++ examples/app_boring/.lightning | 1 + examples/app_boring/__init__.py | 0 examples/app_boring/app.py | 57 ++++++++ examples/app_boring/app_dynamic.py | 67 +++++++++ examples/app_boring/scripts/serve.py | 29 ++++ examples/app_components/__init__.py | 0 examples/app_components/python/__init__.py | 0 examples/app_components/python/app.py | 24 +++ .../app_components/python/component_popen.py | 7 + .../app_components/python/component_tracer.py | 53 +++++++ examples/app_components/python/pl_script.py | 65 +++++++++ .../python/pytorch_lightning_script.py | 65 +++++++++ examples/app_components/serve/gradio/app.py | 53 +++++++ .../app_components/serve/gradio/beyonce.jpg | Bin 0 -> 132520 bytes .../serve/gradio/requirements.txt | 1 + examples/app_dag/.gitignore | 6 + examples/app_dag/.lightning | 1 + examples/app_dag/.lightningignore | 8 + examples/app_dag/app.py | 137 ++++++++++++++++++ examples/app_dag/processing.py | 14 ++ examples/app_dag/requirements.txt | 2 + examples/app_drive/.gitignore | 1 + examples/app_drive/.lightning | 1 + examples/app_drive/app.py | 51 +++++++ examples/app_hpo/README.md | 64 ++++++++ examples/app_hpo/app_wi_ui.py | 61 ++++++++ examples/app_hpo/app_wo_ui.py | 58 ++++++++ examples/app_hpo/download_data.py | 5 + examples/app_hpo/hyperplot.py | 34 +++++ examples/app_hpo/objective.py | 63 ++++++++ examples/app_hpo/pl_script.py | 43 ++++++ examples/app_hpo/requirements.txt | 3 + examples/app_hpo/utils.py | 54 +++++++ examples/app_layout/.lightning | 1 + examples/app_layout/__init__.py | 0 examples/app_layout/app.py | 101 +++++++++++++ examples/app_layout/ui1/index.html | 10 ++ examples/app_layout/ui2/index.html | 10 ++ examples/app_multi_node/.gitignore | 2 + examples/app_multi_node/.lightning | 1 + examples/app_multi_node/multi_node.py | 36 +++++ examples/app_multi_node/requirements.txt | 1 + examples/app_payload/.lightning | 1 + examples/app_payload/app.py | 31 ++++ examples/app_pickle_or_not/app.py | 55 +++++++ examples/app_pickle_or_not/requirements.txt | 0 examples/app_v0/.gitignore | 2 + examples/app_v0/README.md | 18 +++ examples/app_v0/__init__.py | 0 examples/app_v0/app.py | 49 +++++++ examples/app_v0/emulate_ui.py | 19 +++ examples/app_v0/requirements.txt | 1 + examples/app_v0/ui/a/index.html | 1 + examples/app_v0/ui/b/index.html | 1 + 
tests/tests_app_examples/__init__.py | 0 .../collect_failures/.lightning | 1 + .../collect_failures/__init__.py | 0 .../collect_failures/app.py | 46 ++++++ .../collect_failures/requirements.txt | 1 + .../tests_app_examples/components/__init__.py | 0 .../components/python/__init__.py | 0 .../components/python/test_scripts.py | 40 +++++ .../core_features_app/app.py | 17 +++ .../custom_work_dependencies/.lightning | 1 + .../custom_work_dependencies/__init__.py | 0 .../custom_work_dependencies/app.py | 56 +++++++ .../idle_timeout/.lightning | 1 + .../idle_timeout/__init__.py | 0 tests/tests_app_examples/idle_timeout/app.py | 71 +++++++++ tests/tests_app_examples/layout.py | 24 +++ tests/tests_app_examples/pickle_or_not.py | 24 +++ tests/tests_app_examples/test_boring_app.py | 23 +++ .../test_collect_failures.py | 39 +++++ .../test_core_features_app.py | 27 ++++ .../test_custom_work_dependencies.py | 22 +++ tests/tests_app_examples/test_drive.py | 23 +++ tests/tests_app_examples/test_idle_timeout.py | 22 +++ tests/tests_app_examples/test_payload.py | 19 +++ tests/tests_app_examples/test_quick_start.py | 69 +++++++++ .../test_template_react_ui.py | 33 +++++ .../test_template_streamlit_ui.py | 33 +++++ tests/tests_app_examples/test_v0_app.py | 59 ++++++++ 88 files changed, 2045 insertions(+) create mode 100644 examples/app_boring/.gitignore create mode 100644 examples/app_boring/.lightning create mode 100644 examples/app_boring/__init__.py create mode 100644 examples/app_boring/app.py create mode 100644 examples/app_boring/app_dynamic.py create mode 100644 examples/app_boring/scripts/serve.py create mode 100644 examples/app_components/__init__.py create mode 100644 examples/app_components/python/__init__.py create mode 100644 examples/app_components/python/app.py create mode 100644 examples/app_components/python/component_popen.py create mode 100644 examples/app_components/python/component_tracer.py create mode 100644 examples/app_components/python/pl_script.py create mode 100644 examples/app_components/python/pytorch_lightning_script.py create mode 100644 examples/app_components/serve/gradio/app.py create mode 100644 examples/app_components/serve/gradio/beyonce.jpg create mode 100644 examples/app_components/serve/gradio/requirements.txt create mode 100644 examples/app_dag/.gitignore create mode 100644 examples/app_dag/.lightning create mode 100644 examples/app_dag/.lightningignore create mode 100644 examples/app_dag/app.py create mode 100644 examples/app_dag/processing.py create mode 100644 examples/app_dag/requirements.txt create mode 100644 examples/app_drive/.gitignore create mode 100644 examples/app_drive/.lightning create mode 100644 examples/app_drive/app.py create mode 100644 examples/app_hpo/README.md create mode 100644 examples/app_hpo/app_wi_ui.py create mode 100644 examples/app_hpo/app_wo_ui.py create mode 100644 examples/app_hpo/download_data.py create mode 100644 examples/app_hpo/hyperplot.py create mode 100644 examples/app_hpo/objective.py create mode 100644 examples/app_hpo/pl_script.py create mode 100644 examples/app_hpo/requirements.txt create mode 100644 examples/app_hpo/utils.py create mode 100644 examples/app_layout/.lightning create mode 100644 examples/app_layout/__init__.py create mode 100644 examples/app_layout/app.py create mode 100644 examples/app_layout/ui1/index.html create mode 100644 examples/app_layout/ui2/index.html create mode 100644 examples/app_multi_node/.gitignore create mode 100644 examples/app_multi_node/.lightning create mode 100644 
examples/app_multi_node/multi_node.py create mode 100644 examples/app_multi_node/requirements.txt create mode 100644 examples/app_payload/.lightning create mode 100644 examples/app_payload/app.py create mode 100644 examples/app_pickle_or_not/app.py create mode 100644 examples/app_pickle_or_not/requirements.txt create mode 100644 examples/app_v0/.gitignore create mode 100644 examples/app_v0/README.md create mode 100644 examples/app_v0/__init__.py create mode 100644 examples/app_v0/app.py create mode 100644 examples/app_v0/emulate_ui.py create mode 100644 examples/app_v0/requirements.txt create mode 100644 examples/app_v0/ui/a/index.html create mode 100644 examples/app_v0/ui/b/index.html create mode 100644 tests/tests_app_examples/__init__.py create mode 100644 tests/tests_app_examples/collect_failures/.lightning create mode 100644 tests/tests_app_examples/collect_failures/__init__.py create mode 100644 tests/tests_app_examples/collect_failures/app.py create mode 100644 tests/tests_app_examples/collect_failures/requirements.txt create mode 100644 tests/tests_app_examples/components/__init__.py create mode 100644 tests/tests_app_examples/components/python/__init__.py create mode 100644 tests/tests_app_examples/components/python/test_scripts.py create mode 100644 tests/tests_app_examples/core_features_app/app.py create mode 100644 tests/tests_app_examples/custom_work_dependencies/.lightning create mode 100644 tests/tests_app_examples/custom_work_dependencies/__init__.py create mode 100644 tests/tests_app_examples/custom_work_dependencies/app.py create mode 100644 tests/tests_app_examples/idle_timeout/.lightning create mode 100644 tests/tests_app_examples/idle_timeout/__init__.py create mode 100644 tests/tests_app_examples/idle_timeout/app.py create mode 100644 tests/tests_app_examples/layout.py create mode 100644 tests/tests_app_examples/pickle_or_not.py create mode 100644 tests/tests_app_examples/test_boring_app.py create mode 100644 tests/tests_app_examples/test_collect_failures.py create mode 100644 tests/tests_app_examples/test_core_features_app.py create mode 100644 tests/tests_app_examples/test_custom_work_dependencies.py create mode 100644 tests/tests_app_examples/test_drive.py create mode 100644 tests/tests_app_examples/test_idle_timeout.py create mode 100644 tests/tests_app_examples/test_payload.py create mode 100644 tests/tests_app_examples/test_quick_start.py create mode 100644 tests/tests_app_examples/test_template_react_ui.py create mode 100644 tests/tests_app_examples/test_template_streamlit_ui.py create mode 100644 tests/tests_app_examples/test_v0_app.py diff --git a/.github/workflows/ci-app_block.yml b/.github/workflows/ci-app_block.yml index 1561959f3bf31..5eef31cb8c8d5 100644 --- a/.github/workflows/ci-app_block.yml +++ b/.github/workflows/ci-app_block.yml @@ -30,3 +30,11 @@ jobs: - name: Block edits in tests/tests_app if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'tests/tests_app') run: exit 1 + + - name: Block edits in examples/app + if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'examples/app_') + run: exit 1 + + - name: Block edits in tests/tests_app_examples + if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'tests/tests_app_examples') + run: exit 1 diff --git a/.github/workflows/ci-pytorch_dockers.yml b/.github/workflows/ci-pytorch_dockers.yml index 6365d576c466b..f8dec54280271 100644 --- a/.github/workflows/ci-pytorch_dockers.yml +++ b/.github/workflows/ci-pytorch_dockers.yml @@ -10,6 +10,8 @@ 
on: # Trigger the workflow on push or pull request, but only for the master bran paths: - "!src/lightning_app/**" # todo: implement job skip - "!tests/tests_app/**" # todo: implement job skip + - "!tests/tests_app_examples/**" # todo: implement job skip + - "!examples/app_*" # todo: implement job skip - "dockers/**" - "!dockers/README.md" - "requirements/*" diff --git a/.github/workflows/ci-pytorch_test-conda.yml b/.github/workflows/ci-pytorch_test-conda.yml index ff2783a4695a1..21ab32ae303f5 100644 --- a/.github/workflows/ci-pytorch_test-conda.yml +++ b/.github/workflows/ci-pytorch_test-conda.yml @@ -9,6 +9,8 @@ on: # Trigger the workflow on push or pull request, but only for the master bra paths-ignore: - "src/lightning_app/**" # todo: implement job skip - "tests/tests_app/**" # todo: implement job skip + - "tests/tests_app_examples/**" # todo: implement job skip + - "examples/app_*" # todo: implement job skip concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} diff --git a/.github/workflows/ci-pytorch_test-full.yml b/.github/workflows/ci-pytorch_test-full.yml index 37bc42a33335c..66173e9c14327 100644 --- a/.github/workflows/ci-pytorch_test-full.yml +++ b/.github/workflows/ci-pytorch_test-full.yml @@ -10,6 +10,8 @@ on: # Trigger the workflow on push or pull request, but only for the master bra paths-ignore: - "src/lightning_app/**" # todo: implement job skip - "tests/tests_app/**" # todo: implement job skip + - "tests/tests_app_examples/**" # todo: implement job skip + - "examples/app_*" # todo: implement job skip concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} diff --git a/.github/workflows/ci-pytorch_test-slow.yml b/.github/workflows/ci-pytorch_test-slow.yml index 5c6a95e707008..36251c202c49d 100644 --- a/.github/workflows/ci-pytorch_test-slow.yml +++ b/.github/workflows/ci-pytorch_test-slow.yml @@ -10,6 +10,8 @@ on: # Trigger the workflow on push or pull request, but only for the master bra paths-ignore: - "src/lightning_app/**" # todo: implement job skip - "tests/tests_app/**" # todo: implement job skip + - "tests/tests_app_examples/**" # todo: implement job skip + - "examples/app_*" # todo: implement job skip concurrency: group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} diff --git a/examples/app_boring/.gitignore b/examples/app_boring/.gitignore new file mode 100644 index 0000000000000..94018704d9f90 --- /dev/null +++ b/examples/app_boring/.gitignore @@ -0,0 +1,10 @@ +lightning_logs +*.pt +.storage/ +.shared/ +data +*.ckpt +redis-stable +node_modules +*.rdb +boring_file.txt diff --git a/examples/app_boring/.lightning b/examples/app_boring/.lightning new file mode 100644 index 0000000000000..c85414d8c498a --- /dev/null +++ b/examples/app_boring/.lightning @@ -0,0 +1 @@ +name: boring-app diff --git a/examples/app_boring/__init__.py b/examples/app_boring/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/app_boring/app.py b/examples/app_boring/app.py new file mode 100644 index 0000000000000..9ba11316c65a1 --- /dev/null +++ b/examples/app_boring/app.py @@ -0,0 +1,57 @@ +import os + +import lightning as L +from lightning.app.components.python import TracerPythonScript +from lightning.app.storage.path import Path + +FILE_CONTENT = """ +Hello there! +This tab is currently an IFrame of the FastAPI Server running in `DestinationFileAndServeWork`. +Also, the content of this file was created in `SourceFileWork` and then transferred to `DestinationFileAndServeWork`. 
+Are you already 🤯 ? Stick with us, this is only the beginning. Lightning is 🚀. +""" + + +class SourceFileWork(L.LightningWork): + def __init__(self, cloud_compute: L.CloudCompute = L.CloudCompute(), **kwargs): + super().__init__(parallel=True, **kwargs, cloud_compute=cloud_compute) + self.boring_path = None + + def run(self): + # This should be used as a REFERENCE to the file. + self.boring_path = "lit://boring_file.txt" + with open(self.boring_path, "w", encoding="utf-8") as f: + f.write(FILE_CONTENT) + + +class DestinationFileAndServeWork(TracerPythonScript): + def run(self, path: Path): + assert path.exists() + self.script_args += [f"--filepath={path}", f"--host={self.host}", f"--port={self.port}"] + super().run() + + +class BoringApp(L.LightningFlow): + def __init__(self): + super().__init__() + self.source_work = SourceFileWork() + self.dest_work = DestinationFileAndServeWork( + script_path=os.path.join(os.path.dirname(__file__), "scripts/serve.py"), + port=1111, + parallel=False, # runs until killed. + cloud_compute=L.CloudCompute(), + raise_exception=True, + ) + + def run(self): + self.source_work.run() + if self.source_work.has_succeeded: + # the flow passes the file from one work to another. + self.dest_work.run(self.source_work.boring_path) + self._exit("Boring App End") + + def configure_layout(self): + return {"name": "Boring Tab", "content": self.dest_work.url + "/file"} + + +app = L.LightningApp(BoringApp()) diff --git a/examples/app_boring/app_dynamic.py b/examples/app_boring/app_dynamic.py new file mode 100644 index 0000000000000..6e3fdfa3ccdee --- /dev/null +++ b/examples/app_boring/app_dynamic.py @@ -0,0 +1,67 @@ +import os + +import lightning as L +from lightning.app.components.python import TracerPythonScript +from lightning.app.storage.path import Path +from lightning.app.structures import Dict + +FILE_CONTENT = """ +Hello there! +This tab is currently an IFrame of the FastAPI Server running in `DestinationFileAndServeWork`. +Also, the content of this file was created in `SourceFileWork` and then transferred to `DestinationFileAndServeWork`. +Are you already 🤯 ? Stick with us, this is only the beginning. Lightning is 🚀. +""" + + +class SourceFileWork(L.LightningWork): + def __init__(self, cloud_compute: L.CloudCompute = L.CloudCompute(), **kwargs): + super().__init__(parallel=True, **kwargs, cloud_compute=cloud_compute) + self.boring_path = None + + def run(self): + # This should be used as a REFERENCE to the file. + self.boring_path = "lit://boring_file.txt" + with open(self.boring_path, "w") as f: + f.write(FILE_CONTENT) + + +class DestinationFileAndServeWork(TracerPythonScript): + def run(self, path: Path): + assert path.exists() + self.script_args += [f"--filepath={path}", f"--host={self.host}", f"--port={self.port}"] + super().run() + + +class BoringApp(L.LightningFlow): + def __init__(self): + super().__init__() + self.dict = Dict() + + def run(self): + # create dynamically the source_work at runtime + if "src_w" not in self.dict: + self.dict["src_w"] = SourceFileWork() + + self.dict["src_w"].run() + + if self.dict["src_w"].has_succeeded: + + # create dynamically the dst_w at runtime + if "dst_w" not in self.dict: + self.dict["dst_w"] = DestinationFileAndServeWork( + script_path=os.path.join(os.path.dirname(__file__), "scripts/serve.py"), + port=1111, + parallel=False, # runs until killed. + cloud_compute=L.CloudCompute(), + raise_exception=True, + ) + + # the flow passes the file from one work to another. 
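# ---------------------------------------------------------------------------
# Editor's sketch, not part of this patch: the guards above only work because
# the app re-enters the root flow's run() over and over. A framework-free toy
# version of the same "create lazily, wait, then hand the result over" loop
# (every name below is illustrative only):
def toy_event_loop(passes: int = 3) -> list:
    state: dict = {}
    received: list = []
    for _ in range(passes):              # the app re-invokes run() like this
        if "src_output" not in state:    # first pass: lazy creation
            state["src_output"] = "lit://boring_file.txt"
        elif not received:               # a later pass: hand the reference over once
            received.append(state["src_output"])
    return received
# ---------------------------------------------------------------------------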
+ self.dict["dst_w"].run(self.dict["src_w"].boring_path) + self._exit("Boring App End") + + def configure_layout(self): + return {"name": "Boring Tab", "content": self.dict["dst_w"].url + "/file" if "dst_w" in self.dict else ""} + + +app = L.LightningApp(BoringApp()) diff --git a/examples/app_boring/scripts/serve.py b/examples/app_boring/scripts/serve.py new file mode 100644 index 0000000000000..17c431ca378ac --- /dev/null +++ b/examples/app_boring/scripts/serve.py @@ -0,0 +1,29 @@ +import argparse +import os + +import uvicorn +from fastapi import FastAPI +from fastapi.requests import Request +from fastapi.responses import HTMLResponse + +if __name__ == "__main__": + + parser = argparse.ArgumentParser("Server Parser") + parser.add_argument("--filepath", type=str, help="Where to find the `filepath`") + parser.add_argument("--host", type=str, default="0.0.0.0", help="Server host`") + parser.add_argument("--port", type=int, default="8888", help="Server port`") + hparams = parser.parse_args() + + fastapi_service = FastAPI() + + if not os.path.exists(str(hparams.filepath)): + content = ["The file wasn't transferred"] + else: + content = open(hparams.filepath).readlines() # read the file received from SourceWork. + + @fastapi_service.get("/file") + async def get_file_content(request: Request, response_class=HTMLResponse): + lines = "\n".join(["
<p>" + line + "</p>" for line in content]) + return HTMLResponse(f"<html><head></head><body>{lines}</body></html>
") + + uvicorn.run(app=fastapi_service, host=hparams.host, port=hparams.port) diff --git a/examples/app_components/__init__.py b/examples/app_components/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/app_components/python/__init__.py b/examples/app_components/python/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/app_components/python/app.py b/examples/app_components/python/app.py new file mode 100644 index 0000000000000..1386a699a09fb --- /dev/null +++ b/examples/app_components/python/app.py @@ -0,0 +1,24 @@ +import os +from pathlib import Path + +import lightning as L +from examples.components.python.component_tracer import PLTracerPythonScript + + +class RootFlow(L.LightningFlow): + def __init__(self): + super().__init__() + script_path = Path(__file__).parent / "pl_script.py" + self.tracer_python_script = PLTracerPythonScript(script_path) + + def run(self): + assert os.getenv("GLOBAL_RANK", "0") == "0" + if not self.tracer_python_script.has_started: + self.tracer_python_script.run() + if self.tracer_python_script.has_succeeded: + self._exit("tracer script succeed") + if self.tracer_python_script.has_failed: + self._exit("tracer script failed") + + +app = L.LightningApp(RootFlow()) diff --git a/examples/app_components/python/component_popen.py b/examples/app_components/python/component_popen.py new file mode 100644 index 0000000000000..d3af5ee2d55c7 --- /dev/null +++ b/examples/app_components/python/component_popen.py @@ -0,0 +1,7 @@ +from pathlib import Path + +from lightning.app.components.python import PopenPythonScript + +if __name__ == "__main__": + comp = PopenPythonScript(Path(__file__).parent / "pl_script.py") + comp.run() diff --git a/examples/app_components/python/component_tracer.py b/examples/app_components/python/component_tracer.py new file mode 100644 index 0000000000000..9edc48cf51a29 --- /dev/null +++ b/examples/app_components/python/component_tracer.py @@ -0,0 +1,53 @@ +from lightning.app.components.python import TracerPythonScript +from lightning.app.storage.path import Path +from lightning.app.utilities.tracer import Tracer +from pytorch_lightning import Trainer + + +class PLTracerPythonScript(TracerPythonScript): + + """This component can be used for ANY PyTorch Lightning script to track its progress and extract its best model + path.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Define the component state. + self.global_step = None + self.best_model_path = None + + def configure_tracer(self) -> Tracer: + from pytorch_lightning.callbacks import Callback + + class MyInjectedCallback(Callback): + def __init__(self, lightning_work): + self.lightning_work = lightning_work + + def on_train_start(self, trainer, pl_module) -> None: + print("This code doesn't belong to the script but was injected.") + print("Even the Lightning Work is available and state transfer works !") + print(self.lightning_work) + + def on_batch_end(self, trainer, *_) -> None: + # On every batch end, collects some information. + # This is communicated automatically to the rest of the app, + # so you can track your training in real time in the Lightning App UI. + self.lightning_work.global_step = trainer.global_step + best_model_path = trainer.checkpoint_callback.best_model_path + if best_model_path: + self.lightning_work.best_model_path = Path(best_model_path) + + # This hook would be called every time + # before a Trainer `__init__` method is called. 
+ + def trainer_pre_fn(trainer, *args, **kwargs): + kwargs["callbacks"] = kwargs.get("callbacks", []) + [MyInjectedCallback(self)] + return {}, args, kwargs + + tracer = super().configure_tracer() + tracer.add_traced(Trainer, "__init__", pre_fn=trainer_pre_fn) + return tracer + + +if __name__ == "__main__": + comp = PLTracerPythonScript(Path(__file__).parent / "pl_script.py") + res = comp.run() diff --git a/examples/app_components/python/pl_script.py b/examples/app_components/python/pl_script.py new file mode 100644 index 0000000000000..4ad17b459200c --- /dev/null +++ b/examples/app_components/python/pl_script.py @@ -0,0 +1,65 @@ +import torch +from torch.utils.data import DataLoader, Dataset + +from pytorch_lightning import LightningModule, Trainer + + +class RandomDataset(Dataset): + def __init__(self, size: int, length: int): + self.len = length + self.data = torch.randn(length, size) + + def __getitem__(self, index): + return self.data[index] + + def __len__(self): + return self.len + + +class BoringModel(LightningModule): + def __init__(self): + super().__init__() + self.layer = torch.nn.Linear(32, 2) + + def forward(self, x): + return self.layer(x) + + def loss(self, batch, prediction): + # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls + return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction)) + + def training_step(self, batch, batch_idx): + output = self(batch) + loss = self.loss(batch, output) + return {"loss": loss} + + def validation_step(self, batch, batch_idx): + output = self(batch) + loss = self.loss(batch, output) + return {"x": loss} + + def test_step(self, batch, batch_idx): + output = self(batch) + loss = self.loss(batch, output) + return {"y": loss} + + def configure_optimizers(self): + optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1) + lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1) + return [optimizer], [lr_scheduler] + + def train_dataloader(self): + return DataLoader(RandomDataset(32, 64)) + + val_dataloader = train_dataloader + test_dataloader = train_dataloader + predict_dataloader = train_dataloader + + +if __name__ == "__main__": + model = BoringModel() + trainer = Trainer(max_epochs=1, accelerator="cpu", devices=2, strategy="ddp") + trainer.fit(model) + trainer.validate(model) + trainer.test(model) + trainer.predict(model) diff --git a/examples/app_components/python/pytorch_lightning_script.py b/examples/app_components/python/pytorch_lightning_script.py new file mode 100644 index 0000000000000..4ad17b459200c --- /dev/null +++ b/examples/app_components/python/pytorch_lightning_script.py @@ -0,0 +1,65 @@ +import torch +from torch.utils.data import DataLoader, Dataset + +from pytorch_lightning import LightningModule, Trainer + + +class RandomDataset(Dataset): + def __init__(self, size: int, length: int): + self.len = length + self.data = torch.randn(length, size) + + def __getitem__(self, index): + return self.data[index] + + def __len__(self): + return self.len + + +class BoringModel(LightningModule): + def __init__(self): + super().__init__() + self.layer = torch.nn.Linear(32, 2) + + def forward(self, x): + return self.layer(x) + + def loss(self, batch, prediction): + # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls + return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction)) + + def training_step(self, batch, batch_idx): + output = self(batch) + loss = self.loss(batch, output) + return {"loss": loss} + 
+ def validation_step(self, batch, batch_idx): + output = self(batch) + loss = self.loss(batch, output) + return {"x": loss} + + def test_step(self, batch, batch_idx): + output = self(batch) + loss = self.loss(batch, output) + return {"y": loss} + + def configure_optimizers(self): + optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1) + lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1) + return [optimizer], [lr_scheduler] + + def train_dataloader(self): + return DataLoader(RandomDataset(32, 64)) + + val_dataloader = train_dataloader + test_dataloader = train_dataloader + predict_dataloader = train_dataloader + + +if __name__ == "__main__": + model = BoringModel() + trainer = Trainer(max_epochs=1, accelerator="cpu", devices=2, strategy="ddp") + trainer.fit(model) + trainer.validate(model) + trainer.test(model) + trainer.predict(model) diff --git a/examples/app_components/serve/gradio/app.py b/examples/app_components/serve/gradio/app.py new file mode 100644 index 0000000000000..7bb3e7bf790cb --- /dev/null +++ b/examples/app_components/serve/gradio/app.py @@ -0,0 +1,53 @@ +from functools import partial + +import gradio as gr +import requests +import torch +from PIL import Image + +import lightning as L +from lightning.app.components.serve import ServeGradio + + +# Credit to @akhaliq for his inspiring work. +# Find his original code there: https://huggingface.co/spaces/akhaliq/AnimeGANv2/blob/main/app.py +class AnimeGANv2UI(ServeGradio): + + inputs = gr.inputs.Image(type="pil") + outputs = gr.outputs.Image(type="pil") + elon = "https://upload.wikimedia.org/wikipedia/commons/3/34/Elon_Musk_Royal_Society_%28crop2%29.jpg" + img = Image.open(requests.get(elon, stream=True).raw) + img.save("elon.jpg") + examples = [["elon.jpg"]] + + def __init__(self): + super().__init__() + self.ready = False + + def predict(self, img): + return self.model(img=img) + + def build_model(self): + repo = "AK391/animegan2-pytorch:main" + model = torch.hub.load(repo, "generator", device="cpu") + face2paint = torch.hub.load(repo, "face2paint", size=512, device="cpu") + self.ready = True + return partial(face2paint, model=model) + + +class RootFlow(L.LightningFlow): + def __init__(self): + super().__init__() + self.demo = AnimeGANv2UI() + + def run(self): + self.demo.run() + + def configure_layout(self): + tabs = [] + if self.demo.ready: + tabs.append({"name": "Home", "content": self.demo}) + return tabs + + +app = L.LightningApp(RootFlow()) diff --git a/examples/app_components/serve/gradio/beyonce.jpg b/examples/app_components/serve/gradio/beyonce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68b6084475b019bd37db953b87c37ec905b79b86 GIT binary patch literal 132520 zcmbrlcUY58w=Rqd3L+w)ROwxMm#%b(p-K%!Iw6Ee@1Q8Xw;)~V9R-rmkuDuVPbdxE*nlb^tlCgzB1+DF1Hui$P(6=%ya=tP*pHO>GYZhOqi>rr>uRPno#4_L6>#-n|F@C411d zaQoJsJGTk%5D^g)-t-Q-krNP75K*# z|HH?y@QBE$=;V~tG)zH|~+11_C+xMe?06j4|H9a#s zhgrt0tgfwZ{My{YA08e5J~=)6bN&ymTLgFhbIAS;?0?{*xWRRsknj%Sy?=1sy6tmQ z?@$mDv5MTKe5rTO+Kq}$^gZ!orKB(4I!M^X^belcxR2kb<`7@z#Qy{BU&#K?fQ9~F zA^T5Y|Ah-f@Zip^o5{OFL7+%*eaMp^P4K_hPkZGLwS}cmm|C0cJb#ZmD^^L|8d3_? zek*?$k~i(OEg*;nJ7!`;U@-onQ>sGmHQTNy$x0ostJU}VDK*5tfgY%aP@bizAB?2? 
zRBWe@&w;%Y#Zf+7mNvv{Q-->LmdZ}?gf;hW;XfMIm-uUJJ1Qcw1sH^DC%EKljvtZr zb*uDETJ_^UjuUiDVxgXPvH?|j=*H9O*@=BNIUHOg%IJ&8b!?E!&yxT;e4aD_BUh*U z0~TT0NJsB`cR-A0;+wtNa)Y^=vYei|%|cN@8|ByYO9uu5>>cTCTxXD@c|XQp2B zHz#PI-?Y^xXaRy`yRMnh{GDcV0v)E~f;C!UEmIPtV2Ot6ciN~+U;&tF&j1b6gd*-qxz>TK1s~SQI7Rs_N&}<`nhVCV=H}88&w(czR z5j8P@Ub0+x#M(Gr!_M!z7R_LRG^%(!9}!ozkJ$bd$oM%Ze_P#&6*Vp7`A7Gv1PEqi zM|*xA=w*;aqLh7n#9;w`5NAV!ow`28K1ckA(J_H~#gCQ`KFx1q3iX{8A0w~;Hx7(M z{51EA+6H64-710NFx%!sdTv%mN>NkGMsWS;GtPs)u>G*b63IL$b0jR&px|Z6uTL7T zHj@$>v;77Ar3)}rrqdH91|A$p?sumV1V-n!T=ubU~!v)npa za7wEQqFzvddDUbLEmgszh}x2@VI{u`blW}Sas6sVK3=7j{dcTh7HhJ1RavQ#Ce$w_ z>@-UtsfNLCyLla>j+vkM)@XdtfOj5sDNvyq?AuyBw_zbwnC{P@CFhxBoaTC~(VzUU zsBoS)Z=uVG#bhD$;-drYh+-3G1*ysUu!t^d8FOTle6~@PP_&$nd=@d~hSCRcipBx3 z1&zTndShMA)28ajI&BQ( zOpc)tCzK#F3_fDUt+%D0m0fc;RsS??Z?6$n5Ol%iq0N`nY&s`@zFE-tGniW^f{C~( z9uViFI~@pg51q#JX-)|2k872yBUkkNxIX|Q{K*CRd~`(o)V9sv;iCbJL(~T^VL>7$ ze+lXld{u%Woa67=p0h-?d}Mvg@wWTxeGP#TV8VwUw7EGr&Z7eRl2s~~z43k3u^xNi zk(^9eXHbq!LaM2NU7~{MuZ`O+kRJRQ`>RF>Z;R9Z?Fc_xG)r?cxbPuBnW+0n<;$I~ zvIR=CrR)U8U$nBhmPAu(f8|tS{QXWY&NtU_5bdYYlz@OuPc#B`c{2XX-)kxQ@@-vv zQi+p4qD6$_qT<(%!`t#Zem+J<@EH-gPwWtFHkaiBZbCnW_7$MAAu>nkxMG3I93}el zNoEqX>;T%PKCS(9e zI_r#|lzoEVe^?DaC~6Oj_!Q)i8B}jM3xYmANwIXM8rt?CTI;DaRuSW|cKu7>n6|2R zSKR%grCsRR_ix-Ao#H0zl1Q1q1X#jtwxY${Z~b1d2C5Uys)6oay4g>w*^<`!riU+U z^wDpw#|~sS@#Yrb(-Sp6!)_R7N4Ozfck{<~e|}VIh_{`pG+EMkRPh&Dnc$Umt;H>~ zIp@V3{@+`VpsgV6WqVC~*|s9dC3}c{sp4#voOI#GxJA1bi>LF2`U&$|GyX&AQu=HJ z`?OyXwW6A-T(4QUvDZ{7dcuE1i364Sz4u*f7oCtuBkx@Inz?B*{gZVr4aYYsgG5bc z%xRbTN;9jRZ3Qa=!pcl(xO;FFQhKbqlMU}vzflZ^1^1JxAN(U_pWt=@H6wd!O}TPD|Mb^fE1o6 zCq_PNWWS9BeNL2Q!#)&u0BUPE80`w7iuy}{3Y7Od@aZv7QIP`U7%E~IKGRc69*xQw zeKp@Z;-pT}fd#V)-0D7j&vQ9@=7{gZ8HeYJc!;&cvYI55Q(1KXFr-ay`PwkPXVZv` zmAO8#{g(P@k1i$wO&*rVgwSCG%UFKES&^S9TLzq&flTC#tqEDv0hy+Sxy+AD)Jwr; z$^8(*vM3rLe5jAKg?}5_@<8=|YPocPh9pw6r#0PYd>?}PV~B-1g4g^}MKV zvk-T1_)@%OMGO-kR_+YuYDk>#p^aWL01JA``(xHPKnxBu%UB5G(Xt%Y!r7P9yf~XZ zR+;_9f?7J>5qO47+{|S`a~&+ji0=?uxK8b?zh`woHPUmHxQ=q3IV~Dy-c3_2b7w(i zq9yB-R5Gcyny#~!vx1a6?t%)SLB-A}$zYxXrgBF%G20SLXHj+PyeCI;y0hP50&H13 zBN)#%yXbFko6@_2)O(QWam&r1Rz-`0o{yA3N4uj1XSn3ILBXHMiTB=>9rhp|y9>D` zTN5-_D+NPy!ZXW>Omu73)Lb*PwORhk$9TemseLtNA13dOjA`sTEBKy{5sKoP^sHdu zlf|}63aEFT)&U^PHMDb&{cNPt=>tBaT?cD{#b+PXh0SLdu*%+Z!k;+LX?~_B*ta*d zPgI8u(GRz#$`~~lI)|%Ef$f-Bi_Xdi?uG&ul+e7D)#Ce)!8H&sFNa+>h=1Tq}shhYsJfRXylc@95SZZ3*34YM?^kFDpaWq!juqm0>>p8z*OH4*! 
zIKk)5wf_Kz7B}%+Ms%xd!?C=GZ(lAt1uG)#khb6)1bEIkIRuLJoRW6eUcWsr`kd;d zV@Kd z+-E532d_k%M{p6(G^&%s`4=Ta?z_VQa`*`n(-eOe%VEXrL?|71_fo7K&uKSADp&a zNYV@e(FW3a6x|F?Cdx3sGU+SXHn+2S>)mVSRIA|Vu6ad8Cal%3do8c8;p?u4=r6^; zj24hv-^UH}DzX^MOFLpTZQ^V`1BQ@|tbc?ADUq7|y7;^MMMh$t($e?Nf;kKd9f%NZ zPbxDUi~(rKc?;l@NCzD=^Do5j+GgWb^VRisk~T6Ds9B7RfRErO+= z-`Wbx9ku<~Qc9tJDg_zeyFYn}Bw%(Ov5NXUn}urClBrh7%E>7`7OLB}(`kAntz+l& zO!g6kqfN_~OElHq*6(XKuARJ*=pPk6BVFs|qD#kiD(m-{A==n1PDjW`C|ew312ysw zi1pNkm`L7y_$mrHAu)mrazPpN&rF{6S6=ZBxv2(uBxvDmsfH5kv2*eUVgWp04teNt zTvV$rb1JyW$u2i;_9W!<>-c+D>$z4@O9@V!pCfv5`xzDYAZQv6n zmDZ1R^BEx5tp<|ON>p1&CNfBZ;DQVplG-a{C?S-{?zzBf`5q@|rAHrma>G5q>Ildu zKmB_8o4|Jo8u)%dz}v>nbz?M(<`mTuQ6aTOC!~mmR~RiH+0$+Wt)6^e@a5+Tr+pLA zTK@oe@F_aUHFoTx+S%QH9s2p(NBx!lz!Tfe@D;o%Yv*cLT3lKlsbvEcD`>LYT54AI z_Yu0Rsc(B>boSbUvWHvf(>p}&PtuT&?1`d!IMSV)3VzKW2PQ;K+4P4)~`|x`M`ew41oKM1Z2D#QHv@4yUDD z+bXaVn-{pfv7K0gThAZ{fs((Qc(M_tm{y%DQ=c`{N$BqvYwPCLx4+FV*D{suFqEm* zeD2LPqR}{|qq?(=-*%Sn)<2X#_$eNc6XR{BwQnol>H6=1F7IR1BXQ(Sa-JHp8lkr+ zWL^IN+FDJ=nu>S9a~ziPD3&MB!x}!Ay6=Nx-!xF$vg$T%<`CvXaU?5lU5c?1m{wyP zic~Dm8FB))-_-vA+w;KBb#J3<7O~q*-VVI+j*oQK=<2o!e`MO&K1p*ETivDg+eaw* zJlWW`7%;EOzXo2tj*X&e+Jc3%I)<%baIRM==3IqK#a1DjTVm?V!9^g9;2QliC&#Q$ z4`3%}DdFP?N%g&!t?R3|qD|j(<}s8NEOlCSEhxD@O*Bcg*Il&y&b#(d@MQCRQ)#BR z^Q>$vFP+`Mi3PpNM;@TUGR*Ol6f;E>s-#OJNfc_s2ETp29qRUe8u5?9KM$b^s_OT5 zTE+X!1Ycy+wCh{n5M0IfcOqy~Cb<#atrPx!$tPB7^=Equ;r_PDw;+oqUjGp?~?WqCEY zY)aDFX}XTHD%zx_Ax5~h4?3fT*xzP_%1rUb--;d`@d(zhHN87jo)7GucI!*HvblJs zX0_ICFB0BlM+(I)riwPVyi{gbp_uFk50BB`+w;aV$EXWi8;6ee!pB{>jqUzNhTbci zZ397Z6l)<>zPHo03#M|7xo69SjiVL$>);DfsmG*RUs(Y1Mwre!-XbUI6&J;Qs&&Sxs$i2CrqPXj+tdlN*bNc#bUbfm&6ttMarm#Z6j0D^(zZW zH1iw>b=8TsTX;;C!P%k{&E?MVw1eJTc=91%dlcqoC{BABe86W{oEMXNDf% z?Rr~HG|{_7{gX6BA(AheNlYs+W>i!A=cxFK4-NQ3S2y~wQzwaaN4C<-$#5r|N(-)B zJ>2k<3SQq>UPn8-7}=q0?NRbC!Dm$*>icH7QgV~Jvul|(cI@8v(%Np%qRVunX#6hQ zJr}yJw9|dlyYsR6f%`Q0W*t{b_~m1%S>Bi~wOeg7Rf^G)b&g?qaFJdk+&ZAPSbVF( z>$#l~!ez));C~RcdtV0r%@^wp!F_%rj6UX%kz#s@E41LFdUb;Rm07$-WoTeiVMsx)z6QO}CBh{12mE-Dz(n&9tY*o-WjF zbu002ZpJ%XhSY5JOARwnj_>#1XVV3+o&n|x_m2o&Y4_i>?Z=2bVESFIgYcu^y!!Oe z#T@f$8b+z&pB-sddYmxJ0l7&elG5ESl~T>-+Y~l2i402^`pgw-H8UDYalBOH6!ubS ztre|%yXc!wUg-IJT9c!QX>(eg*6z;R=uTEE?Dh;F?>Ss zu94x55$!FRuC)IE6YE-h`iF?TWvRt)u*YGdL34M1eRAOuZ{o4G8)UqK-nft3bKx(A z0JuO$zkUvW#vcy;8+cjxMbL)%86}>ibpH zuNb_O-Cf+wScYJqZF~~Yt~J?iJ}r2IKsrXH;tfAf(|jRgcOyQf;r{>uK@pQmvYI&w z8g2HZNA`}hYXosW+9kHLnnp_`TD$v7{5$Y}#BTy=*1k7b8^it{@eCd$xr**jF3w|R zq-$0&$9u>U+hbB`XN`{87CbQ7xF6W+VCP>2I8o$;s$uzQDwkB@DJZMWE4xJ|^xsRh z)8%p;WH4FYUyZLClZ6aSYEBfa>Q2#=R3Uq(DK?_F-$eF2C*i(_@bC6F_(}G^9QcDy zwbZTjldX8KRgy)vyw$u-EIakbf)&S@_fuP}7LmmmouNkowh~9-*T!3q+9&=BZ}E*R z%$kRTe`XCUP>uAG3>4DkIgYHcm;jhGR z+K0#Y{w4U~9k#b=r^9n5iQuzua(o}4+Qok&Y0+OoqGCTr)M-Cr2(WwMSu zA^Q7K@W;ZBfx0%8ABdW1wIOF~C6|V*BaUrPR=Qac_BDY>;D#uGIlH~MhR!s^D3N(d z8-8Ke_Q*LK&kJflsX&Pi8WiQZ^~IG;;t zE-mC#nrIZs4az&XmHz;vPuj1ySX<2{ z!fUhLtSjOTSv2|Popjq*jxc`Ar7Uf?US9rTe${^nbT5Qjj+1| z<+s*fRnj1{(k@1yaNDfzXO8uX#EBeB6i_Q2i4kwib6hP9HY%sooTW}REzH%Ml5y4f zufxe`eZEJFoT$=RnJHGN)RbeYgk7wp)AQEdwmw>#;?G<7&*28Oku;lK1}mQ%T+2Ky zzT)QMf9&53+uJ~lSZUf9&?yL)q!ptnrZYBy!unL~ZtSaV%>;?%_4<{ts)-W5e2A-NB0cT=<2l*y&nz ztV_F7x0-jKP}MI2F^&X^!%SOWCHI66duZV>(lV5#DAQBAzbsQ%*6(!w{)*N;y360? 
z(f8{Oo`RQZKbe{}K;(rlJHMp(qZ2T*vPj})ECBm5OG<#T|S+vtUt?3=r#IU}N zWL26dEN7cLXSPc4@7u#po5R;X3%(rQ$7ye@XkHwf#{N6gp^+pu+SiKipt;g?tCdL^ zZQD-p$kXhNoVay}I)ENlC-sZr55u1e*z3B!r>>+>_%6jG)%-_!G)f|a;qQPYhgtD7 z3{e>pRb{V$KG_g@n48_&05tZJ?}$Drn^*C@!_73&=@4FD*x161O2Z1OV+3VI!_E=IFwW_Q z07VKuQ1NPu9a8&Fc4d&=Pc6_nB_?JK3?!0Ed2uoWf--+9_hz^9FZB_5FuFF^(qF(g z1saB@acUOM&IWw%D$zZ~%J5P^0LI$ze-w>kGuPY%#*&YM(Xy}cGmj7`hG*3)_A&;sV3hoMwRzTwdd%x==Idf)1j53 zbdDx1D@AUvi~wYP!GY<356#9g+pT?t@N3~lnR{<}<9`|c@8Q>kB}=VDIT0Tc>(UZz zw0mF_Jc`#bBy&guDkk&2cWxeS@RQ-xzBSe~zYp3;ut~d8)$ir`S4-01A!eRr!Do_K zaBNj_vP>U-Hj!Sh@yp^Kp6PAip9MuNpMv}&5x2AnBvLk`sN4mD;`-u9O2s&lcnaT?06>KyUOr;4bl;p@w+X-%f}NvnGKboK6MLWGtkg+8vdHBU`izu~IU z?vK+if<76PE|DmUB2~AXHdK!wz=$13Q07?W2RZqQ@sM--kNZDpv+33`#^FHoe$MXd z&lj5%aRyQm!7?kj0rC*B#@-HpE4~eAMlb9Ywv8oOmffwD6+F$WAh<Wy3g!fZfNURoQWthh7$gwOa zGbF4A<}64cLc!6tvm~l^kEZmU5nd@JmupONL6$({%VB`UQ9!}Q<0I@Ol6Ega<9`mc z_gL-x=9#>a=BP}_;zGQiG7-0c7a(#la&jx_-3}-|&$>zDm8FeWbaA?G1a2Z{agQ)D zAch#@+PtW{LNRG*mHYI*zk%6Wv=fr|udC5JZnS@W^zGfN@N?;=+D6$NZqJ9vUCP8^IPp-J&=`PT%d9Z1Uzg$W0DRJKX2;#eciSUFi8|lLXSOKTYw5VX~RQ^ng)Jj z+m;KDGv==&@mGcKt}X5+GC~ZI7}C}#ooAOTpF8)1U@YB9IplPzOOh)6J9+K+G;2z5 zn^97;()`+5?K|1_y4Ruj>G50O=9zgGp(Iw#Zj**d7(^MCG8iY4&aNRFPS*uUBO@Ze zIQ)I^TGCN8e`ZElnpXuC)RhD%Q?(d{0aw)EoCC%`pdS+W0{p{xvRK_TmqA04tRqw* zfdK+ZBDW*=n;P67nov*>erWLz!0)$9G=jUO%Ujj>J{!$z1n3fv0l zp@^$Rsll|ZcAC0tPQ4fP3h4b_I^i@^(k_^{o6h zXi$;^WkJL-wgzxWZ2tff`*-ymSCs1B6Ki7N#>_BEhFmZpfxsgijO2QA_5FbOlkk=c zcerS62xTl)pp1y~k_=#A?p6nUPD%LyB%Y_@zZiHV+etT@1c(%{T#h!Aj=e?>4}ZqK zvo7K()SMqWOLVUF_;0=TK2tEtmK#__rlhZa?WUS*y_@<5FEzc>Pqmj_b=$7}FTar8 zqeeoJ(1Fiha0x%3AFWa&kVjF+xB2JRxAkjXj8wiK^dxim^XcivLqtSLCO-2O zC*21pjydc8b6;UbYE3)!(-veji{P@vRZU{*sSw0}9V3f=A7B1)jEva}*eBMMPVDH3m1 z30^bdSj4)$vuA`XSJ}wgrw6#t1Fd?dgB-AUfufDl4<<S&^2FoJ7&Vs(;hiI4gv=G~i3ehu)3UOpBUPgrC`VZ_DR>JvQ>|`G@;_d^ue&;V;G&OB9CgNaEEja}kb7bnQ|Y zMZ{6ymq}6+bm_QaG?2t{v4B6G&j$ELFAzSLt7;4*)2uHq-&7J1jS9y&k`jEf1Yc@J zlrB|_uFb&0h5ow#0BNf|BgNhoc{K=aZS*}`O!2mpEzRud_E;{o@-=HyB#XQyis`r4 z5!pm=WPUk+H@E#8}@87}obQZS1ysU-49>-Ka|Q*5zqO&Mhk zk>*@o2ZC6UxwvOCt49G}2>GVtRtlV8oR=$BZFjTkuh(OI(=S!AH0rKZ7T3JpmDelz zUeC$->d!^+<<+i@;k`g<(!+1!PZWz=Js!d$vQ4DvZqkOHNcOTS-e>Lik~=9>v4sd# z%D>&e2P|#86YyJ7)nbKW)x2GK0_m6YVZPC6rr1Sgqv}(~Ajr4VqJ2wVaLfhW#pq@u z2nWQ!27EBT5`HXrd*SV(1E=V=clxJ~HDp{HTQx}Kztin*j|nt(u|=c^fopi~C6U7x zW)~HoMX2-%-ZB8gQN>q?~@O9!he4>Xk&V zX0%V$YVE4(epi0czZ9bQi}82F7P=(2iK9R*yg#Mfhw}{jZi;`in&Me99gOi?&pd^Q z6Scv(+N3Tocn`yuZY*K6c+Jq$?k;rbW>*qieV!R4wjlh@T&z1V*&seJ30=N5@bAKR zy8f+h?2SIJs7M{-oH8faBiS|m*48Qnen654l$@z>=WyWt8>p@3hjz1RuPv3#cUpbc zo%X2XNv!nwri~0UTFi>_JeKboKqEz0kOl>#^D+7kF-C|NP(B&Eo=D&~51$2V_nB6`IuLVz zpxD^E1Tb8-yA1N(U8xS5cAqLBRLF1E-x6JG78+cB6KSt6uC<%nE5h*-Bv|}OrC7;# za|_dF4@?sG&37e_(cQqJK?4Ez5f7>JO$vp3j~VGQIs3ih2D8*zO#ZGxuq8% zExQQq((#f_jN#^;jxFS`*D-W8t!iyB6zK5p3WZ_9Xi#PY+K!k+D|c~bSxEZ&i0aJVX`}LWMw0tk^Uh3 zC)0i?e$?L$G#MrT08f2$Lee~uDqGqe158nK8%-fDijz7?7n1BF6H6_$tWV57o$&Ur zeXaN>!g1+(m)U$7rK;KEnDqkQ-Ie#onI@ziV*QW*djQmL}ZNGfLMJtZTXl8DOZ& zoRv+Y>A0nTdsMeeWz<;9S5k#JMe^x(?47@*-2D;wnc;`<&&FFT3#$f`!&f@Sov&VM z6Wm6E@(n`6T!hOFyeQDcBpT+KqC8gRN}}E~jj%GP<3DAMKF{J`?X+}Di|sWb)({$e%d^7Q;onv=lb?4fp=BeTT01((;+E_@E%`L>bZnI#TQMV>Xk~Fxwn61sf zl%v!?V@*569vAr4@IO@k(bBbx-B(zTQSnP@cHm#WxvpxKy7kmIKw?XkwYQ2W^!u4h zC)xHNDR2P#9JAT1UI9yA_NsPvz2g~1PhEGnTm9So-FP~9d_-Q2Q(HYXO8vjjvhV$% zX^^CLv22ZEQ5<1aMse~1$t0Ww0Tps%Pys~^^B)NPrF;jgd`j>>li?jM_DdLb9~D>) zYFNtv`YLJquiGJ$ZzWxfBq#i|kx@U2z4FbAK_2qE!Y<;xBV-M#fOIL0K2JIA*14}7 z>eJ}{AGo-*h)a1ht+k+Ti9CrT2|lQixdV)zI-34~m$9iq-APcLHFU0}$ta}Kwzqoi zuC`jApJ6Lh!{aE|rDal;RX8~$(voh?r>2izEzg24{v!CCwV#8&2ly;1-P&6%-lZ(@ 
z#+p1aIWP-rdwA}dWRgbiAUN89D;u*EwVmYGKQ%8p7 z;%N_$WmSF*%Zo1@%oG2w! zH;qpy_7tS?ZZ&B}50ZpeEMaKqqMhw--F;17E~h(boG+mUwz%(k?>cI z{3>-X7~bl3wrgW!rfN}}9WKgLio(mog4$G4@c_z=ad!>m()naGl0_GtD}kE+a6fME z6y0n801AqVZa_PY^p~29#}T>nByz-H7XDE` zYn@kAzWCAL`>j6xFKwsLpX`5Oo*8YHd)-?=wT38eS}=yz8!J1}^1Hg+eXNHfMo}J$j{ny-$XGblQd2 zi~Jwq2*S@X)inwADWM)(NfJ#GwCwId#Lp$iFrFoblM() zajVC91)?)gbE{lidBGM<={&;T4I@mnS7krDlqdSar{T*cgI@UMuDz_bV^Y&>BC@vA z?TnKIxz_HrYs-B~;quc%5{sKm&kS!9NX-v1zu_l} z+WK^g+;~sn>P@8R=w?-t=pAEMx0h2$&`1}_f=PEPA+MHjohwFPZYoi;=+c(GHhOuz z&$fh>N6jCzq^{F?S$1EouC>u$L-wQio2L9I{hU52Y2xxZwJmlr;0d)GJ(ibmsp@y9 zR@N=v-ZBNfrHQz_v2=t8n&U^aRK>h%7x_2Wb^T9NxbcOp(me8c>1U)PNa4c8oy-|V z-y+&_CdmaQWh(p=U#p+BpN;h|8TeC4(Jm&vlTW>mQuu;qQo<|ULh9yw3mY5Ktft&w zHlwTES-hENnky(I!I7PRCAAxijY8oAi0$s}bnD18wYAJr%N#;Sy%!J>n?W+A)v{cyS2B6bAiTf7R4I6~D{cneiL|zOkCs$E^ZUjFl)mt$wXFF1*H5vw zI+VJt{k@%x5TKY`$n*W4I6^5ID#ApN;iCa$!iFuL=3fO(eSanMMY7V;YkTSL;L686 zsyLbBkaddbwn-AKNX#S+BE%6(*V9)PQG5*2KjCZmnMRM}{d-Q54*_^$NFkEz!x|Os ztT5`HFx4QzlE(X4ca|Lj79MPD*P4+?z=*%=G;s-8(RQ1q$hx%Mozi`+@;^VtVXf~~ zg_pHl`unaoSJz!^){jTFryJwjog?6{gumel&~I%|jJ5nl;tfW|ad#zzQk{#UnW6~g z;J0X@XN+tUeX3Ktc?aZeR&dfKghz8Thq+>U$c0{Q!=i6ua-%zU05}y=_rw~OsjXf3 zgHv-PlT2=HyvYox(lm}0naZ;`j2*HZXL^h-I#F%5_UV5n;$@g`b{R6WZh2UooE@Qe zjl=@L41t=`oeyUy)RS|qDvnLN?WT(Brthc7=TnX$N;K^nbmeHJWSVVj+WKAE&%L#@ zO|(b~ua;UloZxgf5-}i?=$=}Rx!8LUY1YtrvRhmqF{48w62yJw!m%nx%((ypI0x4v zhK^aTEtC)zcDW9l?L2o@))NYbX`Qrw3M|w zn}7z=56HlD4i0$FBil9X9|!c+)Vx(-5*a5qY|}?DA$HnZtIsSuNXs;VStUNIPax*J zUj8s8k_elv zI^sg1K;RwMmSgh8RU6dg`Ev?$M zo9xr=+sOT%_yeO`&7t^OQ)~bc%O2!lLd|aMW8J{}v6MnM^%)iW5%5n;k=oiuj0Tf$ zoZCtA_ifoKuLOaPa6#n$KbPOLMTA!tR<) zv`$zlLCA2*c44!hm=@=dE5t*C~SNEoy(Isr2nR+7`^4mPS@DUD!WNC9Y&eEk*;+k=s0TC_}%!#;E zCmH_$zIT0jBN^>fTH)ino)m=;HVUbHB9tMp%tivXF_F$l1dP|G1$d{`?{Aix^y~O{ zzAt~0`>m(oQNGn}9^F!M3&k!m7%|C&&n>$I#&eQ{4cOzkt_NDZie`Pq6S~q2~Up;lV zndf?yoNawPkjT=$S}cMlAsKP71_~t^JeLHF{G8&r*!(?ja(ug(7F(Gam6g6`DjbEC zqXaC8o(i9t2m}lY^kW2)OcU)B#VN>HUt>JV2`$FZH=|-VJAo`gLPkjKG*1lMT}=hl z!IhJegeh>u?+vk&46dAjtWHiy^o8)armrons#|^DjdW*AX)Cv)-Lu0R;fuv9+|3w{ zDIt^Ok7xn#q&v0_5r#0Ji~vR}=kJJK1GIZ0XJ9&|yQ9Xy#FcKC8@wCG`c0v+xmHwZ!^Lp}Tr#Ui zBTXT2ueWSSB3wH&5X7hilg1S&PAN&>n)ki^+c($nzuwV}I`W*Xv`Ojbvh!VD&(!=$ z_^0sbMHJ`AZbLM21rZ2KZeK1^M$sCB^KHfm`9Kx<U`~o>b!{#h*$icW)-g3Ji zZe3$=9ZAl1{G+k`qWHv+&esw7MWc}Z`dkkok%=n7&fk?X1LOmqF<+lw5wxgdSCSZ> za6=@Bp%fV*Q?;N%Pww`dt1wc`Hi5`BwgSCaN)A(LCugO;Ys%}tps|!8T9xd%owaE+ zt-g=XuCL7ejq%Tf*G-v$k7D77Tn)fU_2=o| zjMwNV#H~4A;_aO_0`m@2BmfEdnuXk;1_1ER{J`Xa&=_M4C+qTwkrkecq`@T`}1)CO;mz||R z03JIL`eXk9*Ib;VZKbk;H?CQ^<7wkQxIV)@J?qxBtuk1P+&IKYq!2#yHd|_{;07aU z&QGUuamHxg9k|ssSgkywo5@q=AKfZ$IV^c)3PxO&ILIQuS<7p|;bNs`)g{$;Ppj*# znzq|5Z~4w`hMi1BDMcjZE?qA6OXlvD{W|nCejn+EU24*4S8O8WD~+so4XWqvj-0O^ z)3vBs0$0+5uvZJH;GPgU6UN=O*l?cyX2juY5f4TmJxP>6Xl`AKDW7 z?q*;WgjTTKTnQ9~;x8&F2Ij%}*B>d!`)lFv!i(FzJHuky#tANO6Rx`ZyG|bWLA-j#~K9MS=YkOvCyS9)8$fIE~`sv-P+%!tdB=5!Ok&olf0u8-u_m) zUTWK29@+Z?+UnYmj;=Hva_VSpF5-jinw;u@E5!3!+~{-5Di(r$O2Lfiw(Ouuroiep zN<#ktN*@Ka0N)J!Pw?XLqrcR2l(_x#%L>D%n;D-?)NW_E`JqcMvdawS4=J|-6$&{b zzYP8sd_UB_9@y%BExNOo6wz(}0Jj~aw??_O)=27V(MQICZ)|a}B$l%71Tr8U& zNfi&@Um3nC+k7Xt@dSPoG1*@HTkyrUrySAB*B3fwnc_Vz!h8EDk~T$-NUp8Scb6Gt zwv}a(WPdGpeovp&`sCEo=1C;Kysow1ub1L|hEq?C^P-Iks`-VJ9_og&haVO0okBT{bdpa8l__7sf_aof9n2k|>s)ie(b zv&=6ST-N5jmq4?be$Oh~%695LCZEh_n99gxf(fJ#8F&^j;bJbnpZhg@I`IDh!;J#Z z)`WBVpFW9T# zEBLF%*EaI}RPjcW4eZu86M62si^eC@G-ia9o*g?(d#En8Sf_2;h4hj~9>~DQ;qQw) zPvW18o;KI~N2|>?qdvEMx{j*Tq-rI(xs}@9*2XXbqem9V!k|7~vZ`PxTJ=BLhxU{4 zAMGXaJlN?*cksrOJ;Lec<8zzVdxSIUm(p$<r=aRtP;*Kpc+hVo}w 
zZb-UTw@WzKvIt?*q-S-J7_H32Z&txt>AYE~X_r&l>KdNwyXa$0Hp1veWV~43)f4SE zk(G$vNejdyQv#Q@5_w9_q?+>EEl$oKhzlLZl?lDnZ5H27LPLF)<5`mSCy~MZ)y29_ zIV6@=00Ic8J|^qvFK$j{7m!V7XXQW?OJ@z4Yj+4hibs_$9pr>?<{)=8jgmSR7IMZ$ za+93uFM9V%?Izv7l`Wp0Ye#G%-8?knp3d=JNyV#cr*A#%&%Hlph#Gxo!=+(5yjB<2 zcb7NOTuJB8reEpKw>HrLr6A3*1R^re9Iq9*U$es~ll4d9AB-jNpTOS`UZ5Y^-YeBE zH0=$7hy{k7ab<0yTD7976367}XH~g^SlQexscj0ZNGtQ(_A}5M!=4q>uJu5M{_=Y( zOPFGbj-{qg6|Kygk&FiJm1AXZa~Fj44K>~(&v9*Q_BpMixl|u%zDujnrU&zD^6cZ=V)%-9Ts=N$;iGqG z-EDV!Uh!61w!b6l@Uo2@vK113-m|sbtfHNj($TK=y}uTE*TL@-jRV8bd^FTV0^TbI z*WTjww!?L0B(dqR=$7;E^WwR*wYRpC+Ay+bQMi;$tGJ4nTk*8knyQ zpRL&2Fft?P{vnR?{BBs}W+L7h?WT;zEu^=&j&@u$Zr&NMNv?R~!WtKd{5x?H_>S^D zCI}+9F-atPwT0Ejsi?;cmg_4;ZL4Ya_A=VsNwFlmS9f$}dcupx+P9A5uxT{Sz9Krt ziDjs0&XL-byR?O@Z*J{lDz5|HSbdh-?lnRuzOq!3MQ{shszp*tH&cvlJ+zYDt*))D z*XD6igs#+^ac^Xsc4;?nH@nv8{geHrG$?#=tz79Art(c{{{X|b-y32}dxJc>wfBUy z+iP7hLNIwREH2Yhj7bZ^WtM$7L`01=EpO;wvvu~ZckzGW{mj$Hs@V8rS@2G;c_pQ? zLmV(_8t$>-xhDP2^Y#jZ=`x$&K)(6CU{cdd`Z9laB z7wWdA(&b87O{m%Uk5$#h>`_}sA-8C*<+H<;`%I9+)09QXuSX}St7bR~DK#%^7Z<&o zY2NqIU0T=CejavXDAU8@r7wmw{I2z0Zr1+*Ep@T|QrXDS*~GGKb}`$rWm2n?2;3NT z-WV6b9rNj2$BnNv`%M}B&!comK&)hRW5 z-qCv7Mv~py{{VigZzT_o8r$Alt=6mlAF=q;`(*q`Uk~^P#JW1Q+*k7W+s2V;GHGLV z^xCzjhZ=ak%Au{5EMs@m>_fbSEP`eSXaIjS{{R;gQ_?;!_!d7AMZJmQ}}E9Ug}nV@VR~l-&$%@-rjhrJVD{h zSf!RZp3X@%y>C*|CWmj!8o_NPlq=?Z)5CXq9Wa0o=Um>US=YqESANc|NI^G!6#1{( z)^}Uzt$mNxvKXoqsqZgnqc^47zLvjjJvD1(ug3oX0d$+o-`Y##J?J6r=1m>EJkl%q z&kvQRLe~(?hk95v7ZQ_ftg81OSC%mw5$azWyjLaOvExlEOVs6-(?zh;`~~5oXj#J{*jvqIEyd0C{;z#4ie3KhI9pM_k{sMkBnB_D z&lQ-K8)h4r6nT|WWmjJ#5na^8(cP%hrjqXC7_F9;(ciAVr_|x1p-!WI?=+i9^jBJL z_1fN-JP%UWWY#qA7+PKVSDqi1*GSXL$+XL-+-Y~(Y<6}QaLbmKD~m|wwvtgd{CuR> zUnz?%^E4j^_1%9~w7a#^uB3fZ_}a%Tc8uo!>S!K4C2W$|Kbhhvql`UqGqjs3;EK2lg%1YtGp<_Ncr*4D^CE-xj*Sjr;1 zki@qP_3GajK0bIm_IvP3{5kkXr&;)e;#|6WSzUPgD4O+rFcnrUL3AaR<%(Te@<`;h z{?ULyi)|vL4EAxYn|*FoI;m1rW}_GG_D)XD>uo%=`5!%wf|T5+Cam8tb+)~9*8Ml> zWPE`?hkhUa&)Os0c*{Yu@i)dLNwq%;d^(+Cy3oEE$1G+`j~skfxQb6M>%{jj9G6}m z@U^hMw9}!Hr_>0U76<&h_{rkm5PU)LMb53O-QDUdc{I&5_aT~PBjuJ>jj)hGFw0v) zVo$P1g@k~}+i_Gr#Qk5$-GfG-3 z#7U)zBbg8no%~g#PA#skVQF3|R!du(mR1aG!r?c3xCRLot-jC~&Um7T3`SPJNXu(F zRiW*hO~NWKOE%nIyIX6oF0A=neHSb|lwIX_Cb#YS=)E@Y^X{Q=@=0w4y0ai+imFZu zxm6&7c8#r;9^~Y5YogR5H*005#20;>Tij!4GE7%-ewio&jtAWh!SBmm+sUP0yfUM! zM#{r}KzPm?H$#)djjqHT1|$QHE1vhH|d6xFi#pV;~}vf@KB1 zWm3OoG^XjwHfz~l`dM1nww>(lwcXE?!`?p9FWqV1-P@(PwZ6Jq`fGdcU0~-?NNtMC zD#Vbt0C`}%N>V%l#_;1O_)7OR-HT|ns8+&aWV=3bZHxqvNYJr4X24C$wNS4hEQ`Q% zo-e{mv0h84+<;7CIire0o$6gYsMPV7k&-k3=jJ&dGZJ?-$2VHMlL8Cft4gGtD7V|@ zJgRYk%8Kd8Jh!5QTot1#m7^Ipp&K;%yK=kf>isXLm$OIQ!^zDrd0scY<)nK4JM6k{ zaj{v*Q_h)L=eoIcL@H121e@Pt{{RSltZ;$L6kwx@{WkrWJP$3k*M(L~c6O56Q@fo} zK=RS7Zfza>#lQeO>hR;yhag~l4Wa4M>)tA~wYE)>Lo5@5fNe-%kTfx{!5JWK3=T z%gw9bwYSyvT6DI)pR`kpP)RFW?t3@Wwe0QFL+ZZ>nNl?K3qu>G)HvQ4gPwZe9G*vU z#e2q;cW98q8cvEz6Dlbv$U!53RD+ynjN{(EY|!K}`761?t{W*Nq$uN(ci;ol9Q{3g zPQzK81&vn@$Rm#9lNy3fO99{1j+ynPO*JK>*SD&DZT&0lsfAcMT`MPky}nodZ@v5a zB)5_z%+VHj!mw2U;E6hM&Isy#2k5iMYC}p1k)y;xkg+WC1(lTTR>r`r3lV?@G6@yU zMS3A+Q!>o#cV;FbLbs@OUI!hHImS7y6q@SNM!8mx84Q7<^AHvQ7Av*XouG`B_5=G=5;7D>7IaGIwCCVTMUkGqest>64Fa^H;iW zog_h=w5sPhG8nSl4a&T2BaczeFl*6MSBC9E?h?2qWN{R66(_2MAG_P0+Z3MuwTxtj z)?|<>yEvR3-y#e!5uEhMJe*)0k;oXK)QeVa>udeVf1dVxUtoprWwwp|>t9`tR(mMY zG+Ave%P#nxjBLJg0u&!VY1$Mv#fX5#31?&XtF_Ry1(x0{LVUJGXwq2OR5Gc;sq;U0 ziZB={7}DmbnD3G$Zs*cwl0k&(3OBIBs(%AQn`!(K-@{k zf22HJsQr!^nJzaz0BB>Lc=8w$Gt)UB^gqlB_`~9EziPYZ^VOwD2s60zO{+O7(MIpc zFVyfD5HNYEttBstdTHr?wqLJ(bwP%U=GAL-?%tNY```SL@ejpo#F(m1>XOB6vN#;? 
zeZ;sZ0)V7tafTdc1E8;jej~Xs+<9S^Si^bD>$Ghz8B~j&HXc-C10Cz@&m7+*qTv!} zo6eoqSvKyHLFPA>Ez4x7D$LXEBVrXRjrGqfw@bJA_qp)Tk2KhBXR6#8<+`=B)Gy??WJ`3l zx_A~V4I9gY%29I&yj$zTff#$J5y~WP!|$-!jYzmth28m6+m$P()z$p3eQ(=C<1tx$ zU+*+_*81K%E3Guu+kVFr`zCxKkK#qo#V;03Y#P_X-WfK!d;&0SEq}Cbe$1%zND?nC zY@)dG*plDi&eGRM@Hd0B%_j2lU{`Lpsz+>Q)C;mCzH3@q zq;Cic!q$7ZxA4WkIX`FpdOrpHHHX5#VTRgYi(W0#wQmt6sVjV%4W*o#t5{2M58Si2 zhSN^BfwJJ8PT&Sme|h#aJ}uS!W#cV>!X6fg>iUO_d}(K*>sD~I^2esxFOv=J{-bvZ zZ?ITe=3{+=-;}ZZ1$sI08vS{2BW~e$pO1{h>Ttd*S^WX>BYo<`&IqZE)*tZ){Zst%m4; z0y7Gu>JnU(#ms(0a6w^b3Qy<0EBK@1KaKu5)%96L?yqp~2A87Q*#`+}19@ncI$UMj z>`_4=X1ct}y~o*Er3(wLcCK`-Gr+n%^V@}=S2lC5rKm*`nAY)TRr>{th6X!ZW>=E# zc!WmkMVBZLNYj3ahs$VXKFY0V!mcZlF{Mu2@TVus%+z((&iAuz+e>PE^;+`9PEopw zU0#mMTj_mTU3=?eC&OM0vGD!W8g#P2W|r|{xV)Hvx`oyuaIC;E5nDgn1T3H<7RxG; z#lIEn(^$)JZB{rFOle*@rt-Ycwh5<>Il~znD{Tpu?8_SFIU3z#3|QfGtCp79-s$ep zPXu#ZM?8|oRW2k#(V2|I`AVYTNhZ=aBci_L;BdYnmh$M&I%%ta4A1sxV~@$X3FHCx ziCz?zGVq3xnXUGbCJa`^5HLM>YE`S_T}sY#lu~P%zF5ac%WjXVTJ(A|j=W<{D?PPu z%iZ~R?Y;KY!1#ZAD|nH7SE(!JF*n)(GV+ zLisO;ei?YrU%0ilx3Rjuz3~&rVWYzmZ8Jr=R!G`5Q5}t}Ov5r5l1AGsr+iUnzf?R8 z;XMQ3e}FACJNu13OZ__jEoa4s37zdPF07+fvC?6hCi5=rbZMlPTNv&fOQ>DS_gC>U z7}aNpG4!yssz(o1tZhj9DpcUFE4yoJwztvW%I2AU2~?VOBK?(|r70)N%NCzhx?OGa zN4rhp4-fn-(q8Duw{cptrM$7D29IoiXw`LnLD^&3bS1m{4WN`0cdFf6L9s+G>F`gD zJauvLi^n&&utP4dq75rj(rqJ-N1o!!+en7)KMv?}!z-*pO(tmJmQ+@f8;#N3Tg^1_ z%@Fe6+8g$e)x1F0J|6Ix`y1*q?$iiFEV_M#znu2=6UyVw6GtSsm$wNTzxqtO7^#ib zxL?`!@5Hv>5WFXOqPs_Dr(Nk++LfKwJ9 zE-V-D5ikoadw_)YsyYQ6~8CP;NnE5=%OwW7go8M0XP z-9j}nTSp$9a+c3Moy=E-Hbri%SjQsCENBch4#~J_VRW* zaM_c6V_&-wcjqUeQp&BcfS{?6O5w4dM^J00)R%M$Kk2HU{Bi#P#=Ps`z4f){!_N%0td}x* zdVUyP#?jos&2cK2r&syBz!6t0s$?L_j_Aiei>ZQOKs&xqt1`P^dp)LxI7C-?b(!+ehl)jl>^THN31nof&xbtR#e>4nUmTvrPIWBJPK zkX9RrhfWey+DIoA@}{}_L3qN)#9F(-Yh$Fc=$BTu@Wutz%*_?@$dK-o6lwg4qE;Z1 z6oN)DPpo)@Nw!E7@8xQD5+N>+|&TygI~UVNEN^r1id@mS59JAD7?oP(O>h?a#yYi(A&N zVet%q7Bx*%T)UPb8MA^n5=2p;+ZUdzZm$zZ`^ab5v8jAlN6!zA+-{C0F|SN7GL zZ`w*#BP(wg%C)||8~OG>Qo=*nP@@h-bqKC!y4$x-*ZFj}+I}g!(&5rPFMn@=J-We`t5&(7Z97WSboq>zUL)1zky#mOHhb z{>d%0_P4DhP|eNE#Kkfsv&b2OBypEgtdHe-4~eDHEaA5>J^I{R-5qAkJH)s4OfGFD z)8~6|Wkg6G-sx5~bQ8nARNo=biwM7Hr2Xkd>A&8;4ea`Bd!E(OrDWeXD!u#Z*Zvy& zd7kzAL;Pse{12gCd?Wa49NJV~AkiZ6ABcQzkc%xt#ZI?kI|2DU2EutaFk6G+>1Nw3 zT1HmNXyk@{PTbr_0FUysUezybh4tmjO>M4QNbPX*$NS+Vi?T^jxdLdUX(OE-0aZX) z90Ol_{9Loub?qbJz5UEg*H(TWywRTa;Nn>{-RwTg9^J`&BzbJLC?=GwtqqV-6e!LY z;_r&DqFv2vdWK$YQsh;&ruR(g~hf9}&s&{gzb@Sz+l# zJ98%9x<7aCTYi_^`TTU7P1|d0t-oH~KQqBDG|frxu60`pW&Z$)t7) zRvADc;z`0I!oFNAq_zT*7S`(8?b}D52%1T4ri$;(jpSQ{a7!ePNCFQj*ATMc5@vIi zWdoei{AH-05Ne(oxQMLq$8&gv;~2P_!q_}9K`Wo!TwU6%$+<8YrSeMe+grLmtEKp( zTZ-dE@+>t}k8)|(GORZD&|0~M^41HMFC)4jjKHVPo+(>=Z6r)1U%Z_hL}MD9jU@#r zwRZHipY_{g=PFc!lw&Wp)MUC^@n7&?Ri~-*7sTHQ%cox~o?ME=l-kO}_fq0W{HG%h zv8x0OIK~L-E6yQ;cC`5w8!jP0t+;MKe8@&XW*~q#-ay82f%*sW3&ED&F!0UXP(?I8 zC9_MplFi~U%#% zW-4o1mE#-KlGR14EpKbTzVE)9n)1qC!i-XBr5SH@uA_ZDcRh39$ADJvRgvx@DAwQE z_V+VBeruE;X?s?Xe4&g{><`flij9ImKdwImyfgOuxaB!yw`8)5VEfkg+Y)(goj?U; zlgvm6`_Y+W88ZNXK0X*-#F^33+s<~H>gMj^VdY#0xnJJ2`%1gWqC&ZF&ax{Z!3Q6* zKeGpdrM=U%jYz2;Y%0a?RZS7sibx<4Sa`!tz2$!-pG~#5YyKzQ-w1pXZSC!2bn<+-nFewLGjfUv>$Q6J1Dg96 zMDV*=!j4#%b=(yj5|>}PpdPWX`^0c?K*6qS;pd0pkXl=P2KHRd zU0thf)miGkey1s-s>B7W%ZmncS7lcNqi{;bDY3pL3tuLF(kSef=)F_wyYPJ6X%i zGh}WCFw&?~#&IbMr;HXDK9zobbIaO?B@}$wP6M$Dv6zSo8=e?)F@Qd5rEd$grJmS= z&CXw8G3SPjR0MO6*YO0@G?%`)k7di-BORo$-yuw8Sd45etfK^AsL1Ydw4&D9{{WxK z?ehG!nMO4&X>Hoix8?r;0qTvloS4J;P&4fFaFLm8LF4pVKU4Kc6E}Sn;>h`m5eSI6Z-O=f}(_3FG zAt8h|#T+v;DguKjRQ=W?jx*PeYRuY=>5LE+{Z0taIp}_eoN#J0rL0plfs}~kkojlikI0DQpH>|!YEg9k-e%MEPo>)b 
z03@62?#^1guy%{)*4;d}v(wLeb@TAwh&t5Tc%DX`wlFceys7fyH`^%P^9FKAAZHs_ z)K|g35q0$YJkiOyrJic+vUoaVb@XFz+hIjxfYw0UQQAewFk!hk-@%E6pp(EUpSj?95I^?<`xB zAp!>FeZ?|u%vOu%b6lnA1sQ)Z=JM^e7i{Im$ju2c|Un5 zr}ERMUia(iW1>;!O*f~PTQvQ8ba!{@vG7NU{4}~Hy!R4EAZ2+Gm&`#R-U^F!#!5Pp z9(h!GixRL0b}DUlMrU z*IU<`;dGx4Si#~gLr!FpS(?h_Lg<=ZwD$i1zeY1G5h}?LaAcl6+sRt@GV1MGS8c0u z%hukF^-HFwk(t3Y7+w3(Yo|*;>0i0zHogMVd>wCXpkIBT`!mB@6qD+@j3p*?)TV;# zS@8;@?)k1RbiFL;z_(S@q2u?7pEdDp@~44(0ioVoOL?s8+Fq%x_?pTh?{Kr~D{+4- z7+yHuILOv5^qU8Z25Gd^RR%aK+I&T(-Rt`Pr)DO%oa=h@Hj$kkNaVEBZLW12D{Vr3 zP0Ubj%+l#_$}b8QoiCy!MI4czTh)JO_1Cw)#z? zUg{UPx3{#gw6>1YO|B%AjF1oQhwSV7CtQ3k@HdF{9eYxO*Tfe#+PvO3@jkyQ>6cnf z?xwL_K9!KrpnHamD%rDyu{t5x&?-J@dmxeT* zY+YVU_Un6HFHaGFU}&~8N+w&ozp_a7_u8zhi;G)En(Yu7QsP;qi_0E6$~^$vxkNyf5~x_cvO-gI-A`=BKD?Hw9YrRn*j7UClGZ=?%1N8vYA-x5a-Ee`p^Y zjZ?(BzM-vZSGNrdO>-+VTP&Vn0bx~ChA5y}W^w>je=KK_Sr_`|zpZQfjm)N9LsZtU z1Ki(SlPtzUq2!8GFDQvyacNRLjS@5vsv>!Sef|4Ad^GUqhqRkrbHozb+TL8;X&+~^ znG@|gjmEP(MrOE4Gb|8V==SqnEI^B7b{HJ)A7Mu?!Eoji+fk(nV`oY>t0biPl2O(= zrK5g#^GA(Vwj&=_Fy@kk8d8J0i?yBarTRagnmSuAf?7B1-SDdK#naQeuis%u;f~z6;fSPd1@y_Odn>`tEy+nB`rP%^{vf z*bKl%u-&L8;#AATcu|as{RsWH{xJ`UHny5pjuz6-O@a+ND6L|F6(dxQW4D>2CwZ9! zBYFMfMQ&ZoK6idT>N>3VSM#JzH0$RWOlW0g^5btTp<=j(+HWmc$UnVSkP@I~t0Sjd zH0Q*jTu?})A=Mbd3`2!WD9BbFz7gpx+W zC`W`clNa%MxEKJQ0IkmpSn67>)D}8zwAWWxk&P-liR9P}fmv>EdiiT2$jFdq<#jH^ zkhK?uJPlz3E%vc_AKJ`pH0>zK1o5)U43BYmcSz7g82NDhlq*O;^JYfM52Jhu;p^L& zMx_OcX_`KkeB0~cD_WzixGgL*>a(oP4YVFvXSa^#AQ^;lqcPY$>{cd@5}e^i3Z;3) zI9)6AXr8OC+B45kok_ZroAzqUm2B6o`m25ZXV8BQd@-W@8_*=zH4RSRRq;*Umpn1t zd5tU@WD#30+QdmaDOR?JWN)cSwn-$GXHVTk%syuLwfjT9sMG1%4D!XM*^RMY%=<1b zBxvM~?jc!-X|89K{MRlF!B#ohR!GzySK@EQ6XHvItt&;j{{V_|(|nqR^|Yi*smE;a znDp}7h_lic&Q-U!frDyHOWw%F1&S}0mP!0oe{(bnutRR%b;q9Q?x+iSaG*k6W4B1v zRH!V&8_63$;LCCaR)5-=np3FdctThAQ?-&?H?mjLOIqkLd97Hu;c#g}n_kYNz1y-| zyX)J{{yR5bCDiqMnJz72n@y5g3yI-(X^JT+em)Fu)m!x>}niZt3fP>$$S^JLwzu4z=b|F)8f3lytlTr za2`1a`bi0r$UbI*GZ&W>sTjeUXL1yl^8QC;{RQ|-<6A$0{vXjbX*9K#{{UOK)@(H^ zdBoxG^qpebNHrMl3d*tzh^?iH0T?LoGdyG?1Gl$1t4bJH(x+uP)QnUkrJ}ig?KSSY zJs)F}1saN6^&6XlN-5~ACYo!}{Jv-O!~XyTmH73fU+7=6j<>Eoy{vv7_?hD`f|mu+ zF0XHFP7ZutD9{f2*P2>uS} zdivN~K(;zhj=VqOt1D)=f^@jj{4I5TWpQLCk*Av8?&Cwdu`e_wB$68`R(B#m)mO$J zv_FXd0BVogBgelKFXEQpP4Pyvrb|8C(5gjir^Tw?w3=s?97P({uF?w_6KN|T3%sMo z$L6yt3+fJ(UE@k=%E>m}uPyGjX=!bCx;-onDr*v@CCzm1-j+!&n`pPc{1f%#z}_rB z8ZR_Y19+cEjU?85L*h>mX?pdO0sWP0r`*8*0BD`k?3kgABet}#o+h}JZY)+gmlH-0 z>%WQr0A;Tact#(Co+xb@uly_V6Gl2`ilvnB*8KLBL2|-01dS3?}HYe2+^!AF7I^RGR|)fL2MnGD~}4sm-1XR z;e@Wj+7oXT&K6N6$jxDvmur( zC&qpqxw_KzI9N5Y)gaNXTmxsC`>)~Oj^xJp7ye{^|qGz>d%s>y`@flS2JBN zZM4^|x+kvv&k6A*mY;EFs>i6`F5v_NPQK3;mp1ngbq>sJQf=jkDj;FFZc@M#kLSqM>H~7=zSByR+`0L_4t*Lmhc;`^@&Wm$86u1^Q_g1$0uZLP$7i4x& zS=%aU5kz+{m}ACBC*iom8%q;}g(oIiSt{B12opp$smn@{B zlS_9cXVu%Mrl-o6UIo>E;UMvTyRBW`NpYjasoZ(a<-?w8l9`50&zzqDxP8TXpBF>%NE7+9sypXg?+|LXq2*YlySj!L4(-ctc*f@H z-Jz2|CgRdXBL*d*iB&)Z8^5#uqYsY!JEhshs=@uG7l>}WIjzp|$_!ei<-6YL*AggX zQzgnkUP1s=l2;^w!Q1inhjFW0OB}IH2C=0{bgJ{m*SV_D_4NWyELecYSS})6^vg(@jI*%QA z&Kq3|!n!?`(A2CY(yetkwKZoe62)_IYZM`+X(WVM+8d2BXqD35Yc{qb7Lp^MgZ?3F z%=UV1jjXEhTxq(Dw~_f)%{exyKaq6r<&{)W$Ps4h& zZ)-HM&kmCFDGP6TVkEe_-vQX{^2HE>%}W^MGRrvJ00ZNXh!S1d+%$e@N$tMh4VWJ~ zODnl#w@}0`69Aal;z)y(=O(_#Af*{%p$=5+)$e;-M4jyPX=%%|TO-BJB{)z{o8*?y zt9EOBZ2ouiIGYRWpAczntp->;OQBBEODdwNkeZqPjNY_-?oJ>95z`8Mo6sd#C7rErhe#qC=+7cMZcGV@%Z`wpk#L zQ-TjI=d?r&49hNVZBFwev~3yrhvP4YbgdqHjXUfuVX0bQJ{!5@yrxFX9ZyW009*X&2bgNqsPqfUfMIOg<=POZH$H*ldCyel;GT4T25C{*{yZf-oNlA7`bw>1GPl4fOvIK zteaab!HNCc{hZd`-%7Dkc6n09B0xZGj~bUM1~HDn3?I_JH8gJn+3CI^)92Hz{{VE_ zMwbyO87JgL7an#-@3eWW=^hAoQ`rnP^X@p 
z_aeR=@GZT}F+kCcl0y`ziQ^pbPJ0nwQTSU_zO%E;_a7;4V=6ZenFO5n z$WU-|k<%df735 z-T8LA+qIVe0C9COv!iV!*7u6hJKJx&*Iz@yM}(&G(Ib{vi5OuFW|W=Jn)U>vO9e7 zf&dJ_Z3JMBoMyb+#B*B%Ev2k}UzS+zDgh`*(rozX!V zDms!dK*u0)Uo!kf)?ZYiLJp+Uw4}7Xn|E)reV*Gr^w{}V#wPb&yq?wq1Z+#0RuqgWA22b?w|c6%IXUNU zIj=FY@LEB59j(&lR7MO*EbxVQUh|7A2Zp=<44nax;u)YO0Q)fCe}x70URR zNQ+O?rMV^$7&J>8%gab3l6etWC;Ffh8<+^uh>)=hwYuF;SB6WAXP#HxY-BqUQ514v zenQQTI%j}0oQm@A9BMG!!h#cxuruV$(#0LoMVjs0k^-aI%F(#sD#qnNBe7&pJN=q>5yahxAwY3P+vo0 zk)s(J4?6kg|?3YO?+*v_;XP4=D%p>{U92yy*G;dO*Qyf#-C>etZAU>S2pi7 zX>VbtK@?Z6XFy}Qbd9qkMi0~Fafd8@q`4ri%_iNNR(4Od(_KF8SI*{c^%b4$n$t^s zD(zca>-Xw=cfd_|$DTLwCy2Z&;cXX2wZ74G%XyPgxsKV|%IW92OUW-aE5vA)>U+<& z-9f2bTo$tqZsK8tjeY+BMAD}4)EbtD;Js!~ihmHM$NVIoHSr;u=R)w*vdrmo7ly57 zdz)QC8&Tz2*5?&fQ2-Cpm@mS(xt z7Bq%bw^-WX+d(tSD6@8&NB|bXZwLLPziIn#h59}J0K>1@{{T_8x$*X<5jF1`M+7oy zx+;hvwz{82xMq?&jcuoIDP^~iMWEVRtPl%}`^KIRW-|WKtg!f4wGMc~q}}Zp=-iWf zSy?8ZUAEWft4(3kbNA%cqa9OD>-5=d(%WOmKWtBma`;NeR`^@s4-;Q_m&VuY7MtTO zT!9YTe#BmQAmu> z70tr7>}>o-z*Mi*)>+P2qD z%YW%Nlc#u7!ygZ=H9c#^R^s1Pf$c4{Jpq}X>F!@L1i5T)xp{UO@1@vXt#gM9DjJl0 zPQJ2&(lU|FfA+-GJg7l3LgvnR{MjFQ+aK7Rn4`+u+Q!>ThXfx3)cjMf_}^Z*)o)er zF60H2NiHp{tp)-~DI^PN=a6PygLw-byab?Ue|z{(@UCwa>pCpD#1h!}b?%MUv3S=X z=~wz4^=q-@piduwYoZ!?n$ zr75c?XKsl&?P)t(Rb;i(r>9Zz3qp&-9}YBadtUogn*RWe=D6`Km6%}59*{Kqh%AaZ z`J`a-!6bjXj69AaQcf8BY2kl|z9jJ#%({%0Nup@!a?pA6xR33KT$vumk=7{K3<2hTC-F~;JYjzv zx|QamtLj>ge%CCQGuuX{a#UN{MmNaT`wx~xURY@t0YrO1?z}1RqIlJH9a&pXva&|I zktdZNa`1lq83ctyM9Hu*ahZsdaXv`f?BVdZYzkE|bro9Bx`ZyI+?!Wzbn3Q!ucJJv zRjY`8#)KydZ%%ZTucp^Yy*~Zc?DH*d-tzrzyg#LR#?saleNsS@7Z*}trb#CZOoA=V z9B4=p#*v+&)!I!h6T>ZI8i;(wi-C1D$Pq#qWP)ePnH7)`fO5r0z>u7wDh zeE$Hnwd}Ek-y^Xs(nM59cfMrbG0R5aEK3*;C`oH(kV}8!4MH1RTVxW*(#f~Y2Q3s( z+M!lsAyOleRF&rhX0@I&c$q3x)Ao1wc^0?2Yh4p*-ul@mw|IsYo(^iIU*1VwzGtd- zR<*SCzWV5N{u#B=w4EsHH-2TctWwK8yvZ6J=!gZy?c|pVBTE~9m95}nUi$Rg+Q!=q zs{0oHI=1oFi>=FVJiBG?|rHJ3irebuoLXX9T>;%8p8)X89w=-w`I( z)=fK4^KG=WVxrtR3o0W3g_n%u0oOUf`G+_)?H{wFeXC3H{C5yXG#1(-NF{aMBvSe4 zh!BmTF&Qd^5LHi3^zpe#2k_&>Qd`(-7S<(EW3;-|VuChPb#mACYhcMR5RTQjM{DsE{NS&q}_KLI>t zsA-YIrgv-mjDW>Fc?5X$lo1k+Mv)i+l!$* zwsuXg>83cJGr#s`og`O_8jyC}M>8+k0zTx83rPO}GMf6I;hniU{pXFXHBB}hS47or zmMt?@WlL>dz+WYt$!~6_YF?X5#JEWsHhslrLJF&oHy?v^aJ6bHziQz-YV6cjkG0+2 zm+0-W*FzI2R7o`CrO5epN-e1N?Y;d^?l=Ak{rhQrDzfnB!M}r=R2CO@9s-UV{{V~^kE@0VhZdD;-(o6jV{2ch>;a`k;jie%(7>J8Pn8J#=?=YRl#6{d4>rfaz8~ zJNSBLNuuz_h;=XQu$KulCXucy>e^EI4qIYGW}XDME9IhEq^};=+P*>Xvi8H^WIq^w z9@`zdPlvVXej)fe!J?58!$I-G>bkx5lLWGoCx*ksR@%}ckj)5ql~q-O56#bpAGO!T zuMPNe_u?JZwyCG<`n}bUhi^3h02%mh`YW}bC@x;s7SpGS*7oeeEU%?YsDlYoSkVYl zs&)P${?&da@%M+cd+7A7DlZlIv%(tohjp#!?{#ez`e9>F3m2(nJZ*l>@bWoxOvosB}#M^cPMhdiN^3z*H?QhYu843`EE;76`A1hjjv-@6H1)t zCupcSN1e&FepMay+V(#T{{U{!kGikNkAvT{H^b@L*Hg6kk*V8iy41GQ8wl<+_PevO zLawGrW4;%YUPGzaUoQ+}Q%&R8){N(s&;tevx;!nlxFG#t#x6-t470IUfUI@cF z%^rc`SoGN@e<_j+WqWIAP34%5Nu->$#8ODIBA>E8DEuPvZ-h0763X&Re4lznMRyo zp0-i7`t`q-+vfq=)-T!(Vr@ zw7j>37*boCpE5b+f<<`Z-(IknPFv+y3U~f@M#|f{9UP~*M{|JyeE6%?}!>W z@C^31H=3>7(CWH&pLMBNc#1Oo3So}SO% zjgxkFk8L{lv-CP~FqG{TuQX%#-8I#9N64Q8d>JoH5QIR?^F&Ln#W{ zCYvtU&HS#08y!mC)-}CWxQbiWx$_L(L|e?AE`#uATGM<#;$1E)ZwGy^SAyo=El+jd z+U%u{{@TY-g4$!E`L@;?gs>Z@j@{!_Yj}s*+3x3(F30w8{hIu1tN6xSU30|xA<;Db zYe3W_g3fJ9TdhVb*&-KGU7M{mLT?)J;@A(H0@@j)U5-@OK7A3=`VPm2B*mq)#q#0jVB6VISHlUdVX zvk_cLq&382Ow`~g(jdFChE|5+GRbQsw<~pTBdS~>`2)v(4Q~)MTEB(ho5X$&zVgND ztQRe*X&34wjQyolNp)4$zq8e>>7Z&lHJ668tyb>h_Vv~= zGHL1d>$|vOfn$qI^CY!4Zwg-7P9?Oux>mhth2CeA^5?}rvR8~e58_$%e+TP65syZ@ zi%PlId^xDgt0koO8j>MHWhJ(eAc_IyNgUBM2tsUlT^J(4>G1w}PJ}rsX;zwCl@wdO zlK8Z}-TapOY;)##3Y8-nM^?1uO)Vv6-P`s355%t<=+KD1#UsyUDk59LC5a+Qtsv)Q 
zk0i1AY}Xi)S*1{}p-BX`YS+S#5NbXN@#osDEe5Tn-f8b^;$IA2Jn=!TX&1KaZtSj? z_om8bwRIY0++n7Q?ntJAK-?eo{{Z4=>p=hRS{d>&&6T)Xm zC0Ec1V+bk&`KBoxu>vIu59rx;eSyVbDmc`iv{JO<>CQ1trP?~{+1krxbK)yyc<5u4 z(vKpm8g}M|(zIIpTC3l?@7(sEf*ugkej513Ux@a12UqxI<8KUWR(c)H!Wrg{`u=-M zR`AZGpdYhp`XqNZ5nL?Fg8JPS<^m#<74+YW;xQfQ&2UkFXH*EQibDQw*;XjJ}A>a)Q!y1tAuD9W9uM|6L`hWJ0 zt7~(3F4Er8&rs9umqRAURndjxy_|tsDbhc)Pl)AaPh$A%@HfU0#im82>iQDicrMpV zwY8S|^HGv3jXKUrE-Zw;UXO>lRcQsrVcQ{XqCh@j@%YS4CyR@7R%thA$v%%)%KP+I z?6tA=IBY#wQNJ%T()z`8Nj+Qiw!7cR{AT!@puLv2r_Zl1nW@?f?JjGSy8A5BAm$A z3aCpQA71$D`z?68#yTFY;@f>w!ghC)Pi=FkTgh{(2xZkxwdJ+t^eqmBxkUF?6Hgp$ zphq0*A3HL7&)GNjaq*?ivmIYm@U)iNe71MxEw6QBEc01MGsPs1+9V{q+?eE$2T)Nq zlG(t^I>*Bil*WG1uVnq3yk)kWw$a^LTF>T>M|@{w1e-#jVq6E}Faf-umi& zN#I`$Hml+o#jUelPNv$$kx`sX*3m|uYvpsbvm<92^M3ar3jKum2k=tl+E~kdW};bG zFhdvy5wK$?j-d4D4{GsW*_Yr=&WV0+F6Pnit}f(?`bnAYUBkIA8lt^|02NBBlEs{Y zNjb0C4}`uDOJsp=meN3SSIXbO;{fIu{W-@TfLG)$pVx6JGO22CcW>TKTBmIj^xNis zhl#5!CK1)84}Ha3TXN{U_0#40^7elNCey5K?jQ>aGAaq59$ZsMa~NcgD%N#yqW&6#a)EV?GCA>7SwYX-I*blg> zvHY7$Mne|f2i;;a4lCkc7Wig6SuT+s9^cH7g1$_c-SUFLdEj&N0h}Id*vxYJ7)msy zPIva|F|$&?N4=6s*)I3H+Q);J=5=whnypSrM(g5+$t&8=wb{z}cf^w0Z3U%Oj1U6= z@_eTOSx~7A#Fh*R8%P^-kzS8&;-bOJN>!f-BTPABz!Eno#F7jGa5n&XWv`hpHTydY z$c zWs(yQ%q5A~k+hHEY~_LL@&V?Fh!1yk(2BFyU*p@-nU2XN#NLHF1l}vL0}b#)AVk0gTns+ZsA89 zkil zUs!H`vtHdz49~bbS}Shfa}!{BYL@7rlFF+IX#%qRr=`|@C3s3bEA3Ik;w8DZj%ii@ z0JHS7lEGIbonNROGCJU&O8Mi(zZNusq}`+~tp&Ubad#Y=PL|$#6A#&~M8De1Ber?= zOL~6KBRbnd9BZ|o7e_CxDv?}?E%cOkynnv@{^#UpUl&F+{o0bf?W|>GwbIh|Jgegw zm&E$7hP)%BN4^M7udIAU)M6J__8LX37c$Ef>nsDyxQ1BT$_pDsnpCx2v&b`WKF`hg zTJuV{xztXLs99KOntqpSbEx>1{{ZaJtoishB1SmKT=n@ACevYJylOQ}JO zc~-meUf;wv^2E~Xws#l0-Ml5NW3jMCwVvYU-%yfwit0%vzIiNWk{OER#c}pc(7>2I zVeyxMw9PYDeGkJ|@k64;8>(tcX?)Y&Mq_)EDVFKn+RX27aWo4u3)`5na8k_@(!Q=Z zM(DzljG-vSEhQg$`>$8cZp+I=osseN@i3iPO}o3bs(li*w07uK_|5T}+eYxaSo}G> zp59$jDJ0c3EBAYCQu@;2CXRTGvLkBZJIO=Kdv?nc+$P6D5=HXe%!iFpKfk%H62S`yo&DXSnb(u_Rn(-wYprJ#cOhjYZMYk0(q6v_?zMmi>WRC zruu#Oys}fM-f9v>Wi+nuaUHx+L183~X*AaJtdhg2U&(Orq=q|t7!+-^R< z*}EjNLfW;AGRYiXYet%d_nGJ3P7f08jV-VlB8)|D&nJcGJ{i>fU$0K9a~eecY#N>M zK=xMF>=pA74Esy~!x{*b$2oZ7j0qi9zP0f0{1f}fm);=L?XSE~;Ehu6RGuKW7x!BE zg2hE5xk+Sw=z&SJ2)Z{gS+MZu03`cB$ZvJ4v*=+ag-sYcbvH z7g48`=ju~0hro$cNah8E%+ND5h{jXqSJvXO7;LW(`?TeVX>-m|yk3#hO>Ue20E5)< zslvW4tB#7E%I$5Y>#e(ax2DI;9uW9@rdXsl`ZkAiE49)?V`)4oCZ(ud9n*cb`ZX;D zycRGdsP}hc?AFquBu0P%+x$cLlOMuQh9kpzeb$){ui{JdbFXWVLX0gM2l+I+NoF2p z=}6V4wvO51vXbGZxKk$NWAuNEJ_&fQ!B*EE6wvQ{J#T+`J)-OWBW*)elJ@;(w(}QD zvy1y39>YmwF`K1|?9X{Qc`gH>Xs7u*{u!gj{{R=fUjG2v{vh!6hKu3N0$pB&l@J?7`tS}9q7 zTJ$x>Q=SW!l_;lDNhq|^Pe|JBwPe=n--+<#&*Gnm`m3g!Z>?Y5UrJ+3Rm{4T#8MKe zG|Z97B3s=TUnoaxvBsn^X2(9U;eXlUG`NlYY2tCD+{Ek#g`|jYrhvwPh;C-GnV^PX z2@JC{d2*6vA>2m4X823?b=9;PH5+|8PYzh=x_dgtFZnc!tpt_Y~ zS&X+gk|UYqZz*h|fdF@Sui$@=ENzADwbz4=z>vdWbvG}=*eL+=Nd{*%1$y$H*wipf5Wel=B%c5AJ#E^^OAZ-GlwnOzNt%P zy}Z6l=AACfLeOqbo8XTIo7wlsvTK^FzOQ$XH%laQv}qcGYX~l`9A9b>&vuuxqRz}uU@R%cGDoH<@H%!}NSIZvn#U2Ov;ddF+A=f+& zr$WqGj+=F?Ss3ALVIvm)9YvCDt%kU8t-B;ABp;l-N&7c^Sk@$Ue-L;*A!%fiQ2O79 z<&x%R#%H>b{5DL|75PSRv9HL0ftu*RU{)fnI{3U*-PEO|<#(n2#)-T1)6a947mKTg zr^!bT8?6=GEcz?nTXp$&vpBC5d{WVEBNm<;w_E#}a~!u)WB?Zmf!qSKFgz>6wLGv9 zjyN7?f2r#_w0h0;`Dwn-Z)_TARg8A(+r)P9BjrJ6;X;s3-1V=2@Sp5)@efhENGI2P z5`kp5aLK4^tU&X?b?3ut!zdtg)PhBNkBxt1e;sK)1BMG-U%{_+uK14R-CVw*t)W{z z8W|(Dw*`DU#fwWbO6O{h6lCJQnolpnP86ugxj84y;X9@7$?D_kt^REA<@Ng1Yfhp~ zG@AF}eOu8x=&zyqyxW>W$7y_m#{hsx&NI`D;Etf=V?5W^e+Zzw(0&#CMe!*K5?x1jh zP64m8d}sSHc=tm1SK*%p*=n8*@~HTy+Iw!upmJ2O9 zd2p7ZSyo6~Zi3=>V&X?231XNrMoCyIG=Pi_N6}voHQUPuZA$#5#P)4%D>J)h0*bbi 
+        if self.num_trials >= self.total_trials:
+            self._exit()
+
+        has_told_study = []
+
+        for trial_idx in range(self.num_trials):
+            work_name = f"objective_work_{trial_idx}"
+            if work_name not in self.ws:
+                objective_work = ObjectiveWork(
+                    script_path=self.script_path,
+                    data_dir=self.data_dir,
+                    cloud_compute=L.CloudCompute("cpu"),
+                )
+                self.ws[work_name] = objective_work
+            if not self.ws[work_name].has_started:
+                trial = self._study.ask(ObjectiveWork.distributions())
+                self.ws[work_name].run(trial_id=trial._trial_id, **trial.params)
+
+            if self.ws[work_name].metric and not self.ws[work_name].has_told_study:
+                self.hi_plot.data.append({"x": -1 * self.ws[work_name].metric, **self.ws[work_name].params})
+                self._study.tell(self.ws[work_name].trial_id, self.ws[work_name].metric)
+                self.ws[work_name].has_told_study = True
+
+            has_told_study.append(self.ws[work_name].has_told_study)
+
+        if all(has_told_study):
+            self.num_trials += self.simultaneous_trials
+
+
+if __name__ == "__main__":
+    app = L.LightningApp(
+        RootHPOFlow(
+            script_path=str(Path(__file__).parent / "pl_script.py"),
+            data_dir="data/hymenoptera_data_version_0",
+            total_trials=6,
+            simultaneous_trials=2,
+        )
+    )
diff --git a/examples/app_hpo/app_wo_ui.py b/examples/app_hpo/app_wo_ui.py
new file mode 100644
index 0000000000000..b8a8448668573
--- /dev/null
+++ b/examples/app_hpo/app_wo_ui.py
@@ -0,0 +1,58 @@
+from pathlib import Path
+
+import optuna
+from objective import ObjectiveWork
+
+import lightning as L
+from lightning.app.structures import Dict
+
+
+class RootHPOFlow(L.LightningFlow):
+    def __init__(self, script_path, data_dir, total_trials, simultaneous_trials):
+        super().__init__()
+        self.script_path = script_path
+        self.data_dir = data_dir
+        self.total_trials = total_trials
+        self.simultaneous_trials = simultaneous_trials
+        self.num_trials = simultaneous_trials
+        self._study = optuna.create_study()
+        self.ws = Dict()
+
+    def run(self):
+        if self.num_trials >= self.total_trials:
+            self._exit()
+
+        has_told_study = []
+
+        for trial_idx in range(self.num_trials):
+            work_name = f"objective_work_{trial_idx}"
+            if work_name not in self.ws:
+                objective_work = ObjectiveWork(
+                    script_path=self.script_path,
+                    data_dir=self.data_dir,
+                    cloud_compute=L.CloudCompute("cpu"),
+                )
+                self.ws[work_name] = objective_work
+            if not self.ws[work_name].has_started:
+                trial = self._study.ask(ObjectiveWork.distributions())
+                self.ws[work_name].run(trial_id=trial._trial_id, **trial.params)
+
+            if self.ws[work_name].metric and not self.ws[work_name].has_told_study:
+                self._study.tell(self.ws[work_name].trial_id, self.ws[work_name].metric)
+                self.ws[work_name].has_told_study = True
+
+            has_told_study.append(self.ws[work_name].has_told_study)
+
+        if all(has_told_study):
+            self.num_trials += self.simultaneous_trials
+
+
+if __name__ == "__main__":
+    app = L.LightningApp(
+        RootHPOFlow(
+            script_path=str(Path(__file__).parent
/ "pl_script.py"), + data_dir="data/hymenoptera_data_version_0", + total_trials=6, + simultaneous_trials=2, + ) + ) diff --git a/examples/app_hpo/download_data.py b/examples/app_hpo/download_data.py new file mode 100644 index 0000000000000..d82b86a9dee95 --- /dev/null +++ b/examples/app_hpo/download_data.py @@ -0,0 +1,5 @@ +from utils import download_data + +data_dir = "hymenoptera_data_version_0" +download_url = f"https://pl-flash-data.s3.amazonaws.com/{data_dir}.zip" +download_data(download_url, "./data") diff --git a/examples/app_hpo/hyperplot.py b/examples/app_hpo/hyperplot.py new file mode 100644 index 0000000000000..105285822705c --- /dev/null +++ b/examples/app_hpo/hyperplot.py @@ -0,0 +1,34 @@ +import lightning as L +from lightning.app.frontend.stream_lit import StreamlitFrontend +from lightning.app.utilities.state import AppState + + +class HiPlotFlow(L.LightningFlow): + def __init__(self): + super().__init__() + self.data = [] + + def run(self): + pass + + def configure_layout(self): + return StreamlitFrontend(render_fn=render_fn) + + +def render_fn(state: AppState): + import json + + import hiplot as hip + import streamlit as st + from streamlit_autorefresh import st_autorefresh + + st.set_page_config(layout="wide") + st_autorefresh(interval=1000, limit=None, key="refresh") + + if not state.data: + st.write("No data available yet ! Stay tuned") + return + + xp = hip.Experiment.from_iterable(state.data) + ret_val = xp.to_streamlit(ret="selected_uids", key="hip").display() + st.markdown("hiplot returned " + json.dumps(ret_val)) diff --git a/examples/app_hpo/objective.py b/examples/app_hpo/objective.py new file mode 100644 index 0000000000000..f2d1ebb6a747f --- /dev/null +++ b/examples/app_hpo/objective.py @@ -0,0 +1,63 @@ +import os +import tempfile +from datetime import datetime +from typing import Optional + +import pandas as pd +import torch +from optuna.distributions import CategoricalDistribution, LogUniformDistribution +from torchmetrics import Accuracy + +import lightning as L +from lightning.app.components.python import TracerPythonScript + + +class ObjectiveWork(TracerPythonScript): + def __init__(self, script_path: str, data_dir: str, cloud_compute: Optional[L.CloudCompute]): + timestamp = datetime.now().strftime("%H:%M:%S") + tmpdir = tempfile.TemporaryDirectory().name + submission_path = os.path.join(tmpdir, f"{timestamp}.csv") + best_model_path = os.path.join(tmpdir, f"{timestamp}.model.pt") + super().__init__( + script_path, + script_args=[ + f"--train_data_path={data_dir}/train", + f"--test_data_path={data_dir}/test", + f"--submission_path={submission_path}", + f"--best_model_path={best_model_path}", + ], + cloud_compute=cloud_compute, + ) + self.data_dir = data_dir + self.best_model_path = best_model_path + self.submission_path = submission_path + self.metric = None + self.trial_id = None + self.metric = None + self.params = None + self.has_told_study = False + + def run(self, trial_id: int, **params): + self.trial_id = trial_id + self.params = params + self.script_args.extend([f"--{k}={v}" for k, v in params.items()]) + super().run() + self.compute_metric() + + def _to_labels(self, path: str): + return torch.from_numpy(pd.read_csv(path).label.values) + + def compute_metric(self): + self.metric = -1 * float( + Accuracy()( + self._to_labels(self.submission_path), + self._to_labels(f"{self.data_dir}/ground_truth.csv"), + ) + ) + + @staticmethod + def distributions(): + return { + "backbone": CategoricalDistribution(["resnet18", "resnet34"]), + "learning_rate": 
LogUniformDistribution(0.0001, 0.1), + } diff --git a/examples/app_hpo/pl_script.py b/examples/app_hpo/pl_script.py new file mode 100644 index 0000000000000..bbc453798431a --- /dev/null +++ b/examples/app_hpo/pl_script.py @@ -0,0 +1,43 @@ +import argparse +import os + +import pandas as pd +import torch +from flash import Trainer +from flash.image import ImageClassificationData, ImageClassifier + +# Parse arguments provided by the Work. +parser = argparse.ArgumentParser() +parser.add_argument("--train_data_path", type=str, required=True) +parser.add_argument("--submission_path", type=str, required=True) +parser.add_argument("--test_data_path", type=str, required=True) +parser.add_argument("--best_model_path", type=str, required=True) +# Optional +parser.add_argument("--backbone", type=str, default="resnet18") +parser.add_argument("--learning_rate", type=float, default=0.01) +args = parser.parse_args() + + +datamodule = ImageClassificationData.from_folders( + train_folder=args.train_data_path, + batch_size=8, +) + +model = ImageClassifier(datamodule.num_classes, backbone=args.backbone) +trainer = Trainer(fast_dev_run=True) +trainer.fit(model, datamodule=datamodule) +trainer.save_checkpoint(args.best_model_path) + +datamodule = ImageClassificationData.from_folders( + predict_folder=args.test_data_path, + batch_size=8, +) + +predictions = Trainer().predict(model, datamodule=datamodule) +submission_data = [ + {"filename": os.path.basename(p["metadata"]["filepath"]), "label": torch.argmax(p["preds"]).item()} + for batch in predictions + for p in batch +] +df = pd.DataFrame(submission_data) +df.to_csv(args.submission_path, index=False) diff --git a/examples/app_hpo/requirements.txt b/examples/app_hpo/requirements.txt new file mode 100644 index 0000000000000..bd85880da2237 --- /dev/null +++ b/examples/app_hpo/requirements.txt @@ -0,0 +1,3 @@ +optuna +lightning-flash[image,serve] == 0.7.0 +hiplot diff --git a/examples/app_hpo/utils.py b/examples/app_hpo/utils.py new file mode 100644 index 0000000000000..3e8960ea893fc --- /dev/null +++ b/examples/app_hpo/utils.py @@ -0,0 +1,54 @@ +import os +import os.path +import tarfile +import zipfile + +import requests + + +def download_data(url: str, path: str = "data/", verbose: bool = False) -> None: + """Download file with progressbar. 
+ + # Code taken from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603 + # __author__ = "github.com/ruxi" + # __license__ = "MIT" + + Usage: + download_file('http://web4host.net/5MB.zip') + """ + if url == "NEED_TO_BE_CREATED": + raise NotImplementedError + + if not os.path.exists(path): + os.makedirs(path) + local_filename = os.path.join(path, url.split("/")[-1]) + r = requests.get(url, stream=True, verify=False) + file_size = int(r.headers["Content-Length"]) if "Content-Length" in r.headers else 0 + chunk_size = 1024 + num_bars = int(file_size / chunk_size) + if verbose: + print(dict(file_size=file_size)) + print(dict(num_bars=num_bars)) + + if not os.path.exists(local_filename): + with open(local_filename, "wb") as fp: + for chunk in r.iter_content(chunk_size=chunk_size): + fp.write(chunk) # type: ignore + + def extract_tarfile(file_path: str, extract_path: str, mode: str): + if os.path.exists(file_path): + with tarfile.open(file_path, mode=mode) as tar_ref: + for member in tar_ref.getmembers(): + try: + tar_ref.extract(member, path=extract_path, set_attrs=False) + except PermissionError: + raise PermissionError(f"Could not extract tar file {file_path}") + + if ".zip" in local_filename: + if os.path.exists(local_filename): + with zipfile.ZipFile(local_filename, "r") as zip_ref: + zip_ref.extractall(path) + elif local_filename.endswith(".tar.gz") or local_filename.endswith(".tgz"): + extract_tarfile(local_filename, path, "r:gz") + elif local_filename.endswith(".tar.bz2") or local_filename.endswith(".tbz"): + extract_tarfile(local_filename, path, "r:bz2") diff --git a/examples/app_layout/.lightning b/examples/app_layout/.lightning new file mode 100644 index 0000000000000..48e8408f9e81e --- /dev/null +++ b/examples/app_layout/.lightning @@ -0,0 +1 @@ +name: layout-example diff --git a/examples/app_layout/__init__.py b/examples/app_layout/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/app_layout/app.py b/examples/app_layout/app.py new file mode 100644 index 0000000000000..b7feb3e6d07be --- /dev/null +++ b/examples/app_layout/app.py @@ -0,0 +1,101 @@ +"""An example showcasing how `configure_layout` can be used to nest user interfaces of different flows. + +Run the app: + +lightning run app examples/layout/demo.py + +This starts one server for each flow that returns a UI. Access the UI at the link printed in the terminal. +""" + +import os +from time import sleep + +import lightning as L +from lightning.app.frontend.stream_lit import StreamlitFrontend +from lightning.app.frontend.web import StaticWebFrontend + + +class C11(L.LightningFlow): + def __init__(self): + super().__init__() + self.message = "Hello Streamlit!" 
+ + def run(self): + pass + + def configure_layout(self): + return StreamlitFrontend(render_fn=render_c11) + + +def render_c11(state): + import streamlit as st + + st.write(state.message) + + +class C21(L.LightningFlow): + def __init__(self): + super().__init__() + + def run(self): + pass + + def configure_layout(self): + return StaticWebFrontend(os.path.join(os.path.dirname(__file__), "ui1")) + + +class C22(L.LightningFlow): + def __init__(self): + super().__init__() + + def run(self): + pass + + def configure_layout(self): + return StaticWebFrontend(os.path.join(os.path.dirname(__file__), "ui2")) + + +class C1(L.LightningFlow): + def __init__(self): + super().__init__() + self.c11 = C11() + + def run(self): + pass + + +class C2(L.LightningFlow): + def __init__(self): + super().__init__() + self.c21 = C21() + self.c22 = C22() + + def run(self): + pass + + def configure_layout(self): + return [ + dict(name="one", content=self.c21), + dict(name="two", content=self.c22), + ] + + +class Root(L.LightningFlow): + def __init__(self): + super().__init__() + self.c1 = C1() + self.c2 = C2() + + def run(self): + sleep(10) + self._exit("Layout End") + + def configure_layout(self): + return [ + dict(name="one", content=self.c1.c11), + dict(name="two", content=self.c2), + dict(name="three", content="https://lightning.ai"), + ] + + +app = L.LightningApp(Root()) diff --git a/examples/app_layout/ui1/index.html b/examples/app_layout/ui1/index.html new file mode 100644 index 0000000000000..7019634b87fd5 --- /dev/null +++ b/examples/app_layout/ui1/index.html @@ -0,0 +1,10 @@ + + + + + One + + +One + + diff --git a/examples/app_layout/ui2/index.html b/examples/app_layout/ui2/index.html new file mode 100644 index 0000000000000..f9b6432e4963d --- /dev/null +++ b/examples/app_layout/ui2/index.html @@ -0,0 +1,10 @@ + + + + + Two + + +Two + + diff --git a/examples/app_multi_node/.gitignore b/examples/app_multi_node/.gitignore new file mode 100644 index 0000000000000..33eb0ef33c61c --- /dev/null +++ b/examples/app_multi_node/.gitignore @@ -0,0 +1,2 @@ +.storage/ +.shared/ diff --git a/examples/app_multi_node/.lightning b/examples/app_multi_node/.lightning new file mode 100644 index 0000000000000..7befcc74ea6d3 --- /dev/null +++ b/examples/app_multi_node/.lightning @@ -0,0 +1 @@ +name: multi-node-demo diff --git a/examples/app_multi_node/multi_node.py b/examples/app_multi_node/multi_node.py new file mode 100644 index 0000000000000..adc8df1c74815 --- /dev/null +++ b/examples/app_multi_node/multi_node.py @@ -0,0 +1,36 @@ +import lightning as L + + +class Work(L.LightningWork): + def __init__(self, cloud_compute: L.CloudCompute = L.CloudCompute(), **kwargs): + super().__init__(parallel=True, **kwargs, cloud_compute=cloud_compute) + + def run(self, main_address="localhost", main_port=1111, world_size=1, rank=0, init=False): + if init: + return + + import torch.distributed + + print(f"Initializing process group: {main_address=}, {main_port=}, {world_size=}, {rank=}") + torch.distributed.init_process_group( + backend="gloo", init_method=f"tcp://{main_address}:{main_port}", world_size=world_size, rank=rank + ) + gathered = [torch.zeros(1) for _ in range(world_size)] + torch.distributed.all_gather(gathered, torch.tensor([rank]).float()) + print(gathered) + + +class MultiNodeDemo(L.LightningFlow): + def __init__(self): + super().__init__() + self.work0 = Work() + self.work1 = Work() + + def run(self): + self.work0.run(init=True) + if self.work0.internal_ip: + self.work0.run(main_address=self.work0.internal_ip, 
main_port=self.work0.port, world_size=2, rank=0) + self.work1.run(main_address=self.work0.internal_ip, main_port=self.work0.port, world_size=2, rank=1) + + +app = L.LightningApp(MultiNodeDemo()) diff --git a/examples/app_multi_node/requirements.txt b/examples/app_multi_node/requirements.txt new file mode 100644 index 0000000000000..12c6d5d5eac2a --- /dev/null +++ b/examples/app_multi_node/requirements.txt @@ -0,0 +1 @@ +torch diff --git a/examples/app_payload/.lightning b/examples/app_payload/.lightning new file mode 100644 index 0000000000000..933d6ed9a73e1 --- /dev/null +++ b/examples/app_payload/.lightning @@ -0,0 +1 @@ +name: payload diff --git a/examples/app_payload/app.py b/examples/app_payload/app.py new file mode 100644 index 0000000000000..66de76d964adc --- /dev/null +++ b/examples/app_payload/app.py @@ -0,0 +1,31 @@ +import lightning as L +from lightning.app.storage.payload import Payload + + +class SourceFileWriterWork(L.LightningWork): + def __init__(self): + super().__init__() + self.value = None + + def run(self): + self.value = Payload(42) + + +class DestinationWork(L.LightningWork): + def run(self, payload): + assert payload.value == 42 + + +class RootFlow(L.LightningFlow): + def __init__(self): + super().__init__() + self.src = SourceFileWriterWork() + self.dst = DestinationWork() + + def run(self): + self.src.run() + self.dst.run(self.src.value) + self._exit("Application End!") + + +app = L.LightningApp(RootFlow()) diff --git a/examples/app_pickle_or_not/app.py b/examples/app_pickle_or_not/app.py new file mode 100644 index 0000000000000..bda24cb5b7967 --- /dev/null +++ b/examples/app_pickle_or_not/app.py @@ -0,0 +1,55 @@ +import logging + +import lightning as L + +logger = logging.getLogger(__name__) + + +class PickleChecker(L.LightningWork): + def run(self, pickle_image: bytes): + parsed = self.parse_image(pickle_image) + if parsed == b"it is a pickle": + return True + elif parsed == b"it is not a pickle": + return False + else: + raise Exception("Couldn't parse the image") + + @staticmethod + def parse_image(image_str: bytes): + return image_str + + +class Slack(L.LightningFlow): + def __init__(self): + super().__init__() + + @staticmethod + def send_message(message): + logger.info(f"Sending message: {message}") + + def run(self): + pass + + +class RootComponent(L.LightningFlow): + def __init__(self): + super().__init__() + self.pickle_checker = PickleChecker() + self.slack = Slack() + self.counter = 3 + + def run(self): + if self.counter > 0: + logger.info(f"Running the app {self.counter}") + image_str = b"it is not a pickle" + if self.pickle_checker.run(image_str): + self.slack.send_message("It's a pickle!") + else: + self.slack.send_message("It's not a pickle!") + self.counter -= 1 + else: + self._exit("Pickle or Not End") + + +app = L.LightningApp(RootComponent()) diff --git a/examples/app_pickle_or_not/requirements.txt b/examples/app_pickle_or_not/requirements.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/app_v0/.gitignore b/examples/app_v0/.gitignore new file mode 100644 index 0000000000000..186149fa056fe --- /dev/null +++ b/examples/app_v0/.gitignore @@ -0,0 +1,2 @@ +.storage +.lightning diff --git a/examples/app_v0/README.md b/examples/app_v0/README.md new file mode 100644 index 0000000000000..516283ae9cedd --- /dev/null +++ b/examples/app_v0/README.md @@ -0,0 +1,18 @@ +# v0 app + +This app is a flow-only app with nothing fancy. +This is meant to present the basic functionalities of the lightning framework. 
+ +## Starting it + +Local + +```bash +lightning run app app.py +``` + +Cloud + +```bash +lightning run app app.py --cloud +``` diff --git a/examples/app_v0/__init__.py b/examples/app_v0/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/examples/app_v0/app.py b/examples/app_v0/app.py new file mode 100644 index 0000000000000..26345f5b43e46 --- /dev/null +++ b/examples/app_v0/app.py @@ -0,0 +1,49 @@ +# v0_app.py +import os +from datetime import datetime +from time import sleep + +import lightning as L +from lightning.app.frontend.web import StaticWebFrontend + + +class Word(L.LightningFlow): + def __init__(self, letter): + super().__init__() + self.letter = letter + self.repeats = letter + + def run(self): + self.repeats += self.letter + + def configure_layout(self): + return StaticWebFrontend(os.path.join(os.path.dirname(__file__), f"ui/{self.letter}")) + + +class V0App(L.LightningFlow): + def __init__(self): + super().__init__() + self.aas = Word("a") + self.bbs = Word("b") + self.counter = 0 + + def run(self): + now = datetime.now() + now = now.strftime("%H:%M:%S") + log = {"time": now, "a": self.aas.repeats, "b": self.bbs.repeats} + print(log) + self.aas.run() + self.bbs.run() + + sleep(2.0) + self.counter += 1 + + def configure_layout(self): + tab1 = {"name": "Tab_1", "content": self.aas} + tab2 = {"name": "Tab_2", "content": self.bbs} + tab3 = {"name": "Tab_3", "content": "https://tensorboard.dev/experiment/8m1aX0gcQ7aEmH0J7kbBtg/#scalars"} + + return [tab1, tab2, tab3] + + +app = L.LightningApp(V0App()) diff --git a/examples/app_v0/emulate_ui.py b/examples/app_v0/emulate_ui.py new file mode 100644 index 0000000000000..8a5b45c1c3904 --- /dev/null +++ b/examples/app_v0/emulate_ui.py @@ -0,0 +1,19 @@ +from time import sleep + +import requests + +from lightning.app.utilities.state import headers_for + +headers = headers_for({}) +headers["X-Lightning-Type"] = "DEFAULT" + +res = requests.get("http://127.0.0.1:7501/state", headers=headers) + + +res = requests.post("http://127.0.0.1:7501/state", json={"stage": "running"}, headers=headers) +print(res) + +sleep(10) + +res = requests.post("http://127.0.0.1:7501/state", json={"stage": "stopping"}, headers=headers) +print(res) diff --git a/examples/app_v0/requirements.txt b/examples/app_v0/requirements.txt new file mode 100644 index 0000000000000..edfce786a4d18 --- /dev/null +++ b/examples/app_v0/requirements.txt @@ -0,0 +1 @@ +py diff --git a/examples/app_v0/ui/a/index.html b/examples/app_v0/ui/a/index.html new file mode 100644 index 0000000000000..6ddb9a5a1323c --- /dev/null +++ b/examples/app_v0/ui/a/index.html @@ -0,0 +1 @@ +
Hello from component A
diff --git a/examples/app_v0/ui/b/index.html b/examples/app_v0/ui/b/index.html new file mode 100644 index 0000000000000..3bfd9e24cb7f7 --- /dev/null +++ b/examples/app_v0/ui/b/index.html @@ -0,0 +1 @@ +
Hello from component B
diff --git a/tests/tests_app_examples/__init__.py b/tests/tests_app_examples/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app_examples/collect_failures/.lightning b/tests/tests_app_examples/collect_failures/.lightning new file mode 100644 index 0000000000000..2d3c5c666d1b7 --- /dev/null +++ b/tests/tests_app_examples/collect_failures/.lightning @@ -0,0 +1 @@ +name: collect_failure diff --git a/tests/tests_app_examples/collect_failures/__init__.py b/tests/tests_app_examples/collect_failures/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app_examples/collect_failures/app.py b/tests/tests_app_examples/collect_failures/app.py new file mode 100644 index 0000000000000..7f82f2367775d --- /dev/null +++ b/tests/tests_app_examples/collect_failures/app.py @@ -0,0 +1,46 @@ +import logging +import sys +import time + +from lightning_app import LightningApp, LightningFlow, LightningWork + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) +logger.addHandler(logging.StreamHandler(sys.stdout)) + + +class SimpleWork(LightningWork): + def __init__(self): + super().__init__(cache_calls=False, parallel=True) + self.is_running_now = False + + def run(self): + self.is_running_now = True + print("work_is_running") + for i in range(1, 10): + time.sleep(1) + if i % 5 == 0: + raise Exception(f"invalid_value_of_i_{i}") + print(f"good_value_of_i_{i}") + + +class RootFlow(LightningFlow): + def __init__(self): + super().__init__() + self.simple_work = SimpleWork() + + def run(self): + print("useless_garbage_log_that_is_always_there_to_overload_logs") + self.simple_work.run() + if not self.simple_work.is_running_now: + pass + # work is not ready yet + print("waiting_for_work_to_be_ready") + else: + print("flow_and_work_are_running") + logger.info("logger_flow_work") + time.sleep(0.1) + + +if __name__ == "__main__": + app = LightningApp(RootFlow()) diff --git a/tests/tests_app_examples/collect_failures/requirements.txt b/tests/tests_app_examples/collect_failures/requirements.txt new file mode 100644 index 0000000000000..7800f0fad3fff --- /dev/null +++ b/tests/tests_app_examples/collect_failures/requirements.txt @@ -0,0 +1 @@ +redis diff --git a/tests/tests_app_examples/components/__init__.py b/tests/tests_app_examples/components/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app_examples/components/python/__init__.py b/tests/tests_app_examples/components/python/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app_examples/components/python/test_scripts.py b/tests/tests_app_examples/components/python/test_scripts.py new file mode 100644 index 0000000000000..4a3084832a574 --- /dev/null +++ b/tests/tests_app_examples/components/python/test_scripts.py @@ -0,0 +1,40 @@ +import os + +import pytest +from click.testing import CliRunner +from tests_app import _PROJECT_ROOT + +from lightning_app.cli.lightning_cli import run_app +from lightning_app.testing.helpers import run_script, RunIf + + +@RunIf(pytorch_lightning=True) +@pytest.mark.parametrize( + "file", + [ + pytest.param("component_popen.py"), + pytest.param("component_tracer.py"), + ], +) +def test_scripts(file): + run_script(str(os.path.join(_PROJECT_ROOT, f"examples/app_components/python/{file}"))) + + +@pytest.mark.skip(reason="causing some issues with CI, not sure if the test is actually needed") +@RunIf(pytorch_lightning=True) +def test_components_app_example(): + + 
runner = CliRunner() + result = runner.invoke( + run_app, + [ + os.path.join(_PROJECT_ROOT, "examples/app_components/python/app.py"), + "--blocking", + "False", + "--open-ui", + "False", + ], + catch_exceptions=False, + ) + assert result.exit_code == 0 + assert "tracer script succeed" in result.stdout diff --git a/tests/tests_app_examples/core_features_app/app.py b/tests/tests_app_examples/core_features_app/app.py new file mode 100644 index 0000000000000..069384f721290 --- /dev/null +++ b/tests/tests_app_examples/core_features_app/app.py @@ -0,0 +1,17 @@ +import os + +from lightning_app.core import LightningApp, LightningFlow + + +class EnvVarTestApp(LightningFlow): + def __init__(self): + super().__init__() + + def run(self): + # these env vars are set here: tests/tests_app_examples/test_core_features_app.py:15 + assert os.getenv("FOO", "") == "bar" + assert os.getenv("BLA", "") == "bloz" + self._exit() + + +app = LightningApp(EnvVarTestApp()) diff --git a/tests/tests_app_examples/custom_work_dependencies/.lightning b/tests/tests_app_examples/custom_work_dependencies/.lightning new file mode 100644 index 0000000000000..869a6055873da --- /dev/null +++ b/tests/tests_app_examples/custom_work_dependencies/.lightning @@ -0,0 +1 @@ +name: work-dependency-checker diff --git a/tests/tests_app_examples/custom_work_dependencies/__init__.py b/tests/tests_app_examples/custom_work_dependencies/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app_examples/custom_work_dependencies/app.py b/tests/tests_app_examples/custom_work_dependencies/app.py new file mode 100644 index 0000000000000..fb85c911906bb --- /dev/null +++ b/tests/tests_app_examples/custom_work_dependencies/app.py @@ -0,0 +1,56 @@ +import os + +from lightning_app import BuildConfig, CloudCompute, LightningApp, LightningFlow, LightningWork + + +class CustomBuildConfig(BuildConfig): + def build_commands(self): + return ["sudo apt update", "sudo apt install redis", "pip install lmdb"] + + +class WorkWithCustomDeps(LightningWork): + def __init__(self, cloud_compute: CloudCompute = CloudCompute(), **kwargs): + build_config = CustomBuildConfig(requirements=["numpy", "pandas", "py"]) + super().__init__(parallel=True, **kwargs, cloud_compute=cloud_compute, cloud_build_config=build_config) + + def run(self): + # installed by the build commands and by requirements in the build config + import lmdb + import numpy as np + import pandas as pd + + print("installed numpy version:", np.__version__) + print("installed pandas version:", pd.__version__) + print("installed lmdb version:", lmdb.__version__) + + +class WorkWithCustomBaseImage(LightningWork): + def __init__(self, cloud_compute: CloudCompute = CloudCompute(), **kwargs): + # this image has been created from ghcr.io/gridai/base-images:v1.8-cpu + # by just adding an empty file at /content/.e2e_test + custom_image = "ghcr.io/gridai/image-for-testing-custom-images-in-e2e" + build_config = BuildConfig(image=custom_image) + super().__init__(parallel=True, **kwargs, cloud_compute=cloud_compute, cloud_build_config=build_config) + + def run(self): + # checking the existence of the file - this file had been added to the custom base image + assert ".e2e_test" in os.listdir("/content/"), "file not found" + + +class CustomWorkBuildConfigChecker(LightningFlow): + def run(self): + # create dynamically the work at runtime + if not hasattr(self, "work1"): + self.work1 = WorkWithCustomDeps() + if not hasattr(self, "work2"): + self.work2 = WorkWithCustomBaseImage() + + 
self.work1.run() + self.work2.run() + + if self.work1.has_succeeded and self.work2.has_succeeded: + print("--- Custom Work Dependency checker End ----") + self._exit() + + +app = LightningApp(CustomWorkBuildConfigChecker()) diff --git a/tests/tests_app_examples/idle_timeout/.lightning b/tests/tests_app_examples/idle_timeout/.lightning new file mode 100644 index 0000000000000..2f0f426f01963 --- /dev/null +++ b/tests/tests_app_examples/idle_timeout/.lightning @@ -0,0 +1 @@ +name: idle_timeout_1 diff --git a/tests/tests_app_examples/idle_timeout/__init__.py b/tests/tests_app_examples/idle_timeout/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/tests/tests_app_examples/idle_timeout/app.py b/tests/tests_app_examples/idle_timeout/app.py new file mode 100644 index 0000000000000..ff45f5332bcaa --- /dev/null +++ b/tests/tests_app_examples/idle_timeout/app.py @@ -0,0 +1,71 @@ +import pathlib + +from lightning_app import CloudCompute, LightningApp, LightningFlow, LightningWork +from lightning_app.storage.path import artifacts_path, filesystem +from lightning_app.utilities.enum import WorkStageStatus, WorkStopReasons + + +class SourceFileWriterWork(LightningWork): + def __init__(self): + super().__init__(cache_calls=False, parallel=True, cloud_compute=CloudCompute(idle_timeout=5)) + self.counter = 0 + self.value = None + self.path = None + + def run(self): + self.path = "lit://boring_file.txt" + with open(self.path, "w") as f: + f.write("path") + self.counter += 1 + + +class DestinationWork(LightningWork): + def run(self, path): + assert path.exists() + + +class RootFlow(LightningFlow): + def __init__(self): + super().__init__() + self.make_check = True + self.work = SourceFileWriterWork() + self.dest_work = DestinationWork(parallel=True) + + def run(self): + if self.work.counter == 0: + self.work.run() + + elif ( + self.work.status.stage == WorkStageStatus.STOPPED + and self.work.status.reason == WorkStopReasons.SIGTERM_SIGNAL_HANDLER + and self.make_check + ): + succeeded_status = self.work.statuses[-3] + stopped_status_pending = self.work.statuses[-2] + stopped_status_sigterm = self.work.statuses[-1] + assert succeeded_status.stage == WorkStageStatus.SUCCEEDED + assert stopped_status_pending.stage == WorkStageStatus.STOPPED + assert stopped_status_pending.reason == WorkStopReasons.PENDING + assert stopped_status_sigterm.stage == WorkStageStatus.STOPPED + assert stopped_status_sigterm.reason == WorkStopReasons.SIGTERM_SIGNAL_HANDLER + # Note: Account for the controlplane, k8s, SIGTERM handler delays. 
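+            # (descriptive note) The 10s and 120s bounds below are loose upper limits allowing for those delays, not exact timings.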
+ assert (stopped_status_pending.timestamp - succeeded_status.timestamp) < 10 + assert (stopped_status_sigterm.timestamp - stopped_status_pending.timestamp) < 120 + fs = filesystem() + destination_path = artifacts_path(self.work) / pathlib.Path(*self.work.path.resolve().parts[1:]) + assert fs.exists(destination_path) + self.dest_work.run(self.work.path) + self.make_check = False + print("Successfully stopped SourceFileWriterWork.") + + if self.dest_work.status.stage == WorkStageStatus.SUCCEEDED: + print("Stopping work") + self.dest_work.stop() + + if self.dest_work.status.stage == WorkStageStatus.STOPPED: + print(self.dest_work.statuses) + print("Application End") + self._exit() + + +app = LightningApp(RootFlow()) diff --git a/tests/tests_app_examples/layout.py b/tests/tests_app_examples/layout.py new file mode 100644 index 0000000000000..ce65d3d9a825b --- /dev/null +++ b/tests/tests_app_examples/layout.py @@ -0,0 +1,24 @@ +import os + +from click.testing import CliRunner +from tests_app import _PROJECT_ROOT + +from lightning_app.cli.lightning_cli import run_app + + +def test_layout_example(): + + runner = CliRunner() + result = runner.invoke( + run_app, + [ + os.path.join(_PROJECT_ROOT, "examples/app_layout/app.py"), + "--blocking", + "False", + "--open-ui", + "False", + ], + catch_exceptions=False, + ) + assert "Layout End" in str(result.stdout_bytes) + assert result.exit_code == 0 diff --git a/tests/tests_app_examples/pickle_or_not.py b/tests/tests_app_examples/pickle_or_not.py new file mode 100644 index 0000000000000..d55f39b2db28c --- /dev/null +++ b/tests/tests_app_examples/pickle_or_not.py @@ -0,0 +1,24 @@ +import os + +from click.testing import CliRunner +from tests_app import _PROJECT_ROOT + +from lightning_app.cli.lightning_cli import run_app + + +def test_pickle_or_not_example(): + + runner = CliRunner() + result = runner.invoke( + run_app, + [ + os.path.join(_PROJECT_ROOT, "examples/app_pickle_or_not/app.py"), + "--blocking", + "False", + "--open-ui", + "False", + ], + catch_exceptions=False, + ) + assert "Pickle or Not End" in str(result.stdout_bytes) + assert result.exit_code == 0 diff --git a/tests/tests_app_examples/test_boring_app.py b/tests/tests_app_examples/test_boring_app.py new file mode 100644 index 0000000000000..1f681260de5c2 --- /dev/null +++ b/tests/tests_app_examples/test_boring_app.py @@ -0,0 +1,23 @@ +import os + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.testing import run_app_in_cloud, wait_for + + +@pytest.mark.cloud +def test_boring_app_example_cloud() -> None: + with run_app_in_cloud(os.path.join(_PROJECT_ROOT, "examples/app_boring/"), app_name="app_dynamic.py") as ( + _, + view_page, + _, + ): + + def check_hello_there(*_, **__): + locator = view_page.frame_locator("iframe").locator('ul:has-text("Hello there!")') + locator.wait_for(timeout=3 * 1000) + if len(locator.all_text_contents()): + return True + + wait_for(view_page, check_hello_there) diff --git a/tests/tests_app_examples/test_collect_failures.py b/tests/tests_app_examples/test_collect_failures.py new file mode 100644 index 0000000000000..f263ebb1a9f58 --- /dev/null +++ b/tests/tests_app_examples/test_collect_failures.py @@ -0,0 +1,39 @@ +import os +from time import sleep + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.testing import run_app_in_cloud + + +@pytest.mark.cloud +def test_collect_failures_example_cloud() -> None: + # logs are in order + expected_logs = [ + 
"useless_garbage_log_that_is_always_there_to_overload_logs", + "waiting_for_work_to_be_ready", + "work_is_running", + "flow_and_work_are_running", + "logger_flow_work", + "good_value_of_i_1", + "good_value_of_i_2", + "good_value_of_i_3", + "good_value_of_i_4", + "invalid_value_of_i_5", + ] + with run_app_in_cloud(os.path.join(_PROJECT_ROOT, "tests/tests_app_examples/collect_failures")) as ( + _, + _, + fetch_logs, + ): + last_found_log_index = -1 + while len(expected_logs) != 0: + for index, log in enumerate(fetch_logs()): + if expected_logs[0] in log: + print(f"found expected log: {expected_logs[0]}") + expected_logs.pop(0) + assert index > last_found_log_index + if len(expected_logs) == 0: + break + sleep(1) diff --git a/tests/tests_app_examples/test_core_features_app.py b/tests/tests_app_examples/test_core_features_app.py new file mode 100644 index 0000000000000..3fd425350ad0a --- /dev/null +++ b/tests/tests_app_examples/test_core_features_app.py @@ -0,0 +1,27 @@ +import os + +from click.testing import CliRunner +from tests_app import _PROJECT_ROOT + +from lightning_app.cli.lightning_cli import run_app + + +def test_core_features_app_example(): + + runner = CliRunner() + result = runner.invoke( + run_app, + [ + os.path.join(_PROJECT_ROOT, "tests/tests_app_examples/core_features_app/app.py"), + "--blocking", + "False", + "--open-ui", + "False", + "--env", # this is to test env variable + "FOO=bar", + "--env", + "BLA=bloz", + ], + catch_exceptions=False, + ) + assert result.exit_code == 0 diff --git a/tests/tests_app_examples/test_custom_work_dependencies.py b/tests/tests_app_examples/test_custom_work_dependencies.py new file mode 100644 index 0000000000000..8390233e2eee3 --- /dev/null +++ b/tests/tests_app_examples/test_custom_work_dependencies.py @@ -0,0 +1,22 @@ +import os +from time import sleep + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.testing import run_app_in_cloud + + +@pytest.mark.cloud +def test_custom_work_dependencies_example_cloud() -> None: + # if requirements not installed, the app will fail + with run_app_in_cloud( + os.path.join(_PROJECT_ROOT, "tests/tests_app_examples/custom_work_dependencies/"), + app_name="app.py", + ) as (_, _, fetch_logs): + has_logs = False + while not has_logs: + for log in fetch_logs(): + if "Custom Work Dependency checker End" in log: + has_logs = True + sleep(1) diff --git a/tests/tests_app_examples/test_drive.py b/tests/tests_app_examples/test_drive.py new file mode 100644 index 0000000000000..9cebca9cf1072 --- /dev/null +++ b/tests/tests_app_examples/test_drive.py @@ -0,0 +1,23 @@ +import os +from time import sleep + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.testing import run_app_in_cloud + + +@pytest.mark.cloud +def test_drive_example_cloud() -> None: + with run_app_in_cloud(os.path.join(_PROJECT_ROOT, "examples/app_drive")) as ( + _, + view_page, + fetch_logs, + ): + + has_logs = False + while not has_logs: + for log in fetch_logs(): + if "Application End!" 
in log: + has_logs = True + sleep(1) diff --git a/tests/tests_app_examples/test_idle_timeout.py b/tests/tests_app_examples/test_idle_timeout.py new file mode 100644 index 0000000000000..fb58a83aefc93 --- /dev/null +++ b/tests/tests_app_examples/test_idle_timeout.py @@ -0,0 +1,22 @@ +import os +from time import sleep + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.testing import run_app_in_cloud + + +@pytest.mark.cloud +def test_idle_timeout_example_cloud() -> None: + with run_app_in_cloud(os.path.join(_PROJECT_ROOT, "tests/tests_app_examples/idle_timeout")) as ( + _, + _, + fetch_logs, + ): + has_logs = False + while not has_logs: + for log in fetch_logs(): + if "Application End" in log: + has_logs = True + sleep(1) diff --git a/tests/tests_app_examples/test_payload.py b/tests/tests_app_examples/test_payload.py new file mode 100644 index 0000000000000..28d2391c18a2a --- /dev/null +++ b/tests/tests_app_examples/test_payload.py @@ -0,0 +1,19 @@ +import os +from time import sleep + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.testing import run_app_in_cloud + + +@pytest.mark.cloud +def test_payload_example_cloud() -> None: + with run_app_in_cloud(os.path.join(_PROJECT_ROOT, "examples/app_payload")) as (_, _, fetch_logs): + + has_logs = False + while not has_logs: + for log in fetch_logs(): + if "Application End!" in log: + has_logs = True + sleep(1) diff --git a/tests/tests_app_examples/test_quick_start.py b/tests/tests_app_examples/test_quick_start.py new file mode 100644 index 0000000000000..ef29a24e572bc --- /dev/null +++ b/tests/tests_app_examples/test_quick_start.py @@ -0,0 +1,69 @@ +import logging +import os +from unittest import mock + +import pytest +from click.testing import CliRunner +from tests_app import _PROJECT_ROOT + +from lightning_app import LightningApp +from lightning_app.cli.lightning_cli import run_app +from lightning_app.testing.helpers import RunIf +from lightning_app.testing.testing import run_app_in_cloud, wait_for + + +class QuickStartApp(LightningApp): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.root.serve_work._parallel = True + + def run_once(self): + done = super().run_once() + if self.root.train_work.best_model_path: + return True + return done + + +@pytest.mark.skipif(True, reason="test is skipped because CI was blocking all the PRs") +@RunIf(pytorch_lightning=True, skip_windows=True, skip_linux=True) +def test_quick_start_example(caplog, monkeypatch): + """This test ensures the Quick Start example properly train and serve PyTorch Lightning.""" + + monkeypatch.setattr("logging.getLogger", mock.MagicMock(return_value=logging.getLogger())) + + with caplog.at_level(logging.INFO): + with mock.patch("lightning_app.LightningApp", QuickStartApp): + runner = CliRunner() + result = runner.invoke( + run_app, + [ + os.path.join(_PROJECT_ROOT, "lightning-quick-start/app.py"), + "--blocking", + "False", + "--open-ui", + "False", + ], + catch_exceptions=False, + ) + assert result.exit_code == 0 + + +@pytest.mark.cloud +def test_quick_start_example_cloud() -> None: + with run_app_in_cloud(os.path.join(_PROJECT_ROOT, "lightning-quick-start/")) as (_, view_page, _): + + def click_gradio_demo(*_, **__): + button = view_page.locator('button:has-text("Interactive demo")') + button.wait_for(timeout=3 * 1000) + button.click() + return True + + wait_for(view_page, click_gradio_demo) + + def check_examples(*_, **__): + locator = 
view_page.frame_locator("iframe").locator('button:has-text("Submit")') + locator.wait_for(timeout=10 * 1000) + if len(locator.all_text_contents()) > 0: + return True + + wait_for(view_page, check_examples) diff --git a/tests/tests_app_examples/test_template_react_ui.py b/tests/tests_app_examples/test_template_react_ui.py new file mode 100644 index 0000000000000..2e348035fe6e5 --- /dev/null +++ b/tests/tests_app_examples/test_template_react_ui.py @@ -0,0 +1,33 @@ +import os +from time import sleep + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.testing import run_app_in_cloud, wait_for + + +@pytest.mark.cloud +def test_template_react_ui_example_cloud() -> None: + """This test ensures streamlit works in the cloud by clicking a button and checking the logs.""" + with run_app_in_cloud(os.path.join(_PROJECT_ROOT, "examples/app_template_react_ui")) as ( + _, + view_page, + fetch_logs, + ): + + def click_button(*_, **__): + button = view_page.frame_locator("iframe").locator('button:has-text("Start Printing")') + button.wait_for(timeout=3 * 1000) + if button.all_text_contents() == ["Start Printing"]: + button.click() + return True + + wait_for(view_page, click_button) + + has_logs = False + while not has_logs: + for log in fetch_logs(): + if "0: Hello World!" in log: + has_logs = True + sleep(1) diff --git a/tests/tests_app_examples/test_template_streamlit_ui.py b/tests/tests_app_examples/test_template_streamlit_ui.py new file mode 100644 index 0000000000000..ec18206bf06dd --- /dev/null +++ b/tests/tests_app_examples/test_template_streamlit_ui.py @@ -0,0 +1,33 @@ +import os +from time import sleep + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.testing import run_app_in_cloud, wait_for + + +@pytest.mark.cloud +def test_template_streamlit_ui_example_cloud() -> None: + """This test ensures streamlit works in the cloud by clicking a button and checking the logs.""" + with run_app_in_cloud(os.path.join(_PROJECT_ROOT, "templates/template_streamlit_ui/")) as ( + _, + view_page, + fetch_logs, + ): + + def click_button(*_, **__): + button = view_page.frame_locator("iframe").locator('button:has-text("Should print to the terminal ?")') + button.wait_for(timeout=5 * 1000) + if button.all_text_contents() == ["Should print to the terminal ?"]: + button.click() + return True + + wait_for(view_page, click_button) + + has_logs = False + while not has_logs: + for log in fetch_logs(): + if "0: Hello World!" 
in log: + has_logs = True + sleep(1) diff --git a/tests/tests_app_examples/test_v0_app.py b/tests/tests_app_examples/test_v0_app.py new file mode 100644 index 0000000000000..2c03d2de60e29 --- /dev/null +++ b/tests/tests_app_examples/test_v0_app.py @@ -0,0 +1,59 @@ +import os +from time import sleep +from typing import Tuple + +import pytest +from tests_app import _PROJECT_ROOT + +from lightning_app.testing.testing import application_testing, LightningTestApp, run_app_in_cloud, wait_for +from lightning_app.utilities.enum import AppStage + + +class LightningAppTestInt(LightningTestApp): + def run_once(self) -> Tuple[bool, float]: + if self.root.counter > 1: + print("V0 App End") + self.stage = AppStage.STOPPING + return True, 0.0 + return super().run_once() + + +def test_v0_app_example(): + command_line = [ + os.path.join(_PROJECT_ROOT, "examples/app_v0/app.py"), + "--blocking", + "False", + "--open-ui", + "False", + ] + result = application_testing(LightningAppTestInt, command_line) + assert result.exit_code == 0 + + +@pytest.mark.cloud +def test_v0_app_example_cloud() -> None: + with run_app_in_cloud(os.path.join(_PROJECT_ROOT, "examples/app_v0")) as ( + _, + view_page, + fetch_logs, + ): + + def check_content(button_name, text_content): + button = view_page.locator(f'button:has-text("{button_name}")') + button.wait_for(timeout=3 * 1000) + button.click() + view_page.reload() + locator = view_page.frame_locator("iframe").locator("div") + locator.wait_for(timeout=3 * 1000) + assert text_content in " ".join(locator.all_text_contents()) + return True + + wait_for(view_page, check_content, "TAB_1", "Hello from component A") + wait_for(view_page, check_content, "TAB_2", "Hello from component B") + + has_logs = False + while not has_logs: + for log in fetch_logs(): + if "'a': 'a', 'b': 'b'" in log: + has_logs = True + sleep(1) From 03d3654fb255478e0e5b3e62321c9e89eecb263b Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Fri, 1 Jul 2022 06:02:57 +0200 Subject: [PATCH 44/89] ci: drop false download artifact (#13473) --- .github/workflows/release-pypi.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/release-pypi.yml b/.github/workflows/release-pypi.yml index 35bfea4ca3fe1..2476685dae3b8 100644 --- a/.github/workflows/release-pypi.yml +++ b/.github/workflows/release-pypi.yml @@ -17,10 +17,6 @@ jobs: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 - - uses: actions/download-artifact@v2 - with: - name: pypi-packages-${{ github.sha }} - path: dist - uses: actions/setup-python@v2 with: python-version: 3.9 From a7f41c0c194b81ffc2a9c3d8da45347cec4af5a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Fri, 1 Jul 2022 09:55:50 +0200 Subject: [PATCH 45/89] Remove redundant shebang from source files (#13479) --- .actions/setup_tools.py | 1 - src/lightning/__about__.py | 1 - src/pytorch_lightning/__about__.py | 1 - src/pytorch_lightning/utilities/deepspeed.py | 1 - src/pytorch_lightning/utilities/deepspeed_model_summary.py | 1 - 5 files changed, 5 deletions(-) diff --git a/.actions/setup_tools.py b/.actions/setup_tools.py index 2b02e0955a14d..5801e1497af6f 100644 --- a/.actions/setup_tools.py +++ b/.actions/setup_tools.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright The PyTorch Lightning team. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/lightning/__about__.py b/src/lightning/__about__.py index afe0a140f8d9b..cfda4d9d6af54 100644 --- a/src/lightning/__about__.py +++ b/src/lightning/__about__.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/pytorch_lightning/__about__.py b/src/pytorch_lightning/__about__.py index a8b0d9e5be1c0..2cd55565de9ba 100644 --- a/src/pytorch_lightning/__about__.py +++ b/src/pytorch_lightning/__about__.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/pytorch_lightning/utilities/deepspeed.py b/src/pytorch_lightning/utilities/deepspeed.py index d671be1e6c05a..f1c83176ccccf 100644 --- a/src/pytorch_lightning/utilities/deepspeed.py +++ b/src/pytorch_lightning/utilities/deepspeed.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2020 The PyTorch Lightning team and Microsoft Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/pytorch_lightning/utilities/deepspeed_model_summary.py b/src/pytorch_lightning/utilities/deepspeed_model_summary.py index fca288be0542b..89dd6a9f9a25f 100644 --- a/src/pytorch_lightning/utilities/deepspeed_model_summary.py +++ b/src/pytorch_lightning/utilities/deepspeed_model_summary.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2020 The PyTorch Lightning team and Microsoft Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); From 9baa7d157173f24672d4922f27199e61456b16c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Fri, 1 Jul 2022 10:47:04 +0200 Subject: [PATCH 46/89] Move deepspeed summary test to correct folder (#13478) --- .../{callbacks => utilities}/test_deepspeed_model_summary.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/tests_pytorch/{callbacks => utilities}/test_deepspeed_model_summary.py (100%) diff --git a/tests/tests_pytorch/callbacks/test_deepspeed_model_summary.py b/tests/tests_pytorch/utilities/test_deepspeed_model_summary.py similarity index 100% rename from tests/tests_pytorch/callbacks/test_deepspeed_model_summary.py rename to tests/tests_pytorch/utilities/test_deepspeed_model_summary.py From 10cae3d6afc561fa1313bc09749800723f38470f Mon Sep 17 00:00:00 2001 From: Cyprien Ricque <48893621+Cyprien-Ricque@users.noreply.github.com> Date: Fri, 1 Jul 2022 14:36:33 +0200 Subject: [PATCH 47/89] fix mypy typing errors in pytorch_lightning.__setup__.py (#13472) Fix typing in _load_py_module function --- .actions/setup_tools.py | 2 ++ pyproject.toml | 1 - setup.py | 2 ++ src/lightning/__setup__.py | 2 ++ src/lightning_app/__setup__.py | 2 ++ src/pytorch_lightning/__setup__.py | 2 ++ 6 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.actions/setup_tools.py b/.actions/setup_tools.py index 5801e1497af6f..2790820a5a221 100644 --- a/.actions/setup_tools.py +++ b/.actions/setup_tools.py @@ -26,7 +26,9 @@ def _load_py_module(name: str, location: str) -> ModuleType: spec = spec_from_file_location(name, location) + assert spec, f"Failed to load module {name} from {location}" py = module_from_spec(spec) + assert spec.loader, f"ModuleSpec.loader is None for {name} from {location}" spec.loader.exec_module(py) return py diff --git a/pyproject.toml b/pyproject.toml index 
dc9db77d6dabd..51781d4953935 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,6 +95,5 @@ module = [ "pytorch_lightning.utilities.data", "pytorch_lightning.utilities.distributed", "pytorch_lightning.utilities.meta", - "pytorch_lightning.__setup__", ] ignore_errors = "True" diff --git a/setup.py b/setup.py index 0ff57c3f85439..a542b3c1e0291 100755 --- a/setup.py +++ b/setup.py @@ -74,7 +74,9 @@ def _load_py_module(name: str, location: str) -> ModuleType: spec = spec_from_file_location(name, location) + assert spec, f"Failed to load module {name} from {location}" py = module_from_spec(spec) + assert spec.loader, f"ModuleSpec.loader is None for {name} from {location}" spec.loader.exec_module(py) return py diff --git a/src/lightning/__setup__.py b/src/lightning/__setup__.py index f549419bf7cae..93679c2b88431 100644 --- a/src/lightning/__setup__.py +++ b/src/lightning/__setup__.py @@ -15,7 +15,9 @@ def _load_py_module(name: str, location: str) -> ModuleType: spec = spec_from_file_location(name, location) + assert spec, f"Failed to load module {name} from {location}" py = module_from_spec(spec) + assert spec.loader, f"ModuleSpec.loader is None for {name} from {location}" spec.loader.exec_module(py) return py diff --git a/src/lightning_app/__setup__.py b/src/lightning_app/__setup__.py index e989a17b50412..2a738f20c6b7c 100644 --- a/src/lightning_app/__setup__.py +++ b/src/lightning_app/__setup__.py @@ -14,7 +14,9 @@ def _load_py_module(name: str, location: str) -> ModuleType: spec = spec_from_file_location(name, location) + assert spec, f"Failed to load module {name} from {location}" py = module_from_spec(spec) + assert spec.loader, f"ModuleSpec.loader is None for {name} from {location}" spec.loader.exec_module(py) return py diff --git a/src/pytorch_lightning/__setup__.py b/src/pytorch_lightning/__setup__.py index 4b28df68296a0..8fd93c77eeab1 100644 --- a/src/pytorch_lightning/__setup__.py +++ b/src/pytorch_lightning/__setup__.py @@ -15,7 +15,9 @@ def _load_py_module(name: str, location: str) -> ModuleType: spec = spec_from_file_location(name, location) + assert spec, f"Failed to load module {name} from {location}" py = module_from_spec(spec) + assert spec.loader, f"ModuleSpec.loader is None for {name} from {location}" spec.loader.exec_module(py) return py From 61283a785f0563f38eabb3a3172cd509d60b6687 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Fri, 1 Jul 2022 20:12:52 +0200 Subject: [PATCH 48/89] lightning entry point (#13490) --- src/lightning/__setup__.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/lightning/__setup__.py b/src/lightning/__setup__.py index 93679c2b88431..439ece715425a 100644 --- a/src/lightning/__setup__.py +++ b/src/lightning/__setup__.py @@ -78,20 +78,29 @@ def _setup_args(**kwargs: Any) -> Dict[str, Any]: package_dir={"": "src"}, long_description=_long_description, long_description_content_type="text/markdown", + include_package_data=True, zip_safe=False, keywords=["deep learning", "pytorch", "AI"], # todo: aggregate tags from all packages python_requires=">=3.7", # todo: take the lowes based on all packages + entry_points={ + "console_scripts": [ + "lightning = lightning_app.cli.lightning_cli:main", + ], + }, setup_requires=[], install_requires=_requires, extras_require={}, # todo: consider porting all other packages extras with prefix project_urls={ "Bug Tracker": "https://github.com/Lightning-AI/lightning/issues", + "Documentation": "https://lightning.ai/lightning-docs", "Source Code": 
"https://github.com/Lightning-AI/lightning", }, classifiers=[ "Environment :: Console", "Natural Language :: English", - "Development Status :: 5 - Production/Stable", + # How mature is this project? Common values are + # 3 - Alpha, 4 - Beta, 5 - Production/Stable + "Development Status :: 4 - Beta", # Indicate who your project is intended for "Intended Audience :: Developers", "Topic :: Scientific/Engineering :: Artificial Intelligence", From e20f6a86f04fcda7468aeef0d5f7e6c2666c1863 Mon Sep 17 00:00:00 2001 From: Mansy Date: Fri, 1 Jul 2022 22:28:44 +0200 Subject: [PATCH 49/89] Add CI for python lightning app Python unit tests (#13491) * Update lightning_app src * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update lightning app tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add CI * update tests * requirements * fix version tests * todo * fix tests * fix tests * fix tests * fix tests * fix formatting Co-authored-by: mansy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: awaelchli --- .github/workflows/ci-app_block.yml | 8 - .github/workflows/ci-app_tests.yml | 140 ++ src/lightning_app/__init__.py | 2 +- src/lightning_app/cli/app-template/.gitignore | 8 +- .../cli/app-template/placeholdername/app.py | 6 +- .../components/component_a/component_a.py | 4 +- .../components/component_b/component_a.py | 4 +- .../tests/test_placeholdername_app.py | 2 +- src/lightning_app/cli/cmd_init.py | 2 +- src/lightning_app/cli/cmd_install.py | 29 +- .../.github/workflows/ci-testing.yml | 2 +- .../cli/component-template/.gitignore | 8 +- .../cli/component-template/README.md | 8 +- .../cli/component-template/app.py | 6 +- .../placeholdername/component.py | 4 +- src/lightning_app/cli/lightning_cli.py | 1 + src/lightning_app/cli/pl-app-template/app.py | 8 +- .../cli/pl-app-template/core/callbacks.py | 2 +- .../core/components/logger/tensorboard.py | 4 +- .../components/logger/weights_and_biases.py | 2 +- .../components/script_runner/script_runner.py | 15 +- .../tests/core/test_callbacks.py | 2 +- .../cli/react-ui-template/README.md | 8 +- .../cli/react-ui-template/example_app.py | 11 +- .../cli/react-ui-template/ui/package.json | 2 +- .../cli/react-ui-template/ui/yarn.lock | 1138 +++++++++++++++++ src/lightning_app/components/python/popen.py | 4 +- src/lightning_app/components/python/tracer.py | 34 +- src/lightning_app/components/serve/gradio.py | 2 +- src/lightning_app/core/app.py | 25 +- src/lightning_app/core/flow.py | 89 +- src/lightning_app/core/work.py | 30 +- src/lightning_app/testing/helpers.py | 2 +- src/lightning_app/testing/testing.py | 14 +- src/lightning_app/utilities/app_helpers.py | 2 +- src/lightning_app/utilities/network.py | 2 +- .../utilities/packaging/build_config.py | 43 +- .../utilities/packaging/cloud_compute.py | 4 +- .../utilities/packaging/lightning_utils.py | 65 +- tests/tests_app/cli/test_cmd_react_ui_init.py | 2 +- .../packaging/test_lightning_utils.py | 4 +- tests/tests_app/utilities/test_git.py | 2 +- 42 files changed, 1599 insertions(+), 151 deletions(-) create mode 100644 .github/workflows/ci-app_tests.yml create mode 100644 src/lightning_app/cli/react-ui-template/ui/yarn.lock diff --git a/.github/workflows/ci-app_block.yml b/.github/workflows/ci-app_block.yml index 5eef31cb8c8d5..92b928a68d83b 100644 --- a/.github/workflows/ci-app_block.yml +++ b/.github/workflows/ci-app_block.yml @@ -19,18 +19,10 @@ 
jobs: echo "$file" done - - name: Block edits in src/lightning_app - if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'src/lightning_app') - run: exit 1 - - name: Block edits in docs/source-app if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'docs/source-app') run: exit 1 - - name: Block edits in tests/tests_app - if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'tests/tests_app') - run: exit 1 - - name: Block edits in examples/app if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'examples/app_') run: exit 1 diff --git a/.github/workflows/ci-app_tests.yml b/.github/workflows/ci-app_tests.yml new file mode 100644 index 0000000000000..0c87c3f4c2802 --- /dev/null +++ b/.github/workflows/ci-app_tests.yml @@ -0,0 +1,140 @@ +name: CI App Tests + +# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows +on: # Trigger the workflow on push or pull request, but only for the master branch + push: + branches: + - "master" + pull_request: + paths: + - "src/lightning_app/**" + - "tests/tests_app/**" + - "requirements/app/**" + - "setup.py" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} + +jobs: + pytest: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-20.04, macOS-10.15, windows-2019] + python-version: [3.8] + requires: ["oldest", "latest"] + + # Timeout: https://stackoverflow.com/a/59076067/4521646 + timeout-minutes: 20 + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + # TODO: use replace_oldest_ver() instead + - name: Set min. dependencies + if: matrix.requires == 'oldest' + run: | + for fpath in ('requirements/app/base.txt', 'requirements/app/test.txt'): + req = open(fpath).read().replace('>=', '==') + open(fpath, 'w').write(req) + shell: python + + - run: echo "::set-output name=period::$(python -c 'import time ; days = time.time() / 60 / 60 / 24 ; print(int(days / 7))' 2>&1)" + if: matrix.requires == 'latest' + id: times + + # Note: This uses an internal pip API and may not always work + # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow + - name: Get pip cache + id: pip-cache + run: | + python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" + + - name: Cache pip + uses: actions/cache@v2 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ matrix.requires }}-td${{ steps.times.outputs.period }}-${{ hashFiles('requirements/app/base.txt') }} + restore-keys: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ matrix.requires }}-td${{ steps.times.outputs.period }}- + + - name: Install dependencies + run: | + pip --version + pip install -r requirements/app/devel.txt --quiet --find-links https://download.pytorch.org/whl/cpu/torch_stable.html + pip list + shell: bash + +# - name: Start Redis +# if: runner.os == 'Linux' +# uses: supercharge/redis-github-action@1.4.0 +# with: +# redis-version: 6 +# redis-port: 6379 + + - name: Setup Node.js + uses: actions/setup-node@v2 + with: + node-version: '16' + + - name: Install Yarn + run: npm install -g yarn + + - name: Install Lightning as top-level + run: pip install -e . 
-r requirements/app/base.txt + shell: bash + + - name: Tests + working-directory: ./tests + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: us-east-1 + PYTEST_ARTIFACT: results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }}.xml + run: | + coverage run --source lightning_app -m pytest -m "not cloud" tests_app --timeout=300 -vvvv --junitxml=$PYTEST_ARTIFACT --durations=0 + + - name: Upload pytest test results + uses: actions/upload-artifact@v2 + with: + name: unittest-results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }} + path: tests/results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }}.xml + if: failure() + + - name: Statistics + if: success() + working-directory: ./tests + run: | + coverage xml -i + coverage report -i + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + file: tests/coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false + +# TODO: figure out why we clone and install quick-start +# - name: Clone Quick Start Example Repo +# uses: actions/checkout@v3 +# # TODO: this needs to be git submodule +# if: matrix.os == 'windows-2019' # because the install doesn't work on windows +# with: +# repository: Lightning-AI/lightning-quick-start +# ref: 'main' +# path: lightning-quick-start +# +# - name: Lightning Install quick-start +# shell: bash +# if: matrix.os != 'windows-2019' # because the install doesn't work on windows +# run: | +# python -m lightning install app lightning/quick-start -y diff --git a/src/lightning_app/__init__.py b/src/lightning_app/__init__.py index b680564bda1cf..07926c203662b 100644 --- a/src/lightning_app/__init__.py +++ b/src/lightning_app/__init__.py @@ -37,6 +37,6 @@ from lightning_app.components import demo # noqa: F401 _PACKAGE_ROOT = os.path.dirname(__file__) -_PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT) +_PROJECT_ROOT = os.path.dirname(os.path.dirname(_PACKAGE_ROOT)) __all__ = ["LightningApp", "LightningFlow", "LightningWork", "BuildConfig", "CloudCompute"] diff --git a/src/lightning_app/cli/app-template/.gitignore b/src/lightning_app/cli/app-template/.gitignore index 1c6df5296752b..09f455310ce5b 100644 --- a/src/lightning_app/cli/app-template/.gitignore +++ b/src/lightning_app/cli/app-template/.gitignore @@ -58,8 +58,8 @@ coverage.xml # Sphinx documentation docs/_build/ -docs/source/api/ -docs/source/*.md +docs/source-app/api/ +docs/source-app/*.md # PyBuilder target/ @@ -132,9 +132,9 @@ coverage.* # Frontend build artifacts *lightning_app/ui* gradio_cached_examples -/docs/source/api_reference/generated/* +/docs/source-app/api_reference/generated/* examples/my_own_leaderboard/submissions/* -docs/source/api_reference/generated/* +docs/source-app/api_reference/generated/* *.ckpt redis-stable node_modules diff --git a/src/lightning_app/cli/app-template/placeholdername/app.py b/src/lightning_app/cli/app-template/placeholdername/app.py index e667aa172cd78..a7ce8a307061a 100644 --- a/src/lightning_app/cli/app-template/placeholdername/app.py +++ b/src/lightning_app/cli/app-template/placeholdername/app.py @@ -1,9 +1,9 @@ from placeholdername import ComponentA, ComponentB -import lightning_app as la +import lightning as L -class LitApp(la.LightningFlow): +class LitApp(L.LightningFlow): def __init__(self) -> None: super().__init__() self.component_a = ComponentA() @@ -14,4 +14,4 @@ def 
run(self): self.component_b.run() -app = la.LightningApp(LitApp()) +app = L.LightningApp(LitApp()) diff --git a/src/lightning_app/cli/app-template/placeholdername/components/component_a/component_a.py b/src/lightning_app/cli/app-template/placeholdername/components/component_a/component_a.py index 446619bfb9495..172aaab32416d 100644 --- a/src/lightning_app/cli/app-template/placeholdername/components/component_a/component_a.py +++ b/src/lightning_app/cli/app-template/placeholdername/components/component_a/component_a.py @@ -1,6 +1,6 @@ -import lightning_app as la +import lightning as L -class ComponentA(la.LightningFlow): +class ComponentA(L.LightningFlow): def run(self): print("hello from component A") diff --git a/src/lightning_app/cli/app-template/placeholdername/components/component_b/component_a.py b/src/lightning_app/cli/app-template/placeholdername/components/component_b/component_a.py index d309ebc7c387e..c5537d9565f31 100644 --- a/src/lightning_app/cli/app-template/placeholdername/components/component_b/component_a.py +++ b/src/lightning_app/cli/app-template/placeholdername/components/component_b/component_a.py @@ -1,6 +1,6 @@ -import lightning_app as la +import lightning as L -class ComponentB(la.LightningFlow): +class ComponentB(L.LightningFlow): def run(self): print("hello from component B") diff --git a/src/lightning_app/cli/app-template/tests/test_placeholdername_app.py b/src/lightning_app/cli/app-template/tests/test_placeholdername_app.py index fffc137db5ee7..da2bca590792a 100644 --- a/src/lightning_app/cli/app-template/tests/test_placeholdername_app.py +++ b/src/lightning_app/cli/app-template/tests/test_placeholdername_app.py @@ -10,7 +10,7 @@ import os from contextlib import redirect_stdout -from lightning_app.testing.testing import application_testing, LightningTestApp +from lightning.app.testing.testing import application_testing, LightningTestApp class LightningAppTestInt(LightningTestApp): diff --git a/src/lightning_app/cli/cmd_init.py b/src/lightning_app/cli/cmd_init.py index fb80a6fecc8fb..4e239da87c736 100644 --- a/src/lightning_app/cli/cmd_init.py +++ b/src/lightning_app/cli/cmd_init.py @@ -124,7 +124,7 @@ def component(component_name): ⚡ Use the component inside an app: ⚡ from {name_for_files} import TemplateComponent - import lightning_app as la + import lightning.app as la class LitApp(la.LightningFlow): def __init__(self) -> None: diff --git a/src/lightning_app/cli/cmd_install.py b/src/lightning_app/cli/cmd_install.py index d7bb0b435ae53..4fbaefd924544 100644 --- a/src/lightning_app/cli/cmd_install.py +++ b/src/lightning_app/cli/cmd_install.py @@ -49,10 +49,12 @@ def gallery_app(name, yes_arg, version_arg, cwd=None, overwrite=False): app_entry = _resolve_resource(registry_url, name=name, version_arg=version_arg, resource_type="app") # give the user the chance to do a manual install - source_url, git_url, folder_name = _show_install_app_prompt(app_entry, app, org, yes_arg, resource_type="app") + source_url, git_url, folder_name, git_sha = _show_install_app_prompt( + app_entry, app, org, yes_arg, resource_type="app" + ) # run installation if requested - _install_app(source_url, git_url, folder_name, cwd=cwd, overwrite=overwrite) + _install_app(source_url, git_url, folder_name, cwd=cwd, overwrite=overwrite, git_sha=git_sha) def non_gallery_app(gh_url, yes_arg, cwd=None, overwrite=False): @@ -161,14 +163,16 @@ def _show_non_gallery_install_component_prompt(gh_url, yes_arg): def _show_install_app_prompt(entry, app, org, yes_arg, resource_type): source_url = 
entry["sourceUrl"] # This URL is used only to display the repo and extract folder name full_git_url = entry["gitUrl"] # Used to clone the repo (can include tokens for private repos) - git_url = full_git_url.split("#ref=")[0] + git_url_parts = full_git_url.split("#ref=") + git_url = git_url_parts[0] + git_sha = git_url_parts[1] if len(git_url_parts) == 2 else None folder_name = source_url.split("/")[-1] # yes arg does not prompt the user for permission to install anything # automatically creates env and sets up the project if yes_arg: - return source_url, git_url, folder_name + return source_url, git_url, folder_name, git_sha prompt = f""" ⚡ Installing Lightning {resource_type} ⚡ @@ -192,7 +196,7 @@ def _show_install_app_prompt(entry, app, org, yes_arg, resource_type): if not should_install: raise KeyboardInterrupt() - return source_url, git_url, folder_name + return source_url, git_url, folder_name, git_sha except KeyboardInterrupt: repo = entry["sourceUrl"] m = f""" @@ -367,7 +371,9 @@ def _install_with_env(repo_url, folder_name, cwd=None): logger.info(m) -def _install_app(source_url: str, git_url: str, folder_name: str, cwd=None, overwrite: bool = False): +def _install_app( + source_url: str, git_url: str, folder_name: str, cwd=None, overwrite: bool = False, git_sha: str = None +): """Installing lighting app from the `git_url` Args: @@ -381,6 +387,8 @@ def _install_app(source_url: str, git_url: str, folder_name: str, cwd=None, over Working director. If not specified, current working directory is used. overwrite: If true, overwrite the app directory without asking if it already exists + git_sha: + The git_sha for checking out the git repo of the app. """ if not cwd: @@ -412,6 +420,15 @@ def _install_app(source_url: str, git_url: str, folder_name: str, cwd=None, over os.chdir(f"{folder_name}") cwd = os.getcwd() + try: + if git_sha: + subprocess.check_output(["git", "checkout", git_sha], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + if "did not match any" in str(e.output): + raise SystemExit("Looks like the git SHA is not valid or doesn't exist in app repo.") + else: + raise Exception(e) + # activate and install reqs # TODO: remove shell=True... 
but need to run command in venv logger.info("⚡ RUN: install requirements (pip install -r requirements.txt)") diff --git a/src/lightning_app/cli/component-template/.github/workflows/ci-testing.yml b/src/lightning_app/cli/component-template/.github/workflows/ci-testing.yml index 019693bb73210..a15100b3b171b 100644 --- a/src/lightning_app/cli/component-template/.github/workflows/ci-testing.yml +++ b/src/lightning_app/cli/component-template/.github/workflows/ci-testing.yml @@ -52,7 +52,7 @@ jobs: - name: Clone Template React UI Repo uses: actions/checkout@v3 with: - repository: PyTorchLightning/lightning + repository: Lightning-AI/lightning token: ${{ secrets.PAT_GHOST }} ref: 'master' path: lightning diff --git a/src/lightning_app/cli/component-template/.gitignore b/src/lightning_app/cli/component-template/.gitignore index 1c6df5296752b..09f455310ce5b 100644 --- a/src/lightning_app/cli/component-template/.gitignore +++ b/src/lightning_app/cli/component-template/.gitignore @@ -58,8 +58,8 @@ coverage.xml # Sphinx documentation docs/_build/ -docs/source/api/ -docs/source/*.md +docs/source-app/api/ +docs/source-app/*.md # PyBuilder target/ @@ -132,9 +132,9 @@ coverage.* # Frontend build artifacts *lightning_app/ui* gradio_cached_examples -/docs/source/api_reference/generated/* +/docs/source-app/api_reference/generated/* examples/my_own_leaderboard/submissions/* -docs/source/api_reference/generated/* +docs/source-app/api_reference/generated/* *.ckpt redis-stable node_modules diff --git a/src/lightning_app/cli/component-template/README.md b/src/lightning_app/cli/component-template/README.md index 7ec8feb881cde..16e5911bb8f27 100644 --- a/src/lightning_app/cli/component-template/README.md +++ b/src/lightning_app/cli/component-template/README.md @@ -8,7 +8,7 @@ lightning init component placeholdername ## To run placeholdername -First, install placeholdername (warning: this app has not been officially approved on the lightning gallery): +First, install placeholdername (warning: this component has not been officially approved on the lightning gallery): ```bash lightning install component https://github.com/theUser/placeholdername @@ -18,10 +18,10 @@ Once the app is installed, use it in an app: ```python from placeholdername import TemplateComponent -import lightning_app as la +import lightning as L -class LitApp(lapp.LightningFlow): +class LitApp(L.LightningFlow): def __init__(self) -> None: super().__init__() self.placeholdername = TemplateComponent() @@ -31,5 +31,5 @@ class LitApp(lapp.LightningFlow): self.placeholdername.run() -app = lapp.LightningApp(LitApp()) +app = L.LightningApp(LitApp()) ``` diff --git a/src/lightning_app/cli/component-template/app.py b/src/lightning_app/cli/component-template/app.py index c413a044bbe15..49db53e353d48 100644 --- a/src/lightning_app/cli/component-template/app.py +++ b/src/lightning_app/cli/component-template/app.py @@ -1,9 +1,9 @@ from placeholdername import TemplateComponent -import lightning_app as la +import lightning as L -class LitApp(la.LightningFlow): +class LitApp(L.LightningFlow): def __init__(self) -> None: super().__init__() self.placeholdername = TemplateComponent() @@ -13,4 +13,4 @@ def run(self): self.placeholdername.run() -app = la.LightningApp(LitApp()) +app = L.LightningApp(LitApp()) diff --git a/src/lightning_app/cli/component-template/placeholdername/component.py b/src/lightning_app/cli/component-template/placeholdername/component.py index 61b36ac3562bd..3b8bc0e2b0e9b 100644 --- 
a/src/lightning_app/cli/component-template/placeholdername/component.py +++ b/src/lightning_app/cli/component-template/placeholdername/component.py @@ -1,7 +1,7 @@ -import lightning_app as la +import lightning as L -class TemplateComponent(la.LightningWork): +class TemplateComponent(L.LightningWork): def __init__(self) -> None: super().__init__() self.value = 0 diff --git a/src/lightning_app/cli/lightning_cli.py b/src/lightning_app/cli/lightning_cli.py index 1ff0dab3300a0..fb39f743ec3a2 100644 --- a/src/lightning_app/cli/lightning_cli.py +++ b/src/lightning_app/cli/lightning_cli.py @@ -68,6 +68,7 @@ def _run_app( ) env_vars = _format_input_env_variables(env) + os.environ.update(env_vars) def on_before_run(*args): if open_ui and not without_server: diff --git a/src/lightning_app/cli/pl-app-template/app.py b/src/lightning_app/cli/pl-app-template/app.py index 7f3d5965b5f1c..01bc0106f264b 100644 --- a/src/lightning_app/cli/pl-app-template/app.py +++ b/src/lightning_app/cli/pl-app-template/app.py @@ -4,10 +4,10 @@ from core.components import TensorBoard, WeightsAndBiases from core.components.script_runner import ScriptRunner -from lightning_app import LightningApp, LightningFlow -from lightning_app.frontend import StaticWebFrontend -from lightning_app.storage import Path -from lightning_app.utilities.packaging.cloud_compute import CloudCompute +from lightning.app import LightningApp, LightningFlow +from lightning.app.frontend import StaticWebFrontend +from lightning.app.storage import Path +from lightning.app.utilities.packaging.cloud_compute import CloudCompute class ReactUI(LightningFlow): diff --git a/src/lightning_app/cli/pl-app-template/core/callbacks.py b/src/lightning_app/cli/pl-app-template/core/callbacks.py index 93992c552f781..de1bb4003f71f 100644 --- a/src/lightning_app/cli/pl-app-template/core/callbacks.py +++ b/src/lightning_app/cli/pl-app-template/core/callbacks.py @@ -5,7 +5,7 @@ from core.state import ProgressBarState, TrainerState import pytorch_lightning as pl -from lightning_app.storage import Path +from lightning.app.storage import Path from pytorch_lightning import Callback from pytorch_lightning.callbacks.progress.base import get_standard_metrics from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger diff --git a/src/lightning_app/cli/pl-app-template/core/components/logger/tensorboard.py b/src/lightning_app/cli/pl-app-template/core/components/logger/tensorboard.py index cd7f406dd53bd..6f5b2eb563c6d 100644 --- a/src/lightning_app/cli/pl-app-template/core/components/logger/tensorboard.py +++ b/src/lightning_app/cli/pl-app-template/core/components/logger/tensorboard.py @@ -2,8 +2,8 @@ import time from typing import Dict, List -from lightning_app import BuildConfig, LightningFlow, LightningWork -from lightning_app.storage import Path +from lightning.app import BuildConfig, LightningFlow, LightningWork +from lightning.app.storage import Path class TensorBoard(LightningFlow): diff --git a/src/lightning_app/cli/pl-app-template/core/components/logger/weights_and_biases.py b/src/lightning_app/cli/pl-app-template/core/components/logger/weights_and_biases.py index a3fb0131cb5a7..c0fbbadc7c46f 100644 --- a/src/lightning_app/cli/pl-app-template/core/components/logger/weights_and_biases.py +++ b/src/lightning_app/cli/pl-app-template/core/components/logger/weights_and_biases.py @@ -1,7 +1,7 @@ import os from typing import Dict, List, Optional, TYPE_CHECKING -from lightning_app import LightningFlow +from lightning.app import LightningFlow if TYPE_CHECKING: import wandb diff 
--git a/src/lightning_app/cli/pl-app-template/core/components/script_runner/script_runner.py b/src/lightning_app/cli/pl-app-template/core/components/script_runner/script_runner.py index 3eb10dfeee9d9..7cef61a2a525e 100644 --- a/src/lightning_app/cli/pl-app-template/core/components/script_runner/script_runner.py +++ b/src/lightning_app/cli/pl-app-template/core/components/script_runner/script_runner.py @@ -1,14 +1,12 @@ -import os import sys import traceback from typing import Any, Dict, List, Optional, Tuple -from pkg_resources import parse_requirements - -from lightning_app.components.python import TracerPythonScript -from lightning_app.storage import Path -from lightning_app.utilities.packaging.build_config import BuildConfig -from lightning_app.utilities.tracer import Tracer +from lightning.app.components.python import TracerPythonScript +from lightning.app.storage import Path +from lightning.app.utilities.packaging.build_config import BuildConfig +from lightning.app.utilities.tracer import Tracer +from lightning_app.utilities.packaging.build_config import load_requirements class ScriptRunner(TracerPythonScript): @@ -76,7 +74,6 @@ def _get_build_config(root_path: str) -> Optional[BuildConfig]: ] if Path(root_path, "requirements.txt").exists(): # Requirements from the user's code folder - path_req = os.path.join(root_path, "requirements.txt") - requirements.extend(list(map(str, parse_requirements(open(path_req).readlines())))) + requirements.extend(load_requirements(root_path, file_name="requirements.txt")) return BuildConfig(requirements=requirements) diff --git a/src/lightning_app/cli/pl-app-template/tests/core/test_callbacks.py b/src/lightning_app/cli/pl-app-template/tests/core/test_callbacks.py index ed058d7ce7afb..d7fb5c1064093 100644 --- a/src/lightning_app/cli/pl-app-template/tests/core/test_callbacks.py +++ b/src/lightning_app/cli/pl-app-template/tests/core/test_callbacks.py @@ -5,7 +5,7 @@ from core.callbacks import PLAppArtifactsTracker, PLAppProgressTracker, PLAppSummary from core.components.script_runner import ScriptRunner -from lightning_app.storage import Path +from lightning.app.storage import Path from pytorch_lightning import LightningModule, Trainer from pytorch_lightning.loggers import TensorBoardLogger diff --git a/src/lightning_app/cli/react-ui-template/README.md b/src/lightning_app/cli/react-ui-template/README.md index a2fdc147de8d0..023fb3babc14e 100644 --- a/src/lightning_app/cli/react-ui-template/README.md +++ b/src/lightning_app/cli/react-ui-template/README.md @@ -4,7 +4,7 @@ This is a full react template ready to use in a component This UI was automatically generated with: -```bash +```commandline lightning init react-ui ``` @@ -34,12 +34,12 @@ lightning run app react-ui/example_app.py To connect the react UI to your component, simply point the `StaticWebFrontend` to the `dist/` folder generated by yarn after building your react website. 
```python -import lightning_app as la +import lightning as L -class YourComponent(lapp.LightningFlow): +class YourComponent(L.LightningFlow): def configure_layout(self): - return lapp.frontend.StaticWebFrontend(Path(__file__).parent / "react-ui/src/dist") + return Lapp.frontend.StaticWebFrontend(Path(__file__).parent / "react-ui/src/dist") ``` ### Set up interactions between React and the component diff --git a/src/lightning_app/cli/react-ui-template/example_app.py b/src/lightning_app/cli/react-ui-template/example_app.py index 735f1782e4aac..e99f61e38a64a 100644 --- a/src/lightning_app/cli/react-ui-template/example_app.py +++ b/src/lightning_app/cli/react-ui-template/example_app.py @@ -2,20 +2,21 @@ from pathlib import Path -import lightning_app as la +import lightning as L +from lightning.app import frontend -class YourComponent(la.LightningFlow): +class YourComponent(L.LightningFlow): def __init__(self): super().__init__() self.message_to_print = "Hello World!" self.should_print = False def configure_layout(self): - return la.frontend.StaticWebFrontend(Path(__file__).parent / "ui/dist") + return frontend.StaticWebFrontend(Path(__file__).parent / "ui/dist") -class HelloLitReact(la.LightningFlow): +class HelloLitReact(L.LightningFlow): def __init__(self): super().__init__() self.counter = 0 @@ -30,4 +31,4 @@ def configure_layout(self): return [{"name": "React UI", "content": self.react_ui}] -app = la.LightningApp(HelloLitReact()) +app = L.LightningApp(HelloLitReact()) diff --git a/src/lightning_app/cli/react-ui-template/ui/package.json b/src/lightning_app/cli/react-ui-template/ui/package.json index ef7e7a58504bd..3245255a473da 100644 --- a/src/lightning_app/cli/react-ui-template/ui/package.json +++ b/src/lightning_app/cli/react-ui-template/ui/package.json @@ -10,7 +10,7 @@ "dependencies": { "@emotion/react": "^11.8.2", "@emotion/styled": "^11.8.1", - "@mui/material": "^5.5.0", + "@mui/material": "5.8.5", "axios": "^0.26.1", "lodash": "^4.17.21", "nanoid": "^3.3.1", diff --git a/src/lightning_app/cli/react-ui-template/ui/yarn.lock b/src/lightning_app/cli/react-ui-template/ui/yarn.lock new file mode 100644 index 0000000000000..3cba665d27146 --- /dev/null +++ b/src/lightning_app/cli/react-ui-template/ui/yarn.lock @@ -0,0 +1,1138 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +"@ampproject/remapping@^2.1.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.0.tgz#56c133824780de3174aed5ab6834f3026790154d" + integrity sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w== + dependencies: + "@jridgewell/gen-mapping" "^0.1.0" + "@jridgewell/trace-mapping" "^0.3.9" + +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.18.6.tgz#3b25d38c89600baa2dcc219edfa88a74eb2c427a" + integrity sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q== + dependencies: + "@babel/highlight" "^7.18.6" + +"@babel/compat-data@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.18.6.tgz#8b37d24e88e8e21c499d4328db80577d8882fa53" + integrity sha512-tzulrgDT0QD6U7BJ4TKVk2SDDg7wlP39P9yAx1RfLy7vP/7rsDRlWVfbWxElslu56+r7QOhB2NSDsabYYruoZQ== + +"@babel/core@^7.17.10": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.18.6.tgz#54a107a3c298aee3fe5e1947a6464b9b6faca03d" + integrity sha512-cQbWBpxcbbs/IUredIPkHiAGULLV8iwgNRMFzvbhEXISp4f3rUUXE5+TIw6KwUWUR3DwyI6gmBRnmAtYaWehwQ== + dependencies: + "@ampproject/remapping" "^2.1.0" + "@babel/code-frame" "^7.18.6" + "@babel/generator" "^7.18.6" + "@babel/helper-compilation-targets" "^7.18.6" + "@babel/helper-module-transforms" "^7.18.6" + "@babel/helpers" "^7.18.6" + "@babel/parser" "^7.18.6" + "@babel/template" "^7.18.6" + "@babel/traverse" "^7.18.6" + "@babel/types" "^7.18.6" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.1" + semver "^6.3.0" + +"@babel/generator@^7.18.6": + version "7.18.7" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.18.7.tgz#2aa78da3c05aadfc82dbac16c99552fc802284bd" + integrity sha512-shck+7VLlY72a2w9c3zYWuE1pwOKEiQHV7GTUbSnhyl5eu3i04t30tBY82ZRWrDfo3gkakCFtevExnxbkf2a3A== + dependencies: + "@babel/types" "^7.18.7" + "@jridgewell/gen-mapping" "^0.3.2" + jsesc "^2.5.1" + +"@babel/helper-annotate-as-pure@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz#eaa49f6f80d5a33f9a5dd2276e6d6e451be0a6bb" + integrity sha512-duORpUiYrEpzKIop6iNbjnwKLAKnJ47csTyRACyEmWj0QdUrm5aqNJGHSSEQSUAvNW0ojX0dOmK9dZduvkfeXA== + dependencies: + "@babel/types" "^7.18.6" + +"@babel/helper-compilation-targets@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.18.6.tgz#18d35bfb9f83b1293c22c55b3d576c1315b6ed96" + integrity sha512-vFjbfhNCzqdeAtZflUFrG5YIFqGTqsctrtkZ1D/NB0mDW9TwW3GmmUepYY4G9wCET5rY5ugz4OGTcLd614IzQg== + dependencies: + "@babel/compat-data" "^7.18.6" + "@babel/helper-validator-option" "^7.18.6" + browserslist "^4.20.2" + semver "^6.3.0" + +"@babel/helper-environment-visitor@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.6.tgz#b7eee2b5b9d70602e59d1a6cad7dd24de7ca6cd7" + integrity sha512-8n6gSfn2baOY+qlp+VSzsosjCVGFqWKmDF0cCWOybh52Dw3SEyoWR1KrhMJASjLwIEkkAufZ0xvr+SxLHSpy2Q== + +"@babel/helper-function-name@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.18.6.tgz#8334fecb0afba66e6d87a7e8c6bb7fed79926b83" + integrity 
sha512-0mWMxV1aC97dhjCah5U5Ua7668r5ZmSC2DLfH2EZnf9c3/dHZKiFa5pRLMH5tjSl471tY6496ZWk/kjNONBxhw== + dependencies: + "@babel/template" "^7.18.6" + "@babel/types" "^7.18.6" + +"@babel/helper-hoist-variables@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz#d4d2c8fb4baeaa5c68b99cc8245c56554f926678" + integrity sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q== + dependencies: + "@babel/types" "^7.18.6" + +"@babel/helper-module-imports@^7.12.13", "@babel/helper-module-imports@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz#1e3ebdbbd08aad1437b428c50204db13c5a3ca6e" + integrity sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA== + dependencies: + "@babel/types" "^7.18.6" + +"@babel/helper-module-transforms@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.18.6.tgz#57e3ca669e273d55c3cda55e6ebf552f37f483c8" + integrity sha512-L//phhB4al5uucwzlimruukHB3jRd5JGClwRMD/ROrVjXfLqovYnvQrK/JK36WYyVwGGO7OD3kMyVTjx+WVPhw== + dependencies: + "@babel/helper-environment-visitor" "^7.18.6" + "@babel/helper-module-imports" "^7.18.6" + "@babel/helper-simple-access" "^7.18.6" + "@babel/helper-split-export-declaration" "^7.18.6" + "@babel/helper-validator-identifier" "^7.18.6" + "@babel/template" "^7.18.6" + "@babel/traverse" "^7.18.6" + "@babel/types" "^7.18.6" + +"@babel/helper-plugin-utils@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.18.6.tgz#9448974dd4fb1d80fefe72e8a0af37809cd30d6d" + integrity sha512-gvZnm1YAAxh13eJdkb9EWHBnF3eAub3XTLCZEehHT2kWxiKVRL64+ae5Y6Ivne0mVHmMYKT+xWgZO+gQhuLUBg== + +"@babel/helper-simple-access@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.18.6.tgz#d6d8f51f4ac2978068df934b569f08f29788c7ea" + integrity sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g== + dependencies: + "@babel/types" "^7.18.6" + +"@babel/helper-split-export-declaration@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz#7367949bc75b20c6d5a5d4a97bba2824ae8ef075" + integrity sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA== + dependencies: + "@babel/types" "^7.18.6" + +"@babel/helper-validator-identifier@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.18.6.tgz#9c97e30d31b2b8c72a1d08984f2ca9b574d7a076" + integrity sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g== + +"@babel/helper-validator-option@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz#bf0d2b5a509b1f336099e4ff36e1a63aa5db4db8" + integrity sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw== + +"@babel/helpers@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.18.6.tgz#4c966140eaa1fcaa3d5a8c09d7db61077d4debfd" + integrity 
sha512-vzSiiqbQOghPngUYt/zWGvK3LAsPhz55vc9XNN0xAl2gV4ieShI2OQli5duxWHD+72PZPTKAcfcZDE1Cwc5zsQ== + dependencies: + "@babel/template" "^7.18.6" + "@babel/traverse" "^7.18.6" + "@babel/types" "^7.18.6" + +"@babel/highlight@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.18.6.tgz#81158601e93e2563795adcbfbdf5d64be3f2ecdf" + integrity sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g== + dependencies: + "@babel/helper-validator-identifier" "^7.18.6" + chalk "^2.0.0" + js-tokens "^4.0.0" + +"@babel/parser@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.18.6.tgz#845338edecad65ebffef058d3be851f1d28a63bc" + integrity sha512-uQVSa9jJUe/G/304lXspfWVpKpK4euFLgGiMQFOCpM/bgcAdeoHwi/OQz23O9GK2osz26ZiXRRV9aV+Yl1O8tw== + +"@babel/plugin-syntax-jsx@^7.12.13", "@babel/plugin-syntax-jsx@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.18.6.tgz#a8feef63b010150abd97f1649ec296e849943ca0" + integrity sha512-6mmljtAedFGTWu2p/8WIORGwy+61PLgOMPOdazc7YoJ9ZCWUyFy3A6CpPkRKLKD1ToAesxX8KGEViAiLo9N+7Q== + dependencies: + "@babel/helper-plugin-utils" "^7.18.6" + +"@babel/plugin-transform-react-jsx-development@^7.16.7": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.18.6.tgz#dbe5c972811e49c7405b630e4d0d2e1380c0ddc5" + integrity sha512-SA6HEjwYFKF7WDjWcMcMGUimmw/nhNRDWxr+KaLSCrkD/LMDBvWRmHAYgE1HDeF8KUuI8OAu+RT6EOtKxSW2qA== + dependencies: + "@babel/plugin-transform-react-jsx" "^7.18.6" + +"@babel/plugin-transform-react-jsx-self@^7.16.7": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.18.6.tgz#3849401bab7ae8ffa1e3e5687c94a753fc75bda7" + integrity sha512-A0LQGx4+4Jv7u/tWzoJF7alZwnBDQd6cGLh9P+Ttk4dpiL+J5p7NSNv/9tlEFFJDq3kjxOavWmbm6t0Gk+A3Ig== + dependencies: + "@babel/helper-plugin-utils" "^7.18.6" + +"@babel/plugin-transform-react-jsx-source@^7.16.7": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.18.6.tgz#06e9ae8a14d2bc19ce6e3c447d842032a50598fc" + integrity sha512-utZmlASneDfdaMh0m/WausbjUjEdGrQJz0vFK93d7wD3xf5wBtX219+q6IlCNZeguIcxS2f/CvLZrlLSvSHQXw== + dependencies: + "@babel/helper-plugin-utils" "^7.18.6" + +"@babel/plugin-transform-react-jsx@^7.17.3", "@babel/plugin-transform-react-jsx@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.18.6.tgz#2721e96d31df96e3b7ad48ff446995d26bc028ff" + integrity sha512-Mz7xMPxoy9kPS/JScj6fJs03TZ/fZ1dJPlMjRAgTaxaS0fUBk8FV/A2rRgfPsVCZqALNwMexD+0Uaf5zlcKPpw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.18.6" + "@babel/helper-module-imports" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" + "@babel/plugin-syntax-jsx" "^7.18.6" + "@babel/types" "^7.18.6" + +"@babel/runtime@^7.13.10", "@babel/runtime@^7.17.2", "@babel/runtime@^7.5.5", "@babel/runtime@^7.7.2", "@babel/runtime@^7.8.7": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.18.6.tgz#6a1ef59f838debd670421f8c7f2cbb8da9751580" + integrity sha512-t9wi7/AW6XtKahAe20Yw0/mMljKq0B1r2fPdvaAdV/KPDZewFXdaaa6K7lxmZBZ8FBNpCiAT6iHPmd6QO9bKfQ== + dependencies: + regenerator-runtime "^0.13.4" + +"@babel/template@^7.18.6": + version 
"7.18.6" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.18.6.tgz#1283f4993e00b929d6e2d3c72fdc9168a2977a31" + integrity sha512-JoDWzPe+wgBsTTgdnIma3iHNFC7YVJoPssVBDjiHfNlyt4YcunDtcDOUmfVDfCK5MfdsaIoX9PkijPhjH3nYUw== + dependencies: + "@babel/code-frame" "^7.18.6" + "@babel/parser" "^7.18.6" + "@babel/types" "^7.18.6" + +"@babel/traverse@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.18.6.tgz#a228562d2f46e89258efa4ddd0416942e2fd671d" + integrity sha512-zS/OKyqmD7lslOtFqbscH6gMLFYOfG1YPqCKfAW5KrTeolKqvB8UelR49Fpr6y93kYkW2Ik00mT1LOGiAGvizw== + dependencies: + "@babel/code-frame" "^7.18.6" + "@babel/generator" "^7.18.6" + "@babel/helper-environment-visitor" "^7.18.6" + "@babel/helper-function-name" "^7.18.6" + "@babel/helper-hoist-variables" "^7.18.6" + "@babel/helper-split-export-declaration" "^7.18.6" + "@babel/parser" "^7.18.6" + "@babel/types" "^7.18.6" + debug "^4.1.0" + globals "^11.1.0" + +"@babel/types@^7.18.6", "@babel/types@^7.18.7": + version "7.18.7" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.18.7.tgz#a4a2c910c15040ea52cdd1ddb1614a65c8041726" + integrity sha512-QG3yxTcTIBoAcQmkCs+wAPYZhu7Dk9rXKacINfNbdJDNERTbLQbHGyVG8q/YGMPeCJRIhSY0+fTc5+xuh6WPSQ== + dependencies: + "@babel/helper-validator-identifier" "^7.18.6" + to-fast-properties "^2.0.0" + +"@emotion/babel-plugin@^11.7.1": + version "11.9.2" + resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.9.2.tgz#723b6d394c89fb2ef782229d92ba95a740576e95" + integrity sha512-Pr/7HGH6H6yKgnVFNEj2MVlreu3ADqftqjqwUvDy/OJzKFgxKeTQ+eeUf20FOTuHVkDON2iNa25rAXVYtWJCjw== + dependencies: + "@babel/helper-module-imports" "^7.12.13" + "@babel/plugin-syntax-jsx" "^7.12.13" + "@babel/runtime" "^7.13.10" + "@emotion/hash" "^0.8.0" + "@emotion/memoize" "^0.7.5" + "@emotion/serialize" "^1.0.2" + babel-plugin-macros "^2.6.1" + convert-source-map "^1.5.0" + escape-string-regexp "^4.0.0" + find-root "^1.1.0" + source-map "^0.5.7" + stylis "4.0.13" + +"@emotion/cache@^11.7.1", "@emotion/cache@^11.9.3": + version "11.9.3" + resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.9.3.tgz#96638449f6929fd18062cfe04d79b29b44c0d6cb" + integrity sha512-0dgkI/JKlCXa+lEXviaMtGBL0ynpx4osh7rjOXE71q9bIF8G+XhJgvi+wDu0B0IdCVx37BffiwXlN9I3UuzFvg== + dependencies: + "@emotion/memoize" "^0.7.4" + "@emotion/sheet" "^1.1.1" + "@emotion/utils" "^1.0.0" + "@emotion/weak-memoize" "^0.2.5" + stylis "4.0.13" + +"@emotion/hash@^0.8.0": + version "0.8.0" + resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.8.0.tgz#bbbff68978fefdbe68ccb533bc8cbe1d1afb5413" + integrity sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow== + +"@emotion/is-prop-valid@^1.1.2", "@emotion/is-prop-valid@^1.1.3": + version "1.1.3" + resolved "https://registry.yarnpkg.com/@emotion/is-prop-valid/-/is-prop-valid-1.1.3.tgz#f0907a416368cf8df9e410117068e20fe87c0a3a" + integrity sha512-RFg04p6C+1uO19uG8N+vqanzKqiM9eeV1LDOG3bmkYmuOj7NbKNlFC/4EZq5gnwAIlcC/jOT24f8Td0iax2SXA== + dependencies: + "@emotion/memoize" "^0.7.4" + +"@emotion/memoize@^0.7.4", "@emotion/memoize@^0.7.5": + version "0.7.5" + resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.7.5.tgz#2c40f81449a4e554e9fc6396910ed4843ec2be50" + integrity sha512-igX9a37DR2ZPGYtV6suZ6whr8pTFtyHL3K/oLUotxpSVO2ASaprmAe2Dkq7tBo7CRY7MMDrAa9nuQP9/YG8FxQ== + +"@emotion/react@^11.8.2": + version "11.9.3" + resolved 
"https://registry.yarnpkg.com/@emotion/react/-/react-11.9.3.tgz#f4f4f34444f6654a2e550f5dab4f2d360c101df9" + integrity sha512-g9Q1GcTOlzOEjqwuLF/Zd9LC+4FljjPjDfxSM7KmEakm+hsHXk+bYZ2q+/hTJzr0OUNkujo72pXLQvXj6H+GJQ== + dependencies: + "@babel/runtime" "^7.13.10" + "@emotion/babel-plugin" "^11.7.1" + "@emotion/cache" "^11.9.3" + "@emotion/serialize" "^1.0.4" + "@emotion/utils" "^1.1.0" + "@emotion/weak-memoize" "^0.2.5" + hoist-non-react-statics "^3.3.1" + +"@emotion/serialize@^1.0.2", "@emotion/serialize@^1.0.4": + version "1.0.4" + resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.0.4.tgz#ff31fd11bb07999611199c2229e152faadc21a3c" + integrity sha512-1JHamSpH8PIfFwAMryO2bNka+y8+KA5yga5Ocf2d7ZEiJjb7xlLW7aknBGZqJLajuLOvJ+72vN+IBSwPlXD1Pg== + dependencies: + "@emotion/hash" "^0.8.0" + "@emotion/memoize" "^0.7.4" + "@emotion/unitless" "^0.7.5" + "@emotion/utils" "^1.0.0" + csstype "^3.0.2" + +"@emotion/sheet@^1.1.1": + version "1.1.1" + resolved "https://registry.yarnpkg.com/@emotion/sheet/-/sheet-1.1.1.tgz#015756e2a9a3c7c5f11d8ec22966a8dbfbfac787" + integrity sha512-J3YPccVRMiTZxYAY0IOq3kd+hUP8idY8Kz6B/Cyo+JuXq52Ek+zbPbSQUrVQp95aJ+lsAW7DPL1P2Z+U1jGkKA== + +"@emotion/styled@^11.8.1": + version "11.9.3" + resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-11.9.3.tgz#47f0c71137fec7c57035bf3659b52fb536792340" + integrity sha512-o3sBNwbtoVz9v7WB1/Y/AmXl69YHmei2mrVnK7JgyBJ//Rst5yqPZCecEJlMlJrFeWHp+ki/54uN265V2pEcXA== + dependencies: + "@babel/runtime" "^7.13.10" + "@emotion/babel-plugin" "^11.7.1" + "@emotion/is-prop-valid" "^1.1.3" + "@emotion/serialize" "^1.0.4" + "@emotion/utils" "^1.1.0" + +"@emotion/unitless@^0.7.5": + version "0.7.5" + resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.7.5.tgz#77211291c1900a700b8a78cfafda3160d76949ed" + integrity sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg== + +"@emotion/utils@^1.0.0", "@emotion/utils@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.1.0.tgz#86b0b297f3f1a0f2bdb08eeac9a2f49afd40d0cf" + integrity sha512-iRLa/Y4Rs5H/f2nimczYmS5kFJEbpiVvgN3XVfZ022IYhuNA1IRSHEizcof88LtCTXtl9S2Cxt32KgaXEu72JQ== + +"@emotion/weak-memoize@^0.2.5": + version "0.2.5" + resolved "https://registry.yarnpkg.com/@emotion/weak-memoize/-/weak-memoize-0.2.5.tgz#8eed982e2ee6f7f4e44c253e12962980791efd46" + integrity sha512-6U71C2Wp7r5XtFtQzYrW5iKFT67OixrSxjI4MptCHzdSVlgabczzqLe0ZSgnub/5Kp4hSbpDB1tMytZY9pwxxA== + +"@jridgewell/gen-mapping@^0.1.0": + version "0.1.1" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz#e5d2e450306a9491e3bd77e323e38d7aff315996" + integrity sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w== + dependencies: + "@jridgewell/set-array" "^1.0.0" + "@jridgewell/sourcemap-codec" "^1.4.10" + +"@jridgewell/gen-mapping@^0.3.2": + version "0.3.2" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9" + integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A== + dependencies: + "@jridgewell/set-array" "^1.0.1" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.9" + +"@jridgewell/resolve-uri@^3.0.3": + version "3.0.8" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.0.8.tgz#687cc2bbf243f4e9a868ecf2262318e2658873a1" + integrity 
sha512-YK5G9LaddzGbcucK4c8h5tWFmMPBvRZ/uyWmN1/SbBdIvqGUdWGkJ5BAaccgs6XbzVLsqbPJrBSFwKv3kT9i7w== + +"@jridgewell/set-array@^1.0.0", "@jridgewell/set-array@^1.0.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72" + integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw== + +"@jridgewell/sourcemap-codec@^1.4.10": + version "1.4.14" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24" + integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw== + +"@jridgewell/trace-mapping@^0.3.9": + version "0.3.14" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.14.tgz#b231a081d8f66796e475ad588a1ef473112701ed" + integrity sha512-bJWEfQ9lPTvm3SneWwRFVLzrh6nhjwqw7TUFFBEMzwvg7t7PCDenf2lDwqo4NQXzdpgBXyFgDWnQA+2vkruksQ== + dependencies: + "@jridgewell/resolve-uri" "^3.0.3" + "@jridgewell/sourcemap-codec" "^1.4.10" + +"@mui/base@5.0.0-alpha.86": + version "5.0.0-alpha.86" + resolved "https://registry.yarnpkg.com/@mui/base/-/base-5.0.0-alpha.86.tgz#7ac5af939cec7e763c1bf49bf5e30bb9464c4ebf" + integrity sha512-0vi/Nni1mizrgrzKeyksEjw5JVSrgT8Vr2NhxzFtYxqpMgtdSrBvcmcuzBf9kE/ECMPbgpSIcqv0nLbLZUYkOQ== + dependencies: + "@babel/runtime" "^7.17.2" + "@emotion/is-prop-valid" "^1.1.2" + "@mui/types" "^7.1.4" + "@mui/utils" "^5.8.4" + "@popperjs/core" "^2.11.5" + clsx "^1.1.1" + prop-types "^15.8.1" + react-is "^17.0.2" + +"@mui/material@5.8.5": + version "5.8.5" + resolved "https://registry.yarnpkg.com/@mui/material/-/material-5.8.5.tgz#a1a79fc57b212a9781eb4a53e9995c4a9df04753" + integrity sha512-wngPXlOI9BurLSGlObQM/2L0QFFaIcvJnDK5A+ALxuUyuQnPviVWfC1l/r8rPlxQ4PCbSYpq3gzLlgnLoWcO/g== + dependencies: + "@babel/runtime" "^7.17.2" + "@mui/base" "5.0.0-alpha.86" + "@mui/system" "^5.8.5" + "@mui/types" "^7.1.4" + "@mui/utils" "^5.8.4" + "@types/react-transition-group" "^4.4.4" + clsx "^1.1.1" + csstype "^3.1.0" + prop-types "^15.8.1" + react-is "^17.0.2" + react-transition-group "^4.4.2" + +"@mui/private-theming@^5.8.6": + version "5.8.6" + resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-5.8.6.tgz#db2bafeda1699e43e67b3ff4f770d6b7a234501f" + integrity sha512-yHsJk1qU9r/q0DlnxGRJPHyM0Y/nUv8FTNgDTiI9I58GWuVuZqeTUr7JRvPh6ybeP/FLtW5eXEavRK9wxVk4uQ== + dependencies: + "@babel/runtime" "^7.17.2" + "@mui/utils" "^5.8.6" + prop-types "^15.8.1" + +"@mui/styled-engine@^5.8.0": + version "5.8.0" + resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-5.8.0.tgz#89ed42efe7c8749e5a60af035bc5d3a6bea362bf" + integrity sha512-Q3spibB8/EgeMYHc+/o3RRTnAYkSl7ROCLhXJ830W8HZ2/iDiyYp16UcxKPurkXvLhUaILyofPVrP3Su2uKsAw== + dependencies: + "@babel/runtime" "^7.17.2" + "@emotion/cache" "^11.7.1" + prop-types "^15.8.1" + +"@mui/system@^5.8.5": + version "5.8.6" + resolved "https://registry.yarnpkg.com/@mui/system/-/system-5.8.6.tgz#aed7e501c513429dab9cfbbe86da5dcd056c2a0a" + integrity sha512-+a+rD58XltKQHDrrjcuCta2cUBqdnLDUDwnphSLCMFigRl8/uk+R+fdQRlMNRXAOgnMb8ioWIgfjxri5pmTH4A== + dependencies: + "@babel/runtime" "^7.17.2" + "@mui/private-theming" "^5.8.6" + "@mui/styled-engine" "^5.8.0" + "@mui/types" "^7.1.4" + "@mui/utils" "^5.8.6" + clsx "^1.1.1" + csstype "^3.1.0" + prop-types "^15.8.1" + +"@mui/types@^7.1.4": + version "7.1.4" + resolved 
"https://registry.yarnpkg.com/@mui/types/-/types-7.1.4.tgz#4185c05d6df63ec673cda15feab80440abadc764" + integrity sha512-uveM3byMbthO+6tXZ1n2zm0W3uJCQYtwt/v5zV5I77v2v18u0ITkb8xwhsDD2i3V2Kye7SaNR6FFJ6lMuY/WqQ== + +"@mui/utils@^5.8.4", "@mui/utils@^5.8.6": + version "5.8.6" + resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-5.8.6.tgz#543de64a64bb9135316ecfd91d75a8740544d79f" + integrity sha512-QM2Sd1xZo2jOt2Vz5Rmro+pi2FLJyiv4+OjxkUwXR3oUM65KSMAMLl/KNYU55s3W3DLRFP5MVwE4FhAbHseHAg== + dependencies: + "@babel/runtime" "^7.17.2" + "@types/prop-types" "^15.7.5" + "@types/react-is" "^16.7.1 || ^17.0.0" + prop-types "^15.8.1" + react-is "^17.0.2" + +"@popperjs/core@^2.11.5": + version "2.11.5" + resolved "https://registry.yarnpkg.com/@popperjs/core/-/core-2.11.5.tgz#db5a11bf66bdab39569719555b0f76e138d7bd64" + integrity sha512-9X2obfABZuDVLCgPK9aX0a/x4jaOEweTTWE2+9sr0Qqqevj2Uv5XorvusThmc9XGYpS9yI+fhh8RTafBtGposw== + +"@rollup/pluginutils@^4.2.1": + version "4.2.1" + resolved "https://registry.yarnpkg.com/@rollup/pluginutils/-/pluginutils-4.2.1.tgz#e6c6c3aba0744edce3fb2074922d3776c0af2a6d" + integrity sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ== + dependencies: + estree-walker "^2.0.1" + picomatch "^2.2.2" + +"@types/lodash@^4.14.179": + version "4.14.182" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.182.tgz#05301a4d5e62963227eaafe0ce04dd77c54ea5c2" + integrity sha512-/THyiqyQAP9AfARo4pF+aCGcyiQ94tX/Is2I7HofNRqoYLgN1PBoOWu2/zTA5zMxzP5EFutMtWtGAFRKUe961Q== + +"@types/parse-json@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" + integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA== + +"@types/prop-types@*", "@types/prop-types@^15.7.5": + version "15.7.5" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.5.tgz#5f19d2b85a98e9558036f6a3cacc8819420f05cf" + integrity sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w== + +"@types/react-dom@^18.0.0": + version "18.0.5" + resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-18.0.5.tgz#330b2d472c22f796e5531446939eacef8378444a" + integrity sha512-OWPWTUrY/NIrjsAPkAk1wW9LZeIjSvkXRhclsFO8CZcZGCOg2G0YZy4ft+rOyYxy8B7ui5iZzi9OkDebZ7/QSA== + dependencies: + "@types/react" "*" + +"@types/react-is@^16.7.1 || ^17.0.0": + version "17.0.3" + resolved "https://registry.yarnpkg.com/@types/react-is/-/react-is-17.0.3.tgz#2d855ba575f2fc8d17ef9861f084acc4b90a137a" + integrity sha512-aBTIWg1emtu95bLTLx0cpkxwGW3ueZv71nE2YFBpL8k/z5czEW8yYpOo8Dp+UUAFAtKwNaOsh/ioSeQnWlZcfw== + dependencies: + "@types/react" "*" + +"@types/react-transition-group@^4.4.4": + version "4.4.5" + resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.5.tgz#aae20dcf773c5aa275d5b9f7cdbca638abc5e416" + integrity sha512-juKD/eiSM3/xZYzjuzH6ZwpP+/lejltmiS3QEzV/vmb/Q8+HfDmxu+Baga8UEMGBqV88Nbg4l2hY/K2DkyaLLA== + dependencies: + "@types/react" "*" + +"@types/react@*", "@types/react@^18.0.1": + version "18.0.14" + resolved "https://registry.yarnpkg.com/@types/react/-/react-18.0.14.tgz#e016616ffff51dba01b04945610fe3671fdbe06d" + integrity sha512-x4gGuASSiWmo0xjDLpm5mPb52syZHJx02VKbqUKdLmKtAwIh63XClGsiTI1K6DO5q7ox4xAsQrU+Gl3+gGXF9Q== + dependencies: + "@types/prop-types" "*" + "@types/scheduler" "*" + csstype "^3.0.2" + +"@types/scheduler@*": + version 
"0.16.2" + resolved "https://registry.yarnpkg.com/@types/scheduler/-/scheduler-0.16.2.tgz#1a62f89525723dde24ba1b01b092bf5df8ad4d39" + integrity sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew== + +"@vitejs/plugin-react@^1.0.7": + version "1.3.2" + resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-1.3.2.tgz#2fcf0b6ce9bcdcd4cec5c760c199779d5657ece1" + integrity sha512-aurBNmMo0kz1O4qRoY+FM4epSA39y3ShWGuqfLRA/3z0oEJAdtoSfgA3aO98/PCCHAqMaduLxIxErWrVKIFzXA== + dependencies: + "@babel/core" "^7.17.10" + "@babel/plugin-transform-react-jsx" "^7.17.3" + "@babel/plugin-transform-react-jsx-development" "^7.16.7" + "@babel/plugin-transform-react-jsx-self" "^7.16.7" + "@babel/plugin-transform-react-jsx-source" "^7.16.7" + "@rollup/pluginutils" "^4.2.1" + react-refresh "^0.13.0" + resolve "^1.22.0" + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +axios@^0.26.1: + version "0.26.1" + resolved "https://registry.yarnpkg.com/axios/-/axios-0.26.1.tgz#1ede41c51fcf51bbbd6fd43669caaa4f0495aaa9" + integrity sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA== + dependencies: + follow-redirects "^1.14.8" + +babel-plugin-macros@^2.6.1: + version "2.8.0" + resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-2.8.0.tgz#0f958a7cc6556b1e65344465d99111a1e5e10138" + integrity sha512-SEP5kJpfGYqYKpBrj5XU3ahw5p5GOHJ0U5ssOSQ/WBVdwkD2Dzlce95exQTs3jOVWPPKLBN2rlEWkCK7dSmLvg== + dependencies: + "@babel/runtime" "^7.7.2" + cosmiconfig "^6.0.0" + resolve "^1.12.0" + +browserslist@^4.20.2: + version "4.21.1" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.1.tgz#c9b9b0a54c7607e8dc3e01a0d311727188011a00" + integrity sha512-Nq8MFCSrnJXSc88yliwlzQe3qNe3VntIjhsArW9IJOEPSHNx23FalwApUVbzAWABLhYJJ7y8AynWI/XM8OdfjQ== + dependencies: + caniuse-lite "^1.0.30001359" + electron-to-chromium "^1.4.172" + node-releases "^2.0.5" + update-browserslist-db "^1.0.4" + +callsites@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== + +caniuse-lite@^1.0.30001359: + version "1.0.30001361" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001361.tgz#ba2adb2527566fb96f3ac7c67698ae7fc495a28d" + integrity sha512-ybhCrjNtkFji1/Wto6SSJKkWk6kZgVQsDq5QI83SafsF6FXv2JB4df9eEdH6g8sdGgqTXrFLjAxqBGgYoU3azQ== + +chalk@^2.0.0: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +clsx@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.1.1.tgz#98b3134f9abbdf23b2663491ace13c5c03a73188" + integrity sha512-6/bPho624p3S2pMyvP5kKBPXnI3ufHLObBFCfgx+LkeR5lg2XYy2hqZqUf45ypD8COn2bhgGJSUE+l5dhNBieA== + +color-convert@^1.9.0: + version "1.9.3" + resolved 
"https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== + +convert-source-map@^1.5.0, convert-source-map@^1.7.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.8.0.tgz#f3373c32d21b4d780dd8004514684fb791ca4369" + integrity sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA== + dependencies: + safe-buffer "~5.1.1" + +cosmiconfig@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz#da4fee853c52f6b1e6935f41c1a2fc50bd4a9982" + integrity sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.1.0" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.7.2" + +csstype@^3.0.2, csstype@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.0.tgz#4ddcac3718d787cf9df0d1b7d15033925c8f29f2" + integrity sha512-uX1KG+x9h5hIJsaKR9xHUeUraxf8IODOwq9JLNPq6BwB04a/xgpq3rcx47l5BZu5zBPlgD342tdke3Hom/nJRA== + +debug@^4.1.0: + version "4.3.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" + integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== + dependencies: + ms "2.1.2" + +dom-helpers@^5.0.1: + version "5.2.1" + resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-5.2.1.tgz#d9400536b2bf8225ad98fe052e029451ac40e902" + integrity sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA== + dependencies: + "@babel/runtime" "^7.8.7" + csstype "^3.0.2" + +electron-to-chromium@^1.4.172: + version "1.4.174" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.174.tgz#ffdf57f26dd4558c5aabdb4b190c47af1c4e443b" + integrity sha512-JER+w+9MV2MBVFOXxP036bLlNOnzbYAWrWU8sNUwoOO69T3w4564WhM5H5atd8VVS8U4vpi0i0kdoYzm1NPQgQ== + +error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== + dependencies: + is-arrayish "^0.2.1" + +esbuild-android-64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-android-64/-/esbuild-android-64-0.14.48.tgz#7e6394a0e517f738641385aaf553c7e4fb6d1ae3" + integrity sha512-3aMjboap/kqwCUpGWIjsk20TtxVoKck8/4Tu19rubh7t5Ra0Yrpg30Mt1QXXlipOazrEceGeWurXKeFJgkPOUg== + +esbuild-android-arm64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-android-arm64/-/esbuild-android-arm64-0.14.48.tgz#6877566be0f82dd5a43030c0007d06ece7f7c02f" + integrity sha512-vptI3K0wGALiDq+EvRuZotZrJqkYkN5282iAfcffjI5lmGG9G1ta/CIVauhY42MBXwEgDJkweiDcDMRLzBZC4g== + +esbuild-darwin-64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-darwin-64/-/esbuild-darwin-64-0.14.48.tgz#ea3caddb707d88f844b1aa1dea5ff3b0a71ef1fd" + integrity 
sha512-gGQZa4+hab2Va/Zww94YbshLuWteyKGD3+EsVon8EWTWhnHFRm5N9NbALNbwi/7hQ/hM1Zm4FuHg+k6BLsl5UA== + +esbuild-darwin-arm64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-darwin-arm64/-/esbuild-darwin-arm64-0.14.48.tgz#4e5eaab54df66cc319b76a2ac0e8af4e6f0d9c2f" + integrity sha512-bFjnNEXjhZT+IZ8RvRGNJthLWNHV5JkCtuOFOnjvo5pC0sk2/QVk0Qc06g2PV3J0TcU6kaPC3RN9yy9w2PSLEA== + +esbuild-freebsd-64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-freebsd-64/-/esbuild-freebsd-64-0.14.48.tgz#47b5abc7426eae66861490ffbb380acc67af5b15" + integrity sha512-1NOlwRxmOsnPcWOGTB10JKAkYSb2nue0oM1AfHWunW/mv3wERfJmnYlGzL3UAOIUXZqW8GeA2mv+QGwq7DToqA== + +esbuild-freebsd-arm64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-freebsd-arm64/-/esbuild-freebsd-arm64-0.14.48.tgz#e8c54c8637cd44feed967ea12338b0a4da3a7b11" + integrity sha512-gXqKdO8wabVcYtluAbikDH2jhXp+Klq5oCD5qbVyUG6tFiGhrC9oczKq3vIrrtwcxDQqK6+HDYK8Zrd4bCA9Gw== + +esbuild-linux-32@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-linux-32/-/esbuild-linux-32-0.14.48.tgz#229cf3246de2b7937c3ac13fac622d4d7a1344c5" + integrity sha512-ghGyDfS289z/LReZQUuuKq9KlTiTspxL8SITBFQFAFRA/IkIvDpnZnCAKTCjGXAmUqroMQfKJXMxyjJA69c/nQ== + +esbuild-linux-64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-linux-64/-/esbuild-linux-64-0.14.48.tgz#7c0e7226c02c42aacc5656c36977493dc1e96c4f" + integrity sha512-vni3p/gppLMVZLghI7oMqbOZdGmLbbKR23XFARKnszCIBpEMEDxOMNIKPmMItQrmH/iJrL1z8Jt2nynY0bE1ug== + +esbuild-linux-arm64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-linux-arm64/-/esbuild-linux-arm64-0.14.48.tgz#0af1eda474b5c6cc0cace8235b74d0cb8fcf57a7" + integrity sha512-3CFsOlpoxlKPRevEHq8aAntgYGYkE1N9yRYAcPyng/p4Wyx0tPR5SBYsxLKcgPB9mR8chHEhtWYz6EZ+H199Zw== + +esbuild-linux-arm@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-linux-arm/-/esbuild-linux-arm-0.14.48.tgz#de4d1fa6b77cdcd00e2bb43dd0801e4680f0ab52" + integrity sha512-+VfSV7Akh1XUiDNXgqgY1cUP1i2vjI+BmlyXRfVz5AfV3jbpde8JTs5Q9sYgaoq5cWfuKfoZB/QkGOI+QcL1Tw== + +esbuild-linux-mips64le@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-linux-mips64le/-/esbuild-linux-mips64le-0.14.48.tgz#822c1778495f7868e990d4da47ad7281df28fd15" + integrity sha512-cs0uOiRlPp6ymknDnjajCgvDMSsLw5mST2UXh+ZIrXTj2Ifyf2aAP3Iw4DiqgnyYLV2O/v/yWBJx+WfmKEpNLA== + +esbuild-linux-ppc64le@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-linux-ppc64le/-/esbuild-linux-ppc64le-0.14.48.tgz#55de0a9ec4a48fedfe82a63e083164d001709447" + integrity sha512-+2F0vJMkuI0Wie/wcSPDCqXvSFEELH7Jubxb7mpWrA/4NpT+/byjxDz0gG6R1WJoeDefcrMfpBx4GFNN1JQorQ== + +esbuild-linux-riscv64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-linux-riscv64/-/esbuild-linux-riscv64-0.14.48.tgz#cd2b7381880b2f4b21a5a598fb673492120f18a5" + integrity sha512-BmaK/GfEE+5F2/QDrIXteFGKnVHGxlnK9MjdVKMTfvtmudjY3k2t8NtlY4qemKSizc+QwyombGWTBDc76rxePA== + +esbuild-linux-s390x@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-linux-s390x/-/esbuild-linux-s390x-0.14.48.tgz#4b319eca2a5c64637fc7397ffbd9671719cdb6bf" + integrity sha512-tndw/0B9jiCL+KWKo0TSMaUm5UWBLsfCKVdbfMlb3d5LeV9WbijZ8Ordia8SAYv38VSJWOEt6eDCdOx8LqkC4g== + +esbuild-netbsd-64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-netbsd-64/-/esbuild-netbsd-64-0.14.48.tgz#c27cde8b5cb55dcc227943a18ab078fb98d0adbf" + 
integrity sha512-V9hgXfwf/T901Lr1wkOfoevtyNkrxmMcRHyticybBUHookznipMOHoF41Al68QBsqBxnITCEpjjd4yAos7z9Tw== + +esbuild-openbsd-64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-openbsd-64/-/esbuild-openbsd-64-0.14.48.tgz#af5ab2d1cb41f09064bba9465fc8bf1309150df1" + integrity sha512-+IHf4JcbnnBl4T52egorXMatil/za0awqzg2Vy6FBgPcBpisDWT2sVz/tNdrK9kAqj+GZG/jZdrOkj7wsrNTKA== + +esbuild-sunos-64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-sunos-64/-/esbuild-sunos-64-0.14.48.tgz#db3ae20526055cf6fd5c4582676233814603ac54" + integrity sha512-77m8bsr5wOpOWbGi9KSqDphcq6dFeJyun8TA+12JW/GAjyfTwVtOnN8DOt6DSPUfEV+ltVMNqtXUeTeMAxl5KA== + +esbuild-windows-32@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-windows-32/-/esbuild-windows-32-0.14.48.tgz#021ffceb0a3f83078262870da88a912293c57475" + integrity sha512-EPgRuTPP8vK9maxpTGDe5lSoIBHGKO/AuxDncg5O3NkrPeLNdvvK8oywB0zGaAZXxYWfNNSHskvvDgmfVTguhg== + +esbuild-windows-64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-windows-64/-/esbuild-windows-64-0.14.48.tgz#a4d3407b580f9faac51f61eec095fa985fb3fee4" + integrity sha512-YmpXjdT1q0b8ictSdGwH3M8VCoqPpK1/UArze3X199w6u8hUx3V8BhAi1WjbsfDYRBanVVtduAhh2sirImtAvA== + +esbuild-windows-arm64@0.14.48: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild-windows-arm64/-/esbuild-windows-arm64-0.14.48.tgz#762c0562127d8b09bfb70a3c816460742dd82880" + integrity sha512-HHaOMCsCXp0rz5BT2crTka6MPWVno121NKApsGs/OIW5QC0ggC69YMGs1aJct9/9FSUF4A1xNE/cLvgB5svR4g== + +esbuild@^0.14.27: + version "0.14.48" + resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.14.48.tgz#da5d8d25cd2d940c45ea0cfecdca727f7aee2b85" + integrity sha512-w6N1Yn5MtqK2U1/WZTX9ZqUVb8IOLZkZ5AdHkT6x3cHDMVsYWC7WPdiLmx19w3i4Rwzy5LqsEMtVihG3e4rFzA== + optionalDependencies: + esbuild-android-64 "0.14.48" + esbuild-android-arm64 "0.14.48" + esbuild-darwin-64 "0.14.48" + esbuild-darwin-arm64 "0.14.48" + esbuild-freebsd-64 "0.14.48" + esbuild-freebsd-arm64 "0.14.48" + esbuild-linux-32 "0.14.48" + esbuild-linux-64 "0.14.48" + esbuild-linux-arm "0.14.48" + esbuild-linux-arm64 "0.14.48" + esbuild-linux-mips64le "0.14.48" + esbuild-linux-ppc64le "0.14.48" + esbuild-linux-riscv64 "0.14.48" + esbuild-linux-s390x "0.14.48" + esbuild-netbsd-64 "0.14.48" + esbuild-openbsd-64 "0.14.48" + esbuild-sunos-64 "0.14.48" + esbuild-windows-32 "0.14.48" + esbuild-windows-64 "0.14.48" + esbuild-windows-arm64 "0.14.48" + +escalade@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" + integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw== + +escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== + +escape-string-regexp@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== + +estree-walker@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/estree-walker/-/estree-walker-2.0.2.tgz#52f010178c2a4c117a7757cfe942adb7d2da4cac" + integrity 
sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w== + +find-root@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/find-root/-/find-root-1.1.0.tgz#abcfc8ba76f708c42a97b3d685b7e9450bfb9ce4" + integrity sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng== + +follow-redirects@^1.14.8: + version "1.15.1" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.1.tgz#0ca6a452306c9b276e4d3127483e29575e207ad5" + integrity sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA== + +fsevents@~2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" + integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== + +gensync@^1.0.0-beta.2: + version "1.0.0-beta.2" + resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" + integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== + +globals@^11.1.0: + version "11.12.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" + integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw== + +has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + +hoist-non-react-statics@^3.3.1: + version "3.3.2" + resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz#ece0acaf71d62c2969c2ec59feff42a4b1a85b45" + integrity sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw== + dependencies: + react-is "^16.7.0" + +import-fresh@^3.1.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" + integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== + dependencies: + parent-module "^1.0.0" + resolve-from "^4.0.0" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== + +is-core-module@^2.9.0: + version "2.9.0" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.9.0.tgz#e1c34429cd51c6dd9e09e0799e396e27b19a9c69" + integrity sha512-+5FPy5PnwmO3lvfMb0AsoPaBG+5KHUI0wYFXOtYPnVVVspTFUuMZNfNaNVRt3FZadstu2c8x23vykRW/NBoU6A== + dependencies: + has "^1.0.3" + +"js-tokens@^3.0.0 || ^4.0.0", 
js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== + +json-parse-even-better-errors@^2.3.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" + integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== + +json5@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.1.tgz#655d50ed1e6f95ad1a3caababd2b0efda10b395c" + integrity sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA== + +lines-and-columns@^1.1.6: + version "1.2.4" + resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" + integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== + +lodash@^4.17.21: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +loose-envify@^1.1.0, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +ms@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + +nanoid@^3.3.1, nanoid@^3.3.4: + version "3.3.4" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.4.tgz#730b67e3cd09e2deacf03c027c81c9d9dbc5e8ab" + integrity sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw== + +node-releases@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.5.tgz#280ed5bc3eba0d96ce44897d8aee478bfb3d9666" + integrity sha512-U9h1NLROZTq9uE1SNffn6WuPDg8icmi3ns4rEl/oTfIle4iLjTliCzgTsbaIFMq/Xn078/lfY/BL0GWZ+psK4Q== + +object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== + +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== + dependencies: + callsites "^3.0.0" + +parse-json@^5.0.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" + integrity 
sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== + dependencies: + "@babel/code-frame" "^7.0.0" + error-ex "^1.3.1" + json-parse-even-better-errors "^2.3.0" + lines-and-columns "^1.1.6" + +path-parse@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + +path-type@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +picocolors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" + integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== + +picomatch@^2.2.2: + version "2.3.1" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== + +postcss@^8.4.13: + version "8.4.14" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.14.tgz#ee9274d5622b4858c1007a74d76e42e56fd21caf" + integrity sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig== + dependencies: + nanoid "^3.3.4" + picocolors "^1.0.0" + source-map-js "^1.0.2" + +prettier@^2.5.1: + version "2.7.1" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.7.1.tgz#e235806850d057f97bb08368a4f7d899f7760c64" + integrity sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g== + +prop-types@^15.6.2, prop-types@^15.8.1: + version "15.8.1" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" + integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.13.1" + +react-dom@^17.0.2: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-17.0.2.tgz#ecffb6845e3ad8dbfcdc498f0d0a939736502c23" + integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + scheduler "^0.20.2" + +react-is@^16.13.1, react-is@^16.7.0: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + +react-is@^17.0.2: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-17.0.2.tgz#e691d4a8e9c789365655539ab372762b0efb54f0" + integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== + +react-refresh@^0.13.0: + version "0.13.0" + resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.13.0.tgz#cbd01a4482a177a5da8d44c9755ebb1f26d5a1c1" + integrity sha512-XP8A9BT0CpRBD+NYLLeIhld/RqG9+gktUjW1FkE+Vm7OCinbG1SshcK5tb9ls4kzvjZr9mOQc7HYgBngEyPAXg== + +react-transition-group@^4.4.2: + version "4.4.2" + resolved 
"https://registry.yarnpkg.com/react-transition-group/-/react-transition-group-4.4.2.tgz#8b59a56f09ced7b55cbd53c36768b922890d5470" + integrity sha512-/RNYfRAMlZwDSr6z4zNKV6xu53/e2BuaBbGhbyYIXTrmgu/bGHzmqOs7mJSJBHy9Ud+ApHx3QjrkKSp1pxvlFg== + dependencies: + "@babel/runtime" "^7.5.5" + dom-helpers "^5.0.1" + loose-envify "^1.4.0" + prop-types "^15.6.2" + +react@^17.0.2: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react/-/react-17.0.2.tgz#d0b5cc516d29eb3eee383f75b62864cfb6800037" + integrity sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + +regenerator-runtime@^0.13.4: + version "0.13.9" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz#8925742a98ffd90814988d7566ad30ca3b263b52" + integrity sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA== + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== + +resolve@^1.12.0, resolve@^1.22.0: + version "1.22.1" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.1.tgz#27cb2ebb53f91abb49470a928bba7558066ac177" + integrity sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw== + dependencies: + is-core-module "^2.9.0" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + +rollup@^2.59.0: + version "2.75.7" + resolved "https://registry.yarnpkg.com/rollup/-/rollup-2.75.7.tgz#221ff11887ae271e37dcc649ba32ce1590aaa0b9" + integrity sha512-VSE1iy0eaAYNCxEXaleThdFXqZJ42qDBatAwrfnPlENEZ8erQ+0LYX4JXOLPceWfZpV1VtZwZ3dFCuOZiSyFtQ== + optionalDependencies: + fsevents "~2.3.2" + +safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +scheduler@^0.20.2: + version "0.20.2" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.20.2.tgz#4baee39436e34aa93b4874bddcbf0fe8b8b50e91" + integrity sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + +semver@^6.3.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" + integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== + +source-map-js@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" + integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== + +source-map@^0.5.7: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== + +stylis@4.0.13: + version "4.0.13" + resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.0.13.tgz#f5db332e376d13cc84ecfe5dace9a2a51d954c91" + integrity 
sha512-xGPXiFVl4YED9Jh7Euv2V220mriG9u4B2TA6Ybjc1catrstKD2PpIdU3U0RKpkVBC2EhmL/F0sPCr9vrFTNRag== + +supports-color@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + +supports-preserve-symlinks-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== + +to-fast-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" + integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog== + +typescript@^4.5.4: + version "4.7.4" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.7.4.tgz#1a88596d1cf47d59507a1bcdfb5b9dfe4d488235" + integrity sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ== + +update-browserslist-db@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.4.tgz#dbfc5a789caa26b1db8990796c2c8ebbce304824" + integrity sha512-jnmO2BEGUjsMOe/Fg9u0oczOe/ppIDZPebzccl1yDWGLFP16Pa1/RM5wEoKYPG2zstNcDuAStejyxsOuKINdGA== + dependencies: + escalade "^3.1.1" + picocolors "^1.0.0" + +vite@^2.8.6: + version "2.9.13" + resolved "https://registry.yarnpkg.com/vite/-/vite-2.9.13.tgz#859cb5d4c316c0d8c6ec9866045c0f7858ca6abc" + integrity sha512-AsOBAaT0AD7Mhe8DuK+/kE4aWYFMx/i0ZNi98hJclxb4e0OhQcZYUrvLjIaQ8e59Ui7txcvKMiJC1yftqpQoDw== + dependencies: + esbuild "^0.14.27" + postcss "^8.4.13" + resolve "^1.22.0" + rollup "^2.59.0" + optionalDependencies: + fsevents "~2.3.2" + +yaml@^1.7.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" + integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== diff --git a/src/lightning_app/components/python/popen.py b/src/lightning_app/components/python/popen.py index eb2ad978e3472..553795d9617de 100644 --- a/src/lightning_app/components/python/popen.py +++ b/src/lightning_app/components/python/popen.py @@ -31,7 +31,7 @@ def __init__( env: Optional[Dict] = None, **kwargs, ): - """The PopenPythonScript Class enables to easily run a Python Script. + """The PopenPythonScript component enables to easily run a python script within a subprocess. Arguments: script_path: Path of the python script to run. @@ -56,7 +56,7 @@ def __init__( In this example, the script will be launch with the :class:`~subprocess.Popen`. - .. literalinclude:: ../../../../examples/components/python/component_popen.py + .. 
literalinclude:: ../../../../examples/app_components/python/component_popen.py :language: python """ super().__init__(**kwargs) diff --git a/src/lightning_app/components/python/tracer.py b/src/lightning_app/components/python/tracer.py index 5789b40dcc1d4..5605eee6b6d47 100644 --- a/src/lightning_app/components/python/tracer.py +++ b/src/lightning_app/components/python/tracer.py @@ -33,9 +33,12 @@ def __init__( env: Optional[Dict] = None, **kwargs, ): - """The TracerPythonScript Class enables to easily run a Python Script with Lightning - :class:`~lightning_app.utilities.tracer.Tracer`. Simply overrides the - :meth:`~lightning_app.components.python.tracer.TracerPythonScript.configure_tracer` method. + """The TracerPythonScript class enables to easily run a python script. + + When subclassing this class, you can configure your own :class:`~lightning_app.utilities.tracer.Tracer` + by :meth:`~lightning_app.components.python.tracer.TracerPythonScript.configure_tracer` method + + The tracer is quite a magical class. It enables you to inject core into a script execution without changing it. Arguments: script_path: Path of the python script to run. @@ -47,6 +50,13 @@ def __init__( Raises: FileNotFoundError: If the provided `script_path` doesn't exists. + **How does it works ?** + + It works by executing the python script with python built-in `runpy + `_ run_path method. + This method takes any python globals before executing the script, + e.g you can modify classes or function from the script. + .. doctest:: >>> from lightning_app.components.python import TracerPythonScript @@ -59,17 +69,27 @@ def __init__( Hello World ! >>> os.remove("a.py") - In this example, you would be able to implement your own :class:`~lightning_app.utilities.tracer.Tracer` - and intercept / modify elements while the script is being executed. + In the example below, we subclass the :class:`~lightning_app.components.python.TracerPythonScript` + component and override its configure_tracer method. + + Using the Tracer, we are patching the ``__init__`` method of the PyTorch Lightning Trainer. + Once the script starts running and if a Trainer is instantiated, the provided ``pre_fn`` is + called and we inject a Lightning callback. + + This callback has a reference to the work and on every batch end, we are capturing the + trainer ``global_step`` and ``best_model_path``. + + Even more interesting, this component works for ANY Pytorch Lightning script and + its state can be used in real time in a UI. - .. literalinclude:: ../../../../examples/components/python/component_tracer.py + .. literalinclude:: ../../../../examples/app_components/python/component_tracer.py :language: python Once implemented, this component can easily be integrated within a larger app to execute a specific python script. - .. literalinclude:: ../../../../examples/components/python/app.py + .. literalinclude:: ../../../../examples/app_components/python/app.py :language: python """ super().__init__(**kwargs) diff --git a/src/lightning_app/components/serve/gradio.py b/src/lightning_app/components/serve/gradio.py index 2376104894957..ecbd583020354 100644 --- a/src/lightning_app/components/serve/gradio.py +++ b/src/lightning_app/components/serve/gradio.py @@ -18,7 +18,7 @@ class ServeGradio(LightningWork, abc.ABC): In the example below, the ``ServeGradio`` is subclassed to deploy ``AnimeGANv2``. - .. literalinclude:: ../../../../examples/components/serve/gradio/app.py + .. 
+ .. literalinclude:: ../../../../examples/app_components/serve/gradio/app.py
 :language: python

 The result would be the following:
diff --git a/src/lightning_app/core/app.py b/src/lightning_app/core/app.py
index d9ecda0cfebc0..81a1a2115e523 100644
--- a/src/lightning_app/core/app.py
+++ b/src/lightning_app/core/app.py
@@ -37,26 +37,33 @@ def __init__(
 root: "lightning_app.LightningFlow",
 debug: bool = False,
 ):
- """LightningApp, or App in short, alternatively run its root
- :class:`~lightning_app.core.flow.LightningFlow` component and collects state changes from external
- sources to maintain the application state up-to-date or performs checkpointing. All those operations
- are executed within an infinite loop.
+ """The Lightning App, or App in short, runs a tree of one or more components that interact to create end-to-end
+ applications. There are two kinds of components: :class:`~lightning_app.core.flow.LightningFlow` and
+ :class:`~lightning_app.core.work.LightningWork`. This modular design enables you to reuse components
+ created by other users.
+
+ The Lightning App runs an event loop triggered by delta changes sent from
+ either :class:`~lightning.app.core.work.LightningWork` or from the Lightning UI.
+ Once deltas are received, the Lightning App runs
+ the :class:`~lightning.app.core.flow.LightningFlow` provided.

 Arguments:
- root: The root LightningFlow component, that defined all the app's nested components, running infinitely.
- debug: Whether to run the application in debug model.
+ root: The root LightningFlow component, which defines all
+ the app's nested components, running infinitely.
+ debug: Whether to activate the Lightning Logger debug mode.
+ This can be helpful when reporting bugs on the Lightning repo.

 .. doctest::

- >>> from lightning_app import LightningFlow, LightningApp
- >>> from lightning_app.runners import SingleProcessRuntime
+ >>> from lightning import LightningFlow, LightningApp
+ >>> from lightning.app.runners import MultiProcessRuntime
 >>> class RootFlow(LightningFlow):
 ... def run(self):
 ... print("Hello World!")
 ... self._exit()
 ...
 >>> app = LightningApp(RootFlow()) # application can be dispatched using the `runners`.
- >>> SingleProcessRuntime(app).dispatch()
+ >>> MultiProcessRuntime(app).dispatch()
 Hello World!
 """
diff --git a/src/lightning_app/core/flow.py b/src/lightning_app/core/flow.py
index 12056d0ce37da..a5dcfd0a77e2e 100644
--- a/src/lightning_app/core/flow.py
+++ b/src/lightning_app/core/flow.py
@@ -25,10 +25,10 @@ class LightningFlow:
 }

 def __init__(self):
- """The LightningFlow is a building block to coordinate and manage long running-tasks contained within
- :class:`~lightning_app.core.work.LightningWork` or nested LightningFlow.
+ """The LightningFlow is used by the :class:`~lightning_app.core.app.LightningApp` to coordinate and manage
+ long-running jobs contained in the :class:`~lightning_app.core.work.LightningWork`.

- At a minimum, a LightningFlow is characterized by:
+ A LightningFlow is characterized by:

 * A set of state variables.
 * Long-running jobs (:class:`~lightning_app.core.work.LightningWork`).
@@ -41,11 +41,6 @@ def __init__(self):

 They also may not reach into global variables unless they are constant.

- .. note ::
- The limitation to primitive types will be lifted in time for
- certain aggregate types, and will be made extensible so that component
- developers will be able to add custom state-compatible types.
- The attributes need to be all defined in `__init__` method, and eventually assigned to different values throughout the lifetime of the object. However, defining new attributes outside of `__init__` is not allowed. @@ -83,7 +78,7 @@ def __init__(self): .. doctest:: - >>> from lightning_app import LightningFlow + >>> from lightning import LightningFlow >>> class RootFlow(LightningFlow): ... def __init__(self): ... super().__init__() @@ -345,6 +340,7 @@ def _is_state_attribute(name: str) -> bool: return name in LightningFlow._INTERNAL_STATE_VARS or not name.startswith("_") def run(self, *args, **kwargs) -> None: + """Override with your own logic.""" pass def schedule( @@ -352,15 +348,16 @@ def schedule( ) -> bool: """The schedule method is used to run a part of the flow logic on timely manner. - .. code-block:: + .. code-block:: python from lightning_app import LightningFlow - class Flow(LightningFlow): + class Flow(LightningFlow): def run(self): if self.schedule("hourly"): # run some code once every hour. + print("run this every hour") Arguments: cron_pattern: The cron pattern to provide. Learn more at https://crontab.guru/. @@ -370,7 +367,7 @@ def run(self): A best practice is to avoid running a dynamic flow or work under the self.schedule method. Instead, instantiate them within the condition, but run them outside. - .. code-block:: python + .. code-block:: python from lightning_app import LightningFlow from lightning_app.structures import List @@ -382,11 +379,40 @@ def __init__(self): self.dags = List() def run(self): - if self.schedule("@hourly"): + if self.schedule("hourly"): self.dags.append(DAG(...)) for dag in self.dags: payload = dag.run() + + **Learn more about Scheduling** + + .. raw:: html + +
+

+
+ .. displayitem::
+ :header: Schedule your components
+ :description: Learn more about scheduling.
+ :col_css: col-md-4
+ :button_link: ../../../glossary/scheduling.html
+ :height: 180
+ :tag: Basic
+
+ .. displayitem::
+ :header: Build your own DAG
+ :description: Learn more about DAG scheduling with examples.
+ :col_css: col-md-4
+ :button_link: ../../../examples/app_dag/dag.html
+ :height: 180
+ :tag: Basic
+
+ .. raw:: html
+
+
+
+
""" if not user_key: frame = cast(FrameType, inspect.currentframe()).f_back @@ -454,40 +480,48 @@ def configure_layout(self) -> Union[Dict[str, Any], List[Dict[str, Any]], Fronte **Example:** Serve a static directory (with at least a file index.html inside). - .. code-block:: + .. code-block:: python from lightning_app.frontend import StaticWebFrontend + class Flow(LightningFlow): ... + def configure_layout(self): return StaticWebFrontend("path/to/folder/to/serve") **Example:** Serve a streamlit UI (needs the streamlit package to be installed). - .. code-block:: + .. code-block:: python from lightning_app.frontend import StaticWebFrontend + class Flow(LightningFlow): ... + def configure_layout(self): return StreamlitFrontend(render_fn=my_streamlit_ui) + def my_streamlit_ui(state): # add your streamlit code here! + import streamlit as st + + st.button("Hello!") **Example:** Arrange the UI of my children in tabs (default UI by Lightning). - .. code-block:: + .. code-block:: python class Flow(LightningFlow): ... + def configure_layout(self): return [ dict(name="First Tab", content=self.child0), dict(name="Second Tab", content=self.child1), - ... # You can include direct URLs too dict(name="Lightning", content="https://lightning.ai"), ] @@ -500,6 +534,27 @@ def configure_layout(self): returned layout configuration can depend on the state. The only exception are the flows that return a :class:`~lightning_app.frontend.frontend.Frontend`. These need to be provided at the time of app creation in order for the runtime to start the server. + + **Learn more about adding UI** + + .. raw:: html + +
+

+
+ .. displayitem::
+ :header: Add a web user interface (UI)
+ :description: Learn how to integrate several UIs.
+ :col_css: col-md-4
+ :button_link: ../../../workflows/add_web_ui/index.html
+ :height: 180
+ :tag: Basic
+
+ .. raw:: html
+
+
+
+
 """
 return [dict(name=name, content=component) for (name, component) in self.flows.items()]

diff --git a/src/lightning_app/core/work.py b/src/lightning_app/core/work.py
index 48a59ab447581..0c12242e26d6a 100644
--- a/src/lightning_app/core/work.py
+++ b/src/lightning_app/core/work.py
@@ -63,7 +63,31 @@ def __init__(
 with the same arguments in subsequent calls.
 raise_exception: Whether to re-raise an exception in the flow when raised from within the work run method.
 host: Bind socket to this host
- port: Bind socket to this port
+ port: Bind socket to this port. By default, this is None and should be set within your run method.
+ local_build_config: The local BuildConfig isn't used until Lightning supports DockerRuntime.
+ cloud_build_config: The cloud BuildConfig enables the user to easily configure the machine before running this work.
+ run_once: Deprecated in favor of cache_calls. This will be removed soon.
+
+ **Learn More About Lightning Work Inner Workings**
+
+ .. raw:: html
+
+
+

+
+ .. displayitem::
+ :header: The Lightning Work inner workings
+ :description: Learn more about Lightning Work.
+ :col_css: col-md-4
+ :button_link: ../../../core_api/lightning_work/index.html
+ :height: 180
+ :tag: Basic
+
+ .. raw:: html
+
+
+
+
""" from lightning_app.runners.backends.backend import Backend @@ -98,6 +122,7 @@ def __init__( @property def url(self) -> str: + """Returns the current url of the work.""" return self._url @url.setter @@ -106,6 +131,7 @@ def url(self, url: str) -> None: @property def host(self) -> str: + """Returns the current host of the work.""" return self._host @property @@ -166,6 +192,7 @@ def local_build_config(self, build_config: BuildConfig) -> None: @property def cloud_build_config(self) -> BuildConfig: + """Returns the cloud build config used to prepare the selected cloud hardware.""" return self._cloud_build_config @cloud_build_config.setter @@ -179,6 +206,7 @@ def cloud_compute(self) -> CloudCompute: @cloud_compute.setter def cloud_compute(self, cloud_compute) -> None: + """Returns the cloud compute used to select the cloud hardware.""" self._cloud_compute = cloud_compute @property diff --git a/src/lightning_app/testing/helpers.py b/src/lightning_app/testing/helpers.py index 85bcbf33fae63..f2e29b8213d02 100644 --- a/src/lightning_app/testing/helpers.py +++ b/src/lightning_app/testing/helpers.py @@ -23,7 +23,7 @@ def call_script( if args is None: args = [] args = [str(a) for a in args] - command = [sys.executable, "-m", "coverage", "run", filepath] + args + command = [sys.executable, filepath] + args # todo: add back coverage p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) try: stdout, stderr = p.communicate(timeout=timeout) diff --git a/src/lightning_app/testing/testing.py b/src/lightning_app/testing/testing.py index 72e7283a2a969..7ae9bf6274e6c 100644 --- a/src/lightning_app/testing/testing.py +++ b/src/lightning_app/testing/testing.py @@ -81,7 +81,7 @@ def application_testing(lightning_app_cls: Type[LightningTestApp], command_line: from click.testing import CliRunner - with mock.patch("lightning_app.LightningApp", lightning_app_cls): + with mock.patch("lightning.LightningApp", lightning_app_cls): runner = CliRunner() return runner.invoke(run_app, command_line, catch_exceptions=False) @@ -208,22 +208,14 @@ def run_app_in_cloud(app_folder: str, app_name: str = "app.py") -> Generator: """, [LIGHTNING_CLOUD_PROJECT_ID], ) - admin_page.reload() + admin_page.goto(f"{Config.url}/{Config.username}/apps") try: # Closing the Create Project modal button = admin_page.locator('button:has-text("Cancel")') - button.wait_for(timeout=1 * 1000) - button.click() - except (playwright._impl._api_types.Error, playwright._impl._api_types.TimeoutError): - pass - try: - # Skipping the Hubspot form - button = admin_page.locator('button:has-text("Skip for now")') - button.wait_for(timeout=1 * 1000) + button.wait_for(timeout=3 * 1000) button.click() except (playwright._impl._api_types.Error, playwright._impl._api_types.TimeoutError): pass - admin_page.goto(f"{Config.url}/{Config.username}/apps") admin_page.locator(f"text={name}").click() admin_page.evaluate( """data => { diff --git a/src/lightning_app/utilities/app_helpers.py b/src/lightning_app/utilities/app_helpers.py index b1834daa55c92..4144c6de3ba12 100644 --- a/src/lightning_app/utilities/app_helpers.py +++ b/src/lightning_app/utilities/app_helpers.py @@ -232,7 +232,7 @@ def render_non_authorized(self): # Adapted from -# https://github.com/Lightning-AI/lightning/blob/master/pytorch_lightning/utilities/model_helpers.py#L21 +# https://github.com/Lightning-AI/pytorch-lightning/blob/master/pytorch_lightning/utilities/model_helpers.py#L21 def is_overridden(method_name: str, instance: Optional[object] = None, parent: Optional[Type[object]] 
= None) -> bool: if instance is None: return False diff --git a/src/lightning_app/utilities/network.py b/src/lightning_app/utilities/network.py index fdd2a6dbfcdf1..98c7db3d46ff8 100644 --- a/src/lightning_app/utilities/network.py +++ b/src/lightning_app/utilities/network.py @@ -48,7 +48,7 @@ def _configure_session() -> Session: return http -def _check_service_url_is_ready(url: str, timeout: float = 0.1) -> bool: +def _check_service_url_is_ready(url: str, timeout: float = 0.5) -> bool: try: response = requests.get(url, timeout=timeout) return response.status_code in (200, 404) diff --git a/src/lightning_app/utilities/packaging/build_config.py b/src/lightning_app/utilities/packaging/build_config.py index 29d5ec230591b..9231875d5d7fd 100644 --- a/src/lightning_app/utilities/packaging/build_config.py +++ b/src/lightning_app/utilities/packaging/build_config.py @@ -1,12 +1,11 @@ import inspect import logging import os +import re from dataclasses import asdict, dataclass from types import FrameType from typing import cast, List, Optional, TYPE_CHECKING, Union -from pkg_resources import parse_requirements - if TYPE_CHECKING: from lightning_app import LightningWork from lightning_app.utilities.packaging.cloud_compute import CloudCompute @@ -15,6 +14,37 @@ logger = logging.getLogger(__name__) +def load_requirements( + path_dir: str, file_name: str = "base.txt", comment_char: str = "#", unfreeze: bool = True +) -> List[str]: + """Load requirements from a file. + + .. code-block:: python + + path_req = os.path.join(_PROJECT_ROOT, "requirements") + requirements = load_requirements(path_req) + print(requirements) # ['numpy...', 'torch...', ...] + """ + with open(os.path.join(path_dir, file_name)) as file: + lines = [ln.strip() for ln in file.readlines()] + reqs = [] + for ln in lines: + # filer all comments + comment = "" + if comment_char in ln: + comment = ln[ln.index(comment_char) :] + ln = ln[: ln.index(comment_char)] + req = ln.strip() + # skip directly installed dependencies + if not req or req.startswith("http") or "@http" in req: + continue + # remove version restrictions unless they are strict + if unfreeze and "<" in req and "strict" not in comment: + req = re.sub(r",? *<=? *[\d\.\*]+", "", req).strip() + reqs.append(req) + return reqs + + @dataclass class BuildConfig: """The Build Configuration describes how the environment a LightningWork runs in should be set up. 
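
For reference, the filtering rules implemented by the ``load_requirements()`` helper added above can be exercised in isolation. The snippet below is only an illustrative sketch — the ``_parse_requirements`` name and the sample requirement strings are made up, not part of this patch:

.. code-block:: python

    import re


    def _parse_requirements(lines, comment_char="#", unfreeze=True):
        # Mirrors the helper above: drop comments, skip URL-based installs,
        # and relax version pins unless the comment contains "strict".
        reqs = []
        for ln in (raw.strip() for raw in lines):
            comment = ""
            if comment_char in ln:
                comment = ln[ln.index(comment_char):]
                ln = ln[: ln.index(comment_char)]
            req = ln.strip()
            if not req or req.startswith("http") or "@http" in req:
                continue
            if unfreeze and "<" in req and "strict" not in comment:
                req = re.sub(r",? *<=? *[\d\.\*]+", "", req).strip()
            reqs.append(req)
        return reqs


    sample = ["torch>=1.9, <=1.12  # strict", "numpy>=1.20, <1.23", "http://example.com/pkg.tar.gz"]
    print(_parse_requirements(sample))  # -> ['torch>=1.9, <=1.12', 'numpy>=1.20']
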
@@ -61,7 +91,7 @@ def build_commands(self) -> List[str]: class MyOwnBuildConfig(BuildConfig): def build_commands(self): - return ["sudo apt-get install libsparsehash-dev"] + return ["apt-get install libsparsehash-dev"] BuildConfig(requirements=["git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0"]) """ @@ -84,8 +114,9 @@ def _find_requirements(self, work: "LightningWork") -> List[str]: requirement_files = [os.path.join(dirname, f) for f in os.listdir(dirname) if f == "requirements.txt"] if not requirement_files: return [] + dirname, basename = os.path.dirname(requirement_files[0]), os.path.basename(requirement_files[0]) try: - requirements = list(map(str, parse_requirements(open(requirement_files[0]).readlines()))) + requirements = load_requirements(dirname, basename) except NotADirectoryError: requirements = [] return [r for r in requirements if r != "lightning"] @@ -116,7 +147,9 @@ def _prepare_requirements(self) -> Optional[Union[str, List[str]]]: path = os.path.join(self._call_dir, req) if os.path.exists(path): try: - requirements.extend(list(map(str, parse_requirements(open(path).readlines())))) + requirements.extend( + load_requirements(os.path.dirname(path), os.path.basename(path)), + ) except NotADirectoryError: pass else: diff --git a/src/lightning_app/utilities/packaging/cloud_compute.py b/src/lightning_app/utilities/packaging/cloud_compute.py index 311e9acf75b73..6527911855bae 100644 --- a/src/lightning_app/utilities/packaging/cloud_compute.py +++ b/src/lightning_app/utilities/packaging/cloud_compute.py @@ -32,8 +32,8 @@ class CloudCompute: This timeout starts whenever your run() method succeeds (or fails). If the timeout is reached, the instance pauses until the next run() call happens. - shm_size: Shared memory size in MiB, backed by RAM. min 512, max 4096, it will auto update in steps of 512. - For example 1100 will become 1024. If set to zero (the default) will get the default 65MB inside docker. + shm_size: Shared memory size in MiB, backed by RAM. min 512, max 8192, it will auto update in steps of 512. + For example 1100 will become 1024. If set to zero (the default) will get the default 64MiB inside docker. 
""" name: str = "default" diff --git a/src/lightning_app/utilities/packaging/lightning_utils.py b/src/lightning_app/utilities/packaging/lightning_utils.py index 0eb1f6ba79e68..c6bcea035797f 100644 --- a/src/lightning_app/utilities/packaging/lightning_utils.py +++ b/src/lightning_app/utilities/packaging/lightning_utils.py @@ -14,7 +14,7 @@ from packaging.version import Version from lightning_app import _logger, _PROJECT_ROOT, _root_logger -from lightning_app.__about__ import __version__ +from lightning_app.__version__ import version from lightning_app.core.constants import PREPARE_LIGHTING from lightning_app.utilities.git import check_github_repository, get_dir_name @@ -29,7 +29,7 @@ def download_frontend(root): """Downloads an archive file for a specific release of the Lightning frontend and extracts it to the correct directory.""" build_dir = "build" - frontend_dir = pathlib.Path(root, "lightning_app", "ui") + frontend_dir = pathlib.Path(root, "src", "lightning_app", "ui") download_dir = tempfile.mkdtemp() shutil.rmtree(frontend_dir, ignore_errors=True) @@ -43,41 +43,51 @@ def download_frontend(root): print("The Lightning UI has successfully been downloaded!") -def _cleanup(tar_file: str): - shutil.rmtree(os.path.join(_PROJECT_ROOT, "dist"), ignore_errors=True) - os.remove(tar_file) +def _cleanup(*tar_files: str): + for tar_file in tar_files: + shutil.rmtree(os.path.join(_PROJECT_ROOT, "dist"), ignore_errors=True) + os.remove(tar_file) -def _prepare_lightning_wheels(): +def _prepare_wheel(path): with open("log.txt", "w") as logfile: with subprocess.Popen( - ["rm", "-r", "dist"], stdout=logfile, stderr=logfile, bufsize=0, close_fds=True, cwd=_PROJECT_ROOT + ["rm", "-r", "dist"], stdout=logfile, stderr=logfile, bufsize=0, close_fds=True, cwd=path ) as proc: proc.wait() - with subprocess.Popen( ["python", "setup.py", "sdist"], stdout=logfile, stderr=logfile, bufsize=0, close_fds=True, - cwd=_PROJECT_ROOT, + cwd=path, ) as proc: proc.wait() os.remove("log.txt") -def _copy_lightning_tar(root: Path) -> str: - dist_dir = os.path.join(_PROJECT_ROOT, "dist") +def _copy_tar(project_root, dest: Path) -> str: + dist_dir = os.path.join(project_root, "dist") tar_files = os.listdir(dist_dir) assert len(tar_files) == 1 tar_name = tar_files[0] tar_path = os.path.join(dist_dir, tar_name) - shutil.copy(tar_path, root) + shutil.copy(tar_path, dest) return tar_name +def get_dist_path_if_editable_install(project_name) -> str: + """Is distribution an editable install - modified version from pip that + fetches egg-info instead of egg-link""" + for path_item in sys.path: + egg_info = os.path.join(path_item, project_name + ".egg-info") + if os.path.isdir(egg_info): + return path_item + return "" + + def _prepare_lightning_wheels_and_requirements(root: Path) -> Optional[Callable]: if "site-packages" in _PROJECT_ROOT: @@ -88,20 +98,37 @@ def _prepare_lightning_wheels_and_requirements(root: Path) -> Optional[Callable] if not PREPARE_LIGHTING and (not git_dir_name or (git_dir_name and not git_dir_name.startswith("lightning"))): return - if not bool(int(os.getenv("SKIP_LIGHTING_WHEELS_BUILD", "0"))): download_frontend(_PROJECT_ROOT) - _prepare_lightning_wheels() + _prepare_wheel(_PROJECT_ROOT) logger.info("Packaged Lightning with your application.") - tar_name = _copy_lightning_tar(root) + tar_name = _copy_tar(_PROJECT_ROOT, root) + + tar_files = [os.path.join(root, tar_name)] + + # skipping this by default + if not bool(int(os.getenv("SKIP_LIGHTING_UTILITY_WHEELS_BUILD", "1"))): + # building and copying launcher wheel 
if installed in editable mode + launcher_project_path = get_dist_path_if_editable_install("lightning_launcher") + if launcher_project_path: + _prepare_wheel(launcher_project_path) + tar_name = _copy_tar(launcher_project_path, root) + tar_files.append(os.path.join(root, tar_name)) + + # building and copying lightning-cloud wheel if installed in editable mode + lightning_cloud_project_path = get_dist_path_if_editable_install("lightning_cloud") + if lightning_cloud_project_path: + _prepare_wheel(lightning_cloud_project_path) + tar_name = _copy_tar(lightning_cloud_project_path, root) + tar_files.append(os.path.join(root, tar_name)) - return functools.partial(_cleanup, tar_file=os.path.join(root, tar_name)) + return functools.partial(_cleanup, *tar_files) def _enable_debugging(): - tar_file = os.path.join(os.getcwd(), f"lightning-{__version__}.tar.gz") + tar_file = os.path.join(os.getcwd(), f"lightning-{version}.tar.gz") if not os.path.exists(tar_file): return @@ -138,7 +165,7 @@ def _fetch_latest_version(package_name: str) -> str: if proc.stdout: logs = " ".join([line.decode("utf-8") for line in iter(proc.stdout.readline, b"")]) return logs.split(")\n")[0].split(",")[-1].replace(" ", "") - return __version__ + return version def _verify_lightning_version(): @@ -149,7 +176,7 @@ def _verify_lightning_version(): lightning_latest_version = _fetch_latest_version("lightning") - if Version(lightning_latest_version) > Version(__version__): + if Version(lightning_latest_version) > Version(version): raise Exception( f"You need to use the latest version of Lightning ({lightning_latest_version}) to run in the cloud. " "Please, run `pip install -U lightning`" diff --git a/tests/tests_app/cli/test_cmd_react_ui_init.py b/tests/tests_app/cli/test_cmd_react_ui_init.py index 7ad248084bc0e..0a54c478c4f55 100644 --- a/tests/tests_app/cli/test_cmd_react_ui_init.py +++ b/tests/tests_app/cli/test_cmd_react_ui_init.py @@ -56,4 +56,4 @@ def test_copy_and_setup_react_ui(tmpdir): def test_correct_num_react_template_files(): template_dir = os.path.join(la.__path__[0], "cli/react-ui-template") files = cmd_init._ls_recursively(template_dir) - assert len(files) == 15, "react-ui template files must be minimal... do not add nice to haves" + assert len(files) == 16, "react-ui template files must be minimal... 
do not add nice to haves" diff --git a/tests/tests_app/utilities/packaging/test_lightning_utils.py b/tests/tests_app/utilities/packaging/test_lightning_utils.py index 0e4a370b401fc..b34e3162d5a0c 100644 --- a/tests/tests_app/utilities/packaging/test_lightning_utils.py +++ b/tests/tests_app/utilities/packaging/test_lightning_utils.py @@ -2,7 +2,7 @@ import pytest -from lightning import __about__ +from lightning.__version__ import version from lightning_app.testing.helpers import RunIf from lightning_app.utilities.packaging import lightning_utils from lightning_app.utilities.packaging.lightning_utils import ( @@ -15,7 +15,7 @@ def test_prepare_lightning_wheels_and_requirement(tmpdir): """This test ensures the lightning source gets packaged inside the lightning repo.""" cleanup_handle = _prepare_lightning_wheels_and_requirements(tmpdir) - tar_name = f"lightning-{__about__.__version__}.tar.gz" + tar_name = f"lightning-{version}.tar.gz" assert sorted(os.listdir(tmpdir)) == [tar_name] cleanup_handle() assert os.listdir(tmpdir) == [] diff --git a/tests/tests_app/utilities/test_git.py b/tests/tests_app/utilities/test_git.py index efb59d05adc1c..cb2db0a2bfe33 100644 --- a/tests/tests_app/utilities/test_git.py +++ b/tests/tests_app/utilities/test_git.py @@ -15,7 +15,7 @@ def test_execute_git_command(): res = execute_git_command(["pull"]) assert res - assert get_dir_name() == "lightning-app" + assert get_dir_name() == "lightning" assert check_github_repository() From 5e78491870d591e48d990b0b0619af877b7120f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Sat, 2 Jul 2022 02:24:59 +0200 Subject: [PATCH 50/89] Remove redundant progress bar refresh (#13462) --- src/pytorch_lightning/callbacks/progress/tqdm_progress.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/pytorch_lightning/callbacks/progress/tqdm_progress.py b/src/pytorch_lightning/callbacks/progress/tqdm_progress.py index c7d86a09a5b5d..204ee574e11a4 100644 --- a/src/pytorch_lightning/callbacks/progress/tqdm_progress.py +++ b/src/pytorch_lightning/callbacks/progress/tqdm_progress.py @@ -387,4 +387,3 @@ def _update_n(bar: _tqdm, current: int, refresh_rate: int) -> None: leftover = current % refresh_rate advance = leftover if (current == total and leftover != 0) else refresh_rate bar.update(advance) - bar.refresh() From 81b787410be8f7bfb36546a9a40b0d1f5d08116b Mon Sep 17 00:00:00 2001 From: Mansy Date: Sat, 2 Jul 2022 07:05:16 +0200 Subject: [PATCH 51/89] Add CI for app examples (#13495) * Add CI for app examples Co-authored-by: manskx Co-authored-by: Jirka Borovec Co-authored-by: Jirka --- .github/workflows/ci-app_block.yml | 8 -- .github/workflows/ci-app_examples.yml | 117 +++++++++++++++++++ .github/workflows/ci-app_tests.yml | 2 +- .github/workflows/ci-pytorch_dockers.yml | 2 +- .github/workflows/ci-pytorch_test-conda.yml | 2 +- .github/workflows/ci-pytorch_test-full.yml | 2 +- .github/workflows/ci-pytorch_test-slow.yml | 2 +- setup.cfg | 2 + tests/tests_app_examples/test_quick_start.py | 3 +- 9 files changed, 126 insertions(+), 14 deletions(-) create mode 100644 .github/workflows/ci-app_examples.yml diff --git a/.github/workflows/ci-app_block.yml b/.github/workflows/ci-app_block.yml index 92b928a68d83b..8c2dd772aa1ad 100644 --- a/.github/workflows/ci-app_block.yml +++ b/.github/workflows/ci-app_block.yml @@ -22,11 +22,3 @@ jobs: - name: Block edits in docs/source-app if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'docs/source-app') run: exit 1 - - - name: Block edits in examples/app - 
if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'examples/app_') - run: exit 1 - - - name: Block edits in tests/tests_app_examples - if: contains(steps.changed-files.outputs.all_changed_and_modified_files, 'tests/tests_app_examples') - run: exit 1 diff --git a/.github/workflows/ci-app_examples.yml b/.github/workflows/ci-app_examples.yml new file mode 100644 index 0000000000000..30d29a853597e --- /dev/null +++ b/.github/workflows/ci-app_examples.yml @@ -0,0 +1,117 @@ +name: Test App - examples + +# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows +on: # Trigger the workflow on push or pull request, but only for the master branch + push: + branches: + - "master" + pull_request: + paths: + - "src/lightning_app/**" + - "tests/tests_app_examples/**" + - "requirements/app/**" + - "setup.py" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} + +jobs: + pytest: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-20.04, macOS-10.15, windows-2019] + python-version: [3.8] + requires: ["oldest", "latest"] + + # Timeout: https://stackoverflow.com/a/59076067/4521646 + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + # TODO: use replace_oldest_ver() instead + - name: Set min. dependencies + if: matrix.requires == 'oldest' + run: | + for fpath in ('requirements/app/base.txt', 'requirements/app/test.txt'): + req = open(fpath).read().replace('>=', '==') + open(fpath, 'w').write(req) + shell: python + + - run: echo "::set-output name=period::$(python -c 'import time ; days = time.time() / 60 / 60 / 24 ; print(int(days / 7))' 2>&1)" + if: matrix.requires == 'latest' + id: times + + # Note: This uses an internal pip API and may not always work + # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow + - name: Get pip cache + id: pip-cache + run: | + python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" + + - name: Cache pip + uses: actions/cache@v2 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ matrix.requires }}-td${{ steps.times.outputs.period }}-${{ hashFiles('requirements/app/base.txt') }} + restore-keys: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ matrix.requires }}-td${{ steps.times.outputs.period }}- + + - name: Install dependencies + run: | + pip --version + pip install -r requirements/app/devel.txt --quiet --find-links https://download.pytorch.org/whl/cpu/torch_stable.html + pip list + shell: bash + + - name: Setup Node.js + uses: actions/setup-node@v2 + with: + node-version: '16' + + - name: Install Yarn + run: npm install -g yarn + + - name: Install Lightning as top-level + run: pip install -e . 
-r requirements/app/base.txt + shell: bash + + - name: Tests + working-directory: ./tests + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: us-east-1 + PYTEST_ARTIFACT: results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }}.xml + run: | + coverage run --source lightning_app -m pytest -m "not cloud" tests_app_examples --timeout=300 -vvvv --junitxml=$PYTEST_ARTIFACT --durations=0 + + - name: Upload pytest test results + uses: actions/upload-artifact@v2 + with: + name: unittest-results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }} + path: tests/results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }}.xml + if: failure() + + - name: Statistics + if: success() + working-directory: ./tests + run: | + coverage xml -i + coverage report -i + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + file: tests/coverage.xml + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: false diff --git a/.github/workflows/ci-app_tests.yml b/.github/workflows/ci-app_tests.yml index 0c87c3f4c2802..3993c31afab0c 100644 --- a/.github/workflows/ci-app_tests.yml +++ b/.github/workflows/ci-app_tests.yml @@ -1,4 +1,4 @@ -name: CI App Tests +name: Test App # see: https://help.github.com/en/actions/reference/events-that-trigger-workflows on: # Trigger the workflow on push or pull request, but only for the master branch diff --git a/.github/workflows/ci-pytorch_dockers.yml b/.github/workflows/ci-pytorch_dockers.yml index f8dec54280271..69d5955c5db33 100644 --- a/.github/workflows/ci-pytorch_dockers.yml +++ b/.github/workflows/ci-pytorch_dockers.yml @@ -1,4 +1,4 @@ -name: Docker +name: Docker - PyTorch # https://www.docker.com/blog/first-docker-github-action-is-here # https://github.com/docker/build-push-action # see: https://help.github.com/en/actions/reference/events-that-trigger-workflows diff --git a/.github/workflows/ci-pytorch_test-conda.yml b/.github/workflows/ci-pytorch_test-conda.yml index 21ab32ae303f5..c062e6e02acb1 100644 --- a/.github/workflows/ci-pytorch_test-conda.yml +++ b/.github/workflows/ci-pytorch_test-conda.yml @@ -1,4 +1,4 @@ -name: Test with Conda +name: Test PyTorch with Conda # see: https://help.github.com/en/actions/reference/events-that-trigger-workflows on: # Trigger the workflow on push or pull request, but only for the master branch diff --git a/.github/workflows/ci-pytorch_test-full.yml b/.github/workflows/ci-pytorch_test-full.yml index 66173e9c14327..42ec2b71fd0b6 100644 --- a/.github/workflows/ci-pytorch_test-full.yml +++ b/.github/workflows/ci-pytorch_test-full.yml @@ -1,4 +1,4 @@ -name: Test full +name: Test PyTorch full # see: https://help.github.com/en/actions/reference/events-that-trigger-workflows on: # Trigger the workflow on push or pull request, but only for the master branch diff --git a/.github/workflows/ci-pytorch_test-slow.yml b/.github/workflows/ci-pytorch_test-slow.yml index 36251c202c49d..279c4ffe772a8 100644 --- a/.github/workflows/ci-pytorch_test-slow.yml +++ b/.github/workflows/ci-pytorch_test-slow.yml @@ -1,4 +1,4 @@ -name: Test slow +name: Test PyTorch slow # see: https://help.github.com/en/actions/reference/events-that-trigger-workflows on: # Trigger the workflow on push or pull request, but only for the master branch diff --git a/setup.cfg b/setup.cfg index 1bb124259a49f..f59a6c1cf436a 100644 --- a/setup.cfg +++ 
b/setup.cfg @@ -25,6 +25,8 @@ addopts = --color=yes --disable-pytest-warnings --ignore=legacy/checkpoints +markers = + cloud:Run the cloud tests for example filterwarnings = # error out on our deprecation warnings - ensures the code and tests are kept up-to-date error::pytorch_lightning.utilities.rank_zero.LightningDeprecationWarning diff --git a/tests/tests_app_examples/test_quick_start.py b/tests/tests_app_examples/test_quick_start.py index ef29a24e572bc..272fdbb7f5b63 100644 --- a/tests/tests_app_examples/test_quick_start.py +++ b/tests/tests_app_examples/test_quick_start.py @@ -24,7 +24,8 @@ def run_once(self): return done -@pytest.mark.skipif(True, reason="test is skipped because CI was blocking all the PRs") +# TODO +@pytest.mark.skipif(True, reason="test is skipped because CI was blocking all the PRs.") @RunIf(pytorch_lightning=True, skip_windows=True, skip_linux=True) def test_quick_start_example(caplog, monkeypatch): """This test ensures the Quick Start example properly train and serve PyTorch Lightning.""" From 3e1725f31bc809be8a7e0259d083de61ef279906 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Sat, 2 Jul 2022 21:08:29 +0200 Subject: [PATCH 52/89] code-owners for App (#13497) * condensers for App * pkg * TestsUpdate .github/CODEOWNERS Co-authored-by: Mansy Co-authored-by: Mansy --- .github/CODEOWNERS | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7a113e69dc119..efd3bbc9bb96f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -13,13 +13,17 @@ *.yml @borda @carmocca @akihironitta @tchaton # Docs -/docs/ @edenlightning @tchaton @borda @awaelchli @RobertLaurella -/.github/*.md @edenlightning @williamfalcon @borda -/.github/ISSUE_TEMPLATE/ @edenlightning @borda @tchaton -/docs/source-pytorch/conf.py @borda @awaelchli @carmocca -/docs/source-pytorch/index.rst @williamfalcon -/docs/source-pytorch/levels @williamfalcon @RobertLaurella -/docs/source-pytorch/expertise_levels @williamfalcon @RobertLaurella +/docs/ @edenlightning @tchaton @borda @awaelchli @RobertLaurella +/.github/*.md @edenlightning @williamfalcon @borda +/.github/ISSUE_TEMPLATE/ @edenlightning @borda @tchaton +/docs/source-pytorch/conf.py @borda @awaelchli @carmocca +/docs/source-pytorch/index.rst @williamfalcon +/docs/source-pytorch/levels @williamfalcon @RobertLaurella +/docs/source-pytorch/expertise_levels @williamfalcon @RobertLaurella +/docs/source-app/ @williamfalcon @RobertLaurella @tchaton @awaelchli +/docs/source-app/conf.py @borda @awaelchli @carmocca +/docs/source-app/index.rst @williamfalcon +/docs/source-app/expertise_levels @williamfalcon @RobertLaurella # Packages /src/pytorch_lightning/accelerators @williamfalcon @tchaton @SeanNaren @awaelchli @justusschock @kaushikb11 @@ -41,6 +45,14 @@ /src/pytorch_lightning/tuner @SkafteNicki @borda @awaelchli /src/pytorch_lightning/utilities @borda @tchaton @SeanNaren @carmocca +/src/lightning_app @tchaton @awaelchli @manskx @hhsecond + +# Examples +/examples/app_* @tchaton @awaelchli @manskx @hhsecond +# App tests +/tests/tests_app @tchaton @awaelchli @manskx @hhsecond +/tests/tests_app_examples @tchaton @awaelchli @manskx @hhsecond + # Specifics /src/pytorch_lightning/trainer/connectors/logger_connector @tchaton @carmocca /src/pytorch_lightning/trainer/progress.py @tchaton @awaelchli @carmocca From 8a634db10cd759f24bdaeaa99c4ad2a6a80b9536 Mon Sep 17 00:00:00 2001 From: Cyprien Ricque <48893621+Cyprien-Ricque@users.noreply.github.com> Date: Mon, 4 Jul 2022 
18:28:41 +0200 Subject: [PATCH 53/89] fix mypy typing errors in pytorch_lightning/strategies/single_device.py (#13532) * fix typing in strategies/single_device.py * Make assert statement more explicit Co-authored-by: Justus Schock <12886177+justusschock@users.noreply.github.com> --- pyproject.toml | 1 - src/pytorch_lightning/strategies/single_device.py | 7 ++++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 51781d4953935..55543c9142276 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,7 +74,6 @@ module = [ "pytorch_lightning.strategies.parallel", "pytorch_lightning.strategies.sharded", "pytorch_lightning.strategies.sharded_spawn", - "pytorch_lightning.strategies.single_device", "pytorch_lightning.strategies.single_tpu", "pytorch_lightning.strategies.tpu_spawn", "pytorch_lightning.strategies.strategy", diff --git a/src/pytorch_lightning/strategies/single_device.py b/src/pytorch_lightning/strategies/single_device.py index 82681ad423199..cb436fded86d0 100644 --- a/src/pytorch_lightning/strategies/single_device.py +++ b/src/pytorch_lightning/strategies/single_device.py @@ -21,7 +21,7 @@ import pytorch_lightning as pl from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO from pytorch_lightning.plugins.precision import PrecisionPlugin -from pytorch_lightning.strategies.strategy import Strategy +from pytorch_lightning.strategies.strategy import Strategy, TBroadcast from pytorch_lightning.utilities.types import _DEVICE @@ -66,6 +66,7 @@ def root_device(self) -> torch.device: return self._root_device def model_to_device(self) -> None: + assert self.model is not None, "self.model must be set before self.model.to()" self.model.to(self.root_device) def setup(self, trainer: pl.Trainer) -> None: @@ -76,10 +77,10 @@ def setup(self, trainer: pl.Trainer) -> None: def is_global_zero(self) -> bool: return True - def barrier(self, *args, **kwargs) -> None: + def barrier(self, *args: Any, **kwargs: Any) -> None: pass - def broadcast(self, obj: object, src: int = 0) -> object: + def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast: return obj @classmethod From 89766abf58d92b31b704fa2b48c2ed4e528445a2 Mon Sep 17 00:00:00 2001 From: Cyprien Ricque <48893621+Cyprien-Ricque@users.noreply.github.com> Date: Mon, 4 Jul 2022 18:30:44 +0200 Subject: [PATCH 54/89] fix mypy typing errors in pytorch_lightning/strategies/ddp2.py (#13535) * fix typing in strategies/ddp2.py * Use quotes instead of __future__.annotations for forward references Co-authored-by: Justus Schock <12886177+justusschock@users.noreply.github.com> --- pyproject.toml | 1 - src/pytorch_lightning/strategies/ddp2.py | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 55543c9142276..99e7e934b2efb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,7 +64,6 @@ module = [ "pytorch_lightning.loggers.wandb", "pytorch_lightning.loops.epoch.training_epoch_loop", "pytorch_lightning.strategies.ddp", - "pytorch_lightning.strategies.ddp2", "pytorch_lightning.strategies.ddp_spawn", "pytorch_lightning.strategies.deepspeed", "pytorch_lightning.strategies.dp", diff --git a/src/pytorch_lightning/strategies/ddp2.py b/src/pytorch_lightning/strategies/ddp2.py index 81ee737e5286c..e13c750e0a976 100644 --- a/src/pytorch_lightning/strategies/ddp2.py +++ b/src/pytorch_lightning/strategies/ddp2.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Any + class DDP2Strategy: """DDP2 behaves like DP in one node, but synchronization across nodes behaves like in DDP. @@ -25,7 +27,7 @@ class DDP2Strategy: strategy_name = "ddp2" - def __new__(cls, *args, **kwargs) -> None: + def __new__(cls, *args: Any, **kwargs: Any) -> "DDP2Strategy": raise TypeError( "The `DDP2Strategy`/`DDP2Plugin` is no longer supported in v1.7 and will be removed completely in v1.8." " For single-node execution, we recommend the `DDPStrategy` or the `DPStrategy`. If you rely on DDP2, you" From c0874f352b6a9fe3084b9e7090c3e21bdd50e324 Mon Sep 17 00:00:00 2001 From: Masahiro Wada Date: Tue, 5 Jul 2022 02:04:35 +0900 Subject: [PATCH 55/89] Fix type hints of callbacks/finetuning.py (#13516) --- pyproject.toml | 1 - src/pytorch_lightning/callbacks/finetuning.py | 13 +++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 99e7e934b2efb..5667f0824cce8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,7 +41,6 @@ warn_no_return = "False" # the list can be generated with: # mypy | tr ':' ' ' | awk '{print $1}' | sort | uniq | sed 's/\.py//g' | sed 's|\/|\.|g' | xargs -I {} echo '"{}",' module = [ - "pytorch_lightning.callbacks.finetuning", "pytorch_lightning.callbacks.model_checkpoint", "pytorch_lightning.callbacks.progress.rich_progress", "pytorch_lightning.callbacks.quantization", diff --git a/src/pytorch_lightning/callbacks/finetuning.py b/src/pytorch_lightning/callbacks/finetuning.py index 4a7067f56c697..ad45ff8b0591b 100644 --- a/src/pytorch_lightning/callbacks/finetuning.py +++ b/src/pytorch_lightning/callbacks/finetuning.py @@ -32,8 +32,8 @@ log = logging.getLogger(__name__) -def multiplicative(epoch): - return 2 +def multiplicative(epoch: int) -> float: + return 2.0 class BaseFinetuning(Callback): @@ -79,7 +79,7 @@ class BaseFinetuning(Callback): ... ) """ - def __init__(self): + def __init__(self) -> None: self._internal_optimizer_metadata: Dict[int, List[Dict[str, Any]]] = {} self._restarting = False @@ -94,7 +94,7 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self._internal_optimizer_metadata = state_dict["internal_optimizer_metadata"] else: # compatibility to load from old checkpoints before PR #11887 - self._internal_optimizer_metadata = state_dict + self._internal_optimizer_metadata = state_dict # type: ignore[assignment] def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: # restore the param_groups created during the previous training. 
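
The ``load_state_dict`` change in the hunk above keeps older checkpoints loadable by accepting both the new wrapped payload and the legacy raw dictionary. Below is a minimal sketch of that compatibility pattern — the class and attribute names are illustrative only, not the actual Lightning API:

.. code-block:: python

    from typing import Any, Dict


    class FinetuningState:
        def __init__(self) -> None:
            self.metadata: Dict[int, Any] = {}

        def state_dict(self) -> Dict[str, Any]:
            # New format: metadata is nested under an explicit key.
            return {"internal_optimizer_metadata": self.metadata}

        def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
            if "internal_optimizer_metadata" in state_dict:
                self.metadata = state_dict["internal_optimizer_metadata"]
            else:
                # Legacy checkpoints stored the metadata dict directly.
                self.metadata = state_dict


    new_style, old_style = FinetuningState(), FinetuningState()
    new_style.load_state_dict({"internal_optimizer_metadata": {0: ["frozen_layer"]}})
    old_style.load_state_dict({0: ["frozen_layer"]})
    assert new_style.metadata == old_style.metadata
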
@@ -122,10 +122,11 @@ def flatten_modules(modules: Union[Module, Iterable[Union[Module, Iterable]]]) - modules = modules.values() if isinstance(modules, Iterable): - _modules = [] + _flatten_modules = [] for m in modules: - _modules.extend(BaseFinetuning.flatten_modules(m)) + _flatten_modules.extend(BaseFinetuning.flatten_modules(m)) + _modules = iter(_flatten_modules) else: _modules = modules.modules() From eb059b4a0e4b74a7bc8bfa03df3eea2529c6c591 Mon Sep 17 00:00:00 2001 From: Cyprien Ricque <48893621+Cyprien-Ricque@users.noreply.github.com> Date: Tue, 5 Jul 2022 09:27:27 +0200 Subject: [PATCH 56/89] fix mypy typing errors in pytorch_lightning/strategies/single_tpu.py (#13534) Co-authored-by: Jirka Borovec Co-authored-by: awaelchli --- pyproject.toml | 1 - src/pytorch_lightning/strategies/single_tpu.py | 11 ++--------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5667f0824cce8..c08d2c99bf3f5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,7 +72,6 @@ module = [ "pytorch_lightning.strategies.parallel", "pytorch_lightning.strategies.sharded", "pytorch_lightning.strategies.sharded_spawn", - "pytorch_lightning.strategies.single_tpu", "pytorch_lightning.strategies.tpu_spawn", "pytorch_lightning.strategies.strategy", "pytorch_lightning.profilers.advanced", diff --git a/src/pytorch_lightning/strategies/single_tpu.py b/src/pytorch_lightning/strategies/single_tpu.py index f4d3234e9e695..e65078efc67ee 100644 --- a/src/pytorch_lightning/strategies/single_tpu.py +++ b/src/pytorch_lightning/strategies/single_tpu.py @@ -19,7 +19,6 @@ from pytorch_lightning.plugins.precision import PrecisionPlugin from pytorch_lightning.strategies.single_device import SingleDeviceStrategy from pytorch_lightning.utilities import _TPU_AVAILABLE, find_shared_parameters, set_shared_parameters -from pytorch_lightning.utilities.model_helpers import is_overridden if _TPU_AVAILABLE: import torch_xla.core.xla_model as xm @@ -55,13 +54,10 @@ def is_distributed(self) -> bool: return False def setup(self, trainer: "pl.Trainer") -> None: + assert self.model, "self.model must be set before find_shared_parameters(self.model)" shared_params = find_shared_parameters(self.model) self.model_to_device() - if is_overridden("on_post_move_to_device", self.lightning_module): - self.model.on_post_move_to_device() - else: - set_shared_parameters(self.model, shared_params) - + set_shared_parameters(self.model, shared_params) super().setup(trainer) if self.debug: @@ -70,9 +66,6 @@ def setup(self, trainer: "pl.Trainer") -> None: self.tpu_local_core_rank = xm.get_local_ordinal() self.tpu_global_core_rank = xm.get_ordinal() - def model_to_device(self) -> None: - self.model.to(self.root_device) - @classmethod def register_strategies(cls, strategy_registry: Dict) -> None: strategy_registry.register( From 5dc95381e7bbbe50a8a36817ef47bea073ddcb48 Mon Sep 17 00:00:00 2001 From: nitinramvelraj <98356761+nitinramvelraj@users.noreply.github.com> Date: Tue, 5 Jul 2022 00:34:18 -0700 Subject: [PATCH 57/89] Remove deprecated `on_keyboard_interrupt` (#13438) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Akihiro Nitta Co-authored-by: Carlos Mocholí Co-authored-by: Adrian Wälchli --- docs/source-pytorch/extensions/callbacks.rst | 5 ---- src/pytorch_lightning/CHANGELOG.md | 3 +++ src/pytorch_lightning/callbacks/callback.py | 8 ------ .../callbacks/lambda_function.py | 1 - .../trainer/callback_hook.py | 10 -------- 
.../trainer/configuration_validator.py | 5 ---- .../logger_connector/fx_validator.py | 1 - src/pytorch_lightning/trainer/trainer.py | 3 +-- .../callbacks/test_lambda_function.py | 17 +++++++++---- .../deprecated_api/test_remove_1-7.py | 25 +------------------ .../trainer/logging_/test_logger_connector.py | 2 -- tests/tests_pytorch/trainer/test_trainer.py | 14 ++--------- 12 files changed, 19 insertions(+), 75 deletions(-) diff --git a/docs/source-pytorch/extensions/callbacks.rst b/docs/source-pytorch/extensions/callbacks.rst index 57342e7dd62d5..72f02fadb6af6 100644 --- a/docs/source-pytorch/extensions/callbacks.rst +++ b/docs/source-pytorch/extensions/callbacks.rst @@ -353,11 +353,6 @@ on_predict_end .. automethod:: pytorch_lightning.callbacks.Callback.on_predict_end :noindex: -on_keyboard_interrupt -^^^^^^^^^^^^^^^^^^^^^ - -.. automethod:: pytorch_lightning.callbacks.Callback.on_keyboard_interrupt - :noindex: on_exception ^^^^^^^^^^^^ diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index e389fee222262..7de177c9472ed 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -272,6 +272,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed support for the `DDP2Strategy` ([#12705](https://github.com/PyTorchLightning/pytorch-lightning/pull/12705)) +- Removed deprecated `Callback.on_keyboard_interrupt` ([#13438](https://github.com/Lightning-AI/lightning/pull/13438)) + + ### Fixed diff --git a/src/pytorch_lightning/callbacks/callback.py b/src/pytorch_lightning/callbacks/callback.py index ed106076e41a7..892bd0fdfbf8b 100644 --- a/src/pytorch_lightning/callbacks/callback.py +++ b/src/pytorch_lightning/callbacks/callback.py @@ -272,14 +272,6 @@ def on_predict_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule def on_predict_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: """Called when predict ends.""" - def on_keyboard_interrupt(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.5 - This callback hook was deprecated in v1.5 in favor of `on_exception` and will be removed in v1.7. - - Called when any trainer execution is interrupted by KeyboardInterrupt. - """ - def on_exception(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", exception: BaseException) -> None: """Called when any trainer execution is interrupted by an exception.""" diff --git a/src/pytorch_lightning/callbacks/lambda_function.py b/src/pytorch_lightning/callbacks/lambda_function.py index b9fc8f01385b9..a37122cb2aa04 100644 --- a/src/pytorch_lightning/callbacks/lambda_function.py +++ b/src/pytorch_lightning/callbacks/lambda_function.py @@ -74,7 +74,6 @@ def __init__( on_validation_end: Optional[Callable] = None, on_test_start: Optional[Callable] = None, on_test_end: Optional[Callable] = None, - on_keyboard_interrupt: Optional[Callable] = None, on_exception: Optional[Callable] = None, on_save_checkpoint: Optional[Callable] = None, on_load_checkpoint: Optional[Callable] = None, diff --git a/src/pytorch_lightning/trainer/callback_hook.py b/src/pytorch_lightning/trainer/callback_hook.py index 904cbc872376f..1e455b3424606 100644 --- a/src/pytorch_lightning/trainer/callback_hook.py +++ b/src/pytorch_lightning/trainer/callback_hook.py @@ -553,16 +553,6 @@ def on_predict_end(self) -> None: for callback in self.callbacks: callback.on_predict_end(self, self.lightning_module) - def on_keyboard_interrupt(self): - r""" - .. 
deprecated:: v1.5 - This callback hook was deprecated in v1.5 in favor of `on_exception` and will be removed in v1.7. - - Called when any trainer execution is interrupted by KeyboardInterrupt. - """ - for callback in self.callbacks: - callback.on_keyboard_interrupt(self, self.lightning_module) - def on_exception(self, exception: BaseException) -> None: r""" .. deprecated:: v1.6 diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index c1ca692d031b8..ceeec9f7fcbcd 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -277,11 +277,6 @@ def _check_on_pretrain_routine(model: "pl.LightningModule") -> None: def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: for callback in trainer.callbacks: - if is_overridden(method_name="on_keyboard_interrupt", instance=callback): - rank_zero_deprecation( - "The `on_keyboard_interrupt` callback hook was deprecated in v1.5 and will be removed in v1.7." - " Please use the `on_exception` callback hook instead." - ) if is_overridden(method_name="on_init_start", instance=callback): rank_zero_deprecation( "The `on_init_start` callback hook was deprecated in v1.6 and will be removed in v1.8." diff --git a/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py b/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py index c3cc2885f407a..6f60ba6f1aa2f 100644 --- a/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py +++ b/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py @@ -124,7 +124,6 @@ class _LogOptions(TypedDict): ), "on_predict_batch_start": None, "on_predict_batch_end": None, - "on_keyboard_interrupt": None, "on_exception": None, "state_dict": None, "on_save_checkpoint": None, diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index 7201ef53501c0..e3e45885e0545 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -653,13 +653,12 @@ def _call_and_handle_interrupt(self, trainer_fn: Callable, *args: Any, **kwargs: return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs) else: return trainer_fn(*args, **kwargs) - # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7 + # TODO(awaelchli): Unify both exceptions below, where `KeyboardError` doesn't re-raise except KeyboardInterrupt as exception: rank_zero_warn("Detected KeyboardInterrupt, attempting graceful shutdown...") # user could press Ctrl+c many times... 
only shutdown once if not self.interrupted: self.state.status = TrainerStatus.INTERRUPTED - self._call_callback_hooks("on_keyboard_interrupt") self._call_callback_hooks("on_exception", exception) except BaseException as exception: self.state.status = TrainerStatus.INTERRUPTED diff --git a/tests/tests_pytorch/callbacks/test_lambda_function.py b/tests/tests_pytorch/callbacks/test_lambda_function.py index df71295566095..08fec1ebd2efb 100644 --- a/tests/tests_pytorch/callbacks/test_lambda_function.py +++ b/tests/tests_pytorch/callbacks/test_lambda_function.py @@ -48,9 +48,10 @@ def call(hook, *_, **__): limit_val_batches=1, callbacks=[LambdaCallback(**hooks_args)], ) - with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"): + with pytest.deprecated_call( + match="`on_init_start` callback hook was deprecated in v1.6 and will be removed in v1.8." + ): trainer.fit(model) - ckpt_path = trainer.checkpoint_callback.best_model_path # raises KeyboardInterrupt and loads from checkpoint @@ -63,11 +64,17 @@ def call(hook, *_, **__): limit_predict_batches=1, callbacks=[LambdaCallback(**hooks_args)], ) - with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"): + with pytest.deprecated_call( + match="`on_init_start` callback hook was deprecated in v1.6 and will be removed in v1.8." + ): trainer.fit(model, ckpt_path=ckpt_path) - with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"): + with pytest.deprecated_call( + match="`on_init_start` callback hook was deprecated in v1.6 and will be removed in v1.8." + ): trainer.test(model) - with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"): + with pytest.deprecated_call( + match="`on_init_start` callback hook was deprecated in v1.6 and will be removed in v1.8." + ): trainer.predict(model) assert checker == hooks diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py index 0cc1473e79ab9..2ae305d2c06b7 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py @@ -20,7 +20,7 @@ import pytest import torch -from pytorch_lightning import Callback, Trainer +from pytorch_lightning import Trainer from pytorch_lightning.demos.boring_classes import BoringModel from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper from pytorch_lightning.plugins.environments import ( @@ -35,29 +35,6 @@ from tests_pytorch.plugins.environments.test_lsf_environment import _make_rankfile -def test_v1_7_0_on_interrupt(tmpdir): - class HandleInterruptCallback(Callback): - def on_keyboard_interrupt(self, trainer, pl_module): - print("keyboard interrupt") - - model = BoringModel() - handle_interrupt_callback = HandleInterruptCallback() - - trainer = Trainer( - callbacks=[handle_interrupt_callback], - max_epochs=1, - limit_val_batches=0.1, - limit_train_batches=0.2, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `on_keyboard_interrupt` callback hook was deprecated in v1.5 and will be removed in v1.7" - ): - trainer.fit(model) - - class BoringCallbackDDPSpawnModel(BoringModel): def add_to_queue(self, queue): ... 
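
For users migrating off the removed hook, keyboard interrupts are now delivered through ``on_exception``. A rough sketch of a replacement callback is shown below — the logging behaviour is only an example, not something prescribed by this patch:

.. code-block:: python

    from pytorch_lightning import Callback, Trainer


    class GracefulShutdown(Callback):
        # `on_exception` receives every exception raised during a Trainer entry point,
        # including KeyboardInterrupt, so it covers the removed `on_keyboard_interrupt` hook.
        def on_exception(self, trainer: Trainer, pl_module, exception: BaseException) -> None:
            if isinstance(exception, KeyboardInterrupt):
                print("Interrupted by the user, shutting down gracefully...")
            else:
                print(f"Run failed with {type(exception).__name__}: {exception}")
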
diff --git a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py index 681c4a7732f3d..760e8eea2a85c 100644 --- a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py +++ b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py @@ -48,7 +48,6 @@ def test_fx_validator(): "on_fit_start", "on_init_end", "on_init_start", - "on_keyboard_interrupt", "on_exception", "on_load_checkpoint", "load_state_dict", @@ -93,7 +92,6 @@ def test_fx_validator(): "on_configure_sharded_model", "on_init_end", "on_init_start", - "on_keyboard_interrupt", "on_exception", "on_load_checkpoint", "load_state_dict", diff --git a/tests/tests_pytorch/trainer/test_trainer.py b/tests/tests_pytorch/trainer/test_trainer.py index d8e9e27d9ec1b..a0d20fc58ed1c 100644 --- a/tests/tests_pytorch/trainer/test_trainer.py +++ b/tests/tests_pytorch/trainer/test_trainer.py @@ -16,7 +16,6 @@ import math import os import pickle -import sys from argparse import Namespace from contextlib import nullcontext from copy import deepcopy @@ -1013,14 +1012,10 @@ class HandleInterruptCallback(Callback): def __init__(self): super().__init__() self.exception = None - self.exc_info = None def on_exception(self, trainer, pl_module, exception): self.exception = exception - def on_keyboard_interrupt(self, trainer, pl_module): - self.exc_info = sys.exc_info() - interrupt_callback = InterruptCallback() handle_interrupt_callback = HandleInterruptCallback() @@ -1035,15 +1030,10 @@ def on_keyboard_interrupt(self, trainer, pl_module): ) assert not trainer.interrupted assert handle_interrupt_callback.exception is None - assert handle_interrupt_callback.exc_info is None - with pytest.deprecated_call(match="on_keyboard_interrupt` callback hook was deprecated in v1.5"): - trainer.fit(model) + trainer.fit(model) assert trainer.interrupted assert isinstance(handle_interrupt_callback.exception, KeyboardInterrupt) - assert isinstance(handle_interrupt_callback.exc_info[1], KeyboardInterrupt) - with pytest.raises(MisconfigurationException), pytest.deprecated_call( - match="on_keyboard_interrupt` callback hook was deprecated in v1.5" - ): + with pytest.raises(MisconfigurationException): trainer.test(model) assert trainer.interrupted assert isinstance(handle_interrupt_callback.exception, MisconfigurationException) From 9dfc712232281afafd1b06b00ea29dc7ab9e51d9 Mon Sep 17 00:00:00 2001 From: donlapark <10988155+donlapark@users.noreply.github.com> Date: Fri, 8 Jul 2022 22:28:47 +0700 Subject: [PATCH 58/89] fix mypy typing errors in pytorch_lightning/tuner/lr_finder.py (#13513) Co-authored-by: Akihiro Nitta Co-authored-by: Justus Schock <12886177+justusschock@users.noreply.github.com> --- pyproject.toml | 1 - src/pytorch_lightning/tuner/lr_finder.py | 51 +++++++++++++++--------- 2 files changed, 33 insertions(+), 19 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c08d2c99bf3f5..e032fe7649866 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -85,7 +85,6 @@ module = [ "pytorch_lightning.trainer.supporters", "pytorch_lightning.trainer.trainer", "pytorch_lightning.tuner.batch_size_scaling", - "pytorch_lightning.tuner.lr_finder", "pytorch_lightning.tuner.tuning", "pytorch_lightning.utilities.auto_restart", "pytorch_lightning.utilities.data", diff --git a/src/pytorch_lightning/tuner/lr_finder.py b/src/pytorch_lightning/tuner/lr_finder.py index ad15707d079f1..9e584708f369e 100644 --- a/src/pytorch_lightning/tuner/lr_finder.py +++ b/src/pytorch_lightning/tuner/lr_finder.py @@ 
-16,7 +16,7 @@ import os import uuid from functools import wraps -from typing import Any, Dict, Optional, Sequence +from typing import Any, Callable, cast, Dict, List, Optional, Sequence, TYPE_CHECKING, Union import numpy as np import torch @@ -27,9 +27,10 @@ from pytorch_lightning.core.optimizer import _init_optimizers_and_lr_schedulers, _set_scheduler_opt_idx from pytorch_lightning.loggers.logger import DummyLogger from pytorch_lightning.utilities.exceptions import MisconfigurationException +from pytorch_lightning.utilities.imports import _RequirementAvailable from pytorch_lightning.utilities.parsing import lightning_hasattr, lightning_setattr from pytorch_lightning.utilities.rank_zero import rank_zero_warn -from pytorch_lightning.utilities.types import LRSchedulerConfig +from pytorch_lightning.utilities.types import LRSchedulerConfig, STEP_OUTPUT # check if ipywidgets is installed before importing tqdm.auto # to ensure it won't fail and a progress bar is displayed @@ -38,6 +39,10 @@ else: from tqdm import tqdm +_MATPLOTLIB_AVAILABLE: bool = _RequirementAvailable("matplotlib") # type: ignore[assignment] +if _MATPLOTLIB_AVAILABLE and TYPE_CHECKING: + import matplotlib.pyplot as plt + log = logging.getLogger(__name__) @@ -95,16 +100,16 @@ def __init__(self, mode: str, lr_min: float, lr_max: float, num_training: int): self.lr_max = lr_max self.num_training = num_training - self.results = {} + self.results: Dict[str, Any] = {} self._total_batch_idx = 0 # for debug purpose - def _exchange_scheduler(self, trainer: "pl.Trainer", model: "pl.LightningModule"): + def _exchange_scheduler(self, trainer: "pl.Trainer", model: "pl.LightningModule") -> Callable[["pl.Trainer"], None]: """Decorate `trainer.strategy.setup_optimizers` method such that it sets the user's originally specified optimizer together with a new scheduler that takes care of the learning rate search.""" setup_optimizers = trainer.strategy.setup_optimizers @wraps(setup_optimizers) - def func(trainer): + def func(trainer: "pl.Trainer") -> None: # Decide the structure of the output from _init_optimizers_and_lr_schedulers optimizers, _, _ = _init_optimizers_and_lr_schedulers(trainer.lightning_module) @@ -123,6 +128,7 @@ def func(trainer): args = (optimizer, self.lr_max, self.num_training) scheduler = _LinearLR(*args) if self.mode == "linear" else _ExponentialLR(*args) + scheduler = cast(pl.utilities.types._LRScheduler, scheduler) trainer.strategy.optimizers = [optimizer] trainer.strategy.lr_scheduler_configs = [LRSchedulerConfig(scheduler, interval="step", opt_idx=0)] @@ -131,13 +137,18 @@ def func(trainer): return func - def plot(self, suggest: bool = False, show: bool = False): + def plot(self, suggest: bool = False, show: bool = False) -> Optional["plt.Figure"]: """Plot results from lr_find run Args: suggest: if True, will mark suggested lr to use with a red point show: if True, will show figure """ + if not _MATPLOTLIB_AVAILABLE: + raise MisconfigurationException( + "To use the `plot` method, you must have Matplotlib installed." + " Install it by running `pip install -U matplotlib`." + ) import matplotlib.pyplot as plt lrs = self.results["lr"] @@ -162,7 +173,7 @@ def plot(self, suggest: bool = False, show: bool = False): return fig - def suggestion(self, skip_begin: int = 10, skip_end: int = 1): + def suggestion(self, skip_begin: int = 10, skip_end: int = 1) -> Optional[float]: """This will propose a suggestion for choice of initial learning rate as the point with the steepest negative gradient. 
@@ -196,7 +207,7 @@ def lr_find( """See :meth:`~pytorch_lightning.tuner.tuning.Tuner.lr_find`""" if trainer.fast_dev_run: rank_zero_warn("Skipping learning rate finder since fast_dev_run is enabled.") - return + return None # Determine lr attr if update_attr: @@ -218,7 +229,7 @@ def lr_find( trainer.progress_bar_callback.disable() # Configure optimizer and scheduler - trainer.strategy.setup_optimizers = lr_finder._exchange_scheduler(trainer, model) + trainer.strategy.setup_optimizers = lr_finder._exchange_scheduler(trainer, model) # type: ignore[assignment] # Fit, lr & loss logged in callback trainer.tuner._run(model) @@ -304,14 +315,16 @@ def __init__( self.num_training = num_training self.early_stop_threshold = early_stop_threshold self.beta = beta - self.losses = [] - self.lrs = [] + self.losses: List[float] = [] + self.lrs: List[float] = [] self.avg_loss = 0.0 self.best_loss = 0.0 self.progress_bar_refresh_rate = progress_bar_refresh_rate self.progress_bar = None - def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): + def on_train_batch_start( + self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", batch: Any, batch_idx: int + ) -> None: """Called before each training batch, logs the lr that will be used.""" if (trainer.fit_loop.batch_idx + 1) % trainer.accumulate_grad_batches != 0: return @@ -319,9 +332,11 @@ def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): if self.progress_bar_refresh_rate and self.progress_bar is None: self.progress_bar = tqdm(desc="Finding best initial lr", total=self.num_training) - self.lrs.append(trainer.lr_scheduler_configs[0].scheduler.lr[0]) + self.lrs.append(trainer.lr_scheduler_configs[0].scheduler.lr[0]) # type: ignore[union-attr] - def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): + def on_train_batch_end( + self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: STEP_OUTPUT, batch: Any, batch_idx: int + ) -> None: """Called when the training batch ends, logs the calculated loss.""" if (trainer.fit_loop.batch_idx + 1) % trainer.accumulate_grad_batches != 0: return @@ -372,7 +387,7 @@ def __init__(self, optimizer: torch.optim.Optimizer, end_lr: float, num_iter: in self.num_iter = num_iter super().__init__(optimizer, last_epoch) - def get_lr(self): + def get_lr(self) -> List[float]: # type: ignore[override] curr_iter = self.last_epoch + 1 r = curr_iter / self.num_iter @@ -384,7 +399,7 @@ def get_lr(self): return val @property - def lr(self): + def lr(self) -> Union[float, List[float]]: return self._lr @@ -410,7 +425,7 @@ def __init__(self, optimizer: torch.optim.Optimizer, end_lr: float, num_iter: in self.num_iter = num_iter super().__init__(optimizer, last_epoch) - def get_lr(self): + def get_lr(self) -> List[float]: # type: ignore[override] curr_iter = self.last_epoch + 1 r = curr_iter / self.num_iter @@ -422,5 +437,5 @@ def get_lr(self): return val @property - def lr(self): + def lr(self) -> Union[float, List[float]]: return self._lr From 58abfda8fcbee8fa200bfd6d21e4bd1cb48146ba Mon Sep 17 00:00:00 2001 From: Justin Goheen <26209687+JustinGoheen@users.noreply.github.com> Date: Mon, 11 Jul 2022 07:41:49 -0400 Subject: [PATCH 59/89] Fix mypy errors attributed to `pytorch_lightning.loggers.logger.py` (#13541) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Adrian Wälchli Co-authored-by: Akihiro Nitta --- pyproject.toml | 1 - src/pytorch_lightning/loggers/logger.py | 75 +++++++++++++++---------- 2 files changed, 
44 insertions(+), 32 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e032fe7649866..f5b78cafe24f6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,6 @@ module = [ "pytorch_lightning.demos.mnist_datamodule", "pytorch_lightning.distributed.dist", "pytorch_lightning.loggers.base", - "pytorch_lightning.loggers.logger", "pytorch_lightning.loggers.comet", "pytorch_lightning.loggers.csv_logs", "pytorch_lightning.loggers.mlflow", diff --git a/src/pytorch_lightning/loggers/logger.py b/src/pytorch_lightning/loggers/logger.py index d532aae413650..4113b61627d8f 100644 --- a/src/pytorch_lightning/loggers/logger.py +++ b/src/pytorch_lightning/loggers/logger.py @@ -13,16 +13,18 @@ # limitations under the License. """Abstract base class used to build new loggers.""" -import argparse + import functools import operator from abc import ABC, abstractmethod from argparse import Namespace +from collections import defaultdict from functools import wraps -from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Union +from typing import Any, Callable, Dict, Generator, Iterable, List, Mapping, Optional, Sequence, Union from weakref import ReferenceType import numpy as np +from torch import Tensor import pytorch_lightning as pl from pytorch_lightning.callbacks import Checkpoint @@ -33,9 +35,20 @@ def rank_zero_experiment(fn: Callable) -> Callable: """Returns the real experiment on rank 0 and otherwise the DummyExperiment.""" @wraps(fn) - def experiment(self): + def experiment(self) -> Union[Any, DummyExperiment]: # type: ignore[no-untyped-def] + """ + Note: + `self` is a custom logger instance. The loggers typically wrap an `experiment` method + with a @rank_zero_experiment decorator. An exception is `loggers.neptune`, which wraps + `experiment` and `run` with rank_zero_experiment. + + Union[Any, DummyExperiment] is used because the wrapped hooks have several return + types that are specific to the custom logger. The return type can be considered as + Union[return type of logger.experiment, DummyExperiment] + """ + @rank_zero_only - def get_experiment(): + def get_experiment() -> Callable: return fn(self) return get_experiment() or DummyExperiment() @@ -98,7 +111,7 @@ def update_agg_funcs( self, agg_key_funcs: Optional[Mapping[str, Callable[[Sequence[float]], float]]] = None, agg_default_func: Callable[[Sequence[float]], float] = np.mean, - ): + ) -> None: """Update aggregation methods. .. deprecated:: v1.6 @@ -119,7 +132,7 @@ def update_agg_funcs( self._agg_default_func = agg_default_func rank_zero_deprecation("`Logger.update_agg_funcs` was deprecated in v1.6 and will be removed in v1.8.") - def agg_and_log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None): + def agg_and_log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None: """Aggregates and records metrics. This method doesn't log the passed metrics instantaneously, but instead it aggregates them and logs only if metrics are ready to be logged. @@ -134,7 +147,7 @@ def agg_and_log_metrics(self, metrics: Dict[str, float], step: Optional[int] = N self.log_metrics(metrics=metrics, step=step) @abstractmethod - def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None): + def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None: """ Records metrics. This method logs metrics as as soon as it received them.
If you want to aggregate @@ -148,16 +161,16 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None): pass @abstractmethod - def log_hyperparams(self, params: argparse.Namespace, *args, **kwargs): + def log_hyperparams(self, params: Union[Dict[str, Any], Namespace], *args: Any, **kwargs: Any) -> None: """Record hyperparameters. Args: - params: :class:`~argparse.Namespace` containing the hyperparameters + params: :class:`~argparse.Namespace` or `Dict` containing the hyperparameters args: Optional positional arguments, depends on the specific logger being used kwargs: Optional keyword arguments, depends on the specific logger being used """ - def log_graph(self, model: "pl.LightningModule", input_array=None) -> None: + def log_graph(self, model: "pl.LightningModule", input_array: Optional[Tensor] = None) -> None: """Record model graph. Args: @@ -184,7 +197,7 @@ def save_dir(self) -> Optional[str]: return None @property - def group_separator(self): + def group_separator(self) -> str: """Return the default separator used by the logger to group the data into subfolders.""" return "/" @@ -229,7 +242,7 @@ def update_agg_funcs( self, agg_key_funcs: Optional[Mapping[str, Callable[[Sequence[float]], float]]] = None, agg_default_func: Callable[[Sequence[float]], float] = np.mean, - ): + ) -> None: for logger in self._logger_iterable: logger.update_agg_funcs(agg_key_funcs, agg_default_func) @@ -238,7 +251,7 @@ def experiment(self) -> List[Any]: """Returns a list of experiment objects for all the loggers in the logger collection.""" return [logger.experiment for logger in self._logger_iterable] - def agg_and_log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None): + def agg_and_log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None: for logger in self._logger_iterable: logger.agg_and_log_metrics(metrics=metrics, step=step) @@ -246,19 +259,19 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> for logger in self._logger_iterable: logger.log_metrics(metrics=metrics, step=step) - def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None: + def log_hyperparams(self, params: Union[Dict[str, Any], Namespace], *args: Any, **kwargs: Any) -> None: for logger in self._logger_iterable: - logger.log_hyperparams(params) + logger.log_hyperparams(params, *args, **kwargs) - def log_graph(self, model: "pl.LightningModule", input_array=None) -> None: + def log_graph(self, model: "pl.LightningModule", input_array: Optional[Tensor] = None) -> None: for logger in self._logger_iterable: logger.log_graph(model, input_array) - def log_text(self, *args, **kwargs) -> None: + def log_text(self, *args: Any, **kwargs: Any) -> None: for logger in self._logger_iterable: logger.log_text(*args, **kwargs) - def log_image(self, *args, **kwargs) -> None: + def log_image(self, *args: Any, **kwargs: Any) -> None: for logger in self._logger_iterable: logger.log_image(*args, **kwargs) @@ -293,17 +306,17 @@ def version(self) -> str: class DummyExperiment: """Dummy experiment.""" - def nop(self, *args, **kw): + def nop(self, *args: Any, **kw: Any) -> None: pass - def __getattr__(self, _): + def __getattr__(self, _: Any) -> Callable: return self.nop - def __getitem__(self, idx) -> "DummyExperiment": + def __getitem__(self, idx: int) -> "DummyExperiment": # enables self.logger.experiment[0].add_image(...) 
return self - def __setitem__(self, *args, **kwargs) -> None: + def __setitem__(self, *args: Any, **kwargs: Any) -> None: pass @@ -313,7 +326,7 @@ class DummyLogger(Logger): It is useful if we want to disable user's logger for a feature, but still ensure that user code can run """ - def __init__(self): + def __init__(self) -> None: super().__init__() self._experiment = DummyExperiment() @@ -322,10 +335,10 @@ def experiment(self) -> DummyExperiment: """Return the experiment object associated with this logger.""" return self._experiment - def log_metrics(self, *args, **kwargs) -> None: + def log_metrics(self, *args: Any, **kwargs: Any) -> None: pass - def log_hyperparams(self, *args, **kwargs) -> None: + def log_hyperparams(self, *args: Any, **kwargs: Any) -> None: pass @property @@ -338,18 +351,18 @@ def version(self) -> str: """Return the experiment version.""" return "" - def __getitem__(self, idx) -> "DummyLogger": + def __getitem__(self, idx: int) -> "DummyLogger": # enables self.logger[0].experiment.add_image(...) return self - def __iter__(self): + def __iter__(self) -> Generator[None, None, None]: # if DummyLogger is substituting a logger collection, pretend it is empty yield from () def __getattr__(self, name: str) -> Callable: """Allows the DummyLogger to be called with arbitrary methods, to avoid AttributeErrors.""" - def method(*args, **kwargs): + def method(*args: Any, **kwargs: Any) -> None: return None return method @@ -357,7 +370,7 @@ def method(*args, **kwargs): def merge_dicts( dicts: Sequence[Mapping], - agg_key_funcs: Optional[Mapping[str, Callable[[Sequence[float]], float]]] = None, + agg_key_funcs: Optional[Mapping] = None, default_func: Callable[[Sequence[float]], float] = np.mean, ) -> Dict: """Merge a sequence with dictionaries into one dictionary by aggregating the same keys with some given @@ -395,7 +408,7 @@ def merge_dicts( """ agg_key_funcs = agg_key_funcs or {} keys = list(functools.reduce(operator.or_, [set(d.keys()) for d in dicts])) - d_out = {} + d_out: Dict = defaultdict(dict) for k in keys: fn = agg_key_funcs.get(k) values_to_agg = [v for v in [d_in.get(k) for d_in in dicts] if v is not None] @@ -405,4 +418,4 @@ def merge_dicts( else: d_out[k] = (fn or default_func)(values_to_agg) - return d_out + return dict(d_out) From 6a8e53756fb0b8c0114ca63181978730d7bd395a Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Tue, 12 Jul 2022 02:15:46 +0900 Subject: [PATCH 60/89] CI: Define reusable workflow - check schema (#13562) * Decouple schema check * Update workflow name * Don't run if dir not found --- .github/workflows/_check-schema.yml | 37 +++++++++++++++++++++++++++++ .github/workflows/ci_schema.yml | 24 ++++--------------- 2 files changed, 41 insertions(+), 20 deletions(-) create mode 100644 .github/workflows/_check-schema.yml diff --git a/.github/workflows/_check-schema.yml b/.github/workflows/_check-schema.yml new file mode 100644 index 0000000000000..299af83503831 --- /dev/null +++ b/.github/workflows/_check-schema.yml @@ -0,0 +1,37 @@ +name: Reusable Check Schema + +on: + workflow_call: + inputs: + azure-dir: + description: 'Directory containing Azure Pipelines config files. Provide an empty string to skip checking on Azure Pipelines files.' 
+ default: './.azure/' + required: false + type: string + +jobs: + schema: + runs-on: ubuntu-20.04 + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Install dependencies + run: pip install check-jsonschema + + - name: GitHub Actions - workflow + run: check-jsonschema $(find .github/workflows -name '*.yml' -a ! -name '_*.yml') --builtin-schema "github-workflows" + + - name: GitHub Actions - action + run: | + if [ -d ".github/actions" ]; then + check-jsonschema .github/actions/*/*.yml --builtin-schema "github-actions" + fi + + - name: Azure Pipelines + env: + SCHEMA_FILE: https://raw.githubusercontent.com/microsoft/azure-pipelines-vscode/v1.204.0/service-schema.json + run: | + if [ -d ${{ inputs.azure-dir }} ]; then + check-jsonschema ${{ inputs.azure-dir }}/*.yml --schemafile "$SCHEMA_FILE" + fi diff --git a/.github/workflows/ci_schema.yml b/.github/workflows/ci_schema.yml index 2e62157dfbd74..156334ae96043 100644 --- a/.github/workflows/ci_schema.yml +++ b/.github/workflows/ci_schema.yml @@ -1,26 +1,10 @@ -name: Schema -on: # Trigger the workflow on push or pull request, but only for the master branch +name: Check Schema + +on: push: {} pull_request: branches: [master, "release/*"] jobs: check: - runs-on: ubuntu-20.04 - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Install pkg - run: | - pip install "check-jsonschema>=0.10" - - - name: GH Workflows - run: | - check-jsonschema .github/workflows/*.yml --builtin-schema "github-workflows" - check-jsonschema .github/actions/*/*.yml --builtin-schema "github-actions" - - - name: Azure Pipelines - env: - SCHEMA_FILE: https://raw.githubusercontent.com/microsoft/azure-pipelines-vscode/v1.204.0/service-schema.json - run: check-jsonschema .azure/*.yml --schemafile "$SCHEMA_FILE" + uses: ./.github/workflows/_check-schema.yml From dfe5c835eb895c8f3ecf84f3bc018ff23f525d83 Mon Sep 17 00:00:00 2001 From: Kaushik B <45285388+kaushikb11@users.noreply.github.com> Date: Mon, 11 Jul 2022 22:55:32 +0530 Subject: [PATCH 61/89] Fix TPU circleci tests (#13432) * Fix TPU circleci tests * Fix TPU circleci tests * Fix TPU circleci tests * Fix TPU circleci tests * Fix TPU circleci tests * Fix rank issue * Fix rank issue * debug alternative fix * Revert properties Co-authored-by: awaelchli --- .circleci/config.yml | 6 +++--- dockers/tpu-tests/tpu_test_cases.jsonnet | 5 +++-- src/pytorch_lightning/strategies/tpu_spawn.py | 1 + 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6176884c2e65f..c608680e1c168 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,8 +1,8 @@ # Python CircleCI 2.1 configuration file. 
version: 2.1 orbs: - gcp-gke: circleci/gcp-gke@1.0.4 - go: circleci/go@1.3.0 + gcp-gke: circleci/gcp-gke@1.4.0 + go: circleci/go@1.7.1 codecov: codecov/codecov@1.1.0 trigger: @@ -60,7 +60,7 @@ references: run: name: Install jsonnet command: | - go get github.com/google/go-jsonnet/cmd/jsonnet + go install github.com/google/go-jsonnet/cmd/jsonnet@latest update_jsonnet: &update_jsonnet run: diff --git a/dockers/tpu-tests/tpu_test_cases.jsonnet b/dockers/tpu-tests/tpu_test_cases.jsonnet index 620deffae5275..bf2aab9317fcf 100644 --- a/dockers/tpu-tests/tpu_test_cases.jsonnet +++ b/dockers/tpu-tests/tpu_test_cases.jsonnet @@ -24,7 +24,7 @@ local tputests = base.BaseTest { conda activate lightning mkdir -p /home/runner/work/pytorch-lightning && cd /home/runner/work/pytorch-lightning git clone https://github.com/Lightning-AI/lightning.git - cd pytorch-lightning + cd lightning echo $PWD git ls-remote --refs origin git fetch origin "refs/pull/{PR_NUMBER}/head:pr/{PR_NUMBER}" && git checkout "pr/{PR_NUMBER}" @@ -35,8 +35,9 @@ local tputests = base.BaseTest { echo $KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS export XRT_TPU_CONFIG="tpu_worker;0;${KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7}" cd tests/tests_pytorch + echo $PWD # TODO (@kaushikb11): Add device stats tests here - coverage run --source=pytorch_lightning -m pytest -v --capture=no \ + coverage run --source pytorch_lightning -m pytest -v --capture=no \ strategies/test_tpu_spawn.py \ profilers/test_xla_profiler.py \ accelerators/test_tpu.py \ diff --git a/src/pytorch_lightning/strategies/tpu_spawn.py b/src/pytorch_lightning/strategies/tpu_spawn.py index 464eb6b57d4de..b774bf19acaa0 100644 --- a/src/pytorch_lightning/strategies/tpu_spawn.py +++ b/src/pytorch_lightning/strategies/tpu_spawn.py @@ -208,6 +208,7 @@ def reduce(self, output, group: Optional[Any] = None, reduce_op: Optional[Union[ def _worker_setup(self, process_idx: int): reset_seed() + self._local_rank = xm.get_local_ordinal() self.tpu_local_core_rank = xm.get_local_ordinal() self.tpu_global_core_rank = xm.get_ordinal() rank_zero_only.rank = self.global_rank From 35486281ed0ecbb0b4596de1c8c3351cd3761b9e Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Mon, 11 Jul 2022 23:46:10 +0200 Subject: [PATCH 62/89] cd: releasing packages (#13489) * assist * split * need * inverse * checkout * json * include * unzip * mirror * fire * twine * for tar -xf * clean * 3.8 * ls --- .actions/assistant.py | 116 ++++++++++++++++++-- .github/workflows/ci_pkg-install.yml | 53 ++++++++- .github/workflows/release-pypi.yml | 158 ++++++++++++++++++++++++--- 3 files changed, 294 insertions(+), 33 deletions(-) diff --git a/.actions/assistant.py b/.actions/assistant.py index f67a35111f299..4a5afde4c560b 100644 --- a/.actions/assistant.py +++ b/.actions/assistant.py @@ -1,23 +1,56 @@ import datetime +import glob +import json import os import re +import shutil +from distutils.version import LooseVersion, StrictVersion +from importlib.util import module_from_spec, spec_from_file_location +from itertools import chain +from pathlib import Path from pprint import pprint -from typing import Sequence +from types import ModuleType +from typing import List, Optional, Sequence +from urllib import request +from urllib.request import Request, urlopen import fire -REQUIREMENT_FILES = ( - "requirements/pytorch/base.txt", - "requirements/pytorch/extra.txt", - "requirements/pytorch/loggers.txt", - "requirements/pytorch/strategies.txt", - "requirements/pytorch/examples.txt", -) +REQUIREMENT_FILES = { + "pytorch": ( + 
"requirements/pytorch/base.txt", + "requirements/pytorch/extra.txt", + "requirements/pytorch/loggers.txt", + "requirements/pytorch/strategies.txt", + "requirements/pytorch/examples.txt", + ) +} +REQUIREMENT_FILES_ALL = tuple(chain(*REQUIREMENT_FILES.values())) +PACKAGE_MAPPING = {"app": "lightning-app", "pytorch": "pytorch-lightning"} -class AssistantCLI: +def pypi_versions(package_name: str) -> List[str]: + """Return a list of released versions of a provided pypi name.""" + # https://stackoverflow.com/a/27239645/4521646 + url = f"https://pypi.org/pypi/{package_name}/json" + data = json.load(urlopen(Request(url))) + versions = list(data["releases"].keys()) + # todo: drop this line after cleaning Pypi history from invalid versions + versions = list(filter(lambda v: v.count(".") == 2 and "rc" not in v, versions)) + versions.sort(key=StrictVersion) + return versions + + +def _load_py_module(name: str, location: str) -> ModuleType: + spec = spec_from_file_location(name, location) + py = module_from_spec(spec) + spec.loader.exec_module(py) + return py - _PATH_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + +class AssistantCLI: + _PATH_ROOT = str(Path(__file__).parent.parent) + _PATH_SRC = os.path.join(_PATH_ROOT, "src") @staticmethod def prepare_nightly_version(proj_root: str = _PATH_ROOT) -> None: @@ -35,7 +68,7 @@ def prepare_nightly_version(proj_root: str = _PATH_ROOT) -> None: fp.write(init) @staticmethod - def requirements_prune_pkgs(packages: Sequence[str], req_files: Sequence[str] = REQUIREMENT_FILES) -> None: + def requirements_prune_pkgs(packages: Sequence[str], req_files: Sequence[str] = REQUIREMENT_FILES_ALL) -> None: """Remove some packages from given requirement files.""" if isinstance(req_files, str): req_files = [req_files] @@ -63,11 +96,70 @@ def _replace_min(fname: str) -> None: open(fname, "w").write(req) @staticmethod - def replace_oldest_ver(requirement_fnames: Sequence[str] = REQUIREMENT_FILES) -> None: + def replace_oldest_ver(requirement_fnames: Sequence[str] = REQUIREMENT_FILES_ALL) -> None: """Replace the min package version by fixed one.""" for fname in requirement_fnames: AssistantCLI._replace_min(fname) + @staticmethod + def _release_pkg(pkg: str, src_folder: str = _PATH_SRC) -> bool: + pypi_ver = pypi_versions(pkg)[-1] + _version = _load_py_module("version", os.path.join(src_folder, pkg.replace("-", "_"), "__version__.py")) + local_ver = _version.version + return "dev" not in local_ver and LooseVersion(local_ver) > LooseVersion(pypi_ver) + + @staticmethod + def determine_releasing_pkgs( + src_folder: str = _PATH_SRC, packages: Sequence[str] = ("pytorch", "app"), inverse: bool = False + ) -> Sequence[str]: + """Determine version of package where the name is `lightning.`.""" + if isinstance(packages, str): + packages = [packages] + releasing = [pkg for pkg in packages if AssistantCLI._release_pkg(PACKAGE_MAPPING[pkg], src_folder=src_folder)] + if inverse: + releasing = list(filter(lambda pkg: pkg not in releasing, packages)) + return json.dumps([{"pkg": pkg for pkg in releasing}]) + + @staticmethod + def download_package(package: str, folder: str = ".", version: Optional[str] = None) -> None: + """Download specific or latest package from PyPI where the name is `lightning.`.""" + url = f"https://pypi.org/pypi/{PACKAGE_MAPPING[package]}/json" + data = json.load(urlopen(Request(url))) + if not version: + versions = list(data["releases"].keys()) + version = sorted(versions, key=LooseVersion)[-1] + releases = list(filter(lambda r: r["packagetype"] == 
"sdist", data["releases"][version])) + assert releases, f"Missing 'sdist' for this package/version aka {package}/{version}" + release = releases[0] + pkg_url = release["url"] + pkg_file = os.path.basename(pkg_url) + pkg_path = os.path.join(folder, pkg_file) + os.makedirs(folder, exist_ok=True) + request.urlretrieve(pkg_url, pkg_path) + + @staticmethod + def _find_pkgs(folder: str, pkg_pattern: str = "lightning") -> List[str]: + """Find all python packages with spec. + + pattern in given folder, in case `src` exists dive there. + """ + pkg_dirs = [d for d in glob.glob(os.path.join(folder, "*")) if os.path.isdir(d)] + if "src" in [os.path.basename(p) for p in pkg_dirs]: + return AssistantCLI._find_pkgs(os.path.join(folder, "src"), pkg_pattern) + pkg_dirs = list(filter(lambda p: pkg_pattern in os.path.basename(p), pkg_dirs)) + return pkg_dirs + + @staticmethod + def mirror_pkg2source(pypi_folder: str, src_folder: str) -> None: + """From extracted sdist packages overwrite the python package with given pkg pattern.""" + pypi_dirs = [d for d in glob.glob(os.path.join(pypi_folder, "*")) if os.path.isdir(d)] + for pkg_dir in pypi_dirs: + for py_dir in AssistantCLI._find_pkgs(pkg_dir): + dir_name = os.path.basename(py_dir) + py_dir2 = os.path.join(src_folder, dir_name) + shutil.rmtree(py_dir2, ignore_errors=True) + shutil.copytree(py_dir, py_dir2) + if __name__ == "__main__": fire.Fire(AssistantCLI) diff --git a/.github/workflows/ci_pkg-install.yml b/.github/workflows/ci_pkg-install.yml index aaf1d5aefe2f9..45d48cbfff7fc 100644 --- a/.github/workflows/ci_pkg-install.yml +++ b/.github/workflows/ci_pkg-install.yml @@ -37,7 +37,7 @@ jobs: matrix: os: [ubuntu-20.04, macOS-10.15, windows-2019] pkg: ["app", "pytorch"] - python-version: [3.7] # , 3.9 + python-version: [3.8] # , 3.9 steps: - uses: actions/checkout@v2 @@ -69,7 +69,7 @@ jobs: with: pkg-name: ${{ env.PKG_NAME }} - install-meta: + install-meta-src: needs: install-standalone runs-on: ${{ matrix.os }} strategy: @@ -78,7 +78,7 @@ jobs: matrix: os: [ubuntu-20.04, macOS-10.15, windows-2019] pkg: ["", "lightning"] - python-version: [3.7] # , 3.9 + python-version: [3.8] # , 3.9 steps: - uses: actions/checkout@v2 @@ -102,3 +102,50 @@ jobs: with: pkg-name: "lightning" pip-flags: "-U --pre --find-links ../pypi/" + + install-meta-pypi: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + # max-parallel: 1 + matrix: + os: [ubuntu-20.04, macOS-10.15, windows-2019] + python-version: [3.8] # , 3.9 + + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Dowload package + run: | + pip install -q fire requests + for pkg in 'app' 'pytorch' ; do + python .actions/assistant.py download-package "$pkg" --folder pypi + done + ls -lh pypi/ + + - name: Unzip packages + working-directory: pypi + run: for file in `ls *.gz`; do tar -xzf $file; done + - name: Show upacked pkgs + if: runner.os == 'linux' + run: | + sudo apt install -y tree + tree pypi/ -L 3 + + - name: Miror source + run: | + pip install -q fire requests + python .actions/assistant.py mirror-pkg2source pypi src + ls -R src/ + + # TODO: use this testing when PL 1.6.5 is out + #- uses: ./.github/actions/pkg-check + # with: + # pkg-name: "lightning" + + #- uses: ./.github/actions/pkg-install + # with: + # pkg-name: "lightning" diff --git a/.github/workflows/release-pypi.yml b/.github/workflows/release-pypi.yml index 2476685dae3b8..bf20252b9efd7 100644 --- a/.github/workflows/release-pypi.yml +++ 
b/.github/workflows/release-pypi.yml @@ -7,48 +7,172 @@ on: # Trigger the workflow on push or pull request, but only for the master bra release: types: [published] +# there are several consecutive actions: +# 1) determine which packages have been changed at the time this event is processed +# 2) build related packages - app/pytorch or download latest from pypi +# 3) create the meta package - lightning +# 4) publish all new creations tada jobs: - # Todo run job which determine changed versions + # run job which determines changed versions + releasing: + runs-on: ubuntu-20.04 + outputs: + build-pkgs: ${{ steps.candidate.outputs.pkgs }} + pull-pkgs: ${{ steps.download.outputs.pkgs }} + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: 3.9 + - run: | + pip install -q fire requests + mkdir dist && touch dist/.placeholder + mkdir pypi && touch pypi/.placeholder + - uses: actions/upload-artifact@v3 + with: + name: dist-packages-${{ github.sha }} + path: dist + - uses: actions/upload-artifact@v3 + with: + name: pypi-packages-${{ github.sha }} + path: pypi + + - name: Find changed packages + id: candidate + run: | + echo "::set-output name=pkgs::{include: $(python .actions/assistant.py determine-releasing-pkgs 2>&1)}" + - run: echo "${{ steps.candidate.outputs.pkgs }}" + + - name: Inverse packages to pull + id: download + run: | + echo "::set-output name=pkgs::{include: $(python .actions/assistant.py determine-releasing-pkgs --inverse 2>&1)}" + - run: echo "${{ steps.download.outputs.pkgs }}" # based on https://github.com/pypa/gh-action-pypi-publish build-package: - # todo run sequential + needs: releasing runs-on: ubuntu-20.04 + strategy: + fail-fast: true + # run sequential + max-parallel: 1 + matrix: ${{ fromJSON(needs.releasing.outputs.build-pkgs) }} steps: - uses: actions/checkout@v2 + - uses: actions/download-artifact@v3 + with: + name: dist-packages-${{ github.sha }} + path: dist - uses: actions/setup-python@v2 with: python-version: 3.9 - name: Install dependencies - run: pip install --upgrade setuptools wheel + run: pip install -U setuptools wheel - name: Build packages - # TODO: this would be big challenge as at the release time (especially from master) - # all packages from meta is generated wont be in stable mode, - # so the meta will be for one as release and other as dev - # todo: for meta pkg generation we need to install the unstable pkg from PyPI and overwrite source env: - PACKAGE_NAME: pytorch + PACKAGE_NAME: ${{ matrix.pkg }} run: | python setup.py sdist bdist_wheel ls -lh dist/ - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 + with: + name: dist-packages-${{ github.sha }} + path: dist + + # based on https://github.com/pypa/gh-action-pypi-publish + download-package: + needs: releasing + runs-on: ubuntu-20.04 + strategy: + fail-fast: true + # run sequential + max-parallel: 1 + matrix: ${{ fromJSON(needs.releasing.outputs.pull-pkgs) }} + steps: + - uses: actions/checkout@v2 + - uses: actions/download-artifact@v3 + with: + name: pypi-packages-${{ github.sha }} + path: pypi + - uses: actions/setup-python@v2 + with: + python-version: 3.9 + + - name: Download package + run: | + pip install -q fire requests + python .actions/assistant.py download-package ${{ matrix.pkg }} --folder pypi + + - uses: actions/upload-artifact@v3 + with: + name: pypi-packages-${{ github.sha }} + path: pypi + + # based on https://github.com/pypa/gh-action-pypi-publish + build-meta-pkg: + needs: [build-package, download-package] + runs-on:
ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + - uses: actions/download-artifact@v3 + with: + name: dist-packages-${{ github.sha }} + path: dist + - run: ls -lh dist/ + - uses: actions/download-artifact@v3 + with: + name: pypi-packages-${{ github.sha }} + path: pypi + - run: ls -lh pypi/ + - uses: actions/setup-python@v2 + with: + python-version: 3.9 + + - name: Install dependencies + run: pip install -U setuptools wheel "twine==4.0.*" + + - name: Unzip packages + working-directory: pypi + run: for file in `ls *.gz`; do tar -xzf $file; done + - name: Show unpacked pkgs + if: runner.os == 'linux' + run: | + sudo apt install -y tree + tree pypi/ -L 3 + + - name: Mirror source + run: | + pip install -q fire requests + python .actions/assistant.py mirror-pkg2source pypi src + ls -R src/ + + - name: Build packages + env: + PACKAGE_NAME: "lightning" + run: | + python setup.py sdist bdist_wheel + twine check dist/* + ls -lh dist/ + + - uses: actions/upload-artifact@v3 + with: + name: dist-packages-${{ github.sha }} + path: dist upload-package: runs-on: ubuntu-20.04 - needs: build-package + needs: build-meta-pkg if: startsWith(github.event.ref, 'refs/tags') || github.event_name == 'release' steps: - uses: actions/checkout@v2 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: - name: pypi-packages-${{ github.sha }} + name: dist-packages-${{ github.sha }} path: dist - run: ls -lh dist/ @@ -60,13 +184,13 @@ jobs: publish-package: runs-on: ubuntu-20.04 - needs: build-package + needs: build-meta-pkg if: startsWith(github.event.ref, 'refs/tags') || github.event_name == 'release' steps: - uses: actions/checkout@v2 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: - name: pypi-packages-${{ github.sha }} + name: dist-packages-${{ github.sha }} path: dist - run: ls -lh dist/ @@ -91,7 +215,7 @@ jobs: password: ${{ secrets.pypi_password }} create-legacy-ckpt: - # todo skip if pytorch was not released + # Todo: skip if pytorch was not released runs-on: ubuntu-20.04 needs: [build-package, publish-package] steps: @@ -111,7 +235,6 @@ jobs: - uses: actions/download-artifact@v2 with: name: pypi-packages-${{ github.sha }} - path: dist - name: Pull files from S3 working-directory: ./tests/legacy @@ -125,7 +248,6 @@ jobs: run: | ls -lh dist/ pip install dist/*.whl - pl_ver=$(python -c "import pytorch_lightning as pl ; print(pl.__version__)" 2>&1) # generate checkpoint to this version bash generate_checkpoints.sh $pl_ver From 75e50c5f1763d4b37ef38f044068bd76b6afa6f4 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Tue, 12 Jul 2022 06:46:36 +0900 Subject: [PATCH 63/89] CI: Add PR labeler (#13475) * Add pr labeler * Trigger on docs change * Make mutually exclusive * Add requirements * files Co-authored-by: Rohit Gupta Co-authored-by: Jirka Borovec Co-authored-by: Rohit Gupta --- .github/labeler.yml | 7 +++++++ .github/workflows/labeler.yml | 15 +++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 .github/labeler.yml create mode 100644 .github/workflows/labeler.yml diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 0000000000000..bba4ddec5dc24 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,7 @@ +app: +- any: ['src/lightning*/**/*', 'tests/tests_app*/**/*', 'docs/source-app/**/*', 'requirements/app/*'] + all: ['!src/pytorch_lightning/**/*', '!tests/tests_pytorch/**/*', '!docs/source-pytorch/**/*', '!requirements/pytorch/*'] + +pl: +- any: ['src/pytorch_lightning/**/*', 'tests/tests_pytorch/**/*',
'docs/source-pytorch/**/*', 'requirements/pytorch/*'] + all: ['!src/lightning*/**/*', '!tests/tests_app*/**/*', '!docs/source-app/**/*', '!requirements/app/*'] diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 0000000000000..fe060c10cb980 --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,15 @@ +name: Label Pull Requests +on: +- pull_request_target + +jobs: + triage: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v4 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + sync-labels: true From ab67ec9021705a79cfb51074d76fe0909c0dbb40 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Tue, 12 Jul 2022 06:46:52 +0900 Subject: [PATCH 64/89] CI: Update mypy workflow (#13574) * Fix pyproject.toml * Add TODO * Update mypy workflow --- .github/workflows/code-checks.yml | 11 +++++------ pyproject.toml | 8 +++++++- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml index c1c8ffd70006e..fc02f340048bb 100644 --- a/.github/workflows/code-checks.yml +++ b/.github/workflows/code-checks.yml @@ -13,10 +13,10 @@ concurrency: jobs: mypy: runs-on: ubuntu-20.04 - # todo: checking also lightning_app steps: - - uses: actions/checkout@master - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + + - uses: actions/setup-python@v3 with: python-version: 3.9 @@ -37,6 +37,5 @@ jobs: pip install -r requirements/pytorch/devel.txt --find-links https://download.pytorch.org/whl/cpu/torch_stable.html pip list - - name: Type check PyTorch - working-directory: src - run: mypy pytorch_lightning --install-types --non-interactive --config-file ../pyproject.toml + - name: Check typing + run: mypy diff --git a/pyproject.toml b/pyproject.toml index f5b78cafe24f6..770f0983c3139 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,13 @@ line-length = 120 [tool.mypy] -files = ["pytorch_lightning"] +files = [ + "src/pytorch_lightning", + # TODO: Check typing in app source + # "src/lightning_app", +] +install_types = "True" +non_interactive = "True" disallow_untyped_defs = "True" ignore_missing_imports = "True" show_error_codes = "True" From 93b2e800e8ee0c291c4c2a3317d5d86f675c5ebd Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Mon, 11 Jul 2022 23:47:06 +0200 Subject: [PATCH 65/89] setup: set default metadata (#13571) --- setup.cfg | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/setup.cfg b/setup.cfg index f59a6c1cf436a..055af361c4bcb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +[metadata] +name = pytorch-lightning +author = Lightning-AI et al. 
+url = https://github.com/Lightning-AI/lightning + [tool:pytest] norecursedirs = .git From 424fb0e30ee33595f10bc826ba3346266378a09a Mon Sep 17 00:00:00 2001 From: Shantam Gilra <64306405+shantam-8@users.noreply.github.com> Date: Tue, 12 Jul 2022 00:03:54 +0100 Subject: [PATCH 66/89] Remove deprecated `pytorch_lightning.core.decorators.parameter_validation` (#13514) * Removal of depreciated code from decorators * Update CHANGELOG.md * Removed imports --- src/pytorch_lightning/CHANGELOG.md | 3 + src/pytorch_lightning/core/decorators.py | 60 ------------------- .../deprecated_api/test_remove_1-7.py | 10 ---- 3 files changed, 3 insertions(+), 70 deletions(-) delete mode 100644 src/pytorch_lightning/core/decorators.py diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 7de177c9472ed..7ff9bca24ac15 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -177,6 +177,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Removed +- Removed deprecated `pytorch_lightning.core.decorators.parameter_validation` from `decorators` ([#13514](https://github.com/Lightning-AI/lightning/pull/13514)) + + - Removed the deprecated `Logger.close` method ([#13149](https://github.com/PyTorchLightning/pytorch-lightning/pull/13149)) diff --git a/src/pytorch_lightning/core/decorators.py b/src/pytorch_lightning/core/decorators.py deleted file mode 100644 index 33c83b4b10d6d..0000000000000 --- a/src/pytorch_lightning/core/decorators.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn - -rank_zero_deprecation( - "Using `pytorch_lightning.core.decorators.parameter_validation` is deprecated in v1.5, " - "and will be removed in v1.7. It has been replaced by automatic parameters tying with " - "`pytorch_lightning.utilities.params_tying.set_shared_parameters`" -) - -from functools import wraps # noqa: E402 -from typing import Callable # noqa: E402 - - -def parameter_validation(fn: Callable) -> Callable: - """Validates that the module parameter lengths match after moving to the device. It is useful when tying - weights on TPU's. - - Args: - fn: ``model_to_device`` method - - Note: - TPU's require weights to be tied/shared after moving the module to the device. - Failure to do this results in the initialization of new weights which are not tied. - To overcome this issue, weights should be tied using the ``on_post_move_to_device`` model hook - which is called after the module has been moved to the device. 
- - See Also: - - `XLA Documentation `_ - """ - - @wraps(fn) - def inner_fn(self, *args, **kwargs): - pre_layer_count = len(list(self.model.parameters())) - module = fn(self, *args, **kwargs) - self.model.on_post_move_to_device() - post_layer_count = len(list(self.model.parameters())) - - if not pre_layer_count == post_layer_count: - rank_zero_warn( - "The model layers do not match after moving to the target device." - " If your model employs weight sharing on TPU," - " please tie your weights using the `on_post_move_to_device` model hook.\n" - f"Layer count: [Before: {pre_layer_count} After: {post_layer_count}]" - ) - - return module - - return inner_fn diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py index 2ae305d2c06b7..17cccbfa80a5e 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py @@ -31,7 +31,6 @@ TorchElasticEnvironment, ) from pytorch_lightning.strategies import SingleDeviceStrategy -from tests_pytorch.deprecated_api import _soft_unimport_module from tests_pytorch.plugins.environments.test_lsf_environment import _make_rankfile @@ -76,15 +75,6 @@ def on_post_move_to_device(self): trainer.fit(model) -def test_v1_7_0_deprecate_parameter_validation(): - - _soft_unimport_module("pytorch_lightning.core.decorators") - with pytest.deprecated_call( - match="Using `pytorch_lightning.core.decorators.parameter_validation` is deprecated in v1.5" - ): - from pytorch_lightning.core.decorators import parameter_validation # noqa: F401 - - def test_v1_7_0_deprecated_slurm_job_id(): trainer = Trainer() with pytest.deprecated_call(match="Method `slurm_job_id` is deprecated in v1.6.0 and will be removed in v1.7.0."): From 08c08cbab11105f04c945155e4e28d2f81db5e18 Mon Sep 17 00:00:00 2001 From: Bibhabasu Mohapatra <68384968+bibhabasumohapatra@users.noreply.github.com> Date: Tue, 12 Jul 2022 14:38:25 +0530 Subject: [PATCH 67/89] Adds is last batch (#13550) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Adrian Wälchli Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Kaushik B <45285388+kaushikb11@users.noreply.github.com> --- docs/source-pytorch/common/trainer.rst | 10 ++++++++++ src/pytorch_lightning/trainer/trainer.py | 1 + 2 files changed, 11 insertions(+) diff --git a/docs/source-pytorch/common/trainer.rst b/docs/source-pytorch/common/trainer.rst index 3e3734ceb35e3..775b582307186 100644 --- a/docs/source-pytorch/common/trainer.rst +++ b/docs/source-pytorch/common/trainer.rst @@ -1641,6 +1641,16 @@ The number of epochs run. if trainer.current_epoch >= 10: ... +is_last_batch +************* + +Whether trainer is executing last batch in the current epoch. + +.. code-block:: python + + if trainer.is_last_batch: + ... 
+ global_step *********** diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index e3e45885e0545..acde9224d501e 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -2574,6 +2574,7 @@ def min_steps(self) -> Optional[int]: @property def is_last_batch(self) -> bool: + """Whether trainer is executing the last batch.""" return self.fit_loop.epoch_loop.batch_progress.is_last_batch @property From d6ce69732b3e3c8bdcb3980ea0bbebca9ab48a2b Mon Sep 17 00:00:00 2001 From: Rohit Gupta Date: Tue, 12 Jul 2022 15:15:59 +0530 Subject: [PATCH 68/89] Restore log step during restart (#13467) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- src/pytorch_lightning/CHANGELOG.md | 2 ++ src/pytorch_lightning/loops/epoch/training_epoch_loop.py | 2 ++ tests/tests_pytorch/loops/test_loop_state_dict.py | 2 +- tests/tests_pytorch/models/test_restore.py | 1 + 4 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 7ff9bca24ac15..952cad27d8613 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -314,6 +314,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Fixed the input validation for the accelerator Trainer argument when passed as a string ([#13417](https://github.com/PyTorchLightning/pytorch-lightning/pull/13417)) +- Fixed the restoration of log step during restart ([#13467](https://github.com/PyTorchLightning/pytorch-lightning/pull/13467)) + ## [1.6.4] - 2022-06-01 diff --git a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py b/src/pytorch_lightning/loops/epoch/training_epoch_loop.py index 04e9d070a6d8e..36a594b45ae6f 100644 --- a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py +++ b/src/pytorch_lightning/loops/epoch/training_epoch_loop.py @@ -273,6 +273,7 @@ def teardown(self) -> None: def on_save_checkpoint(self) -> Dict: state_dict = super().on_save_checkpoint() + state_dict["_batches_that_stepped"] = self._batches_that_stepped if ( self.trainer is not None @@ -292,6 +293,7 @@ def on_save_checkpoint(self) -> Dict: def on_load_checkpoint(self, state_dict: Dict) -> None: # cache the dataloader state dict until the dataloader objects are available self._dataloader_state_dict = state_dict.get("dataloader_state_dict") + self._batches_that_stepped = state_dict.get("_batches_that_stepped", 0) def _run_validation(self) -> None: # reload dataloaders diff --git a/tests/tests_pytorch/loops/test_loop_state_dict.py b/tests/tests_pytorch/loops/test_loop_state_dict.py index 1e67fcc0ed8db..f9630095502d1 100644 --- a/tests/tests_pytorch/loops/test_loop_state_dict.py +++ b/tests/tests_pytorch/loops/test_loop_state_dict.py @@ -47,7 +47,7 @@ def test_loops_state_dict_structure(): expected = { "fit_loop": { "state_dict": {}, - "epoch_loop.state_dict": {}, + "epoch_loop.state_dict": {"_batches_that_stepped": 0}, "epoch_loop.batch_progress": { "total": {"ready": 0, "started": 0, "processed": 0, "completed": 0}, "current": {"ready": 0, "started": 0, "processed": 0, "completed": 0}, diff --git a/tests/tests_pytorch/models/test_restore.py b/tests/tests_pytorch/models/test_restore.py index 77f45928dd907..4f167c08e8a05 100644 --- a/tests/tests_pytorch/models/test_restore.py +++ b/tests/tests_pytorch/models/test_restore.py @@ -259,6 +259,7 @@ def on_train_start(self) -> None: 
trainer.fit(TestModel(), ckpt_path=ckpt_path) assert trainer.current_epoch == max_epochs assert trainer.global_step == max_epochs * train_batches + assert trainer.fit_loop.epoch_loop._batches_that_stepped == max_epochs * train_batches def test_fit_twice(tmpdir): From ee4b04fed1c0da131e1607e71c87a106229fb2a6 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Tue, 12 Jul 2022 19:08:37 +0900 Subject: [PATCH 69/89] CI/CD: Refactor building docker images (#13576) * Refactor docker builds in CI * Reduce duplicate and merge two workflows * push: bool expression * Extend timeout for ipu builds * Update concurrency group * Define env for push to hub * Rename workflow * Fix bool expressions * Remove unnecessary trigger paths * Remove unused env var * Update job names * Trim timeout * rename Co-authored-by: Jirka --- .github/workflows/ci-pytorch_dockers.yml | 183 --------------- .github/workflows/cicd-pytorch_dockers.yml | 242 +++++++++++++++++++ .github/workflows/events-nightly.yml | 258 --------------------- 3 files changed, 242 insertions(+), 441 deletions(-) delete mode 100644 .github/workflows/ci-pytorch_dockers.yml create mode 100644 .github/workflows/cicd-pytorch_dockers.yml diff --git a/.github/workflows/ci-pytorch_dockers.yml b/.github/workflows/ci-pytorch_dockers.yml deleted file mode 100644 index 69d5955c5db33..0000000000000 --- a/.github/workflows/ci-pytorch_dockers.yml +++ /dev/null @@ -1,183 +0,0 @@ -name: Docker - PyTorch -# https://www.docker.com/blog/first-docker-github-action-is-here -# https://github.com/docker/build-push-action -# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows -on: # Trigger the workflow on push or pull request, but only for the master branch - push: - branches: [master, "release/*"] # include release branches like release/1.0.x - pull_request: - branches: [master, "release/*"] - paths: - - "!src/lightning_app/**" # todo: implement job skip - - "!tests/tests_app/**" # todo: implement job skip - - "!tests/tests_app_examples/**" # todo: implement job skip - - "!examples/app_*" # todo: implement job skip - - "dockers/**" - - "!dockers/README.md" - - "requirements/*" - - "requirements.txt" - - "environment.yml" - - ".github/workflows/*docker*.yml" - - ".github/workflows/events-nightly.yml" - - "setup.py" - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }} - cancel-in-progress: ${{ ! 
(github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }} - -jobs: - build-PL: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - # the config used in '.azure-pipelines/gpu-tests.yml' since the Dockerfile uses the cuda image - python_version: ["3.9"] - pytorch_version: ["1.10", "1.11"] - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Build PL Docker - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - file: dockers/release/Dockerfile - push: false - timeout-minutes: 50 - - build-XLA: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - # the config used in '.circleci/config.yml`' - python_version: ["3.7"] - xla_version: ["1.11"] - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Build XLA Docker - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - XLA_VERSION=${{ matrix.xla_version }} - file: dockers/base-xla/Dockerfile - push: false - timeout-minutes: 60 - - build-CUDA: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - include: - # the config used in '.azure-pipelines/gpu-tests.yml' - - {python_version: "3.7", pytorch_version: "1.10", cuda_version: "11.1", ubuntu_version: "20.04"} - - {python_version: "3.7", pytorch_version: "1.11", cuda_version: "11.3.1", ubuntu_version: "20.04"} - # latest (used in Tutorials) - - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1", ubuntu_version: "20.04"} - - {python_version: "3.9", pytorch_version: "1.10", cuda_version: "11.1", ubuntu_version: "20.04"} - - {python_version: "3.9", pytorch_version: "1.11", cuda_version: "11.3.1", ubuntu_version: "20.04"} - steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Build CUDA Docker - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - CUDA_VERSION=${{ matrix.cuda_version }} - UBUNTU_VERSION=${{ matrix.ubuntu_version }} - file: dockers/base-cuda/Dockerfile - push: false - timeout-minutes: 95 - - build-Conda: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - include: - # see: https://pytorch.org/get-started/previous-versions/ - - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1"} - - {python_version: "3.8", pytorch_version: "1.10", cuda_version: "11.1"} - - {python_version: "3.9", pytorch_version: "1.11", cuda_version: "11.3.1"} - steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Build Conda Docker - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - CUDA_VERSION=${{ matrix.cuda_version }} - file: dockers/base-conda/Dockerfile - push: false - timeout-minutes: 95 - - build-ipu: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - # the config used in 'dockers/ci-runner-ipu/Dockerfile' - python_version: ["3.9"] # latest - pytorch_version: ["1.9"] - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Build IPU Docker - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - file: dockers/base-ipu/Dockerfile - push: false - tags: 
pytorchlightning/pytorch_lightning:base-ipu-py${{ matrix.python_version }}-torch${{ matrix.pytorch_version }} - timeout-minutes: 50 - - - name: Build IPU CI runner Docker - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - file: dockers/ci-runner-ipu/Dockerfile - push: false - timeout-minutes: 60 - - build-hpu: - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - # the config used in 'dockers/ci-runner-hpu/Dockerfile' - gaudi_version: ["1.5.0"] - pytorch_version: ["1.11.0"] - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Build HPU CI runner Docker - uses: docker/build-push-action@v2 - with: - build-args: | - DIST=latest - GAUDI_VERSION=${{ matrix.gaudi_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - file: dockers/ci-runner-hpu/Dockerfile - push: false - timeout-minutes: 60 diff --git a/.github/workflows/cicd-pytorch_dockers.yml b/.github/workflows/cicd-pytorch_dockers.yml new file mode 100644 index 0000000000000..317d005bd89f1 --- /dev/null +++ b/.github/workflows/cicd-pytorch_dockers.yml @@ -0,0 +1,242 @@ +name: Docker + +on: + push: + branches: [master, "release/*"] + pull_request: + branches: [master, "release/*"] + paths: + - "dockers/**" + - "!dockers/README.md" + - "requirements/*" + - "requirements.txt" + - "environment.yml" + - ".github/workflows/*docker*.yml" + - ".github/workflows/events-nightly.yml" + - "setup.py" + schedule: + - cron: "0 0 * * *" # at the end of every day + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}-${{ github.event_name }} + cancel-in-progress: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }} + +env: + PUSH_TO_HUB: ${{ github.event_name == 'schedule' }} + +jobs: + build-pl: + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + # the config used in '.azure-pipelines/gpu-tests.yml' since the Dockerfile uses the cuda image + python_version: ["3.9"] + pytorch_version: ["1.10", "1.11"] + steps: + - uses: actions/checkout@v2 + - uses: docker/setup-buildx-action@v1 + - uses: docker/build-push-action@v2 + with: + build-args: | + PYTHON_VERSION=${{ matrix.python_version }} + PYTORCH_VERSION=${{ matrix.pytorch_version }} + file: dockers/release/Dockerfile + push: false # pushed in release-docker.yml only when PL is released + timeout-minutes: 50 + + build-xla: + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + # the config used in '.circleci/config.yml`' + python_version: ["3.7"] + xla_version: ["1.11"] + steps: + - uses: actions/checkout@v2 + - uses: docker/setup-buildx-action@v1 + - uses: docker/login-action@v1 + if: env.PUSH_TO_HUB == 'true' + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - uses: docker/build-push-action@v2 + with: + build-args: | + PYTHON_VERSION=${{ matrix.python_version }} + XLA_VERSION=${{ matrix.xla_version }} + file: dockers/base-xla/Dockerfile + push: ${{ env.PUSH_TO_HUB }} + tags: pytorchlightning/pytorch_lightning:base-xla-py${{ matrix.python_version }}-torch${{ matrix.xla_version }} + timeout-minutes: 60 + - uses: ravsamhq/notify-slack-action@v1 + if: failure() && env.PUSH_TO_HUB == 'true' + with: + status: ${{ job.status }} + token: ${{ secrets.GITHUB_TOKEN }} + notification_title: ${{ format('XLA; {0} py{1} for *{2}*', runner.os, matrix.python_version, matrix.xla_version) }} + message_format: '{emoji} *{workflow}* 
{status_message}, see <{run_url}|detail>, cc: <@U01GD29QCAV>' # kaushikb11 + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + + build-cuda: + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + include: + # the config used in '.azure-pipelines/gpu-tests.yml' + - {python_version: "3.7", pytorch_version: "1.10", cuda_version: "11.1", ubuntu_version: "20.04"} + - {python_version: "3.7", pytorch_version: "1.11", cuda_version: "11.3.1", ubuntu_version: "20.04"} + # latest (used in Tutorials) + - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1", ubuntu_version: "20.04"} + - {python_version: "3.9", pytorch_version: "1.10", cuda_version: "11.1", ubuntu_version: "20.04"} + - {python_version: "3.9", pytorch_version: "1.11", cuda_version: "11.3.1", ubuntu_version: "20.04"} + steps: + - uses: actions/checkout@v2 + - uses: docker/setup-buildx-action@v1 + - uses: docker/login-action@v1 + if: env.PUSH_TO_HUB == 'true' + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - uses: docker/build-push-action@v2 + with: + build-args: | + PYTHON_VERSION=${{ matrix.python_version }} + PYTORCH_VERSION=${{ matrix.pytorch_version }} + CUDA_VERSION=${{ matrix.cuda_version }} + UBUNTU_VERSION=${{ matrix.ubuntu_version }} + file: dockers/base-cuda/Dockerfile + push: ${{ env.PUSH_TO_HUB }} + tags: pytorchlightning/pytorch_lightning:base-cuda-py${{ matrix.python_version }}-torch${{ matrix.pytorch_version }} + timeout-minutes: 95 + - uses: ravsamhq/notify-slack-action@v1 + if: failure() && env.PUSH_TO_HUB == 'true' + with: + status: ${{ job.status }} + token: ${{ secrets.GITHUB_TOKEN }} + notification_title: ${{ format('CUDA; {0} py{1} for *{2}*', runner.os, matrix.python_version, matrix.pytorch_version) }} + message_format: '{emoji} *{workflow}* {status_message}, see <{run_url}|detail>, cc: <@U01A5T7EY9M>' # akihironitta + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + + build-conda: + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + include: + - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1"} + - {python_version: "3.8", pytorch_version: "1.10", cuda_version: "11.1"} + - {python_version: "3.9", pytorch_version: "1.11", cuda_version: "11.3.1"} + # nightly: add when there's a release candidate + # - {python_version: "3.9", pytorch_version: "1.12"} + steps: + - uses: actions/checkout@v2 + - uses: docker/setup-buildx-action@v1 + - uses: docker/login-action@v1 + if: env.PUSH_TO_HUB == 'true' + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - uses: docker/build-push-action@v2 + with: + build-args: | + PYTHON_VERSION=${{ matrix.python_version }} + PYTORCH_VERSION=${{ matrix.pytorch_version }} + CUDA_VERSION=${{ matrix.cuda_version }} + file: dockers/base-conda/Dockerfile + push: ${{ env.PUSH_TO_HUB }} + tags: pytorchlightning/pytorch_lightning:base-conda-py${{ matrix.python_version }}-torch${{ matrix.pytorch_version }} + timeout-minutes: 95 + - uses: ravsamhq/notify-slack-action@v1 + if: failure() && env.PUSH_TO_HUB == 'true' + with: + status: ${{ job.status }} + token: ${{ secrets.GITHUB_TOKEN }} + notification_title: ${{ format('Conda; {0} py{1} for *{2}*', runner.os, matrix.python_version, matrix.pytorch_version) }} + message_format: '{emoji} *{workflow}* {status_message}, see <{run_url}|detail>, cc: <@U01A5T7EY9M>' # akihironitta + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + + build-ipu: + runs-on: ubuntu-20.04 + strategy: 
+ fail-fast: false + matrix: + include: + # the config used in 'dockers/ci-runner-ipu/Dockerfile' + - {python_version: "3.9", pytorch_version: "1.9"} + steps: + - uses: actions/checkout@v2 + - uses: docker/setup-buildx-action@v1 + - uses: docker/login-action@v1 + if: env.PUSH_TO_HUB == 'true' + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - uses: docker/build-push-action@v2 + with: + build-args: | + PYTHON_VERSION=${{ matrix.python_version }} + PYTORCH_VERSION=${{ matrix.pytorch_version }} + file: dockers/base-ipu/Dockerfile + push: ${{ env.PUSH_TO_HUB }} + tags: pytorchlightning/pytorch_lightning:base-ipu-py${{ matrix.python_version }}-torch${{ matrix.pytorch_version }} + timeout-minutes: 100 + - uses: docker/build-push-action@v2 + with: + build-args: | + PYTHON_VERSION=${{ matrix.python_version }} + PYTORCH_VERSION=${{ matrix.pytorch_version }} + file: dockers/ci-runner-ipu/Dockerfile + push: ${{ env.PUSH_TO_HUB }} + tags: pytorchlightning/pytorch_lightning:ipu-ci-runner-py${{ matrix.python_version }} + timeout-minutes: 10 + - uses: ravsamhq/notify-slack-action@v1 + if: failure() && env.PUSH_TO_HUB == 'true' + with: + status: ${{ job.status }} + token: ${{ secrets.GITHUB_TOKEN }} + notification_title: ${{ format('IPU; {0} py{1} for *{2}*', runner.os, matrix.python_version, matrix.pytorch_version) }} + message_format: '{emoji} *{workflow}* {status_message}, see <{run_url}|detail>, cc: <@U01BULUS2BG>' # SeanNaren + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + + build-hpu: + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + include: + # the config used in 'dockers/ci-runner-hpu/Dockerfile' + - {gaudi_version: "1.5.0", pytorch_version: "1.11.0"} + steps: + - uses: actions/checkout@v2 + - uses: docker/setup-buildx-action@v1 + - uses: docker/login-action@v1 + if: env.PUSH_TO_HUB == 'true' + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - uses: docker/build-push-action@v2 + with: + build-args: | + DIST=latest + GAUDI_VERSION=${{ matrix.gaudi_version }} + PYTORCH_VERSION=${{ matrix.pytorch_version }} + file: dockers/ci-runner-hpu/Dockerfile + push: ${{ env.PUSH_TO_HUB }} + tags: pytorchlightning/pytorch_lightning:hpu-ci-runner-gaudi${{ matrix.gaudi_version }} + timeout-minutes: 10 + - uses: ravsamhq/notify-slack-action@v1 + if: failure() && env.PUSH_TO_HUB == 'true' + with: + status: ${{ job.status }} + token: ${{ secrets.GITHUB_TOKEN }} + notification_title: ${{ format('HPU; {0} py{1} for *{2}*', runner.os, matrix.gaudi_version, matrix.pytorch_version) }} + message_format: '{emoji} *{workflow}* {status_message}, see <{run_url}|detail>, cc: <@U02PV6CL144> <@U0355SJN6HK>' # arao & Mythravarun N R + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/events-nightly.yml b/.github/workflows/events-nightly.yml index 0325671413dbb..9279b95c767ec 100644 --- a/.github/workflows/events-nightly.yml +++ b/.github/workflows/events-nightly.yml @@ -7,9 +7,6 @@ on: # At the end of every day - cron: "0 0 * * *" -env: - PUSH_TO_HUB: true - # based on https://github.com/pypa/gh-action-pypi-publish jobs: pypi-release: @@ -59,258 +56,3 @@ jobs: message_format: '{emoji} *{workflow}* {status_message}, see <{run_url}|detail>, cc: <@UR9FXE6QG>' #Borda env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - - docker-XLA: - if: ${{ github.repository_owner == 'Lightning-AI' }} - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - # the config 
used in '.circleci/config.yml`' - python_version: ["3.7"] - xla_version: ["1.8"] - - steps: - - name: Checkout - uses: actions/checkout@v2 - - # https://github.com/docker/setup-buildx-action - # Set up Docker Buildx - to use cache-from and cache-to argument of buildx command - - uses: docker/setup-buildx-action@v1 - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Publish XLA to Docker Hub - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - XLA_VERSION=${{ matrix.xla_version }} - file: dockers/base-xla/Dockerfile - push: ${{ env.PUSH_TO_HUB }} - tags: pytorchlightning/pytorch_lightning:base-xla-py${{ matrix.python_version }}-torch${{ matrix.xla_version }} - timeout-minutes: 55 - - # report failure to Slack - - name: Slack notification - if: failure() && github.event_name == 'schedule' - uses: ravsamhq/notify-slack-action@v1 - with: - status: ${{ job.status }} - token: ${{ secrets.GITHUB_TOKEN }} - notification_title: ${{ format('XLA; {0} py{1} for *{2}*', runner.os, matrix.python_version, matrix.xla_version) }} - message_format: '{emoji} *{workflow}* {status_message}, see <{run_url}|detail>, cc: <@U01GD29QCAV>' #kaushikb11 - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - - docker-CUDA: - if: ${{ github.repository_owner == 'Lightning-AI' }} - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - include: - # the config used in '.azure-pipelines/gpu-tests.yml' - - {python_version: "3.7", pytorch_version: "1.10", cuda_version: "11.1", ubuntu_version: "20.04"} - - {python_version: "3.7", pytorch_version: "1.11", cuda_version: "11.3.1", ubuntu_version: "20.04"} - # latest (used in Tutorials) - - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1", ubuntu_version: "20.04"} - - {python_version: "3.9", pytorch_version: "1.10", cuda_version: "11.1", ubuntu_version: "20.04"} - - {python_version: "3.9", pytorch_version: "1.11", cuda_version: "11.3.1", ubuntu_version: "20.04"} - steps: - - name: Checkout - uses: actions/checkout@v2 - - - uses: docker/setup-buildx-action@v1 - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Publish CUDA to Docker Hub - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - CUDA_VERSION=${{ matrix.cuda_version }} - UBUNTU_VERSION=${{ matrix.ubuntu_version }} - file: dockers/base-cuda/Dockerfile - push: ${{ env.PUSH_TO_HUB }} - tags: pytorchlightning/pytorch_lightning:base-cuda-py${{ matrix.python_version }}-torch${{ matrix.pytorch_version }} - timeout-minutes: 95 - - # report failure to Slack - - name: Slack notification - if: failure() && github.event_name == 'schedule' - uses: ravsamhq/notify-slack-action@v1 - with: - status: ${{ job.status }} - token: ${{ secrets.GITHUB_TOKEN }} - notification_title: ${{ format('CUDA; {0} py{1} for *{2}*', runner.os, matrix.python_version, matrix.pytorch_version) }} - message_format: '{emoji} *{workflow}* {status_message}, see <{run_url}|detail>, cc: <@U01A5T7EY9M>' #akihironitta - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - - docker-Conda: - if: ${{ github.repository_owner == 'Lightning-AI' }} - runs-on: ubuntu-20.04 - strategy: - fail-fast: 
false - matrix: - include: - # see: https://pytorch.org/get-started/previous-versions/ - - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1"} - - {python_version: "3.8", pytorch_version: "1.10", cuda_version: "11.1"} - - {python_version: "3.9", pytorch_version: "1.11", cuda_version: "11.3.1"} - # nightly: add when there's a release candidate - # - {python_version: "3.9", pytorch_version: "1.12"} - - steps: - - name: Checkout - uses: actions/checkout@v2 - - - uses: docker/setup-buildx-action@v1 - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Publish Conda to Docker Hub - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - CUDA_VERSION=${{ matrix.cuda_version }} - file: dockers/base-conda/Dockerfile - push: ${{ env.PUSH_TO_HUB }} - tags: pytorchlightning/pytorch_lightning:base-conda-py${{ matrix.python_version }}-torch${{ matrix.pytorch_version }} - timeout-minutes: 95 - - # report failure to Slack - - name: Slack notification - if: failure() && github.event_name == 'schedule' - uses: ravsamhq/notify-slack-action@v1 - with: - status: ${{ job.status }} - token: ${{ secrets.GITHUB_TOKEN }} - notification_title: ${{ format('Conda; {0} py{1} for *{2}*', runner.os, matrix.python_version, matrix.pytorch_version) }} - message_format: '{emoji} *{workflow}* {status_message}, see <{run_url}|detail>, cc: <@U01A5T7EY9M>' #akihironitta - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - - docker-IPU: - if: ${{ github.repository_owner == 'Lightning-AI' }} - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - # the config used in 'dockers/ci-runner-ipu/Dockerfile' - include: - - {python_version: "3.9", pytorch_version: "1.9"} - - steps: - - name: Checkout - uses: actions/checkout@v2 - - # https://github.com/docker/setup-buildx-action - # Set up Docker Buildx - to use cache-from and cache-to argument of buildx command - - uses: docker/setup-buildx-action@v1 - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Publish IPU base to Docker Hub - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - file: dockers/base-ipu/Dockerfile - push: ${{ env.PUSH_TO_HUB }} - tags: pytorchlightning/pytorch_lightning:base-ipu-py${{ matrix.python_version }}-torch${{ matrix.pytorch_version }} - timeout-minutes: 55 - - - name: Publish IPU CI runner to Docker Hub - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - PYTHON_VERSION=${{ matrix.python_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - file: dockers/ci-runner-ipu/Dockerfile - push: ${{ env.PUSH_TO_HUB }} - tags: pytorchlightning/pytorch_lightning:ipu-ci-runner-py${{ matrix.python_version }} - timeout-minutes: 55 - - # report failure to Slack - - name: Slack notification - if: failure() && github.event_name == 'schedule' - uses: ravsamhq/notify-slack-action@v1 - with: - status: ${{ job.status }} - token: ${{ secrets.GITHUB_TOKEN }} - notification_title: ${{ format('IPU; {0} py{1} for *{2}*', runner.os, matrix.python_version, matrix.pytorch_version) }} - message_format: '{emoji} *{workflow}* 
{status_message}, see <{run_url}|detail>, cc: <@U01BULUS2BG>' #SeanNaren - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - - docker-HPU: - if: ${{ github.repository_owner == 'Lightning-AI' }} - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - matrix: - # the config used in 'dockers/ci-runner-hpu/Dockerfile' - include: - - {gaudi_version: "1.5.0", pytorch_version: "1.11.0"} - - steps: - - name: Checkout - uses: actions/checkout@v2 - - # https://github.com/docker/setup-buildx-action - # Set up Docker Buildx - to use cache-from and cache-to argument of buildx command - - uses: docker/setup-buildx-action@v1 - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Publish HPU CI runner to Docker Hub - # publish master/release - uses: docker/build-push-action@v2 - with: - build-args: | - DIST=latest - GAUDI_VERSION=${{ matrix.gaudi_version }} - PYTORCH_VERSION=${{ matrix.pytorch_version }} - file: dockers/ci-runner-hpu/Dockerfile - push: ${{ env.PUSH_TO_HUB }} - tags: pytorchlightning/pytorch_lightning:hpu-ci-runner-gaudi${{ matrix.gaudi_version }} - timeout-minutes: 55 - - # report failure to Slack - - name: Slack notification - if: failure() && github.event_name == 'schedule' - uses: ravsamhq/notify-slack-action@v1 - with: - status: ${{ job.status }} - token: ${{ secrets.GITHUB_TOKEN }} - notification_title: ${{ format('HPU; {0} py{1} for *{2}*', runner.os, matrix.gaudi_version, matrix.pytorch_version) }} - message_format: '{emoji} *{workflow}* {status_message}, see <{run_url}|detail>, cc: <@U02PV6CL144> <@U0355SJN6HK>' #arao & Mythravarun N R - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} From c8d231218aeff178011b0078721c49a8b93d4341 Mon Sep 17 00:00:00 2001 From: Justin Goheen <26209687+JustinGoheen@users.noreply.github.com> Date: Tue, 12 Jul 2022 07:11:31 -0400 Subject: [PATCH 70/89] Fix mypy errors attributed to `pytorch_lightning.loggers.csv_logs.py` (#13538) Co-authored-by: Akihiro Nitta --- pyproject.toml | 1 - src/pytorch_lightning/loggers/csv_logs.py | 22 +++++++++++----------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 770f0983c3139..ba18f63aba3ce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,7 +61,6 @@ module = [ "pytorch_lightning.distributed.dist", "pytorch_lightning.loggers.base", "pytorch_lightning.loggers.comet", - "pytorch_lightning.loggers.csv_logs", "pytorch_lightning.loggers.mlflow", "pytorch_lightning.loggers.neptune", "pytorch_lightning.loggers.tensorboard", diff --git a/src/pytorch_lightning/loggers/csv_logs.py b/src/pytorch_lightning/loggers/csv_logs.py index 3316a5e86e64b..72d21ae2c4974 100644 --- a/src/pytorch_lightning/loggers/csv_logs.py +++ b/src/pytorch_lightning/loggers/csv_logs.py @@ -22,7 +22,7 @@ import logging import os from argparse import Namespace -from typing import Any, Dict, Optional, Union +from typing import Any, Dict, List, Optional, Union from torch import Tensor @@ -49,8 +49,8 @@ class ExperimentWriter: NAME_METRICS_FILE = "metrics.csv" def __init__(self, log_dir: str) -> None: - self.hparams = {} - self.metrics = [] + self.hparams: Dict[str, Any] = {} + self.metrics: List[Dict[str, float]] = [] self.log_dir = log_dir if os.path.exists(self.log_dir) and os.listdir(self.log_dir): @@ -69,7 +69,7 @@ def log_hparams(self, params: Dict[str, Any]) -> None: def log_metrics(self, metrics_dict: Dict[str, float], step: Optional[int] = None) -> None: 
"""Record metrics.""" - def _handle_value(value): + def _handle_value(value: Union[Tensor, Any]) -> Any: if isinstance(value, Tensor): return value.item() return value @@ -126,7 +126,7 @@ class CSVLogger(Logger): def __init__( self, save_dir: str, - name: Optional[str] = "lightning_logs", + name: str = "lightning_logs", version: Optional[Union[int, str]] = None, prefix: str = "", flush_logs_every_n_steps: int = 100, @@ -136,7 +136,7 @@ def __init__( self._name = name or "" self._version = version self._prefix = prefix - self._experiment = None + self._experiment: Optional[ExperimentWriter] = None self._flush_logs_every_n_steps = flush_logs_every_n_steps @property @@ -161,7 +161,7 @@ def log_dir(self) -> str: return log_dir @property - def save_dir(self) -> Optional[str]: + def save_dir(self) -> str: """The current directory where logs are saved. Returns: @@ -169,7 +169,7 @@ def save_dir(self) -> Optional[str]: """ return self._save_dir - @property + @property # type: ignore[misc] @rank_zero_experiment def experiment(self) -> ExperimentWriter: r""" @@ -182,7 +182,7 @@ def experiment(self) -> ExperimentWriter: self.logger.experiment.some_experiment_writer_function() """ - if self._experiment: + if self._experiment is not None: return self._experiment os.makedirs(self.root_dir, exist_ok=True) @@ -220,7 +220,7 @@ def name(self) -> str: return self._name @property - def version(self) -> int: + def version(self) -> Union[int, str]: """Gets the version of the experiment. Returns: @@ -230,7 +230,7 @@ def version(self) -> int: self._version = self._get_next_version() return self._version - def _get_next_version(self): + def _get_next_version(self) -> int: root_dir = self.root_dir if not os.path.isdir(root_dir): From c1ec00e0c8fa37c8ee14bcc211177fbac231f8b0 Mon Sep 17 00:00:00 2001 From: Justin Goheen <26209687+JustinGoheen@users.noreply.github.com> Date: Tue, 12 Jul 2022 07:45:21 -0400 Subject: [PATCH 71/89] Fix mypy errors attributed to `pytorch_lightning.loggers.base.py` (#13494) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Rohit Gupta Co-authored-by: Carlos Mocholí --- pyproject.toml | 1 - src/pytorch_lightning/loggers/base.py | 16 ++++++++++++---- src/pytorch_lightning/loggers/logger.py | 12 ++++++------ 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ba18f63aba3ce..c6e3452784945 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,7 +59,6 @@ module = [ "pytorch_lightning.demos.boring_classes", "pytorch_lightning.demos.mnist_datamodule", "pytorch_lightning.distributed.dist", - "pytorch_lightning.loggers.base", "pytorch_lightning.loggers.comet", "pytorch_lightning.loggers.mlflow", "pytorch_lightning.loggers.neptune", diff --git a/src/pytorch_lightning/loggers/base.py b/src/pytorch_lightning/loggers/base.py index 1da0749e460fe..43c572e3953c0 100644 --- a/src/pytorch_lightning/loggers/base.py +++ b/src/pytorch_lightning/loggers/base.py @@ -12,16 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Callable, Dict, Mapping, Optional, Sequence + +import numpy as np + import pytorch_lightning.loggers.logger as logger from pytorch_lightning.utilities.warnings import rank_zero_deprecation -def rank_zero_experiment(*args, **kwargs) -> None: # type: ignore[no-untyped-def] +def rank_zero_experiment(fn: Callable) -> Callable: rank_zero_deprecation( "The `pytorch_lightning.loggers.base.rank_zero_experiment` is deprecated in v1.7" " and will be removed in v1.9. Please use `pytorch_lightning.loggers.logger.rank_zero_experiment` instead." ) - return logger.rank_zero_experiment(*args, **kwargs) + return logger.rank_zero_experiment(fn) class LightningLoggerBase(logger.Logger): @@ -77,9 +81,13 @@ def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] super().__init__(*args, **kwargs) -def merge_dicts(*args, **kwargs) -> None: # type: ignore[no-untyped-def] +def merge_dicts( + dicts: Sequence[Mapping], + agg_key_funcs: Optional[Mapping] = None, + default_func: Callable[[Sequence[float]], float] = np.mean, +) -> Dict: rank_zero_deprecation( "The `pytorch_lightning.loggers.base.merge_dicts` is deprecated in v1.7" " and will be removed in v1.9. Please use `pytorch_lightning.loggers.logger.merge_dicts` instead." ) - return logger.merge_dicts(*args, **kwargs) + return logger.merge_dicts(dicts=dicts, agg_key_funcs=agg_key_funcs, default_func=default_func) diff --git a/src/pytorch_lightning/loggers/logger.py b/src/pytorch_lightning/loggers/logger.py index 4113b61627d8f..03d934aa58760 100644 --- a/src/pytorch_lightning/loggers/logger.py +++ b/src/pytorch_lightning/loggers/logger.py @@ -38,13 +38,13 @@ def rank_zero_experiment(fn: Callable) -> Callable: def experiment(self) -> Union[Any, DummyExperiment]: # type: ignore[no-untyped-def] """ Note: - `self` is a custom logger instance. The loggers typical wrap an `experiment` method - with a @rank_zero_experiment decorator. An exception being `loggers.neptune` wraps - `experiment` and `run` with rank_zero_experiment. + ``self`` is a custom logger instance. The loggers typically wrap an ``experiment`` method + with a ``@rank_zero_experiment`` decorator. An exception is that ``loggers.neptune`` wraps + ``experiment`` and ``run`` with rank_zero_experiment. - Union[Any, DummyExperiment] is used because the wrapped hooks have several returns - types that are specific to the custom logger. The return type can be considered as - Union[return type of logger.experiment, DummyExperiment] + ``Union[Any, DummyExperiment]`` is used because the wrapped hooks have several return + types that are specific to the custom logger. The return type here can be considered as + ``Union[return type of logger.experiment, DummyExperiment]``. 
""" @rank_zero_only From b10f5a59939c03b155070413e1aab99481acad5b Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Tue, 12 Jul 2022 21:17:23 +0900 Subject: [PATCH 72/89] CI: Enable dependabot for GitHub Actions (#13589) * Enable dependabot on GHA * Update comment * Update PR limit Co-authored-by: Jirka Borovec --- .github/dependabot.yml | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index a7a957a2a1db4..ab67c9026b55b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -17,8 +17,25 @@ updates: # Separate sections of the branch name with a hyphen # for example, `dependabot-npm_and_yarn-next_js-acorn-6.4.1` separator: "-" - # Allow up to 10 open pull requests for pip dependencies + # Allow up to 5 open pull requests for pip dependencies + open-pull-requests-limit: 5 + reviewers: + - "Lightning-AI/teams/core-lightning" + + # Enable version updates for GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + # Check for updates once a week + schedule: + interval: "monthly" + # Labels on pull requests for version updates only + labels: + - "ci" + pull-request-branch-name: + # Separate sections of the branch name with a hyphen + # for example, `dependabot-npm_and_yarn-next_js-acorn-6.4.1` + separator: "-" + # Allow up to 5 open pull requests for GitHub Actions open-pull-requests-limit: 5 reviewers: - - "carmocca" - "Lightning-AI/teams/core-lightning" From 3aee3450289673b1346d2af5b7c96c091d2bac7e Mon Sep 17 00:00:00 2001 From: Jimmy Yao Date: Tue, 12 Jul 2022 06:39:48 -0700 Subject: [PATCH 73/89] Fix default value for `enable_progress_bar` in docs (#13584) fix typo --- src/pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index acde9224d501e..37ba9a6ab2161 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -306,7 +306,7 @@ def __init__( Default: ``50``. enable_progress_bar: Whether to enable to progress bar by default. - Default: ``False``. + Default: ``True``. profiler: To profile individual steps during training and assist in identifying bottlenecks. Default: ``None``. 
From cd09761720109885ca99642863a91f163a56be12 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Tue, 12 Jul 2022 22:50:46 +0900 Subject: [PATCH 74/89] CI: Update labeler bot (#13624) Update labeler --- .github/workflows/labeler.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index fe060c10cb980..db5a4fcffb927 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -12,4 +12,3 @@ jobs: - uses: actions/labeler@v4 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" - sync-labels: true From 047d8ee2e11809083b62a0649142dfcf377bbcaa Mon Sep 17 00:00:00 2001 From: Rohit Gupta Date: Tue, 12 Jul 2022 19:21:11 +0530 Subject: [PATCH 75/89] Remove redundant GPU test (#13623) Remove redundant test --- .../tests_pytorch/trainer/test_dataloaders.py | 49 +------------------ 1 file changed, 1 insertion(+), 48 deletions(-) diff --git a/tests/tests_pytorch/trainer/test_dataloaders.py b/tests/tests_pytorch/trainer/test_dataloaders.py index 6b01150c80857..5bea5a4cbbe1c 100644 --- a/tests/tests_pytorch/trainer/test_dataloaders.py +++ b/tests/tests_pytorch/trainer/test_dataloaders.py @@ -19,7 +19,7 @@ import torch from torch.utils.data import RandomSampler from torch.utils.data.dataloader import DataLoader -from torch.utils.data.dataset import Dataset, IterableDataset, Subset +from torch.utils.data.dataset import Dataset, IterableDataset from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import SequentialSampler @@ -831,53 +831,6 @@ def test_dataloader_distributed_sampler_already_attached(tmpdir): assert trainer.state.finished, "DDP Training failed" -@RunIf(min_cuda_gpus=3) -def test_batch_size_smaller_than_num_gpus(tmpdir): - # we need at least 3 gpus for this test - num_gpus = 3 - batch_size = 3 - - class CurrentTestModel(BoringModel): - def __init__(self, batch_size) -> None: - super().__init__() - self.save_hyperparameters() - # batch norm doesn't work with batch size 1, we replace it - self.c_d1_bn = torch.nn.ReLU() - - def training_step(self, *args, **kwargs): - output = super().training_step(*args, **kwargs) - loss = output["loss"] - # we make sure to add some metrics to the output dict, - # this is essential for this test - output["progress_bar"] = {"train_loss": loss} - return output - - def train_dataloader(self): - dataset = RandomDataset(32, 64) - # construct a dataset with a size that is not divisible by num_gpus - # therefore the last batch will have a size < num_gpus - size = num_gpus * self.hparams.batch_size + (num_gpus - 1) - dataset = Subset(dataset, range(size)) - dataloader = DataLoader(dataset, batch_size=self.hparams.batch_size, drop_last=False) - return dataloader - - model = CurrentTestModel(batch_size=batch_size) - - trainer = Trainer( - default_root_dir=tmpdir, - max_epochs=1, - limit_train_batches=0.1, - limit_val_batches=0, - accelerator="gpu", - devices=num_gpus, - ) - - # we expect the reduction for the metrics also to happen on the last batch - # where we will get fewer metrics than gpus - trainer.fit(model) - assert trainer.state.finished, f"Training failed with {trainer.state}" - - @pytest.mark.parametrize( ["multiple_trainloader_mode", "num_training_batches"], [("min_size", 16), ("max_size_cycle", 64)], From 8e56a52e28bcabd2a8f254f9fd8fd9db257c72a5 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Tue, 12 Jul 2022 15:51:29 +0200 Subject: [PATCH 76/89] CI: hotfix gatekeeper (#13606) * CI: hotfix gatekeeper * no min * min 1 --- 
.github/approve_config.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/approve_config.yml b/.github/approve_config.yml index 76e83beb1aa39..365f2a89e23c2 100644 --- a/.github/approve_config.yml +++ b/.github/approve_config.yml @@ -3,7 +3,7 @@ approvals: minimum: 1 groups: - name: 'PyTorch Lightning' - minimum: 0 + minimum: 1 from: - awaelchli - Borda @@ -16,7 +16,8 @@ approvals: - tchaton - williamFalcon - name: 'Lightning Apps' - minimum: 0 + minimum: 1 + from: - alecmerdler - awaelchli - hhsecond From 7174d7e035e1a73615db4cd0eb4d57af96f6497e Mon Sep 17 00:00:00 2001 From: Nikhil Shenoy Date: Tue, 12 Jul 2022 20:17:51 +0530 Subject: [PATCH 77/89] Remove `add_to_queue` and `remove_from_queue` from LightningModule (#13600) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- .../common/lightning_module.rst | 12 ------- src/pytorch_lightning/CHANGELOG.md | 3 ++ src/pytorch_lightning/core/module.py | 22 ------------- .../strategies/launchers/spawn.py | 8 ----- .../strategies/launchers/xla_spawn.py | 4 --- .../trainer/configuration_validator.py | 18 ----------- .../deprecated_api/test_remove_1-7.py | 19 ----------- .../strategies/test_ddp_spawn_strategy.py | 32 ++----------------- 8 files changed, 6 insertions(+), 112 deletions(-) diff --git a/docs/source-pytorch/common/lightning_module.rst b/docs/source-pytorch/common/lightning_module.rst index 636777ec7e9e5..bf774b02a2f8a 100644 --- a/docs/source-pytorch/common/lightning_module.rst +++ b/docs/source-pytorch/common/lightning_module.rst @@ -1626,15 +1626,3 @@ on_after_batch_transfer .. automethod:: pytorch_lightning.core.module.LightningModule.on_after_batch_transfer :noindex: - -add_to_queue -~~~~~~~~~~~~ - -.. automethod:: pytorch_lightning.core.module.LightningModule.add_to_queue - :noindex: - -get_from_queue -~~~~~~~~~~~~~~ - -.. automethod:: pytorch_lightning.core.module.LightningModule.get_from_queue - :noindex: diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 952cad27d8613..8a22757b9bf76 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -177,6 +177,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Removed +- Removed the deprecated `LightningModule.add_to_queue` and `LightningModule.get_from_queue` method ([#13600](https://github.com/PyTorchLightning/pytorch-lightning/pull/13600)) + + - Removed deprecated `pytorch_lightning.core.decorators.parameter_validation` from `decorators` ([#13514](https://github.com/Lightning-AI/lightning/pull/13514)) diff --git a/src/pytorch_lightning/core/module.py b/src/pytorch_lightning/core/module.py index ef4a869b3c502..022f7ab678e78 100644 --- a/src/pytorch_lightning/core/module.py +++ b/src/pytorch_lightning/core/module.py @@ -1955,28 +1955,6 @@ def use_amp(self, use_amp: bool) -> None: ) self._use_amp = use_amp - def add_to_queue(self, queue: pl.strategies.launchers.spawn._FakeQueue) -> None: - """Appends the :attr:`trainer.callback_metrics` dictionary to the given queue. To avoid issues with memory - sharing, we cast the data to numpy. - - Args: - queue: the instance of the queue to append the data. - - .. deprecated:: v1.5 - This method was deprecated in v1.5 and will be removed in v1.7. - """ - - def get_from_queue(self, queue: pl.strategies.launchers.spawn._FakeQueue) -> None: - """Retrieve the :attr:`trainer.callback_metrics` dictionary from the given queue. 
To preserve consistency, - we cast back the data to ``torch.Tensor``. - - Args: - queue: the instance of the queue from where to get the data. - - .. deprecated:: v1.5 - This method was deprecated in v1.5 and will be removed in v1.7. - """ - @contextmanager def _prevent_trainer_and_dataloaders_deepcopy(self) -> None: self._should_prevent_trainer_and_dataloaders_deepcopy = True diff --git a/src/pytorch_lightning/strategies/launchers/spawn.py b/src/pytorch_lightning/strategies/launchers/spawn.py index d94909b778a83..0a92ceee5aacf 100644 --- a/src/pytorch_lightning/strategies/launchers/spawn.py +++ b/src/pytorch_lightning/strategies/launchers/spawn.py @@ -26,7 +26,6 @@ from pytorch_lightning.strategies.strategy import Strategy from pytorch_lightning.trainer.states import TrainerFn, TrainerState from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device -from pytorch_lightning.utilities.model_helpers import is_overridden from pytorch_lightning.utilities.rank_zero import rank_zero_debug from pytorch_lightning.utilities.types import _PATH @@ -122,10 +121,6 @@ def _recover_results_in_main_process(self, spawn_output: "_SpawnOutput", trainer trainer.state = spawn_output.trainer_state # get the `callback_metrics` and set it to the trainer - if is_overridden("get_from_queue", trainer.lightning_module): - # only in case the user does not override it. - # TODO: Remove the if in v1.7 - trainer.lightning_module.get_from_queue(spawn_output.extra) self.get_from_queue(trainer, spawn_output.extra) def _collect_rank_zero_results(self, trainer: "pl.Trainer", results: Any) -> Optional["_SpawnOutput"]: @@ -151,9 +146,6 @@ def _collect_rank_zero_results(self, trainer: "pl.Trainer", results: Any) -> Opt # adds the `callback_metrics` to the queue extra = _FakeQueue() - if is_overridden("add_to_queue", trainer.lightning_module): - # TODO: Remove the if in v1.7 - trainer.lightning_module.add_to_queue(extra) self.add_to_queue(trainer, extra) return _SpawnOutput(best_model_path, weights_path, trainer.state, results, extra) diff --git a/src/pytorch_lightning/strategies/launchers/xla_spawn.py b/src/pytorch_lightning/strategies/launchers/xla_spawn.py index 13c948577ca5b..9c47e3b325cac 100644 --- a/src/pytorch_lightning/strategies/launchers/xla_spawn.py +++ b/src/pytorch_lightning/strategies/launchers/xla_spawn.py @@ -23,7 +23,6 @@ from pytorch_lightning.trainer.states import TrainerFn from pytorch_lightning.utilities import _TPU_AVAILABLE from pytorch_lightning.utilities.apply_func import move_data_to_device -from pytorch_lightning.utilities.model_helpers import is_overridden from pytorch_lightning.utilities.rank_zero import rank_zero_debug if _TPU_AVAILABLE: @@ -136,9 +135,6 @@ def _collect_rank_zero_results(self, trainer: "pl.Trainer", results: Any) -> Opt # adds the `callback_metrics` to the queue extra = _FakeQueue() - if is_overridden("add_to_queue", trainer.lightning_module): - # TODO: Remove the if in v1.7 - trainer.lightning_module.add_to_queue(extra) self.add_to_queue(trainer, extra) return _SpawnOutput(best_model_path, weights_path, trainer.state, results, extra) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index ceeec9f7fcbcd..c53e22ea74a76 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -46,7 +46,6 @@ def verify_loop_configurations(trainer: "pl.Trainer") -> None: __verify_eval_loop_configuration(trainer, 
model, "predict") __verify_dp_batch_transfer_support(trainer, model) - _check_add_get_queue(model) # TODO: Delete _check_on_post_move_to_device in v1.7 _check_on_post_move_to_device(model) _check_deprecated_callback_hooks(trainer) @@ -218,23 +217,6 @@ def __check_training_step_requires_dataloader_iter(model: "pl.LightningModule") ) -def _check_add_get_queue(model: "pl.LightningModule") -> None: - r""" - Checks if add_to_queue or get_from_queue is overridden and sends a deprecation warning. - - Args: - model: The lightning module - """ - if is_overridden("add_to_queue", model): - rank_zero_deprecation( - "The `LightningModule.add_to_queue` method was deprecated in v1.5 and will be removed in v1.7." - ) - if is_overridden("get_from_queue", model): - rank_zero_deprecation( - "The `LightningModule.get_from_queue` method was deprecated in v1.5 and will be removed in v1.7." - ) - - # TODO: Delete _check_on_hpc_hooks in v1.8 def _check_on_hpc_hooks(model: "pl.LightningModule") -> None: if is_overridden("on_hpc_save", model): diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py index 17cccbfa80a5e..629bb9f9136ef 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py @@ -34,25 +34,6 @@ from tests_pytorch.plugins.environments.test_lsf_environment import _make_rankfile -class BoringCallbackDDPSpawnModel(BoringModel): - def add_to_queue(self, queue): - ... - - def get_from_queue(self, queue): - ... - - -def test_v1_7_0_deprecate_add_get_queue(tmpdir): - model = BoringCallbackDDPSpawnModel() - trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True) - - with pytest.deprecated_call(match=r"`LightningModule.add_to_queue` method was deprecated in v1.5"): - trainer.fit(model) - - with pytest.deprecated_call(match=r"`LightningModule.get_from_queue` method was deprecated in v1.5"): - trainer.fit(model) - - def test_v1_7_0_deprecate_lightning_distributed(tmpdir): with pytest.deprecated_call(match="LightningDistributed is deprecated in v1.5 and will be removed in v1.7."): from pytorch_lightning.distributed.dist import LightningDistributed diff --git a/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py b/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py index 9a072368b0136..5af3df4613a2c 100644 --- a/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py +++ b/tests/tests_pytorch/strategies/test_ddp_spawn_strategy.py @@ -44,14 +44,6 @@ def validation_step(self, batch, batch_idx): self.log(self.name, self.val) return super().validation_step(batch, batch_idx) - def add_to_queue(self, queue) -> None: - queue.put("test_val") - return super().add_to_queue(queue) - - def get_from_queue(self, queue) -> None: - self.test_val = queue.get() - return super().get_from_queue(queue) - @RunIf(skip_windows=True) def test_ddp_cpu(): @@ -67,31 +59,13 @@ def test_ddp_cpu(): trainer.fit(model) -@RunIf(min_cuda_gpus=2) -def test_ddp_spawn_extra_parameters(tmpdir): - """Tests if device is set correctly when training for DDPSpawnStrategy and tests add_to_queue/get_from_queue - with Lightning Module (deprecated way).""" - trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, accelerator="gpu", devices=2, strategy="ddp_spawn") - - assert isinstance(trainer.strategy, DDPSpawnStrategy) - assert trainer.strategy.root_device == torch.device("cuda:0") - - val: float = 1.0 - val_name: str = "val_acc" - model = BoringCallbackDDPSpawnModel(val_name, val) - dm = 
BoringDataModule() - trainer.fit(model, datamodule=dm) - assert trainer.callback_metrics[val_name] == torch.tensor(val) - assert model.test_val == "test_val" - - class CustomSpawnLauncher(_SpawnLauncher): def add_to_queue(self, trainer, queue) -> None: - queue.put("new_test_val") + queue.put("test_val") return super().add_to_queue(trainer, queue) def get_from_queue(self, trainer: Trainer, queue) -> None: - trainer.strategy.new_test_val = queue.get() + trainer.strategy.test_val = queue.get() return super().get_from_queue(trainer, queue) @@ -115,7 +89,7 @@ def test_ddp_spawn_add_get_queue(tmpdir): dm = BoringDataModule() trainer.fit(model, datamodule=dm) assert trainer.callback_metrics[val_name] == torch.tensor(val) - assert ddp_spawn_strategy.new_test_val == "new_test_val" + assert ddp_spawn_strategy.test_val == "test_val" class BoringModelDDP(BoringModel): From 6924b41ba7ebce660e070a23b5a8322acaeb14df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Jul 2022 16:13:34 +0000 Subject: [PATCH 78/89] Bump codecov/codecov-action from 1 to 3 (#13620) --- .github/workflows/ci-app_examples.yml | 2 +- .github/workflows/ci-app_tests.yml | 2 +- .github/workflows/ci-pytorch_test-conda.yml | 2 +- .github/workflows/ci-pytorch_test-full.yml | 2 +- .github/workflows/ci-pytorch_test-slow.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-app_examples.yml b/.github/workflows/ci-app_examples.yml index 30d29a853597e..46e523fab8acf 100644 --- a/.github/workflows/ci-app_examples.yml +++ b/.github/workflows/ci-app_examples.yml @@ -107,7 +107,7 @@ jobs: coverage report -i - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v3 with: token: ${{ secrets.CODECOV_TOKEN }} file: tests/coverage.xml diff --git a/.github/workflows/ci-app_tests.yml b/.github/workflows/ci-app_tests.yml index 3993c31afab0c..cd0078ae59f1a 100644 --- a/.github/workflows/ci-app_tests.yml +++ b/.github/workflows/ci-app_tests.yml @@ -114,7 +114,7 @@ jobs: coverage report -i - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v3 with: token: ${{ secrets.CODECOV_TOKEN }} file: tests/coverage.xml diff --git a/.github/workflows/ci-pytorch_test-conda.yml b/.github/workflows/ci-pytorch_test-conda.yml index c062e6e02acb1..89d333b818b32 100644 --- a/.github/workflows/ci-pytorch_test-conda.yml +++ b/.github/workflows/ci-pytorch_test-conda.yml @@ -93,7 +93,7 @@ jobs: coverage xml - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v3 if: always() # see: https://github.com/actions/toolkit/issues/399 continue-on-error: true diff --git a/.github/workflows/ci-pytorch_test-full.yml b/.github/workflows/ci-pytorch_test-full.yml index 42ec2b71fd0b6..e1159443c3eea 100644 --- a/.github/workflows/ci-pytorch_test-full.yml +++ b/.github/workflows/ci-pytorch_test-full.yml @@ -160,7 +160,7 @@ jobs: coverage xml - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v3 if: always() # see: https://github.com/actions/toolkit/issues/399 continue-on-error: true diff --git a/.github/workflows/ci-pytorch_test-slow.yml b/.github/workflows/ci-pytorch_test-slow.yml index 279c4ffe772a8..41d0543d05f7f 100644 --- a/.github/workflows/ci-pytorch_test-slow.yml +++ b/.github/workflows/ci-pytorch_test-slow.yml @@ -84,7 +84,7 @@ jobs: coverage xml - name: Upload coverage to Codecov - uses: 
codecov/codecov-action@v1 + uses: codecov/codecov-action@v3 if: success() # see: https://github.com/actions/toolkit/issues/399 continue-on-error: true From 27a0ac952378ab382dd9f61bde96d852de54be49 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Jul 2022 16:34:18 +0000 Subject: [PATCH 79/89] Bump actions/upload-artifact from 2 to 3 (#13622) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 2 to 3. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-app_examples.yml | 2 +- .github/workflows/ci-app_tests.yml | 2 +- .github/workflows/ci-pytorch_test-conda.yml | 2 +- .github/workflows/ci-pytorch_test-full.yml | 2 +- .github/workflows/ci-pytorch_test-slow.yml | 2 +- .github/workflows/docs-checks.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-app_examples.yml b/.github/workflows/ci-app_examples.yml index 46e523fab8acf..0cbdc387d13c6 100644 --- a/.github/workflows/ci-app_examples.yml +++ b/.github/workflows/ci-app_examples.yml @@ -93,7 +93,7 @@ jobs: coverage run --source lightning_app -m pytest -m "not cloud" tests_app_examples --timeout=300 -vvvv --junitxml=$PYTEST_ARTIFACT --durations=0 - name: Upload pytest test results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: unittest-results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }} path: tests/results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }}.xml diff --git a/.github/workflows/ci-app_tests.yml b/.github/workflows/ci-app_tests.yml index cd0078ae59f1a..ae8416634a2cb 100644 --- a/.github/workflows/ci-app_tests.yml +++ b/.github/workflows/ci-app_tests.yml @@ -100,7 +100,7 @@ jobs: coverage run --source lightning_app -m pytest -m "not cloud" tests_app --timeout=300 -vvvv --junitxml=$PYTEST_ARTIFACT --durations=0 - name: Upload pytest test results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: unittest-results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }} path: tests/results-${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.requires }}.xml diff --git a/.github/workflows/ci-pytorch_test-conda.yml b/.github/workflows/ci-pytorch_test-conda.yml index 89d333b818b32..43cf73052b757 100644 --- a/.github/workflows/ci-pytorch_test-conda.yml +++ b/.github/workflows/ci-pytorch_test-conda.yml @@ -79,7 +79,7 @@ jobs: run: coverage run --source pytorch_lightning -m pytest -v --timeout 150 --durations=50 --junitxml=results-${{ runner.os }}-torch${{ matrix.pytorch-version }}.xml - name: Upload pytest results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: unittest-results-${{ runner.os }}-torch${{ matrix.pytorch-version }} path: tests/tests_pytorch/results-${{ runner.os }}-torch${{ matrix.pytorch-version }}.xml diff --git a/.github/workflows/ci-pytorch_test-full.yml b/.github/workflows/ci-pytorch_test-full.yml index e1159443c3eea..2db59a30f5b64 100644 --- a/.github/workflows/ci-pytorch_test-full.yml +++ b/.github/workflows/ci-pytorch_test-full.yml @@ -136,7 +136,7 @@ jobs: 
run: coverage run --source pytorch_lightning -m pytest -v --durations=50 --junitxml=results-${{ runner.os }}-py${{ matrix.python-version }}-${{ matrix.requires }}-${{ matrix.release }}.xml - name: Upload pytest results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: unittest-results-${{ runner.os }}-py${{ matrix.python-version }}-${{ matrix.requires }}-${{ matrix.release }} path: tests/tests_pytorch/results-${{ runner.os }}-py${{ matrix.python-version }}-${{ matrix.requires }}-${{ matrix.release }}.xml diff --git a/.github/workflows/ci-pytorch_test-slow.yml b/.github/workflows/ci-pytorch_test-slow.yml index 41d0543d05f7f..79cede5b59b8c 100644 --- a/.github/workflows/ci-pytorch_test-slow.yml +++ b/.github/workflows/ci-pytorch_test-slow.yml @@ -70,7 +70,7 @@ jobs: PL_RUN_SLOW_TESTS: 1 - name: Upload pytest test results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: unittest-results-${{ runner.os }}-py${{ matrix.python-version }} path: tests/tests_pytorch/results-${{ runner.os }}-py${{ matrix.python-version }}.xml diff --git a/.github/workflows/docs-checks.yml b/.github/workflows/docs-checks.yml index 25a9b17d6914b..eadf11a56801d 100644 --- a/.github/workflows/docs-checks.yml +++ b/.github/workflows/docs-checks.yml @@ -101,7 +101,7 @@ jobs: make html --debug --jobs $(nproc) SPHINXOPTS="-W --keep-going" - name: Upload built docs - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: docs-results-${{ github.sha }} path: docs/build/html/ From 6b953c7a1a3ce15c3122a9d50ea2a40c1df556e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Jul 2022 21:14:03 +0200 Subject: [PATCH 80/89] Bump docker/setup-buildx-action from 1 to 2 (#13618) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 1 to 2. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v1...v2) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cicd-pytorch_dockers.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cicd-pytorch_dockers.yml b/.github/workflows/cicd-pytorch_dockers.yml index 317d005bd89f1..c1f300d0ac996 100644 --- a/.github/workflows/cicd-pytorch_dockers.yml +++ b/.github/workflows/cicd-pytorch_dockers.yml @@ -35,7 +35,7 @@ jobs: pytorch_version: ["1.10", "1.11"] steps: - uses: actions/checkout@v2 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - uses: docker/build-push-action@v2 with: build-args: | @@ -55,7 +55,7 @@ jobs: xla_version: ["1.11"] steps: - uses: actions/checkout@v2 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v1 if: env.PUSH_TO_HUB == 'true' with: @@ -95,7 +95,7 @@ jobs: - {python_version: "3.9", pytorch_version: "1.11", cuda_version: "11.3.1", ubuntu_version: "20.04"} steps: - uses: actions/checkout@v2 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v1 if: env.PUSH_TO_HUB == 'true' with: @@ -135,7 +135,7 @@ jobs: # - {python_version: "3.9", pytorch_version: "1.12"} steps: - uses: actions/checkout@v2 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v1 if: env.PUSH_TO_HUB == 'true' with: @@ -171,7 +171,7 @@ jobs: - {python_version: "3.9", pytorch_version: "1.9"} steps: - uses: actions/checkout@v2 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v1 if: env.PUSH_TO_HUB == 'true' with: @@ -215,7 +215,7 @@ jobs: - {gaudi_version: "1.5.0", pytorch_version: "1.11.0"} steps: - uses: actions/checkout@v2 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v1 if: env.PUSH_TO_HUB == 'true' with: From 423956470a7fac16cfb880098a72bd2740f63859 Mon Sep 17 00:00:00 2001 From: Sanjay Aradhyamath <57592361+samz5320@users.noreply.github.com> Date: Wed, 13 Jul 2022 06:23:18 +0530 Subject: [PATCH 81/89] Removed deprecated `pytorch_lightning.overrides.distributed.IndexBatchSamplerWrapper.batch_indices` (#13565) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Removed the deprecated method * Removed deprecated IndexBatchSamplerWrapper.batch_indices * Update src/pytorch_lightning/CHANGELOG.md * Missed code Co-authored-by: Akihiro Nitta Co-authored-by: Carlos Mocholí --- src/pytorch_lightning/CHANGELOG.md | 3 +++ .../overrides/distributed.py | 19 ------------------- .../deprecated_api/test_remove_1-7.py | 11 ----------- 3 files changed, 3 insertions(+), 30 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 8a22757b9bf76..4a588191501ed 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -177,6 +177,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Removed +- Removed deprecated `IndexBatchSamplerWrapper.batch_indices` ([#13565](https://github.com/PyTorchLightning/pytorch-lightning/pull/13565)) + + - Removed the deprecated `LightningModule.add_to_queue` and `LightningModule.get_from_queue` method ([#13600](https://github.com/PyTorchLightning/pytorch-lightning/pull/13600)) diff --git a/src/pytorch_lightning/overrides/distributed.py b/src/pytorch_lightning/overrides/distributed.py index 15a8632af938b..8048d83252af7 100644 --- a/src/pytorch_lightning/overrides/distributed.py +++ b/src/pytorch_lightning/overrides/distributed.py @@ -20,7 +20,6 @@ from torch.utils.data import BatchSampler, Dataset, DistributedSampler, Sampler from pytorch_lightning.overrides.base import _LightningModuleWrapperBase -from pytorch_lightning.utilities import rank_zero_deprecation from pytorch_lightning.utilities.exceptions import MisconfigurationException @@ -176,28 +175,10 @@ class IndexBatchSamplerWrapper: def __init__(self, sampler: BatchSampler) -> None: self.seen_batch_indices: List[List[int]] = [] self._sampler = sampler - self._batch_indices: List[int] = [] - - @property - def batch_indices(self) -> List[int]: - rank_zero_deprecation( - "The attribute `IndexBatchSamplerWrapper.batch_indices` was deprecated in v1.5 and will be removed in" - " v1.7. Access the full list `seen_batch_indices` instead." - ) - return self._batch_indices - - @batch_indices.setter - def batch_indices(self, indices: List[int]) -> None: - rank_zero_deprecation( - "The attribute `IndexBatchSamplerWrapper.batch_indices` was deprecated in v1.5 and will be removed in" - " v1.7. Access the full list `seen_batch_indices` instead." - ) - self._batch_indices = indices def __iter__(self) -> Iterator[List[int]]: self.seen_batch_indices = [] for batch in self._sampler: - self._batch_indices = batch self.seen_batch_indices.append(batch) yield batch diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py index 629bb9f9136ef..4187757fa3980 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py @@ -15,14 +15,12 @@ import os from re import escape from unittest import mock -from unittest.mock import Mock import pytest import torch from pytorch_lightning import Trainer from pytorch_lightning.demos.boring_classes import BoringModel -from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper from pytorch_lightning.plugins.environments import ( KubeflowEnvironment, LightningEnvironment, @@ -148,15 +146,6 @@ def is_using_torchelastic(): MyClusterEnvironment() -def test_v1_7_0_index_batch_sampler_wrapper_batch_indices(): - sampler = IndexBatchSamplerWrapper(Mock()) - with pytest.deprecated_call(match="was deprecated in v1.5 and will be removed in v1.7"): - _ = sampler.batch_indices - - with pytest.deprecated_call(match="was deprecated in v1.5 and will be removed in v1.7"): - sampler.batch_indices = [] - - def test_v1_7_0_post_dispatch_hook(): class CustomPlugin(SingleDeviceStrategy): def post_dispatch(self, trainer): From d83a42347ef5fd752a1661a914414c46c12008fe Mon Sep 17 00:00:00 2001 From: Cyprien Ricque <48893621+Cyprien-Ricque@users.noreply.github.com> Date: Wed, 13 Jul 2022 16:00:12 +0200 Subject: [PATCH 82/89] fix mypy typing errors in pytorch_lightning/strategies/dp.py (#13564) Co-authored-by: Rohit Gupta Co-authored-by: otaj --- pyproject.toml | 1 - src/pytorch_lightning/strategies/dp.py | 39 +++++++++++++------- 
src/pytorch_lightning/strategies/strategy.py | 1 + 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c6e3452784945..3fbc7f0a4c9d9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,7 +68,6 @@ module = [ "pytorch_lightning.strategies.ddp", "pytorch_lightning.strategies.ddp_spawn", "pytorch_lightning.strategies.deepspeed", - "pytorch_lightning.strategies.dp", "pytorch_lightning.strategies.fully_sharded", "pytorch_lightning.strategies.horovod", "pytorch_lightning.strategies.ipu", diff --git a/src/pytorch_lightning/strategies/dp.py b/src/pytorch_lightning/strategies/dp.py index bed25e1e56671..5ab5021b8ac50 100644 --- a/src/pytorch_lightning/strategies/dp.py +++ b/src/pytorch_lightning/strategies/dp.py @@ -11,20 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union import torch from torch import Tensor from torch.nn import DataParallel, Module import pytorch_lightning as pl +from pytorch_lightning.overrides.base import _LightningPrecisionModuleWrapperBase from pytorch_lightning.overrides.data_parallel import LightningParallelModule from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO from pytorch_lightning.plugins.precision import PrecisionPlugin from pytorch_lightning.strategies.parallel import ParallelStrategy +from pytorch_lightning.strategies.strategy import TBroadcast, TReduce from pytorch_lightning.utilities.apply_func import apply_to_collection +from pytorch_lightning.utilities.distributed import ReduceOp from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.types import _METRIC_COLLECTION, STEP_OUTPUT +from pytorch_lightning.utilities.types import STEP_OUTPUT class DataParallelStrategy(ParallelStrategy): @@ -67,6 +70,7 @@ def world_size(self) -> int: def setup(self, trainer: "pl.Trainer") -> None: # model needs to be moved to the device before it is wrapped self.model_to_device() + assert isinstance(self.model, (pl.LightningModule, _LightningPrecisionModuleWrapperBase)) self.model = self._setup_model(LightningParallelModule(self.model)) super().setup(trainer) @@ -87,14 +91,15 @@ def _setup_model(self, model: Module) -> DataParallel: """Wraps the given model into a :class:`~torch.nn.parallel.DataParallel` module.""" return DataParallel(module=model, device_ids=self.parallel_devices) - def reduce(self, collection: _METRIC_COLLECTION, *args, **kwargs) -> _METRIC_COLLECTION: + def reduce( + self, collection: TReduce, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = "mean" + ) -> TReduce: """Reduces a collection of tensors from all processes. It can be applied to just a single tensor. Args: collection: The collection of tensors to sync and reduce. - *args: ignored for DP - **kwargs: ignored for DP - + group: ignored for DP + reduce_op: ignored for DP Return: Reduced tensor values or the same value if it was not or did not contain a tensor. 
""" @@ -106,38 +111,44 @@ def mean(t: Tensor) -> Tensor: return apply_to_collection(collection, Tensor, mean) @property - def root_device(self): + def root_device(self) -> torch.device: + assert self.parallel_devices is not None return self.parallel_devices[0] def model_to_device(self) -> None: + assert self.model is not None self.model.to(self.root_device) - def barrier(self, *args, **kwargs): + def barrier(self, *args: Any, **kwargs: Any) -> None: pass - def broadcast(self, obj: object, src: int = 0) -> object: + def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast: return obj def reduce_boolean_decision(self, decision: bool) -> bool: return decision - def training_step(self, *args, **kwargs) -> STEP_OUTPUT: + def training_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT: with self.precision_plugin.train_step_context(): + assert self.model is not None return self.model(*args, **kwargs) - def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: + def validation_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]: with self.precision_plugin.val_step_context(): + assert self.model is not None return self.model(*args, **kwargs) - def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: + def test_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]: with self.precision_plugin.test_step_context(): + assert self.model is not None return self.model(*args, **kwargs) - def predict_step(self, *args, **kwargs) -> STEP_OUTPUT: + def predict_step(self, *args: Any, **kwargs: Any) -> STEP_OUTPUT: with self.precision_plugin.predict_step_context(): + assert self.model is not None return self.model(*args, **kwargs) - def training_step_end(self, output): + def training_step_end(self, output: STEP_OUTPUT) -> STEP_OUTPUT: if is_overridden("training_step_end", self.lightning_module): return output diff --git a/src/pytorch_lightning/strategies/strategy.py b/src/pytorch_lightning/strategies/strategy.py index 9de30889336fe..3890a739ed748 100644 --- a/src/pytorch_lightning/strategies/strategy.py +++ b/src/pytorch_lightning/strategies/strategy.py @@ -38,6 +38,7 @@ from pytorch_lightning.utilities.types import _PATH, LRSchedulerConfig, STEP_OUTPUT TBroadcast = TypeVar("TBroadcast") +TReduce = TypeVar("TReduce") log = logging.getLogger(__name__) From 604f7cad02ee8ec915f2b7b7fa7e26f9eef025ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 13 Jul 2022 18:50:55 +0200 Subject: [PATCH 83/89] Remove deprecated `Trainer.slurm_job_id` (#13459) --- src/pytorch_lightning/CHANGELOG.md | 3 +++ src/pytorch_lightning/trainer/trainer.py | 6 ------ tests/tests_pytorch/deprecated_api/test_remove_1-7.py | 6 ------ 3 files changed, 3 insertions(+), 12 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 4a588191501ed..70caaeb54d905 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -278,6 +278,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Removed deprecated `pytorch_lightning.callbacks.lr_monitor.LearningRateMonitor.lr_sch_names` ([#13353](https://github.com/Lightning-AI/lightning/pull/13353)) +- Removed deprecated `Trainer.slurm_job_id` in favor of `SLURMEnvironment.job_id` ([#13459](https://github.com/PyTorchLightning/pytorch-lightning/pull/13459)) + + - Removed support for the `DDP2Strategy` ([#12705](https://github.com/PyTorchLightning/pytorch-lightning/pull/12705)) diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index 37ba9a6ab2161..d2c41350264b8 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -61,7 +61,6 @@ PLUGIN_INPUT, PrecisionPlugin, ) -from pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment from pytorch_lightning.profilers import ( AdvancedProfiler, PassThroughProfiler, @@ -2230,11 +2229,6 @@ def use_amp(self) -> bool: def is_global_zero(self) -> bool: return self.strategy.is_global_zero - @property - def slurm_job_id(self) -> Optional[int]: - rank_zero_deprecation("Method `slurm_job_id` is deprecated in v1.6.0 and will be removed in v1.7.0.") - return SLURMEnvironment.job_id() - @property def distributed_sampler_kwargs(self) -> Optional[dict]: if isinstance(self.strategy, ParallelStrategy): diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py index 4187757fa3980..ab7272eb0247b 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py @@ -54,12 +54,6 @@ def on_post_move_to_device(self): trainer.fit(model) -def test_v1_7_0_deprecated_slurm_job_id(): - trainer = Trainer() - with pytest.deprecated_call(match="Method `slurm_job_id` is deprecated in v1.6.0 and will be removed in v1.7.0."): - trainer.slurm_job_id - - def test_v1_7_0_deprecated_max_steps_none(tmpdir): with pytest.deprecated_call(match="`max_steps = None` is deprecated in v1.5"): _ = Trainer(max_steps=None) From 692da6ab81ff01b5116d6af258efdb81bb749fcc Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Thu, 14 Jul 2022 03:06:27 +0900 Subject: [PATCH 84/89] Remove deprecated `LightningModule.on_post_move_to_device` (#13548) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- .../accelerators/tpu_advanced.rst | 18 ++++++------------ .../source-pytorch/common/lightning_module.rst | 6 ------ src/pytorch_lightning/CHANGELOG.md | 3 +++ src/pytorch_lightning/core/hooks.py | 15 --------------- src/pytorch_lightning/overrides/base.py | 6 ------ src/pytorch_lightning/strategies/tpu_spawn.py | 7 +------ .../trainer/configuration_validator.py | 16 ---------------- .../deprecated_api/test_remove_1-7.py | 16 ---------------- 8 files changed, 10 insertions(+), 77 deletions(-) diff --git a/docs/source-pytorch/accelerators/tpu_advanced.rst b/docs/source-pytorch/accelerators/tpu_advanced.rst index f530bc6d7b088..d6568f6228be7 100644 --- a/docs/source-pytorch/accelerators/tpu_advanced.rst +++ b/docs/source-pytorch/accelerators/tpu_advanced.rst @@ -12,11 +12,10 @@ Weight Tying/Sharing is a technique where in the module weights are shared among This is a common method to reduce memory consumption and is utilized in many State of the Art architectures today. -PyTorch XLA requires these weights to be tied/shared after moving the model -to the TPU device. 
To support this requirement Lightning provides a model hook which is -called after the model is moved to the device. Any weights that require to be tied should -be done in the `on_post_move_to_device` model hook. This will ensure that the weights -among the modules are shared and not copied. +PyTorch XLA requires these weights to be tied/shared after moving the model to the XLA device. +To support this requirement, Lightning automatically finds these weights and ties them after +the modules are moved to the XLA device under the hood. It will ensure that the weights among +the modules are shared but not copied independently. PyTorch Lightning has an inbuilt check which verifies that the model parameter lengths match once the model is moved to the device. If the lengths do not match Lightning @@ -37,9 +36,8 @@ Example: self.layer_1 = nn.Linear(32, 10, bias=False) self.layer_2 = nn.Linear(10, 32, bias=False) self.layer_3 = nn.Linear(32, 10, bias=False) - # TPU shared weights are copied independently - # on the XLA device and this line won't have any effect. - # However, it works fine for CPU and GPU. + # Lightning automatically ties these weights after moving to the XLA device, + # so all you need is to write the following just like on other accelerators. self.layer_3.weight = self.layer_1.weight def forward(self, x): @@ -48,10 +46,6 @@ Example: x = self.layer_3(x) return x - def on_post_move_to_device(self): - # Weights shared after the model has been moved to TPU Device - self.layer_3.weight = self.layer_1.weight - model = WeightSharingModule() trainer = Trainer(max_epochs=1, accelerator="tpu", devices=8) diff --git a/docs/source-pytorch/common/lightning_module.rst b/docs/source-pytorch/common/lightning_module.rst index bf774b02a2f8a..e0fc10097d002 100644 --- a/docs/source-pytorch/common/lightning_module.rst +++ b/docs/source-pytorch/common/lightning_module.rst @@ -1501,12 +1501,6 @@ on_validation_epoch_end .. automethod:: pytorch_lightning.core.module.LightningModule.on_validation_epoch_end :noindex: -on_post_move_to_device -~~~~~~~~~~~~~~~~~~~~~~ - -.. automethod:: pytorch_lightning.core.module.LightningModule.on_post_move_to_device - :noindex: - configure_sharded_model ~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 70caaeb54d905..cdb642324ba4d 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -287,6 +287,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed deprecated `Callback.on_keyboard_interrupt` ([#13438](https://github.com/Lightning-AI/lightning/pull/13438)) +- Removed deprecated `LightningModule.on_post_move_to_device` ([#13548](https://github.com/Lightning-AI/lightning/pull/13548)) + + ### Fixed diff --git a/src/pytorch_lightning/core/hooks.py b/src/pytorch_lightning/core/hooks.py index 9a7b0dbfa59c4..011297c6974b7 100644 --- a/src/pytorch_lightning/core/hooks.py +++ b/src/pytorch_lightning/core/hooks.py @@ -298,21 +298,6 @@ def on_before_optimizer_step(self, optimizer, optimizer_idx): ) """ - def on_post_move_to_device(self) -> None: - """Called in the ``parameter_validation`` decorator after - :meth:`~pytorch_lightning.core.LightningModule.to` is called. This is a good place to tie weights between - modules after moving them to a device. Can be used when training models with weight sharing properties on - TPU. 
- - Addresses the handling of shared weights on TPU: - https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md#xla-tensor-quirks - - Example:: - - def on_post_move_to_device(self): - self.decoder.weight = self.encoder.weight - """ - def configure_sharded_model(self) -> None: """Hook to create modules in a distributed aware context. This is useful for when using sharded plugins, where we'd like to shard the model instantly, which is useful for extremely large models which can save diff --git a/src/pytorch_lightning/overrides/base.py b/src/pytorch_lightning/overrides/base.py index 8064154579bae..63f8143469066 100644 --- a/src/pytorch_lightning/overrides/base.py +++ b/src/pytorch_lightning/overrides/base.py @@ -52,9 +52,6 @@ def predict_step(self, *args: Any, **kwargs: Any) -> Any: def forward(self, *args: Any, **kwargs: Any) -> Any: raise NotImplementedError - def on_post_move_to_device(self) -> None: - pass - class _LightningModuleWrapperBase(DeviceDtypeModuleMixin, torch.nn.Module): def __init__(self, pl_module: Union["pl.LightningModule", _LightningPrecisionModuleWrapperBase]) -> None: @@ -95,9 +92,6 @@ def forward(self, *inputs: Any, **kwargs: Any) -> Any: return self.module.predict_step(*inputs, **kwargs) return self.module(*inputs, **kwargs) - def on_post_move_to_device(self) -> None: - pass - def unwrap_lightning_module(wrapped_model: nn.Module) -> "pl.LightningModule": """Recursively unwraps a :class:`~pytorch_lightning.core.module.LightningModule` by following the ``.module`` diff --git a/src/pytorch_lightning/strategies/tpu_spawn.py b/src/pytorch_lightning/strategies/tpu_spawn.py index b774bf19acaa0..8f818a34bfcc7 100644 --- a/src/pytorch_lightning/strategies/tpu_spawn.py +++ b/src/pytorch_lightning/strategies/tpu_spawn.py @@ -33,7 +33,6 @@ from pytorch_lightning.utilities.data import has_len from pytorch_lightning.utilities.distributed import ReduceOp from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.model_helpers import is_overridden from pytorch_lightning.utilities.optimizer import optimizers_to_device from pytorch_lightning.utilities.rank_zero import rank_zero_only from pytorch_lightning.utilities.seed import reset_seed @@ -124,11 +123,7 @@ def setup(self, trainer: "pl.Trainer") -> None: shared_params = find_shared_parameters(self.model) self.model_to_device() - if is_overridden("on_post_move_to_device", self.lightning_module): - self.model.module.on_post_move_to_device() - else: - set_shared_parameters(self.model.module, shared_params) - + set_shared_parameters(self.model.module, shared_params) self.setup_precision_plugin() if trainer.state.fn == TrainerFn.FITTING: diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index c53e22ea74a76..5bbc476f64cb5 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -46,8 +46,6 @@ def verify_loop_configurations(trainer: "pl.Trainer") -> None: __verify_eval_loop_configuration(trainer, model, "predict") __verify_dp_batch_transfer_support(trainer, model) - # TODO: Delete _check_on_post_move_to_device in v1.7 - _check_on_post_move_to_device(model) _check_deprecated_callback_hooks(trainer) # TODO: Delete _check_on_hpc_hooks in v1.8 _check_on_hpc_hooks(model) @@ -122,20 +120,6 @@ def __verify_train_val_loop_configuration(trainer: "pl.Trainer", model: "pl.Ligh ) -def _check_on_post_move_to_device(model: "pl.LightningModule") 
-> None: - r""" - Checks if `on_post_move_to_device` method is overridden and sends a deprecation warning. - - Args: - model: The model to check the `on_post_move_to_device` method. - """ - if is_overridden("on_post_move_to_device", model): - rank_zero_deprecation( - "Method `on_post_move_to_device` has been deprecated in v1.5 and will be removed in v1.7. " - "We perform automatic parameters tying without the need of implementing `on_post_move_to_device`." - ) - - def __verify_eval_loop_configuration(trainer: "pl.Trainer", model: "pl.LightningModule", stage: str) -> None: loader_name = f"{stage}_dataloader" step_name = "validation_step" if stage == "val" else f"{stage}_step" diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py index ab7272eb0247b..bd642d9dbf63b 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py @@ -20,7 +20,6 @@ import torch from pytorch_lightning import Trainer -from pytorch_lightning.demos.boring_classes import BoringModel from pytorch_lightning.plugins.environments import ( KubeflowEnvironment, LightningEnvironment, @@ -39,21 +38,6 @@ def test_v1_7_0_deprecate_lightning_distributed(tmpdir): _ = LightningDistributed() -def test_v1_7_0_deprecate_on_post_move_to_device(tmpdir): - class TestModel(BoringModel): - def on_post_move_to_device(self): - print("on_post_move_to_device") - - model = TestModel() - - trainer = Trainer(default_root_dir=tmpdir, limit_train_batches=5, max_epochs=1) - - with pytest.deprecated_call( - match=r"Method `on_post_move_to_device` has been deprecated in v1.5 and will be removed in v1.7" - ): - trainer.fit(model) - - def test_v1_7_0_deprecated_max_steps_none(tmpdir): with pytest.deprecated_call(match="`max_steps = None` is deprecated in v1.5"): _ = Trainer(max_steps=None) From 8d8211e55b5752ff84b72e50cadc68e5d1b90405 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 13 Jul 2022 21:53:46 +0200 Subject: [PATCH 85/89] Remove deprecated ClustertEnvironment methods (#13458) * Remove deprecated ClustertEnvironment methods * update changelog * ignore typing error Co-authored-by: Akihiro Nitta --- src/pytorch_lightning/CHANGELOG.md | 6 ++ .../environments/cluster_environment.py | 21 ----- .../environments/kubeflow_environment.py | 11 --- .../plugins/environments/lsf_environment.py | 7 -- .../environments/torchelastic_environment.py | 12 +-- .../connectors/accelerator_connector.py | 3 +- .../deprecated_api/test_remove_1-7.py | 87 ------------------- .../test_lightning_environment.py | 2 +- .../environments/test_slurm_environment.py | 2 +- 9 files changed, 11 insertions(+), 140 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index cdb642324ba4d..0ccad1a55f50f 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -284,6 +284,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Removed support for the `DDP2Strategy` ([#12705](https://github.com/PyTorchLightning/pytorch-lightning/pull/12705)) +- Removed deprecated ClusterEnvironment properties `master_address` and `master_port` in favor of `main_address` and `main_port` ([#13458](https://github.com/PyTorchLightning/pytorch-lightning/pull/13458)) + + +- Removed deprecated ClusterEnvironment methods `KubeflowEnvironment.is_using_kubelfow()`, `LSFEnvironment.is_using_lsf()` and `TorchElasticEnvironment.is_using_torchelastic()` in favor of the `detect()` method ([#13458](https://github.com/PyTorchLightning/pytorch-lightning/pull/13458)) + + - Removed deprecated `Callback.on_keyboard_interrupt` ([#13438](https://github.com/Lightning-AI/lightning/pull/13438)) diff --git a/src/pytorch_lightning/plugins/environments/cluster_environment.py b/src/pytorch_lightning/plugins/environments/cluster_environment.py index 1871f0afdf193..9660708653812 100644 --- a/src/pytorch_lightning/plugins/environments/cluster_environment.py +++ b/src/pytorch_lightning/plugins/environments/cluster_environment.py @@ -12,19 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod -from typing import Any, Type - -from pytorch_lightning.utilities import rank_zero_deprecation class ClusterEnvironment(ABC): """Specification of a cluster environment.""" - def __new__(cls, *args: Any, **kwargs: Any) -> "ClusterEnvironment": - # TODO: remove in 1.7 - _check_for_deprecated_methods(cls) - return super().__new__(cls) - @property @abstractmethod def creates_processes_externally(self) -> bool: @@ -72,16 +64,3 @@ def node_rank(self) -> int: def teardown(self) -> None: """Clean up any state set after execution finishes.""" pass - - -def _check_for_deprecated_methods(cls: Type[ClusterEnvironment]) -> None: - if hasattr(cls, "master_address") and callable(cls.master_address): - rank_zero_deprecation( - f"`{cls.__name__}.master_address` has been deprecated in v1.6 and will be removed in v1.7." - " Implement the property `main_address` instead (do not forget to add the `@property` decorator)." - ) - if hasattr(cls, "master_port") and callable(cls.master_port): - rank_zero_deprecation( - f"`{cls.__name__}.master_port` has been deprecated in v1.6 and will be removed in v1.7." - " Implement the property `main_port` instead (do not forget to add the `@property` decorator)." - ) diff --git a/src/pytorch_lightning/plugins/environments/kubeflow_environment.py b/src/pytorch_lightning/plugins/environments/kubeflow_environment.py index 03dfdde9d78a0..4e38f6d082c29 100644 --- a/src/pytorch_lightning/plugins/environments/kubeflow_environment.py +++ b/src/pytorch_lightning/plugins/environments/kubeflow_environment.py @@ -16,7 +16,6 @@ import os from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment -from pytorch_lightning.utilities import rank_zero_deprecation log = logging.getLogger(__name__) @@ -28,16 +27,6 @@ class KubeflowEnvironment(ClusterEnvironment): .. _Kubeflow: https://www.kubeflow.org """ - def __init__(self) -> None: - super().__init__() - # TODO: remove in 1.7 - if hasattr(self, "is_using_kubeflow") and callable(self.is_using_kubeflow): - rank_zero_deprecation( - f"`{self.__class__.__name__}.is_using_kubeflow` has been deprecated in v1.6 and will be removed in" - f" v1.7. Implement the static method `detect()` instead (do not forget to add the `@staticmethod`" - f" decorator)." 
- ) - @property def creates_processes_externally(self) -> bool: return True diff --git a/src/pytorch_lightning/plugins/environments/lsf_environment.py b/src/pytorch_lightning/plugins/environments/lsf_environment.py index 150328dbf706a..359add5137bad 100644 --- a/src/pytorch_lightning/plugins/environments/lsf_environment.py +++ b/src/pytorch_lightning/plugins/environments/lsf_environment.py @@ -18,7 +18,6 @@ from pytorch_lightning import _logger as log from pytorch_lightning.plugins.environments import ClusterEnvironment -from pytorch_lightning.utilities import rank_zero_deprecation from pytorch_lightning.utilities.cloud_io import get_filesystem @@ -48,12 +47,6 @@ class LSFEnvironment(ClusterEnvironment): def __init__(self) -> None: super().__init__() - # TODO: remove in 1.7 - if hasattr(self, "is_using_lsf") and callable(self.is_using_lsf): - rank_zero_deprecation( - f"`{self.__class__.__name__}.is_using_lsf` has been deprecated in v1.6 and will be removed in v1.7." - " Implement the static method `detect()` instead (do not forget to add the `@staticmethod` decorator)." - ) self._main_address = self._get_main_address() self._main_port = self._get_main_port() self._node_rank = self._get_node_rank() diff --git a/src/pytorch_lightning/plugins/environments/torchelastic_environment.py b/src/pytorch_lightning/plugins/environments/torchelastic_environment.py index 98cad39a0a471..2cd3f408f4964 100644 --- a/src/pytorch_lightning/plugins/environments/torchelastic_environment.py +++ b/src/pytorch_lightning/plugins/environments/torchelastic_environment.py @@ -19,7 +19,7 @@ from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_9_1 -from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn +from pytorch_lightning.utilities.rank_zero import rank_zero_warn log = logging.getLogger(__name__) @@ -27,16 +27,6 @@ class TorchElasticEnvironment(ClusterEnvironment): """Environment for fault-tolerant and elastic training with `torchelastic `_""" - def __init__(self) -> None: - super().__init__() - # TODO: remove in 1.7 - if hasattr(self, "is_using_torchelastic") and callable(self.is_using_torchelastic): - rank_zero_deprecation( - f"`{self.__class__.__name__}.is_using_torchelastic` has been deprecated in v1.6 and will be removed in" - " v1.7. Implement the static method `detect()` instead (do not forget to add the `@staticmethod`" - " decorator)." 
- ) - @property def creates_processes_externally(self) -> bool: return True diff --git a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py b/src/pytorch_lightning/trainer/connectors/accelerator_connector.py index f1accaa29ef35..2e112c754cbc5 100644 --- a/src/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/src/pytorch_lightning/trainer/connectors/accelerator_connector.py @@ -546,7 +546,8 @@ def _choose_and_init_cluster_environment(self) -> ClusterEnvironment: return SLURMEnvironment() for env_type in (BaguaEnvironment, TorchElasticEnvironment, KubeflowEnvironment, LSFEnvironment): if env_type.detect(): - return env_type() + # Ignore type error because it is a false positive: https://github.com/python/mypy/issues/13044 + return env_type() # type: ignore[abstract] return LightningEnvironment() def _is_slurm_managing_tasks(self) -> bool: diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py index bd642d9dbf63b..2af4e4c8e26e9 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py @@ -12,23 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. """Test deprecated functionality which will be removed in v1.7.0.""" -import os from re import escape -from unittest import mock import pytest import torch from pytorch_lightning import Trainer -from pytorch_lightning.plugins.environments import ( - KubeflowEnvironment, - LightningEnvironment, - LSFEnvironment, - SLURMEnvironment, - TorchElasticEnvironment, -) from pytorch_lightning.strategies import SingleDeviceStrategy -from tests_pytorch.plugins.environments.test_lsf_environment import _make_rankfile def test_v1_7_0_deprecate_lightning_distributed(tmpdir): @@ -47,83 +37,6 @@ def test_v1_7_0_deprecated_max_steps_none(tmpdir): trainer.fit_loop.max_steps = None -@pytest.mark.parametrize( - "cls", - [ - KubeflowEnvironment, - LightningEnvironment, - SLURMEnvironment, - TorchElasticEnvironment, - ], -) -def test_v1_7_0_cluster_environment_master_address(cls): - class MyClusterEnvironment(cls): - def master_address(self): - pass - - with pytest.deprecated_call( - match="MyClusterEnvironment.master_address` has been deprecated in v1.6 and will be removed in v1.7" - ): - MyClusterEnvironment() - - -@pytest.mark.parametrize( - "cls", - [ - KubeflowEnvironment, - LightningEnvironment, - SLURMEnvironment, - TorchElasticEnvironment, - ], -) -def test_v1_7_0_cluster_environment_master_port(cls): - class MyClusterEnvironment(cls): - def master_port(self): - pass - - with pytest.deprecated_call( - match="MyClusterEnvironment.master_port` has been deprecated in v1.6 and will be removed in v1.7" - ): - MyClusterEnvironment() - - -@pytest.mark.parametrize( - "cls,method_name", - [ - (KubeflowEnvironment, "is_using_kubeflow"), - (LSFEnvironment, "is_using_lsf"), - (TorchElasticEnvironment, "is_using_torchelastic"), - ], -) -def test_v1_7_0_cluster_environment_detection(cls, method_name, tmp_path): - class MyClusterEnvironment(cls): - @staticmethod - def is_using_kubeflow(): - pass - - @staticmethod - def is_using_lsf(): - pass - - @staticmethod - def is_using_torchelastic(): - pass - - environ = { - "LSB_DJOB_RANKFILE": _make_rankfile(tmp_path), - "LSB_JOBID": "1234", - "JSM_NAMESPACE_SIZE": "4", - "JSM_NAMESPACE_RANK": "3", - "JSM_NAMESPACE_LOCAL_RANK": "1", - } - with mock.patch.dict(os.environ, environ): - with 
mock.patch("socket.gethostname", return_value="10.10.10.2"): - with pytest.deprecated_call( - match=f"MyClusterEnvironment.{method_name}` has been deprecated in v1.6 and will be removed in v1.7" - ): - MyClusterEnvironment() - - def test_v1_7_0_post_dispatch_hook(): class CustomPlugin(SingleDeviceStrategy): def post_dispatch(self, trainer): diff --git a/tests/tests_pytorch/plugins/environments/test_lightning_environment.py b/tests/tests_pytorch/plugins/environments/test_lightning_environment.py index 9197e5158317e..0ad05e0bfa8eb 100644 --- a/tests/tests_pytorch/plugins/environments/test_lightning_environment.py +++ b/tests/tests_pytorch/plugins/environments/test_lightning_environment.py @@ -66,7 +66,7 @@ def test_node_rank_from_group_rank(): @mock.patch.dict(os.environ, {}, clear=True) -def test_random_master_port(): +def test_random_main_port(): """Test randomly chosen main port when no main port was given by user.""" env = LightningEnvironment() port = env.main_port diff --git a/tests/tests_pytorch/plugins/environments/test_slurm_environment.py b/tests/tests_pytorch/plugins/environments/test_slurm_environment.py index 61ef404dd127a..44b0dd97e354d 100644 --- a/tests/tests_pytorch/plugins/environments/test_slurm_environment.py +++ b/tests/tests_pytorch/plugins/environments/test_slurm_environment.py @@ -83,7 +83,7 @@ def test_attributes_from_environment_variables(caplog): "slurm_node_list,expected", [("alpha,beta,gamma", "alpha"), ("alpha beta gamma", "alpha"), ("1.2.3.[100-110]", "1.2.3.100")], ) -def test_master_address_from_slurm_node_list(slurm_node_list, expected): +def test_main_address_from_slurm_node_list(slurm_node_list, expected): """Test extracting the main node from different formats for the SLURM_NODELIST.""" with mock.patch.dict(os.environ, {"SLURM_NODELIST": slurm_node_list}): env = SLURMEnvironment() From db1e4d2bca3e07e8ef41b27f29ebee742c6d1e23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Wed, 13 Jul 2022 23:13:38 +0200 Subject: [PATCH 86/89] Update CHANGELOG after the 1.6.5 release (#13641) --- src/pytorch_lightning/CHANGELOG.md | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 0ccad1a55f50f..b509210bd90e4 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -314,24 +314,22 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Fixed ``LightningCLI`` signature parameter resolving for some lightning classes ([#13283](https://github.com/PyTorchLightning/pytorch-lightning/pull/13283)) -- Fixed `estimated_stepping_batches` requiring distributed comms in `configure_optimizers` for the `DeepSpeedStrategy` ([#13350](https://github.com/PyTorchLightning/pytorch-lightning/pull/13350)) - - -- Fixed bug with Python version check that prevented use with development versions of Python ([#13420](https://github.com/PyTorchLightning/pytorch-lightning/pull/13420)) - - - Fixed Model Summary when using DeepSpeed Stage 3 ([#13427](https://github.com/PyTorchLightning/pytorch-lightning/pull/13427)) - Fixed `pytorch_lightning.utilities.distributed.gather_all_tensors` to handle tensors of different dimensions ([#12630](https://github.com/PyTorchLightning/pytorch-lightning/pull/12630)) -- The loops now call `.set_epoch()` also on batch samplers if the dataloader has one wrapped in a distributed sampler ([#13396](https://github.com/PyTorchLightning/pytorch-lightning/pull/13396)) +- Fixed the input validation for the accelerator Trainer argument when passed as a string ([#13417](https://github.com/PyTorchLightning/pytorch-lightning/pull/13417)) -- Fixed the input validation for the accelerator Trainer argument when passed as a string ([#13417](https://github.com/PyTorchLightning/pytorch-lightning/pull/13417)) +## [1.6.5] - 2022-07-13 +### Fixed +- Fixed `estimated_stepping_batches` requiring distributed comms in `configure_optimizers` for the `DeepSpeedStrategy` ([#13350](https://github.com/PyTorchLightning/pytorch-lightning/pull/13350)) +- Fixed bug with Python version check that prevented use with development versions of Python ([#13420](https://github.com/PyTorchLightning/pytorch-lightning/pull/13420)) +- The loops now call `.set_epoch()` also on batch samplers if the dataloader has one wrapped in a distributed sampler ([#13396](https://github.com/PyTorchLightning/pytorch-lightning/pull/13396)) - Fixed the restoration of log step during restart ([#13467](https://github.com/PyTorchLightning/pytorch-lightning/pull/13467)) From d825adafc6c6479e264a94c2478a925262c3e528 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Thu, 14 Jul 2022 06:15:28 +0900 Subject: [PATCH 87/89] Remove deprecated `LightningDistributed` (#13549) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- pyproject.toml | 16 +++---- src/pytorch_lightning/CHANGELOG.md | 3 ++ src/pytorch_lightning/distributed/__init__.py | 14 ------ src/pytorch_lightning/distributed/dist.py | 47 ------------------- .../deprecated_api/test_remove_1-7.py | 7 --- 5 files changed, 10 insertions(+), 77 deletions(-) delete mode 100644 src/pytorch_lightning/distributed/__init__.py delete mode 100644 src/pytorch_lightning/distributed/dist.py diff --git a/pyproject.toml b/pyproject.toml index 3fbc7f0a4c9d9..898699875e873 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,7 +45,7 @@ warn_no_return = "False" # TODO: the goal is for this to be empty [[tool.mypy.overrides]] # the list can be generated with: -# mypy | tr ':' ' ' | awk '{print $1}' | sort | uniq | sed 's/\.py//g' | sed 's|\/|\.|g' | xargs -I {} echo '"{}",' +# mypy --no-error-summary 2>&1 | tr ':' ' ' | awk '{print $1}' | sort | uniq | sed 's/\.py//g; s|src/||g; s|\/|\.|g' | xargs -I {} echo '"{}",' module = [ "pytorch_lightning.callbacks.model_checkpoint", "pytorch_lightning.callbacks.progress.rich_progress", @@ -53,18 +53,21 @@ module = [ 
"pytorch_lightning.callbacks.stochastic_weight_avg", "pytorch_lightning.core.datamodule", "pytorch_lightning.core.decorators", - "pytorch_lightning.core.module", "pytorch_lightning.core.mixins.device_dtype_mixin", + "pytorch_lightning.core.module", "pytorch_lightning.core.saving", "pytorch_lightning.demos.boring_classes", "pytorch_lightning.demos.mnist_datamodule", - "pytorch_lightning.distributed.dist", "pytorch_lightning.loggers.comet", "pytorch_lightning.loggers.mlflow", "pytorch_lightning.loggers.neptune", "pytorch_lightning.loggers.tensorboard", "pytorch_lightning.loggers.wandb", "pytorch_lightning.loops.epoch.training_epoch_loop", + "pytorch_lightning.profilers.advanced", + "pytorch_lightning.profilers.base", + "pytorch_lightning.profilers.pytorch", + "pytorch_lightning.profilers.simple", "pytorch_lightning.strategies.ddp", "pytorch_lightning.strategies.ddp_spawn", "pytorch_lightning.strategies.deepspeed", @@ -74,16 +77,11 @@ module = [ "pytorch_lightning.strategies.parallel", "pytorch_lightning.strategies.sharded", "pytorch_lightning.strategies.sharded_spawn", - "pytorch_lightning.strategies.tpu_spawn", "pytorch_lightning.strategies.strategy", - "pytorch_lightning.profilers.advanced", - "pytorch_lightning.profilers.base", - "pytorch_lightning.profilers.pytorch", - "pytorch_lightning.profilers.simple", + "pytorch_lightning.strategies.tpu_spawn", "pytorch_lightning.trainer.callback_hook", "pytorch_lightning.trainer.connectors.callback_connector", "pytorch_lightning.trainer.connectors.data_connector", - "pytorch_lightning.trainer.data_loading", "pytorch_lightning.trainer.supporters", "pytorch_lightning.trainer.trainer", "pytorch_lightning.tuner.batch_size_scaling", diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index b509210bd90e4..e9cd98c71a97b 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -284,6 +284,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed support for the `DDP2Strategy` ([#12705](https://github.com/PyTorchLightning/pytorch-lightning/pull/12705)) +- Removed deprecated `LightningDistributed` ([#13549](https://github.com/PyTorchLightning/pytorch-lightning/pull/13549)) + + - Removed deprecated ClusterEnvironment properties `master_address` and `master_port` in favor of `main_address` and `main_port` ([#13458](https://github.com/PyTorchLightning/pytorch-lightning/pull/13458)) diff --git a/src/pytorch_lightning/distributed/__init__.py b/src/pytorch_lightning/distributed/__init__.py deleted file mode 100644 index ea060e551ad9d..0000000000000 --- a/src/pytorch_lightning/distributed/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from pytorch_lightning.distributed.dist import LightningDistributed # noqa: F401 diff --git a/src/pytorch_lightning/distributed/dist.py b/src/pytorch_lightning/distributed/dist.py deleted file mode 100644 index 1799450e3ce05..0000000000000 --- a/src/pytorch_lightning/distributed/dist.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Any - -import torch.distributed - -from pytorch_lightning.utilities import rank_zero_deprecation -from pytorch_lightning.utilities.distributed import group as _group - - -class LightningDistributed: - """ - .. deprecated:: v1.5 - This class is deprecated in v1.5 and will be removed in v1.7. - The broadcast logic will be moved to the :class:`DDPStrategy` and :class`DDPSpawnStrategy` classes. - - """ - - def __init__(self, rank=None, device=None): - rank_zero_deprecation( - "LightningDistributed is deprecated in v1.5 and will be removed in v1.7." - "Broadcast logic is implemented directly in the :class:`Strategy` implementations." - ) - self.rank = rank - self.device = device - - def broadcast(self, obj: Any, group=_group.WORLD): - # always wrap into a list so it can be broadcasted. - obj = [obj] - - if self.rank != 0: - obj = [None] * len(obj) - - torch.distributed.broadcast_object_list(obj, 0, group=group or _group.WORLD) - - return obj[0] diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py index 2af4e4c8e26e9..8ab04076f8d26 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-7.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-7.py @@ -21,13 +21,6 @@ from pytorch_lightning.strategies import SingleDeviceStrategy -def test_v1_7_0_deprecate_lightning_distributed(tmpdir): - with pytest.deprecated_call(match="LightningDistributed is deprecated in v1.5 and will be removed in v1.7."): - from pytorch_lightning.distributed.dist import LightningDistributed - - _ = LightningDistributed() - - def test_v1_7_0_deprecated_max_steps_none(tmpdir): with pytest.deprecated_call(match="`max_steps = None` is deprecated in v1.5"): _ = Trainer(max_steps=None) From abdd8902718880807de3a9f8f9689f6d76b57def Mon Sep 17 00:00:00 2001 From: Jerome Date: Wed, 20 Jul 2022 16:57:19 +0300 Subject: [PATCH 88/89] Handle nameerror exceptions Signed-off-by: Jerome --- src/pytorch_lightning/accelerators/hpu.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pytorch_lightning/accelerators/hpu.py b/src/pytorch_lightning/accelerators/hpu.py index b58599bece7f8..988665cc92452 100644 --- a/src/pytorch_lightning/accelerators/hpu.py +++ b/src/pytorch_lightning/accelerators/hpu.py @@ -58,7 +58,7 @@ def auto_device_count() -> int: """Returns the number of HPU devices when the devices is set to auto.""" try: return torch_hpu.device_count() - except AttributeError: + except (AttributeError, NameError) as e: rank_zero_debug("HPU `auto_device_count` failed, returning default count of 8.") return 8 @@ -67,7 
+67,7 @@ def is_available() -> bool: """Returns a bool indicating if HPU is currently available.""" try: return torch_hpu.is_available() - except AttributeError: + except (AttributeError, NameError) as e: return False @staticmethod @@ -75,7 +75,7 @@ def get_device_name() -> str: """Returns the name of the HPU device.""" try: return torch_hpu.get_device_name() - except AttributeError: + except (AttributeError, NameError) as e: return "" @classmethod From fc8ab9c26d3b656189435ac7facb682dd1d83a2f Mon Sep 17 00:00:00 2001 From: Jerome Date: Thu, 21 Jul 2022 05:01:42 +0300 Subject: [PATCH 89/89] remove unused variable Signed-off-by: Jerome --- src/pytorch_lightning/accelerators/hpu.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pytorch_lightning/accelerators/hpu.py b/src/pytorch_lightning/accelerators/hpu.py index 988665cc92452..686bf6bb9452d 100644 --- a/src/pytorch_lightning/accelerators/hpu.py +++ b/src/pytorch_lightning/accelerators/hpu.py @@ -58,7 +58,7 @@ def auto_device_count() -> int: """Returns the number of HPU devices when the devices is set to auto.""" try: return torch_hpu.device_count() - except (AttributeError, NameError) as e: + except (AttributeError, NameError): rank_zero_debug("HPU `auto_device_count` failed, returning default count of 8.") return 8 @@ -67,7 +67,7 @@ def is_available() -> bool: """Returns a bool indicating if HPU is currently available.""" try: return torch_hpu.is_available() - except (AttributeError, NameError) as e: + except (AttributeError, NameError): return False @staticmethod @@ -75,7 +75,7 @@ def get_device_name() -> str: """Returns the name of the HPU device.""" try: return torch_hpu.get_device_name() - except (AttributeError, NameError) as e: + except (AttributeError, NameError): return "" @classmethod
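
The last two patches above settle on one pattern for the HPU queries: call into `habana_frameworks.torch.hpu` inside `try`/`except (AttributeError, NameError)` and fall back to a safe default (a device count of 8, not available, empty device name) when the Habana stack is missing or only partially initialized. Below is a minimal standalone sketch of that pattern; `probe_hpu` is a hypothetical helper written only for illustration (it is not part of the patch set), and the sketch assumes nothing beyond the guarded import possibly leaving `torch_hpu` undefined or incomplete.

    # Hypothetical sketch of the guarded HPU queries used by HPUAccelerator.
    # Assumption: `habana_frameworks.torch.hpu` may be absent or partially
    # initialized, so attribute access can raise AttributeError/NameError.
    try:
        import habana_frameworks.torch.hpu as torch_hpu
    except ImportError:
        torch_hpu = None  # stands in for the `_HPU_AVAILABLE`-guarded import

    def probe_hpu() -> dict:
        """Return HPU info with the same safe defaults the accelerator uses."""
        info = {"count": 8, "available": False, "name": ""}
        try:
            info["count"] = torch_hpu.device_count()
            info["available"] = torch_hpu.is_available()
            info["name"] = torch_hpu.get_device_name()
        except (AttributeError, NameError):
            pass  # keep the defaults instead of raising
        return info

    # On a machine without HPUs this prints {'count': 8, 'available': False, 'name': ''}
    print(probe_hpu())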