diff --git a/conftest.py b/conftest.py index b0457522c0..3a91de643a 100644 --- a/conftest.py +++ b/conftest.py @@ -56,7 +56,7 @@ from smartsim._core.config.config import Config from smartsim._core.utils.telemetry.telemetry import JobEntity from smartsim.database import Orchestrator -from smartsim.entity import Model +from smartsim.entity import Application from smartsim.error import SSConfigError, SSInternalError from smartsim.log import get_logger from smartsim.settings import ( @@ -93,6 +93,7 @@ test_hostlist = None has_aprun = shutil.which("aprun") is not None + def get_account() -> str: return test_account @@ -141,7 +142,7 @@ def pytest_sessionstart( time.sleep(0.1) if CONFIG.dragon_server_path is None: - dragon_server_path = os.path.join(test_output_root, "dragon_server") + dragon_server_path = os.path.join(test_output_root, "dragon_server") os.makedirs(dragon_server_path) os.environ["SMARTSIM_DRAGON_SERVER_PATH"] = dragon_server_path @@ -183,7 +184,7 @@ def build_mpi_app() -> t.Optional[pathlib.Path]: if cc is None: return None - path_to_src = pathlib.Path(FileUtils().get_test_conf_path("mpi")) + path_to_src = pathlib.Path(FileUtils().get_test_conf_path("mpi")) path_to_out = pathlib.Path(test_output_root) / "apps" / "mpi_app" os.makedirs(path_to_out.parent, exist_ok=True) cmd = [cc, str(path_to_src / "mpi_hello.c"), "-o", str(path_to_out)] @@ -194,11 +195,12 @@ def build_mpi_app() -> t.Optional[pathlib.Path]: else: return None + @pytest.fixture(scope="session") def mpi_app_path() -> t.Optional[pathlib.Path]: """Return path to MPI app if it was built - return None if it could not or will not be built + return None if it could not or will not be built """ if not CONFIG.test_mpi: return None @@ -227,7 +229,6 @@ def kill_all_test_spawned_processes() -> None: print("Not all processes were killed after test") - def get_hostlist() -> t.Optional[t.List[str]]: global test_hostlist if not test_hostlist: @@ -654,10 +655,10 @@ def setup_test_colo( application_file: str, db_args: t.Dict[str, t.Any], colo_settings: t.Optional[RunSettings] = None, - colo_model_name: str = "colocated_model", + colo_application_name: str = "colocated_application", port: t.Optional[int] = None, on_wlm: bool = False, - ) -> Model: + ) -> Application: """Setup database needed for the colo pinning tests""" # get test setup @@ -672,31 +673,31 @@ def setup_test_colo( colo_settings.set_tasks(1) colo_settings.set_nodes(1) - colo_model = exp.create_model(colo_model_name, colo_settings) + colo_application = exp.create_application(colo_application_name, colo_settings) if db_type in ["tcp", "deprecated"]: db_args["port"] = port if port is not None else _find_free_port(test_ports) db_args["ifname"] = "lo" - if db_type == "uds" and colo_model_name is not None: + if db_type == "uds" and colo_application_name is not None: tmp_dir = tempfile.gettempdir() socket_suffix = str(uuid.uuid4())[:7] - socket_name = f"{colo_model_name}_{socket_suffix}.socket" + socket_name = f"{colo_application_name}_{socket_suffix}.socket" db_args["unix_socket"] = os.path.join(tmp_dir, socket_name) colocate_fun: t.Dict[str, t.Callable[..., None]] = { - "tcp": colo_model.colocate_db_tcp, - "deprecated": colo_model.colocate_db, - "uds": colo_model.colocate_db_uds, + "tcp": colo_application.colocate_db_tcp, + "deprecated": colo_application.colocate_db, + "uds": colo_application.colocate_db_uds, } with warnings.catch_warnings(): if db_type == "deprecated": message = "`colocate_db` has been deprecated" warnings.filterwarnings("ignore", message=message) 
colocate_fun[db_type](**db_args) - # assert model will launch with colocated db - assert colo_model.colocated + # assert application will launch with colocated db + assert colo_application.colocated # Check to make sure that limit_db_cpus made it into the colo settings - return colo_model + return colo_application @pytest.fixture(scope="function") @@ -708,7 +709,9 @@ def global_dragon_teardown() -> None: """ if test_launcher != "dragon" or CONFIG.dragon_server_path is None: return - logger.debug(f"Tearing down Dragon infrastructure, server path: {CONFIG.dragon_server_path}") + logger.debug( + f"Tearing down Dragon infrastructure, server path: {CONFIG.dragon_server_path}" + ) dragon_connector = DragonConnector() dragon_connector.ensure_connected() dragon_connector.cleanup() @@ -875,9 +878,13 @@ def num_calls(self) -> int: def details(self) -> t.List[t.Tuple[t.Tuple[t.Any, ...], t.Dict[str, t.Any]]]: return self._details + ## Reuse database across tests -database_registry: t.DefaultDict[str, t.Optional[Orchestrator]] = defaultdict(lambda: None) +database_registry: t.DefaultDict[str, t.Optional[Orchestrator]] = defaultdict( + lambda: None +) + @pytest.fixture(scope="function") def local_experiment(test_dir: str) -> smartsim.Experiment: @@ -885,16 +892,16 @@ def local_experiment(test_dir: str) -> smartsim.Experiment: name = pathlib.Path(test_dir).stem return smartsim.Experiment(name, exp_path=test_dir, launcher="local") + @pytest.fixture(scope="function") def wlm_experiment(test_dir: str, wlmutils: WLMUtils) -> smartsim.Experiment: """Create a default experiment that uses the requested launcher""" name = pathlib.Path(test_dir).stem return smartsim.Experiment( - name, - exp_path=test_dir, - launcher=wlmutils.get_test_launcher() + name, exp_path=test_dir, launcher=wlmutils.get_test_launcher() ) + def _cleanup_db(name: str) -> None: global database_registry db = database_registry[name] @@ -906,19 +913,22 @@ def _cleanup_db(name: str) -> None: except: pass + @dataclass class DBConfiguration: name: str launcher: str num_nodes: int - interface: t.Union[str,t.List[str]] + interface: t.Union[str, t.List[str]] hostlist: t.Optional[t.List[str]] port: int + @dataclass class PrepareDatabaseOutput: - orchestrator: t.Optional[Orchestrator] # The actual orchestrator object - new_db: bool # True if a new database was created when calling prepare_db + orchestrator: t.Optional[Orchestrator] # The actual orchestrator object + new_db: bool # True if a new database was created when calling prepare_db + # Reuse databases @pytest.fixture(scope="session") @@ -935,6 +945,7 @@ def local_db() -> t.Generator[DBConfiguration, None, None]: yield config _cleanup_db(name) + @pytest.fixture(scope="session") def single_db(wlmutils: WLMUtils) -> t.Generator[DBConfiguration, None, None]: hostlist = wlmutils.get_test_hostlist() @@ -946,7 +957,7 @@ def single_db(wlmutils: WLMUtils) -> t.Generator[DBConfiguration, None, None]: 1, wlmutils.get_test_interface(), hostlist, - _find_free_port(tuple(reversed(test_ports))) + _find_free_port(tuple(reversed(test_ports))), ) yield config _cleanup_db(name) @@ -971,9 +982,7 @@ def clustered_db(wlmutils: WLMUtils) -> t.Generator[DBConfiguration, None, None] @pytest.fixture def register_new_db() -> t.Callable[[DBConfiguration], Orchestrator]: - def _register_new_db( - config: DBConfiguration - ) -> Orchestrator: + def _register_new_db(config: DBConfiguration) -> Orchestrator: exp_path = pathlib.Path(test_output_root, config.name) exp_path.mkdir(exist_ok=True) exp = Experiment( @@ -986,26 +995,21 
@@ def _register_new_db( batch=False, interface=config.interface, hosts=config.hostlist, - db_nodes=config.num_nodes + db_nodes=config.num_nodes, ) exp.generate(orc, overwrite=True) exp.start(orc) global database_registry database_registry[config.name] = orc return orc + return _register_new_db @pytest.fixture(scope="function") def prepare_db( - register_new_db: t.Callable[ - [DBConfiguration], - Orchestrator - ] -) -> t.Callable[ - [DBConfiguration], - PrepareDatabaseOutput -]: + register_new_db: t.Callable[[DBConfiguration], Orchestrator] +) -> t.Callable[[DBConfiguration], PrepareDatabaseOutput]: def _prepare_db(db_config: DBConfiguration) -> PrepareDatabaseOutput: global database_registry db = database_registry[db_config.name] @@ -1021,4 +1025,5 @@ def _prepare_db(db_config: DBConfiguration) -> PrepareDatabaseOutput: new_db = True return PrepareDatabaseOutput(db, new_db) + return _prepare_db diff --git a/doc/api/smartsim_api.rst b/doc/api/smartsim_api.rst index 420c494a2c..d9615e04cc 100644 --- a/doc/api/smartsim_api.rst +++ b/doc/api/smartsim_api.rst @@ -91,6 +91,7 @@ launches that utilize a parallel launch binary like .. autosummary:: + RunSettings.add_exe_args RunSettings.update_env .. autoclass:: RunSettings @@ -120,6 +121,7 @@ steps to a batch. SrunSettings.set_hostlist SrunSettings.set_excluded_hosts SrunSettings.set_cpus_per_task + SrunSettings.add_exe_args SrunSettings.format_run_args SrunSettings.format_env_vars SrunSettings.update_env @@ -151,6 +153,7 @@ and within batch launches (e.g., ``QsubBatchSettings``) AprunSettings.set_tasks AprunSettings.set_tasks_per_node AprunSettings.make_mpmd + AprunSettings.add_exe_args AprunSettings.format_run_args AprunSettings.format_env_vars AprunSettings.update_env @@ -236,6 +239,7 @@ supported on Slurm and PBSpro. MpirunSettings.set_tasks MpirunSettings.set_task_map MpirunSettings.make_mpmd + MpirunSettings.add_exe_args MpirunSettings.format_run_args MpirunSettings.format_env_vars MpirunSettings.update_env @@ -262,6 +266,7 @@ supported on Slurm and PBSpro. MpiexecSettings.set_tasks MpiexecSettings.set_task_map MpiexecSettings.make_mpmd + MpiexecSettings.add_exe_args MpiexecSettings.format_run_args MpiexecSettings.format_env_vars MpiexecSettings.update_env @@ -288,6 +293,7 @@ supported on Slurm and PBSpro. OrterunSettings.set_tasks OrterunSettings.set_task_map OrterunSettings.make_mpmd + OrterunSettings.add_exe_args OrterunSettings.format_run_args OrterunSettings.format_env_vars OrterunSettings.update_env diff --git a/doc/changelog.md b/doc/changelog.md index 6a1c7425d7..1f201f3a8f 100644 --- a/doc/changelog.md +++ b/doc/changelog.md @@ -31,7 +31,6 @@ Detailed Notes ([SmartSim-PR601](https://github.com/CrayLabs/SmartSim/pull/601)) - Fix packaging failures due to deprecated `pkg_resources`. 
([SmartSim-PR598](https://github.com/CrayLabs/SmartSim/pull/598))
-
 
 ### 0.7.0
 
 Released on 14 May, 2024
diff --git a/smartsim/_core/control/controller.py b/smartsim/_core/control/controller.py
index 844d4fbf48..5f53db8faf 100644
--- a/smartsim/_core/control/controller.py
+++ b/smartsim/_core/control/controller.py
@@ -55,7 +55,7 @@
     shutdown_db_node,
 )
 from ...database import Orchestrator
-from ...entity import Ensemble, EntitySequence, Model, SmartSimEntity
+from ...entity import Application, Ensemble, EntitySequence, SmartSimEntity
 from ...error import (
     LauncherError,
     SmartSimError,
@@ -224,7 +224,12 @@ def stop_entity(
         if job.status not in TERMINAL_STATUSES:
             logger.info(
                 " ".join(
-                    ("Stopping model", entity.name, "with job name", str(job.name))
+                    (
+                        "Stopping application",
+                        entity.name,
+                        "with job name",
+                        str(job.name),
+                    )
                 )
             )
             status = self._launcher.stop(job.name)
@@ -445,7 +450,7 @@ def _launch(
         )
 
         # symlink substeps to maintain directory structure
-        for substep, substep_entity in zip(substeps, elist.models):
+        for substep, substep_entity in zip(substeps, elist.applications):
             symlink_substeps.append((substep, substep_entity))
 
         steps.append((batch_step, elist))
@@ -459,24 +464,28 @@ def _launch(
             elist, [(step.name, step) for step, _ in job_steps]
         )
         steps.extend(job_steps)
-        # models themselves cannot be batch steps. If batch settings are
+        # applications themselves cannot be batch steps. If batch settings are
         # attached, wrap them in an anonymous batch job step
-        for model in manifest.models:
-            model_telem_dir = manifest_builder.run_telemetry_subdirectory / "model"
-            if model.batch_settings:
-                anon_entity_list = _AnonymousBatchJob(model)
+        for application in manifest.applications:
+            application_telem_dir = (
+                manifest_builder.run_telemetry_subdirectory / "application"
+            )
+            if application.batch_settings:
+                anon_entity_list = _AnonymousBatchJob(application)
                 batch_step, substeps = self._create_batch_job_step(
-                    anon_entity_list, model_telem_dir
+                    anon_entity_list, application_telem_dir
+                )
+                manifest_builder.add_application(
+                    application, (batch_step.name, batch_step)
                 )
-                manifest_builder.add_model(model, (batch_step.name, batch_step))
-                symlink_substeps.append((substeps[0], model))
-                steps.append((batch_step, model))
+                symlink_substeps.append((substeps[0], application))
+                steps.append((batch_step, application))
             else:
-                # create job step for a model with run settings
-                job_step = self._create_job_step(model, model_telem_dir)
-                manifest_builder.add_model(model, (job_step.name, job_step))
-                steps.append((job_step, model))
+                # create job step for an application with run settings
+                job_step = self._create_job_step(application, application_telem_dir)
+                manifest_builder.add_application(application, (job_step.name, job_step))
+                steps.append((job_step, application))
 
         # launch and symlink steps
         for step, entity in steps:
@@ -668,7 +677,7 @@ def _create_job_step(
         :return: the job step
         """
         # get SSDB, SSIN, SSOUT and add to entity run settings
-        if isinstance(entity, Model):
+        if isinstance(entity, Application):
             self._prep_entity_client_env(entity)
 
         # creating job step through the created launcher
@@ -680,7 +689,7 @@ def _create_job_step(
         # return the job step that was created using the launcher since the launcher is defined in the exp
         return step
 
-    def _prep_entity_client_env(self, entity: Model) -> None:
+    def _prep_entity_client_env(self, entity: Application) -> None:
         """Retrieve all connections registered to this entity
 
         :param entity: The entity to retrieve connections from
@@ -706,7 +715,7 @@
def _prep_entity_client_env(self, entity: Model) -> None: if entity.query_key_prefixing(): client_env["SSKEYOUT"] = entity.name - # Set address to local if it's a colocated model + # Set address to local if it's a colocated application if entity.colocated and entity.run_settings.colocated_db_settings is not None: db_name_colo = entity.run_settings.colocated_db_settings["db_identifier"] assert isinstance(db_name_colo, str) @@ -897,11 +906,11 @@ def _set_dbobjects(self, manifest: Manifest) -> None: options = ConfigOptions.create_from_environment(name) client = Client(options, logger_name="SmartSim") - for model in manifest.models: - if not model.colocated: - for db_model in model.db_models: + for application in manifest.applications: + if not application.colocated: + for db_model in application.db_models: set_ml_model(db_model, client) - for db_script in model.db_scripts: + for db_script in application.db_scripts: set_script(db_script, client) for ensemble in manifest.ensembles: @@ -909,7 +918,7 @@ def _set_dbobjects(self, manifest: Manifest) -> None: set_ml_model(db_model, client) for db_script in ensemble.db_scripts: set_script(db_script, client) - for entity in ensemble.models: + for entity in ensemble.applications: if not entity.colocated: # Set models which could belong only # to the entities and not to the ensemble diff --git a/smartsim/_core/control/controller_utils.py b/smartsim/_core/control/controller_utils.py index 37ae9aebfb..57694ce7cf 100644 --- a/smartsim/_core/control/controller_utils.py +++ b/smartsim/_core/control/controller_utils.py @@ -30,7 +30,7 @@ import typing as t from ..._core.launcher.step import Step -from ...entity import EntityList, Model +from ...entity import Application, EntityList from ...error import SmartSimError from ..launcher.launcher import Launcher @@ -38,18 +38,18 @@ from ..utils.serialize import TStepLaunchMetaData -class _AnonymousBatchJob(EntityList[Model]): +class _AnonymousBatchJob(EntityList[Application]): @staticmethod - def _validate(model: Model) -> None: - if model.batch_settings is None: + def _validate(application: Application) -> None: + if application.batch_settings is None: msg = "Unable to create _AnonymousBatchJob without batch_settings" raise SmartSimError(msg) - def __init__(self, model: Model) -> None: - self._validate(model) - super().__init__(model.name, model.path) - self.entities = [model] - self.batch_settings = model.batch_settings + def __init__(self, application: Application) -> None: + self._validate(application) + super().__init__(application.name, application.path) + self.entities = [application] + self.batch_settings = application.batch_settings def _initialize_entities(self, **kwargs: t.Any) -> None: ... 
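Reviewer note: `_AnonymousBatchJob` is the piece that lets a batch-enabled `Application` launch without an enclosing `Ensemble`. A minimal sketch of the renamed wrapper follows — an assumption-laden example, not part of this diff, written as if `Application` keeps the old `Model` constructor signature (`name`, `exe`, `exe_args`, `params`, `path`, `run_settings`) seen in the `ensemble.py` hunks below:

```python
# Sketch only -- not part of this diff. Exercises the renamed wrapper under
# the assumption that Application mirrors the old Model constructor.
from smartsim._core.control.controller_utils import _AnonymousBatchJob
from smartsim.entity import Application
from smartsim.settings import RunSettings, SbatchSettings

app = Application(
    name="batched_app",
    exe="echo",
    exe_args=["hello"],
    params={},
    path="/tmp/batched_app",  # placeholder path
    run_settings=RunSettings("echo", exe_args=["hello"]),
)
# batch_settings is what _AnonymousBatchJob._validate checks for; without it
# the constructor raises SmartSimError
app.batch_settings = SbatchSettings(nodes=1)

anon = _AnonymousBatchJob(app)
assert anon.entities == [app]
assert anon.batch_settings is app.batch_settings
```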
diff --git a/smartsim/_core/control/manifest.py b/smartsim/_core/control/manifest.py index fd5770f187..bf99bb050f 100644 --- a/smartsim/_core/control/manifest.py +++ b/smartsim/_core/control/manifest.py @@ -30,7 +30,7 @@ from dataclasses import dataclass, field from ...database import Orchestrator -from ...entity import DBNode, Ensemble, EntitySequence, Model, SmartSimEntity +from ...entity import Application, DBNode, Ensemble, EntitySequence, SmartSimEntity from ...error import SmartSimError from ..config import CONFIG from ..utils import helpers as _helpers @@ -38,7 +38,7 @@ _T = t.TypeVar("_T") _U = t.TypeVar("_U") -_AtomicLaunchableT = t.TypeVar("_AtomicLaunchableT", Model, DBNode) +_AtomicLaunchableT = t.TypeVar("_AtomicLaunchableT", Application, DBNode) if t.TYPE_CHECKING: import os @@ -50,7 +50,7 @@ class Manifest: `SmartSimEntity`-derived objects or `EntitySequence`-derived objects) can be accessed by using the corresponding accessor. - Instances of ``Model``, ``Ensemble`` and ``Orchestrator`` + Instances of ``Application``, ``Ensemble`` and ``Orchestrator`` can all be passed as arguments """ @@ -73,15 +73,15 @@ def dbs(self) -> t.List[Orchestrator]: return dbs @property - def models(self) -> t.List[Model]: - """Return Model instances in Manifest + def applications(self) -> t.List[Application]: + """Return Application instances in Manifest - :return: model instances + :return: application instances """ - _models: t.List[Model] = [ - item for item in self._deployables if isinstance(item, Model) + _applications: t.List[Application] = [ + item for item in self._deployables if isinstance(item, Application) ] - return _models + return _applications @property def ensembles(self) -> t.List[Ensemble]: @@ -143,7 +143,7 @@ def _check_entity_lists_nonempty(self) -> None: def __str__(self) -> str: output = "" e_header = "=== Ensembles ===\n" - m_header = "=== Models ===\n" + m_header = "=== Applications ===\n" db_header = "=== Database ===\n" if self.ensembles: output += e_header @@ -157,15 +157,15 @@ def __str__(self) -> str: output += f"{str(ensemble.batch_settings)}\n" output += "\n" - if self.models: + if self.applications: output += m_header - for model in self.models: - output += f"{model.name}\n" - if model.batch_settings: - output += f"{model.batch_settings}\n" - output += f"{model.run_settings}\n" - if model.params: - output += f"Parameters: \n{_helpers.fmt_dict(model.params)}\n" + for application in self.applications: + output += f"{application.name}\n" + if application.batch_settings: + output += f"{application.batch_settings}\n" + output += f"{application.run_settings}\n" + if application.params: + output += f"Parameters: \n{_helpers.fmt_dict(application.params)}\n" output += "\n" for adb in self.dbs: @@ -183,8 +183,8 @@ def __str__(self) -> str: @property def has_db_objects(self) -> bool: """Check if any entity has DBObjects to set""" - ents: t.Iterable[t.Union[Model, Ensemble]] = itertools.chain( - self.models, + ents: t.Iterable[t.Union[Application, Ensemble]] = itertools.chain( + self.applications, self.ensembles, (member for ens in self.ensembles for member in ens.entities), ) @@ -220,8 +220,8 @@ class LaunchedManifest(t.Generic[_T]): """ metadata: _LaunchedManifestMetadata - models: t.Tuple[t.Tuple[Model, _T], ...] - ensembles: t.Tuple[t.Tuple[Ensemble, t.Tuple[t.Tuple[Model, _T], ...]], ...] + applications: t.Tuple[t.Tuple[Application, _T], ...] + ensembles: t.Tuple[t.Tuple[Ensemble, t.Tuple[t.Tuple[Application, _T], ...]], ...] 
databases: t.Tuple[t.Tuple[Orchestrator, t.Tuple[t.Tuple[DBNode, _T], ...]], ...] def map(self, func: t.Callable[[_T], _U]) -> "LaunchedManifest[_U]": @@ -233,10 +233,10 @@ def _map_entity_data( return LaunchedManifest( metadata=self.metadata, - models=_map_entity_data(func, self.models), + applications=_map_entity_data(func, self.applications), ensembles=tuple( - (ens, _map_entity_data(func, model_data)) - for ens, model_data in self.ensembles + (ens, _map_entity_data(func, application_data)) + for ens, application_data in self.ensembles ), databases=tuple( (db_, _map_entity_data(func, node_data)) @@ -257,10 +257,12 @@ class LaunchedManifestBuilder(t.Generic[_T]): launcher_name: str run_id: str = field(default_factory=_helpers.create_short_id_str) - _models: t.List[t.Tuple[Model, _T]] = field(default_factory=list, init=False) - _ensembles: t.List[t.Tuple[Ensemble, t.Tuple[t.Tuple[Model, _T], ...]]] = field( + _applications: t.List[t.Tuple[Application, _T]] = field( default_factory=list, init=False ) + _ensembles: t.List[t.Tuple[Ensemble, t.Tuple[t.Tuple[Application, _T], ...]]] = ( + field(default_factory=list, init=False) + ) _databases: t.List[t.Tuple[Orchestrator, t.Tuple[t.Tuple[DBNode, _T], ...]]] = ( field(default_factory=list, init=False) ) @@ -273,8 +275,8 @@ def exp_telemetry_subdirectory(self) -> pathlib.Path: def run_telemetry_subdirectory(self) -> pathlib.Path: return _format_run_telemetry_path(self.exp_path, self.exp_name, self.run_id) - def add_model(self, model: Model, data: _T) -> None: - self._models.append((model, data)) + def add_application(self, application: Application, data: _T) -> None: + self._applications.append((application, data)) def add_ensemble(self, ens: Ensemble, data: t.Sequence[_T]) -> None: self._ensembles.append((ens, self._entities_to_data(ens.entities, data))) @@ -303,7 +305,7 @@ def finalize(self) -> LaunchedManifest[_T]: self.exp_path, self.launcher_name, ), - models=tuple(self._models), + applications=tuple(self._applications), ensembles=tuple(self._ensembles), databases=tuple(self._databases), ) diff --git a/smartsim/_core/entrypoints/indirect.py b/smartsim/_core/entrypoints/indirect.py index 1f445ac4a1..6944f3a386 100644 --- a/smartsim/_core/entrypoints/indirect.py +++ b/smartsim/_core/entrypoints/indirect.py @@ -61,7 +61,7 @@ def main( :param cmd: a base64 encoded cmd to execute :param entity_type: `SmartSimEntity` entity class. Valid values - include: orchestrator, dbnode, ensemble, model + include: orchestrator, dbnode, ensemble, application :param cwd: working directory to execute the cmd from :param status_dir: path to the output directory for status updates """ diff --git a/smartsim/_core/generation/generator.py b/smartsim/_core/generation/generator.py index 8706cf5686..a342190264 100644 --- a/smartsim/_core/generation/generator.py +++ b/smartsim/_core/generation/generator.py @@ -36,10 +36,10 @@ from tabulate import tabulate from ...database import Orchestrator -from ...entity import Ensemble, Model, TaggedFilesHierarchy +from ...entity import Application, Ensemble, TaggedFilesHierarchy from ...log import get_logger from ..control import Manifest -from .modelwriter import ModelWriter +from .modelwriter import ApplicationWriter logger = get_logger(__name__) logger.propagate = False @@ -57,7 +57,7 @@ def __init__( """Initialize a generator object if overwrite is true, replace any existing - configured models within an ensemble if there + configured applications within an ensemble if there is a name collision. 
Also replace any and all directories
         for the experiment with fresh copies. Otherwise, if overwrite
         is false, raises EntityExistsError when there is a name
@@ -67,7 +67,7 @@ def __init__(
         :param overwrite: toggle entity replacement
         :param verbose: Whether generation information should be logged to std out
         """
-        self._writer = ModelWriter()
+        self._writer = ApplicationWriter()
         self.gen_path = gen_path
         self.overwrite = overwrite
         self.log_level = DEBUG if not verbose else INFO
@@ -87,7 +87,7 @@ def generate_experiment(self, *args: t.Any) -> None:
         Generate the file structure for a SmartSim
         experiment. This includes the writing and configuring
-        of input files for a
-        model.
+        of input files for an
+        application.
 
         To have files or directories present in the created entity
         directories, such as datasets or input files, call
@@ -95,7 +95,7 @@ def generate_experiment(self, *args: t.Any) -> None:
         ``entity.attach_generator_files`` for more information on
         what types of files can be included.
 
-        Tagged model files are read, checked for input variables to
+        Tagged application files are read, checked for input variables to
         configure, and written. Input variables to configure are
         specified with a tag within the input file itself. The default
         tag is surronding an input value with semicolons.
@@ -107,25 +107,25 @@ def generate_experiment(self, *args: t.Any) -> None:
         self._gen_exp_dir()
         self._gen_orc_dir(generator_manifest.dbs)
         self._gen_entity_list_dir(generator_manifest.ensembles)
-        self._gen_entity_dirs(generator_manifest.models)
+        self._gen_entity_dirs(generator_manifest.applications)
 
     def set_tag(self, tag: str, regex: t.Optional[str] = None) -> None:
         """Set the tag used for tagging input files
 
         Set a tag or a regular expression for the
-        generator to look for when configuring new models.
+        generator to look for when configuring new applications.
 
         For example, a tag might be ``;`` where the
-        expression being replaced in the model configuration
+        expression being replaced in the application configuration
         file would look like ``;expression;``
 
         A full regular expression might tag specific
-        model configurations such that the configuration
+        application configurations such that the configuration
         files don't need to be tagged manually.
 
         :param tag: A string of characters that signify the
                     string to be changed. Defaults to ``;``
-        :param regex: full regex for the modelwriter to search for
+        :param regex: full regex for the applicationwriter to search for
         """
         self._writer.set_tag(tag, regex)
@@ -189,16 +189,16 @@ def _gen_entity_list_dir(self, entity_lists: t.List[Ensemble]) -> None:
             mkdir(elist_dir)
             elist.path = elist_dir
 
-            self._gen_entity_dirs(list(elist.models), entity_list=elist)
+            self._gen_entity_dirs(list(elist.applications), entity_list=elist)
 
     def _gen_entity_dirs(
         self,
-        entities: t.List[Model],
+        entities: t.List[Application],
         entity_list: t.Optional[Ensemble] = None,
     ) -> None:
         """Generate directories for Entity instances
 
-        :param entities: list of Model instances
+        :param entities: list of Application instances
         :param entity_list: Ensemble instance
         :raises EntityExistsError: if a directory already exists for
                 an entity by that name
@@ -228,13 +228,13 @@ def _gen_entity_dirs(
             self._link_entity_files(entity)
             self._write_tagged_entity_files(entity)
 
-    def _write_tagged_entity_files(self, entity: Model) -> None:
+    def _write_tagged_entity_files(self, entity: Application) -> None:
         """Read, configure and write the tagged input files for
-        a Model instance within an ensemble. This function
+        an Application instance within an ensemble. This function
         specifically deals with the tagged files attached to
         an Ensemble.
 
-        :param entity: a Model instance
+        :param entity: an Application instance
         """
         if entity.files:
             to_write = []
@@ -263,20 +263,20 @@ def _build_tagged_files(tagged: TaggedFilesHierarchy) -> None:
             _build_tagged_files(entity.files.tagged_hierarchy)
 
         # write in changes to configurations
-        if isinstance(entity, Model):
-            files_to_params = self._writer.configure_tagged_model_files(
+        if isinstance(entity, Application):
+            files_to_params = self._writer.configure_tagged_application_files(
                 to_write, entity.params
             )
             self._log_params(entity, files_to_params)
 
     def _log_params(
-        self, entity: Model, files_to_params: t.Dict[str, t.Dict[str, str]]
+        self, entity: Application, files_to_params: t.Dict[str, t.Dict[str, str]]
     ) -> None:
         """Log which files were modified during generation
         and what values were set to the parameters
 
-        :param entity: the model being generated
+        :param entity: the application being generated
         :param files_to_params: a dict connecting each file to its parameter settings
         """
         used_params: t.Dict[str, str] = {}
@@ -292,13 +292,13 @@ def _log_params(
             )
             logger.log(
                 level=self.log_level,
-                msg=f"Configured model {entity.name} with params {used_params_str}",
+                msg=f"Configured application {entity.name} with params {used_params_str}",
             )
             file_table = tabulate(
                 file_to_tables.items(),
                 headers=["File name", "Parameters"],
             )
-            log_entry = f"Model name: {entity.name}\n{file_table}\n\n"
+            log_entry = f"Application name: {entity.name}\n{file_table}\n\n"
             with open(self.log_file, mode="a", encoding="utf-8") as logfile:
                 logfile.write(log_entry)
             with open(
@@ -309,14 +309,14 @@ def _log_params(
         else:
             logger.log(
                 level=self.log_level,
-                msg=f"Configured model {entity.name} with no parameters",
+                msg=f"Configured application {entity.name} with no parameters",
             )
 
     @staticmethod
-    def _copy_entity_files(entity: Model) -> None:
+    def _copy_entity_files(entity: Application) -> None:
         """Copy the entity files and directories attached to this entity.
 
-        :param entity: Model
+        :param entity: Application
         """
         if entity.files:
             for to_copy in entity.files.copy:
@@ -327,10 +327,10 @@ def _copy_entity_files(entity: Model) -> None:
                 shutil.copyfile(to_copy, dst_path)
 
     @staticmethod
-    def _link_entity_files(entity: Model) -> None:
+    def _link_entity_files(entity: Application) -> None:
         """Symlink the entity files attached to this entity.
 
-        :param entity: Model
+        :param entity: Application
         """
         if entity.files:
             for to_link in entity.files.link:
diff --git a/smartsim/_core/generation/modelwriter.py b/smartsim/_core/generation/modelwriter.py
index 2998d4e354..a22bc029a2 100644
--- a/smartsim/_core/generation/modelwriter.py
+++ b/smartsim/_core/generation/modelwriter.py
@@ -36,19 +36,19 @@
 logger = get_logger(__name__)
 
 
-class ModelWriter:
+class ApplicationWriter:
     def __init__(self) -> None:
         self.tag = ";"
         self.regex = "(;[^;]+;)"
         self.lines: t.List[str] = []
 
     def set_tag(self, tag: str, regex: t.Optional[str] = None) -> None:
-        """Set the tag for the modelwriter to search for within
+        """Set the tag for the applicationwriter to search for within
         tagged files attached to an entity.
 
-        :param tag: tag for the modelwriter to search for,
+        :param tag: tag for the applicationwriter to search for,
                     defaults to semi-colon e.g. ";"
";" - :param regex: full regex for the modelwriter to search for, + :param regex: full regex for the applicationwriter to search for, defaults to "(;.+;)" """ if regex: @@ -57,17 +57,17 @@ def set_tag(self, tag: str, regex: t.Optional[str] = None) -> None: self.tag = tag self.regex = "".join(("(", tag, ".+", tag, ")")) - def configure_tagged_model_files( + def configure_tagged_application_files( self, tagged_files: t.List[str], params: t.Dict[str, str], make_missing_tags_fatal: bool = False, ) -> t.Dict[str, t.Dict[str, str]]: - """Read, write and configure tagged files attached to a Model + """Read, write and configure tagged files attached to a Application instance. :param tagged_files: list of paths to tagged files - :param params: model parameters + :param params: application parameters :param make_missing_tags_fatal: raise an error if a tag is missing :returns: A dict connecting each file to its parameter settings """ @@ -81,7 +81,7 @@ def configure_tagged_model_files( return files_to_tags def _set_lines(self, file_path: str) -> None: - """Set the lines for the modelwrtter to iterate over + """Set the lines for the applicationwriter to iterate over :param file_path: path to the newly created and tagged file :raises ParameterWriterError: if the newly created file cannot be read @@ -108,9 +108,9 @@ def _replace_tags( self, params: t.Dict[str, str], make_fatal: bool = False ) -> t.Dict[str, str]: """Replace the tagged parameters within the file attached to this - model. The tag defaults to ";" + application. The tag defaults to ";" - :param model: The model instance + :param application: The application instance :param make_fatal: (Optional) Set to True to force a fatal error if a tag is not matched :returns: A dict of parameter names and values set for the file @@ -127,7 +127,7 @@ def _replace_tags( line = re.sub(self.regex, new_val, line, 1) used_params[previous_value] = new_val - # if a tag is found but is not in this model's configurations + # if a tag is found but is not in this application's configurations # put in placeholder value else: tag = tagged_line.split(self.tag)[1] @@ -145,11 +145,11 @@ def _replace_tags( return used_params def _is_ensemble_spec( - self, tagged_line: str, model_params: t.Dict[str, str] + self, tagged_line: str, application_params: t.Dict[str, str] ) -> bool: split_tag = tagged_line.split(self.tag) prev_val = split_tag[1] - if prev_val in model_params.keys(): + if prev_val in application_params.keys(): return True return False diff --git a/smartsim/_core/launcher/step/alpsStep.py b/smartsim/_core/launcher/step/alpsStep.py index 9b744498bd..0753354b4f 100644 --- a/smartsim/_core/launcher/step/alpsStep.py +++ b/smartsim/_core/launcher/step/alpsStep.py @@ -29,7 +29,7 @@ import typing as t from shlex import split as sh_split -from ....entity import DBNode, Model +from ....entity import Application, DBNode from ....error import AllocationError from ....log import get_logger from ....settings import AprunSettings, RunSettings, Singularity @@ -40,7 +40,7 @@ class AprunStep(Step): def __init__( - self, entity: t.Union[Model, DBNode], run_settings: AprunSettings + self, entity: t.Union[Application, DBNode], run_settings: AprunSettings ) -> None: """Initialize a ALPS aprun job step diff --git a/smartsim/_core/launcher/step/localStep.py b/smartsim/_core/launcher/step/localStep.py index 57e2a87837..06fa574599 100644 --- a/smartsim/_core/launcher/step/localStep.py +++ b/smartsim/_core/launcher/step/localStep.py @@ -28,14 +28,14 @@ import shutil import typing as t -from 
....entity import DBNode, Model +from ....entity import Application, DBNode from ....settings import Singularity from ....settings.base import RunSettings from .step import Step, proxyable_launch_cmd class LocalStep(Step): - def __init__(self, entity: t.Union[Model, DBNode], run_settings: RunSettings): + def __init__(self, entity: t.Union[Application, DBNode], run_settings: RunSettings): super().__init__(entity, run_settings) self.run_settings = entity.run_settings self._env = self._set_env() diff --git a/smartsim/_core/launcher/step/lsfStep.py b/smartsim/_core/launcher/step/lsfStep.py index 6782fbd273..8c3951bd1f 100644 --- a/smartsim/_core/launcher/step/lsfStep.py +++ b/smartsim/_core/launcher/step/lsfStep.py @@ -28,7 +28,7 @@ import shutil import typing as t -from ....entity import DBNode, Model +from ....entity import Application, DBNode from ....error import AllocationError from ....log import get_logger from ....settings import BsubBatchSettings, JsrunSettings @@ -40,7 +40,7 @@ class BsubBatchStep(Step): def __init__( - self, entity: t.Union[Model, DBNode], batch_settings: BsubBatchSettings + self, entity: t.Union[Application, DBNode], batch_settings: BsubBatchSettings ) -> None: """Initialize a LSF bsub step @@ -106,7 +106,7 @@ def _write_script(self) -> str: class JsrunStep(Step): - def __init__(self, entity: t.Union[Model, DBNode], run_settings: RunSettings): + def __init__(self, entity: t.Union[Application, DBNode], run_settings: RunSettings): """Initialize a LSF jsrun job step :param name: name of the entity to be launched diff --git a/smartsim/_core/launcher/step/mpiStep.py b/smartsim/_core/launcher/step/mpiStep.py index 1cbab9fd8f..4ee10e4d2c 100644 --- a/smartsim/_core/launcher/step/mpiStep.py +++ b/smartsim/_core/launcher/step/mpiStep.py @@ -29,7 +29,7 @@ import typing as t from shlex import split as sh_split -from ....entity import DBNode, Model +from ....entity import Application, DBNode from ....error import AllocationError, SmartSimError from ....log import get_logger from ....settings import MpiexecSettings, MpirunSettings, OrterunSettings @@ -41,7 +41,7 @@ class _BaseMPIStep(Step): def __init__( - self, entity: t.Union[Model, DBNode], run_settings: RunSettings + self, entity: t.Union[Application, DBNode], run_settings: RunSettings ) -> None: """Initialize a job step conforming to the MPI standard @@ -156,7 +156,7 @@ def _make_mpmd(self) -> t.List[str]: class MpiexecStep(_BaseMPIStep): def __init__( - self, entity: t.Union[Model, DBNode], run_settings: MpiexecSettings + self, entity: t.Union[Application, DBNode], run_settings: MpiexecSettings ) -> None: """Initialize an mpiexec job step @@ -172,7 +172,7 @@ def __init__( class MpirunStep(_BaseMPIStep): def __init__( - self, entity: t.Union[Model, DBNode], run_settings: MpirunSettings + self, entity: t.Union[Application, DBNode], run_settings: MpirunSettings ) -> None: """Initialize an mpirun job step @@ -188,7 +188,7 @@ def __init__( class OrterunStep(_BaseMPIStep): def __init__( - self, entity: t.Union[Model, DBNode], run_settings: OrterunSettings + self, entity: t.Union[Application, DBNode], run_settings: OrterunSettings ) -> None: """Initialize an orterun job step diff --git a/smartsim/_core/launcher/step/pbsStep.py b/smartsim/_core/launcher/step/pbsStep.py index 9177dd6d56..34cc0587c5 100644 --- a/smartsim/_core/launcher/step/pbsStep.py +++ b/smartsim/_core/launcher/step/pbsStep.py @@ -26,7 +26,7 @@ import typing as t -from ....entity import DBNode, Model +from ....entity import Application, DBNode from ....log 
import get_logger from ....settings import QsubBatchSettings from .step import Step @@ -36,7 +36,7 @@ class QsubBatchStep(Step): def __init__( - self, entity: t.Union[Model, DBNode], batch_settings: QsubBatchSettings + self, entity: t.Union[Application, DBNode], batch_settings: QsubBatchSettings ) -> None: """Initialize a PBSpro qsub step diff --git a/smartsim/_core/launcher/step/slurmStep.py b/smartsim/_core/launcher/step/slurmStep.py index 5711a56942..58fcdf97f8 100644 --- a/smartsim/_core/launcher/step/slurmStep.py +++ b/smartsim/_core/launcher/step/slurmStep.py @@ -29,7 +29,7 @@ import typing as t from shlex import split as sh_split -from ....entity import DBNode, Ensemble, Model +from ....entity import Application, DBNode, Ensemble from ....error import AllocationError from ....log import get_logger from ....settings import RunSettings, SbatchSettings, Singularity, SrunSettings @@ -40,7 +40,7 @@ class SbatchStep(Step): def __init__( - self, entity: t.Union[Model, DBNode], batch_settings: SbatchSettings + self, entity: t.Union[Application, DBNode], batch_settings: SbatchSettings ) -> None: """Initialize a Slurm Sbatch step @@ -102,7 +102,7 @@ def _write_script(self) -> str: class SrunStep(Step): def __init__( - self, entity: t.Union[Model, DBNode], run_settings: SrunSettings + self, entity: t.Union[Application, DBNode], run_settings: SrunSettings ) -> None: """Initialize a srun job step @@ -190,7 +190,7 @@ def _get_mpmd(self) -> t.List[RunSettings]: return self.run_settings.mpmd @staticmethod - def _get_exe_args_list(entity: t.Union[Model, DBNode]) -> t.List[str]: + def _get_exe_args_list(entity: t.Union[Application, DBNode]) -> t.List[str]: """Convenience function to encapsulate checking the runsettings.exe_args type to always return a list """ diff --git a/smartsim/_core/launcher/step/step.py b/smartsim/_core/launcher/step/step.py index 46c7478db0..556e219725 100644 --- a/smartsim/_core/launcher/step/step.py +++ b/smartsim/_core/launcher/step/step.py @@ -37,7 +37,7 @@ from smartsim._core.config import CONFIG from smartsim.error.errors import SmartSimError, UnproxyableStepError -from ....entity import DBNode, Ensemble, Model +from ....entity import Application, DBNode, Ensemble from ....log import get_logger from ....settings.base import RunSettings, SettingsBase from ...utils.helpers import encode_cmd, get_base_36_repr @@ -48,7 +48,7 @@ class Step: def __init__( - self, entity: t.Union[Model, DBNode], step_settings: SettingsBase + self, entity: t.Union[Application, DBNode], step_settings: SettingsBase ) -> None: self.name = self._create_unique_name(entity.name) self.entity = entity diff --git a/smartsim/_core/utils/serialize.py b/smartsim/_core/utils/serialize.py index 92cb79df4b..6082ce4c0e 100644 --- a/smartsim/_core/utils/serialize.py +++ b/smartsim/_core/utils/serialize.py @@ -37,7 +37,7 @@ if t.TYPE_CHECKING: from smartsim._core.control.manifest import LaunchedManifest as _Manifest from smartsim.database.orchestrator import Orchestrator - from smartsim.entity import DBNode, Ensemble, Model + from smartsim.entity import Application, DBNode, Ensemble from smartsim.entity.dbobject import DBModel, DBScript from smartsim.settings.base import BatchSettings, RunSettings @@ -58,9 +58,9 @@ def save_launch_manifest(manifest: _Manifest[TStepLaunchMetaData]) -> None: new_run = { "run_id": manifest.metadata.run_id, "timestamp": int(time.time_ns()), - "model": [ - _dictify_model(model, *telemetry_metadata) - for model, telemetry_metadata in manifest.models + "application": [ + 
_dictify_application(application, *telemetry_metadata) + for application, telemetry_metadata in manifest.applications ], "orchestrator": [ _dictify_db(db, nodes_info) for db, nodes_info in manifest.databases @@ -95,8 +95,8 @@ def save_launch_manifest(manifest: _Manifest[TStepLaunchMetaData]) -> None: json.dump(manifest_dict, file, indent=2) -def _dictify_model( - model: Model, +def _dictify_application( + application: Application, step_id: t.Optional[str], task_id: t.Optional[str], managed: t.Optional[bool], @@ -104,31 +104,31 @@ def _dictify_model( err_file: str, telemetry_data_path: Path, ) -> t.Dict[str, t.Any]: - if model.run_settings is not None: - colo_settings = (model.run_settings.colocated_db_settings or {}).copy() + if application.run_settings is not None: + colo_settings = (application.run_settings.colocated_db_settings or {}).copy() else: colo_settings = ({}).copy() db_scripts = t.cast("t.List[DBScript]", colo_settings.pop("db_scripts", [])) db_models = t.cast("t.List[DBModel]", colo_settings.pop("db_models", [])) return { - "name": model.name, - "path": model.path, - "exe_args": model.exe_args, - "exe": model.exe, - "run_settings": _dictify_run_settings(model.run_settings), + "name": application.name, + "path": application.path, + "exe_args": application.exe_args, + "exe": application.exe, + "run_settings": _dictify_run_settings(application.run_settings), "batch_settings": ( - _dictify_batch_settings(model.batch_settings) - if model.batch_settings + _dictify_batch_settings(application.batch_settings) + if application.batch_settings else {} ), - "params": model.params, + "params": application.params, "files": ( { - "Symlink": model.files.link, - "Configure": model.files.tagged, - "Copy": model.files.copy, + "Symlink": application.files.link, + "Configure": application.files.tagged, + "Copy": application.files.copy, } - if model.files + if application.files else { "Symlink": [], "Configure": [], @@ -173,7 +173,7 @@ def _dictify_model( def _dictify_ensemble( ens: Ensemble, - members: t.Sequence[t.Tuple[Model, TStepLaunchMetaData]], + members: t.Sequence[t.Tuple[Application, TStepLaunchMetaData]], ) -> t.Dict[str, t.Any]: return { "name": ens.name, @@ -185,9 +185,9 @@ def _dictify_ensemble( if ens.batch_settings else {} ), - "models": [ - _dictify_model(model, *launching_metadata) - for model, launching_metadata in members + "applications": [ + _dictify_application(application, *launching_metadata) + for application, launching_metadata in members ], } diff --git a/smartsim/_core/utils/telemetry/manifest.py b/smartsim/_core/utils/telemetry/manifest.py index 942fa4ae87..f5b6a92e0b 100644 --- a/smartsim/_core/utils/telemetry/manifest.py +++ b/smartsim/_core/utils/telemetry/manifest.py @@ -43,8 +43,8 @@ class Run: timestamp: int """the timestamp at the time the `Experiment.start` is called""" - models: t.List[JobEntity] - """models started in this run""" + applications: t.List[JobEntity] + """applications started in this run""" orchestrators: t.List[JobEntity] """orchestrators started in this run""" ensembles: t.List[JobEntity] @@ -58,7 +58,7 @@ def flatten( :param filter_fn: optional boolean filter that returns True for entities to include in the result """ - entities = self.models + self.orchestrators + self.ensembles + entities = self.applications + self.orchestrators + self.ensembles if filter_fn: entities = [entity for entity in entities if filter_fn(entity)] return entities @@ -82,11 +82,11 @@ def load_entity( # an entity w/parent keys must create entities for the items that it 
# comprises. traverse the children and create each entity - parent_keys = {"shards", "models"} + parent_keys = {"shards", "applications"} parent_keys = parent_keys.intersection(entity_dict.keys()) if parent_keys: - container = "shards" if "shards" in parent_keys else "models" - child_type = "orchestrator" if container == "shards" else "model" + container = "shards" if "shards" in parent_keys else "applications" + child_type = "orchestrator" if container == "shards" else "application" for child_entity in entity_dict[container]: entity = JobEntity.from_manifest( child_type, child_entity, str(exp_dir), raw_experiment @@ -118,7 +118,7 @@ def load_entities( :return: list of loaded `JobEntity` instances """ persisted: t.Dict[str, t.List[JobEntity]] = { - "model": [], + "application": [], "orchestrator": [], } for item in run[entity_type]: @@ -144,7 +144,7 @@ def load_run( # create an output mapping to hold the deserialized entities run_entities: t.Dict[str, t.List[JobEntity]] = { - "model": [], + "application": [], "orchestrator": [], "ensemble": [], } @@ -164,7 +164,7 @@ def load_run( loaded_run = Run( raw_run["timestamp"], - run_entities["model"], + run_entities["application"], run_entities["orchestrator"], run_entities["ensemble"], ) diff --git a/smartsim/_core/utils/telemetry/util.py b/smartsim/_core/utils/telemetry/util.py index 2c51d96000..e716af150e 100644 --- a/smartsim/_core/utils/telemetry/util.py +++ b/smartsim/_core/utils/telemetry/util.py @@ -55,7 +55,7 @@ def write_event( :param task_id: the task_id of a managed task :param step_id: the step_id of an unmanaged task :param entity_type: the SmartSimEntity subtype - (e.g. `orchestrator`, `ensemble`, `model`, `dbnode`, ...) + (e.g. `orchestrator`, `ensemble`, `application`, `dbnode`, ...) :param event_type: the event subtype :param status_dir: path where the SmartSimEntity outputs are written :param detail: (optional) additional information to write with the event diff --git a/smartsim/entity/__init__.py b/smartsim/entity/__init__.py index 40f03fcddc..38162ac42e 100644 --- a/smartsim/entity/__init__.py +++ b/smartsim/entity/__init__.py @@ -30,4 +30,4 @@ from .entity import SmartSimEntity, TelemetryConfiguration from .entityList import EntityList, EntitySequence from .files import TaggedFilesHierarchy -from .model import Model +from .model import Application diff --git a/smartsim/entity/ensemble.py b/smartsim/entity/ensemble.py index e3ddf38cf4..4ce7239fa4 100644 --- a/smartsim/entity/ensemble.py +++ b/smartsim/entity/ensemble.py @@ -44,7 +44,7 @@ from .dbobject import DBModel, DBScript from .entity import SmartSimEntity from .entityList import EntityList -from .model import Model +from .model import Application from .strategies import create_all_permutations, random_permutations, step_values logger = get_logger(__name__) @@ -54,8 +54,8 @@ ] -class Ensemble(EntityList[Model]): - """``Ensemble`` is a group of ``Model`` instances that can +class Ensemble(EntityList[Application]): + """``Ensemble`` is a group of ``Application`` instances that can be treated as a reference to a single instance. """ @@ -72,7 +72,7 @@ def __init__( perm_strat: str = "all_perm", **kwargs: t.Any, ) -> None: - """Initialize an Ensemble of Model instances. + """Initialize an Ensemble of Application instances. The kwargs argument can be used to pass custom input parameters to the permutation strategy. 
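Reviewer note on the `ensemble.py` hunks that follow: with the rename, a parameterized ensemble expands `params` into `Application` members via the chosen permutation strategy. A hedged sketch — assuming `Ensemble` can be constructed directly with the new `exe`/`exe_args` arguments documented in the hunk below:

```python
# Sketch only -- not part of this diff. Assumes Ensemble accepts the
# exe/exe_args arguments added alongside the Model -> Application rename.
from smartsim.entity import Ensemble
from smartsim.settings import RunSettings

ens = Ensemble(
    "demo",
    params={"alpha": ["1", "2"], "beta": ["10", "20"]},
    exe="echo",
    exe_args=["hello"],
    run_settings=RunSettings("echo"),
    perm_strat="all_perm",
)

# "all_perm" yields the 2 x 2 cross product of parameter values,
# producing members demo_0 .. demo_3 via the new applications property
assert len(ens.applications) == 4
```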
@@ -80,16 +80,16 @@ def __init__( :param name: name of the ensemble :param exe: executable to run :param exe_args: executable arguments - :param params: parameters to expand into ``Model`` members + :param params: parameters to expand into ``Application`` members :param params_as_args: list of params that should be used as command - line arguments to the ``Model`` member executables and not written + line arguments to the ``Application`` member executables and not written to generator files :param batch_settings: describes settings for ``Ensemble`` as batch workload - :param run_settings: describes how each ``Model`` should be executed - :param replicas: number of ``Model`` replicas to create - a keyword + :param run_settings: describes how each ``Application`` should be executed + :param replicas: number of ``Application`` replicas to create - a keyword argument of kwargs :param perm_strategy: strategy for expanding ``params`` into - ``Model`` instances from params argument + ``Application`` instances from params argument options are "all_perm", "step", "random" or a callable function. :return: ``Ensemble`` instance @@ -106,12 +106,12 @@ def __init__( super().__init__(name, path=str(path), perm_strat=perm_strat, **kwargs) @property - def models(self) -> t.Collection[Model]: + def applications(self) -> t.Collection[Application]: """An alias for a shallow copy of the ``entities`` attribute""" return list(self.entities) def _initialize_entities(self, **kwargs: t.Any) -> None: - """Initialize all the models within the ensemble based + """Initialize all the applications within the ensemble based on the parameters passed to the ensemble and the permutation strategy given at init. @@ -125,35 +125,35 @@ def _initialize_entities(self, **kwargs: t.Any) -> None: # the ensemble and assign run_settings to each member if self.params: if self.run_settings and self.exe: - param_names, params = self._read_model_parameters() + param_names, params = self._read_application_parameters() - # Compute all combinations of model parameters and arguments - n_models = kwargs.get("n_models", 0) - all_model_params = strategy(param_names, params, n_models) - if not isinstance(all_model_params, list): + # Compute all combinations of application parameters and arguments + n_applications = kwargs.get("n_applications", 0) + all_application_params = strategy(param_names, params, n_applications) + if not isinstance(all_application_params, list): raise UserStrategyError(strategy) - for i, param_set in enumerate(all_model_params): + for i, param_set in enumerate(all_application_params): if not isinstance(param_set, dict): raise UserStrategyError(strategy) run_settings = deepcopy(self.run_settings) - model_name = "_".join((self.name, str(i))) - model = Model( - name=model_name, + application_name = "_".join((self.name, str(i))) + application = Application( + name=application_name, exe=self.exe, exe_args=self.exe_args, params=param_set, - path=osp.join(self.path, model_name), + path=osp.join(self.path, application_name), run_settings=run_settings, params_as_args=self.params_as_args, ) - model.enable_key_prefixing() - model.params_to_args() + application.enable_key_prefixing() + application.params_to_args() logger.debug( - f"Created ensemble member: {model_name} in {self.name}" + f"Created ensemble member: {application_name} in {self.name}" ) - self.add_model(model) - # cannot generate models without run settings + self.add_application(application) + # cannot generate applications without run settings else: raise SmartSimError( 
"Ensembles without 'params' or 'replicas' argument to " @@ -163,20 +163,20 @@ def _initialize_entities(self, **kwargs: t.Any) -> None: if self.run_settings and self.exe: if replicas: for i in range(replicas): - model_name = "_".join((self.name, str(i))) - model = Model( - name=model_name, + application_name = "_".join((self.name, str(i))) + application = Application( + name=application_name, params={}, exe=self.exe, exe_args=self.exe_args, - path=osp.join(self.path, model_name), + path=osp.join(self.path, application_name), run_settings=deepcopy(self.run_settings), ) - model.enable_key_prefixing() + application.enable_key_prefixing() logger.debug( - f"Created ensemble member: {model_name} in {self.name}" + f"Created ensemble member: {application_name} in {self.name}" ) - self.add_model(model) + self.add_application(application) else: raise SmartSimError( "Ensembles without 'params' or 'replicas' argument to " @@ -191,29 +191,29 @@ def _initialize_entities(self, **kwargs: t.Any) -> None: else: logger.info("Empty ensemble created for batch launch") - def add_model(self, model: Model) -> None: - """Add a model to this ensemble + def add_application(self, application: Application) -> None: + """Add a application to this ensemble - :param model: model instance to be added - :raises TypeError: if model is not an instance of ``Model`` - :raises EntityExistsError: if model already exists in this ensemble + :param application: application instance to be added + :raises TypeError: if application is not an instance of ``Application`` + :raises EntityExistsError: if application already exists in this ensemble """ - if not isinstance(model, Model): + if not isinstance(application, Application): raise TypeError( - f"Argument to add_model was of type {type(model)}, not Model" + f"Argument to add_application was of type {type(application)}, not Application" ) - # "in" operator uses model name for __eq__ - if model in self.entities: + # "in" operator uses application name for __eq__ + if application in self.entities: raise EntityExistsError( - f"Model {model.name} already exists in ensemble {self.name}" + f"Application {application.name} already exists in ensemble {self.name}" ) if self._db_models: - self._extend_entity_db_models(model, self._db_models) + self._extend_entity_db_models(application, self._db_models) if self._db_scripts: - self._extend_entity_db_scripts(model, self._db_scripts) + self._extend_entity_db_scripts(application, self._db_scripts) - self.entities.append(model) + self.entities.append(application) def register_incoming_entity(self, incoming_entity: SmartSimEntity) -> None: """Register future communication between entities. @@ -226,22 +226,24 @@ def register_incoming_entity(self, incoming_entity: SmartSimEntity) -> None: :param incoming_entity: The entity that data will be received from """ - for model in self.models: - model.register_incoming_entity(incoming_entity) + for application in self.applications: + application.register_incoming_entity(incoming_entity) def enable_key_prefixing(self) -> None: - """If called, each model within this ensemble will prefix its key with its - own model name. + """If called, each application within this ensemble will prefix its key with its + own application name. 
""" - for model in self.models: - model.enable_key_prefixing() + for application in self.applications: + application.enable_key_prefixing() def query_key_prefixing(self) -> bool: - """Inquire as to whether each model within the ensemble will prefix their keys + """Inquire as to whether each application within the ensemble will prefix their keys - :returns: True if all models have key prefixing enabled, False otherwise + :returns: True if all applications have key prefixing enabled, False otherwise """ - return all(model.query_key_prefixing() for model in self.models) + return all( + application.query_key_prefixing() for application in self.applications + ) def attach_generator_files( self, @@ -249,7 +251,7 @@ def attach_generator_files( to_symlink: t.Optional[t.List[str]] = None, to_configure: t.Optional[t.List[str]] = None, ) -> None: - """Attach files to each model within the ensemble for generation + """Attach files to each application within the ensemble for generation Attach files needed for the entity that, upon generation, will be located in the path of the entity. @@ -258,8 +260,8 @@ def attach_generator_files( the path of the entity, and files "to_symlink" are symlinked into the path of the entity. - Files "to_configure" are text based model input files where - parameters for the model are set. Note that only models + Files "to_configure" are text based application input files where + parameters for the application are set. Note that only applications support the "to_configure" field. These files must have fields tagged that correspond to the values the user would like to change. The tag is settable but defaults @@ -269,24 +271,27 @@ def attach_generator_files( :param to_symlink: files to symlink :param to_configure: input files with tagged parameters """ - for model in self.models: - model.attach_generator_files( + for application in self.applications: + application.attach_generator_files( to_copy=to_copy, to_symlink=to_symlink, to_configure=to_configure ) @property def attached_files_table(self) -> str: """Return a plain-text table with information about files - attached to models belonging to this ensemble. + attached to applications belonging to this ensemble. - :returns: A table of all files attached to all models + :returns: A table of all files attached to all applications """ - if not self.models: + if not self.applications: return "The ensemble is empty, no files to show." 
table = tabulate( - [[model.name, model.attached_files_table] for model in self.models], - headers=["Model name", "Files"], + [ + [application.name, application.attached_files_table] + for application in self.applications + ], + headers=["Application name", "Files"], tablefmt="grid", ) @@ -298,7 +303,7 @@ def print_attached_files(self) -> None: @staticmethod def _set_strategy(strategy: str) -> StrategyFunction: - """Set the permutation strategy for generating models within + """Set the permutation strategy for generating applications within the ensemble :param strategy: name of the strategy or callable function @@ -317,9 +322,9 @@ def _set_strategy(strategy: str) -> StrategyFunction: f"Permutation strategy given is not supported: {strategy}" ) - def _read_model_parameters(self) -> t.Tuple[t.List[str], t.List[t.List[str]]]: + def _read_application_parameters(self) -> t.Tuple[t.List[str], t.List[t.List[str]]]: """Take in the parameters given to the ensemble and prepare to - create models for the ensemble + create applications for the ensemble :raises TypeError: if params are of the wrong type :return: param names and values for permutation strategy @@ -415,7 +420,7 @@ def add_ml_model( f'An ML Model with name "{db_model.name}" already exists' ) self._db_models.append(db_model) - for entity in self.models: + for entity in self.applications: self._extend_entity_db_models(entity, [db_model]) def add_script( @@ -429,7 +434,7 @@ def add_script( ) -> None: """TorchScript to launch with every entity belonging to this ensemble - Each script added to the model will be loaded into an + Each script added to the application will be loaded into an orchestrator (converged or not) prior to the execution of every entity belonging to this ensemble @@ -437,7 +442,7 @@ def add_script( present, a number can be passed for specification e.g. "GPU:1". Setting ``devices_per_node=N``, with N greater than one will result - in the model being stored in the first N devices of type ``device``. + in the script being stored in the first N devices of type ``device``. One of either script (in memory string representation) or script_path (file) must be provided @@ -470,7 +475,7 @@ def add_script( f'A Script with name "{db_script.name}" already exists' ) self._db_scripts.append(db_script) - for entity in self.models: + for entity in self.applications: self._extend_entity_db_scripts(entity, [db_script]) def add_function( @@ -483,7 +488,7 @@ def add_function( ) -> None: """TorchScript function to launch with every entity belonging to this ensemble - Each script function to the model will be loaded into a + Each script function to the application will be loaded into a non-converged orchestrator prior to the execution of every entity belonging to this ensemble. @@ -523,11 +528,13 @@ def add_function( f'A Script with name "{db_script.name}" already exists' ) self._db_scripts.append(db_script) - for entity in self.models: + for entity in self.applications: self._extend_entity_db_scripts(entity, [db_script]) @staticmethod - def _extend_entity_db_models(model: Model, db_models: t.List[DBModel]) -> None: + def _extend_entity_db_models( + application: Application, db_models: t.List[DBModel] + ) -> None: """ Ensures that the Machine Learning model names being added to the Ensemble are unique. @@ -536,14 +543,14 @@ def _extend_entity_db_models(model: Model, db_models: t.List[DBModel]) -> None: the Ensemble. An SSUnsupportedError is raised if any duplicate names are found.
Otherwise, it appends the given list of DBModels to the Ensemble. - :param model: SmartSim Model object. + :param application: SmartSim Application object. :param db_models: List of DBModels to append to the Ensemble. """ for add_ml_model in db_models: dupe = next( ( db_model.name - for db_model in model.db_models + for db_model in application.db_models if db_model.name == add_ml_model.name ), None, ) @@ -552,10 +559,12 @@ def _extend_entity_db_models(model: Model, db_models: t.List[DBModel]) -> None: raise SSUnsupportedError( f'An ML Model with name "{add_ml_model.name}" already exists' ) - model.add_ml_model_object(add_ml_model) + application.add_ml_model_object(add_ml_model) @staticmethod - def _extend_entity_db_scripts(model: Model, db_scripts: t.List[DBScript]) -> None: + def _extend_entity_db_scripts( + application: Application, db_scripts: t.List[DBScript] + ) -> None: """ Ensures that the script/function names being added to the Ensemble are unique. @@ -564,14 +573,14 @@ def _extend_entity_db_scripts(model: Model, db_scripts: t.List[DBScript]) -> Non are found. Otherwise, it appends the given list of DBScripts to the Ensemble. - :param model: SmartSim Model object. + :param application: SmartSim Application object. :param db_scripts: List of DBScripts to append to the Ensemble. """ for add_script in db_scripts: dupe = next( ( add_script.name - for db_script in model.db_scripts + for db_script in application.db_scripts if db_script.name == add_script.name ), None, ) @@ -580,4 +589,4 @@ def _extend_entity_db_scripts(model: Model, db_scripts: t.List[DBScript]) -> Non raise SSUnsupportedError( f'A Script with name "{add_script.name}" already exists' ) - model.add_script_object(add_script) + application.add_script_object(add_script) diff --git a/smartsim/entity/files.py b/smartsim/entity/files.py index d00e946e2a..9ec86a68b5 100644 --- a/smartsim/entity/files.py +++ b/smartsim/entity/files.py @@ -32,16 +32,16 @@ class EntityFiles: """EntityFiles are the files a user wishes to have available to - models and nodes within SmartSim. Each entity has a method + applications and nodes within SmartSim. Each entity has a method `entity.attach_generator_files()` that creates one of these objects such that at generation time, each file type will be - present within the generated model or node directory. + present within the generated application or node directory. - Tagged files are the configuration files for a model that - can be searched through and edited by the ModelWriter. + Tagged files are the configuration files for an application that + can be searched through and edited by the ApplicationWriter. Copy files are files that a user wants to copy into the - model or node directory without searching through and + application or node directory without searching through and editing them for tags.
Lastly, symlink can be used for big datasets or input @@ -57,10 +57,10 @@ def __init__( ) -> None: """Initialize an EntityFiles instance - :param tagged: tagged files for model configuration - :param copy: files or directories to copy into model + :param tagged: tagged files for application configuration + :param copy: files or directories to copy into application or node directories - :param symlink: files to symlink into model or node + :param symlink: files to symlink into application or node directories """ self.tagged = tagged or [] diff --git a/smartsim/entity/model.py b/smartsim/entity/model.py index 96ab4a780c..98c254a80f 100644 --- a/smartsim/entity/model.py +++ b/smartsim/entity/model.py @@ -47,7 +47,7 @@ logger = get_logger(__name__) -class Model(SmartSimEntity): +class Application(SmartSimEntity): def __init__( self, name: str, @@ -59,12 +59,12 @@ def __init__( params_as_args: t.Optional[t.List[str]] = None, batch_settings: t.Optional[BatchSettings] = None, ): - """Initialize a ``Model`` + """Initialize an ``Application`` - :param name: name of the model + :param name: name of the application :param exe: executable to run :param exe_args: executable arguments - :param params: model parameters for writing into configuration files or + :param params: application parameters for writing into configuration files or to be passed as command line arguments to executable. :param path: path to output, error, and configuration files :param run_settings: launcher settings specified in the experiment @@ -72,7 +72,7 @@ def __init__( interpreted as command line arguments to be added to run_settings :param batch_settings: Launcher settings for running the individual - model as a batch job + application as a batch job """ super().__init__(name, str(path), run_settings) self.exe = [exe] if run_settings.container else [expand_exe_path(exe)] @@ -158,11 +158,11 @@ def register_incoming_entity(self, incoming_entity: SmartSimEntity) -> None: self.incoming_entities.append(incoming_entity) def enable_key_prefixing(self) -> None: - """If called, the entity will prefix its keys with its own model name""" + """If called, the entity will prefix its keys with its own application name""" self._key_prefixing_enabled = True def disable_key_prefixing(self) -> None: - """If called, the entity will not prefix its keys with its own model name""" + """If called, the entity will not prefix its keys with its own application name""" self._key_prefixing_enabled = False def query_key_prefixing(self) -> bool: @@ -189,8 +189,8 @@ def attach_generator_files( the path of the entity, and files "to_symlink" are symlinked into the path of the entity. - Files "to_configure" are text based model input files where - parameters for the model are set. Note that only models + Files "to_configure" are text based application input files where + parameters for the application are set. Note that only applications support the "to_configure" field. These files must have fields tagged that correspond to the values the user would like to change. The tag is settable but defaults @@ -225,7 +225,7 @@ def attached_files_table(self) -> str: :returns: String version of table """ if not self.files: - return "No file attached to this model." + return "No file attached to this application."
return str(self.files) def print_attached_files(self) -> None: @@ -233,7 +233,7 @@ def print_attached_files(self) -> None: print(self.attached_files_table) def colocate_db(self, *args: t.Any, **kwargs: t.Any) -> None: - """An alias for ``Model.colocate_db_tcp``""" + """An alias for ``Application.colocate_db_tcp``""" warnings.warn( ( "`colocate_db` has been deprecated and will be removed in a \n" @@ -253,10 +253,10 @@ def colocate_db_uds( db_identifier: str = "", **kwargs: t.Any, ) -> None: - """Colocate an Orchestrator instance with this Model over UDS. + """Colocate an Orchestrator instance with this Application over UDS. This method will initialize settings which add an unsharded - database to this Model instance. Only this Model will be able to communicate + database to this Application instance. Only this Application will be able to communicate with this colocated database by using Unix Domain sockets. Extra parameters for the db can be passed through kwargs. This includes @@ -280,7 +280,7 @@ def colocate_db_uds( :param db_cpus: number of cpus to use for orchestrator :param custom_pinning: CPUs to pin the orchestrator to. Passing an empty iterable disables pinning - :param debug: launch Model with extra debug information about the colocated db + :param debug: launch Application with extra debug information about the colocated db :param kwargs: additional keyword arguments to pass to the orchestrator database """ @@ -314,10 +314,10 @@ def colocate_db_tcp( db_identifier: str = "", **kwargs: t.Any, ) -> None: - """Colocate an Orchestrator instance with this Model over TCP/IP. + """Colocate an Orchestrator instance with this Application over TCP/IP. This method will initialize settings which add an unsharded - database to this Model instance. Only this Model will be able to communicate + database to this Application instance. Only this Application will be able to communicate with this colocated database by using the loopback TCP interface. Extra parameters for the db can be passed through kwargs. This includes @@ -341,7 +341,7 @@ def colocate_db_tcp( :param db_cpus: number of cpus to use for orchestrator :param custom_pinning: CPUs to pin the orchestrator to. Passing an empty iterable disables pinning - :param debug: launch Model with extra debug information about the colocated db + :param debug: launch Application with extra debug information about the colocated db :param kwargs: additional keyword arguments to pass to the orchestrator database """ @@ -376,7 +376,7 @@ def _set_colocated_db_settings( if hasattr(self.run_settings, "mpmd") and len(self.run_settings.mpmd) > 0: raise SSUnsupportedError( - "Models colocated with databases cannot be run as a mpmd workload" + "Applications colocated with databases cannot be run as a mpmd workload" ) if hasattr(self.run_settings, "_prep_colocated_db"): @@ -488,12 +488,12 @@ def params_to_args(self) -> None: for param in self.params_as_args: if not param in self.params: raise ValueError( - f"Tried to convert {param} to command line argument for Model " - f"{self.name}, but its value was not found in model params" + f"Tried to convert {param} to command line argument for Application " + f"{self.name}, but its value was not found in application params" ) if self.run_settings is None: raise ValueError( - "Tried to configure command line parameter for Model " + "Tried to configure command line parameter for Application " f"{self.name}, but no RunSettings are set." 
) self.add_exe_args(cat_arg_and_value(param, self.params[param])) @@ -569,7 +569,7 @@ def add_script( ) -> None: """TorchScript to launch with this Model instance - Each script added to the model will be loaded into an + Each script added to the application will be loaded into an orchestrator (converged or not) prior to the execution of this Model instance @@ -613,11 +613,11 @@ def add_function( devices_per_node: int = 1, first_device: int = 0, ) -> None: - """TorchScript function to launch with this Model instance + """TorchScript function to launch with this Application instance - Each script function to the model will be loaded into a + Each script function to the application will be loaded into a non-converged orchestrator prior to the execution - of this Model instance. + of this Application instance. For converged orchestrators, the :meth:`add_script` method should be used. @@ -625,7 +625,7 @@ def add_function( present, a number can be passed for specification e.g. "GPU:1". Setting ``devices_per_node=N``, with N greater than one will result - in the model being stored in the first N devices of type ``device``. + in the function being stored in the first N devices of type ``device``. :param name: key to store function under :param function: TorchScript function code @@ -650,7 +650,7 @@ def __hash__(self) -> int: return hash(self.name) def __eq__(self, other: object) -> bool: - if not isinstance(other, Model): + if not isinstance(other, Application): return False if self.name == other.name: @@ -673,7 +673,7 @@ def add_ml_model_object(self, db_model: DBModel) -> None: err_msg += ( f"Please store the ML model named {db_model.name} in binary format " ) - err_msg += "and add it to the SmartSim Model as file." + err_msg += "and add it to the SmartSim Application as file." raise SSUnsupportedError(err_msg) self._db_models.append(db_model) @@ -685,7 +685,7 @@ def add_script_object(self, db_script: DBScript) -> None: err_msg = ( "Functions can not be set from memory for colocated databases.\n" f"Please convert the function named {db_script.name} " "to a string or store it as a text file and add it to the " - "SmartSim Model with add_script." + "SmartSim Application with add_script." ) raise SSUnsupportedError(err_msg) self._db_scripts.append(db_script) @@ -696,7 +696,7 @@ def _check_db_objects_colo(self) -> None: err_msg = ( "ML model can not be set from memory for colocated databases.\n" f"Please store the ML model named {db_model.name} in binary " - "format and add it to the SmartSim Model as file." + "format and add it to the SmartSim Application as file." ) raise SSUnsupportedError(err_msg) @@ -707,7 +707,7 @@ def _check_db_objects_colo(self) -> None: "Functions can not be set from memory for colocated " "databases.\nPlease convert the function named " f"{db_script.name} to a string or store it as a text" - "file and add it to the SmartSim Model with add_script." + "file and add it to the SmartSim Application with add_script."
) raise SSUnsupportedError(err_msg) diff --git a/smartsim/entity/strategies.py b/smartsim/entity/strategies.py index 2af88b58e7..f6f57b329c 100644 --- a/smartsim/entity/strategies.py +++ b/smartsim/entity/strategies.py @@ -31,20 +31,24 @@ # create permutations of all parameters -# single model if parameters only have one value +# single application if parameters only have one value def create_all_permutations( - param_names: t.List[str], param_values: t.List[t.List[str]], _n_models: int = 0 + param_names: t.List[str], + param_values: t.List[t.List[str]], + _n_applications: int = 0, ) -> t.List[t.Dict[str, str]]: perms = list(product(*param_values)) all_permutations = [] for permutation in perms: - temp_model = dict(zip(param_names, permutation)) - all_permutations.append(temp_model) + temp_application = dict(zip(param_names, permutation)) + all_permutations.append(temp_application) return all_permutations def step_values( - param_names: t.List[str], param_values: t.List[t.List[str]], _n_models: int = 0 + param_names: t.List[str], + param_values: t.List[t.List[str]], + _n_applications: int = 0, ) -> t.List[t.Dict[str, str]]: permutations = [] for param_value in zip(*param_values): @@ -53,12 +57,12 @@ def step_values( def random_permutations( - param_names: t.List[str], param_values: t.List[t.List[str]], n_models: int = 0 + param_names: t.List[str], param_values: t.List[t.List[str]], n_applications: int = 0 ) -> t.List[t.Dict[str, str]]: permutations = create_all_permutations(param_names, param_values) - # sample from available permutations if n_models is specified - if n_models and n_models < len(permutations): - permutations = random.sample(permutations, n_models) + # sample from available permutations if n_applications is specified + if n_applications and n_applications < len(permutations): + permutations = random.sample(permutations, n_applications) return permutations diff --git a/smartsim/error/errors.py b/smartsim/error/errors.py index 333258a34b..9a5d0c92d1 100644 --- a/smartsim/error/errors.py +++ b/smartsim/error/errors.py @@ -44,7 +44,7 @@ class EntityExistsError(SmartSimError): class UserStrategyError(SmartSimError): - """Raised when there is an error with model creation inside an ensemble + """Raised when there is an error with application creation inside an ensemble that is from a user provided permutation strategy """ @@ -60,7 +60,7 @@ def create_message(perm_strat: str) -> str: class ParameterWriterError(SmartSimError): - """Raised in the event that input parameter files for a model + """Raised in the event that input parameter files for an application could not be written. """ diff --git a/smartsim/experiment.py b/smartsim/experiment.py index d8163cf711..01f4ed58e9 100644 --- a/smartsim/experiment.py +++ b/smartsim/experiment.py @@ -40,9 +40,9 @@ from ._core import Controller, Generator, Manifest, previewrenderer from .database import Orchestrator from .entity import ( + Application, Ensemble, EntitySequence, - Model, SmartSimEntity, TelemetryConfiguration, ) @@ -86,8 +86,8 @@ class Experiment: and manages their execution. The instances created by an Experiment represent executable code - that is either user-specified, like the ``Model`` instance created - by ``Experiment.create_model``, or pre-configured, like the ``Orchestrator`` + that is either user-specified, like the ``Application`` instance created - by ``Experiment.create_application``, or pre-configured, like the ``Orchestrator`` instance created by ``Experiment.create_database``.
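For orientation, the renamed workflow this docstring describes can be sketched in a few lines; the experiment name, executable, and arguments below are illustrative placeholders, and the call forms mirror the examples used elsewhere in this diff rather than a verified signature.

.. code-block:: python

    from smartsim import Experiment

    # Minimal sketch under the renamed API: create_application replaces
    # the old create_model entry point (all values here are placeholders).
    exp = Experiment("rename_demo", launcher="local")
    run_settings = exp.create_run_settings("echo", ["hello"])
    application = exp.create_application("hello_app", run_settings)
    exp.start(application, block=True)
    print(exp.get_status(application))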
Experiment methods that accept a variable list of arguments, such as @@ -192,7 +192,7 @@ def start( ) -> None: """Start passed instances using Experiment launcher - Any instance ``Model``, ``Ensemble`` or ``Orchestrator`` + Any ``Application``, ``Ensemble`` or ``Orchestrator`` instance created by the Experiment can be passed as an argument to the start method. @@ -201,8 +201,8 @@ exp = Experiment(name="my_exp", launcher="slurm") settings = exp.create_run_settings(exe="./path/to/binary") - model = exp.create_model("my_model", settings) - exp.start(model) + application = exp.create_application("my_application", settings) + exp.start(application) Multiple entity instances can also be passed to the start method at once no matter which type of instance they are. These will @@ -211,9 +211,9 @@ .. highlight:: python .. code-block:: python - exp.start(model_1, model_2, db, ensemble, block=True) + exp.start(application_1, application_2, db, ensemble, block=True) # alternatively - stage_1 = [model_1, model_2, db, ensemble] + stage_1 = [application_1, application_2, db, ensemble] exp.start(*stage_1, block=True) @@ -257,7 +257,7 @@ def stop( ) -> None: """Stop specific instances launched by this ``Experiment`` - Instances of ``Model``, ``Ensemble`` and ``Orchestrator`` + Instances of ``Application``, ``Ensemble`` and ``Orchestrator`` can all be passed as arguments to the stop method. Whichever launcher was specified at Experiment initialization @@ -270,9 +270,9 @@ .. highlight:: python .. code-block:: python - exp.stop(model) + exp.stop(application) # multiple - exp.stop(model_1, model_2, db, ensemble) + exp.stop(application_1, application_2, db, ensemble) :param args: One or more SmartSimEntity or EntitySequence objects. :raises TypeError: if wrong type """ stop_manifest = Manifest(*args) try: - for entity in stop_manifest.models: + for entity in stop_manifest.applications: self._control.stop_entity(entity) for entity_list in stop_manifest.ensembles: self._control.stop_entity_list(entity_list) @@ -304,12 +304,12 @@ def generate( ``Experiment.generate`` creates directories for each entity passed to organize Experiments that launch many entities. - If files or directories are attached to ``Model`` objects - using ``Model.attach_generator_files()``, those files or + If files or directories are attached to ``Application`` objects + using ``Application.attach_generator_files()``, those files or directories will be symlinked, copied, or configured and written into the created directory for that instance. - Instances of ``Model``, ``Ensemble`` and ``Orchestrator`` + Instances of ``Application``, ``Ensemble`` and ``Orchestrator`` can all be passed as arguments to the generate method. :param tag: tag used in `to_configure` generator files @@ -369,7 +369,7 @@ def poll( def finished(self, entity: SmartSimEntity) -> bool: """Query if a job has completed. - An instance of ``Model`` or ``Ensemble`` can be passed + An instance of ``Application`` or ``Ensemble`` can be passed as an argument. Passing ``Orchestrator`` will return an error as a @@ -399,7 +399,7 @@ def get_status( .. highlight:: python .. code-block:: python - exp.get_status(model) + exp.get_status(application) As with an Experiment method, multiple instance of varying types can be passed to and all statuses will @@ -408,7 +408,7 @@ def get_status( .. highlight:: python ..
code-block:: python - statuses = exp.get_status(model, ensemble, orchestrator) + statuses = exp.get_status(application, ensemble, orchestrator) complete = [s == smartsim.status.STATUS_COMPLETED for s in statuses] assert all(complete) @@ -418,7 +418,7 @@ def get_status( try: manifest = Manifest(*args) statuses: t.List[SmartSimStatus] = [] - for entity in manifest.models: + for entity in manifest.applications: statuses.append(self._control.get_entity_status(entity)) for entity_list in manifest.all_entity_lists: statuses.extend(self._control.get_entity_list_status(entity_list)) @@ -441,7 +441,7 @@ def create_ensemble( path: t.Optional[str] = None, **kwargs: t.Any, ) -> Ensemble: - """Create an ``Ensemble`` of ``Model`` instances + """Create an ``Ensemble`` of ``Application`` instances Ensembles can be launched sequentially or as a batch if using a non-local launcher. e.g. slurm @@ -456,8 +456,8 @@ def create_ensemble( - ``batch_settings``, ``run_settings``, and ``replicas`` If given solely batch settings, an empty ensemble - will be created that Models can be added to manually - through ``Ensemble.add_model()``. + will be created that applications can be added to manually + through ``Ensemble.add_application()``. The entire Ensemble will launch as one batch. Provided batch and run settings, either ``params`` @@ -472,14 +472,14 @@ def create_ensemble( parameters to the permutation strategy. :param name: name of the ``Ensemble`` - :param params: parameters to expand into ``Model`` members + :param params: parameters to expand into ``Application`` members :param exe: executable to run :param exe_args: executable arguments :param batch_settings: describes settings for ``Ensemble`` as batch workload - :param run_settings: describes how each ``Model`` should be executed + :param run_settings: describes how each ``Application`` should be executed :param replicas: number of replicas to create :param perm_strategy: strategy for expanding ``params`` into - ``Model`` instances from params argument + ``Application`` instances from params argument options are "all_perm", "step", "random" or a callable function. :raises SmartSimError: if initialization fails @@ -509,7 +509,7 @@ def create_ensemble( raise @_contextualize - def create_model( + def create_application( self, name: str, exe: str, @@ -519,85 +519,85 @@ def create_model( path: t.Optional[str] = None, enable_key_prefixing: bool = False, batch_settings: t.Optional[base.BatchSettings] = None, - ) -> Model: - """Create a general purpose ``Model`` + ) -> Application: + """Create a general purpose ``Application`` - The ``Model`` class is the most general encapsulation of - executable code in SmartSim. ``Model`` instances are named + The ``Application`` class is the most general encapsulation of + executable code in SmartSim. ``Application`` instances are named references to pieces of a workflow that can be parameterized, and executed. - ``Model`` instances can be launched sequentially, as a batch job, + ``Application`` instances can be launched sequentially, as a batch job, or as a group by adding them into an ``Ensemble``. - All ``Models`` require a reference to run settings to specify which + All ``Applications`` require a reference to run settings to specify which executable to launch as well provide options for how to launch the executable with the underlying WLM. Furthermore, batch a - reference to a batch settings can be added to launch the ``Model`` - as a batch job through ``Experiment.start``. 
If a ``Model`` with + reference to a batch settings can be added to launch the ``Application`` + as a batch job through ``Experiment.start``. If an ``Application`` with a reference to a set of batch settings is added to a larger entity with its own set of batch settings (for e.g. an ``Ensemble``) the batch settings of the larger entity will take - precedence and the batch setting of the ``Model`` will be + precedence and the batch setting of the ``Application`` will be strategically ignored. Parameters supplied in the `params` argument can be written into - configuration files supplied at runtime to the ``Model`` through - ``Model.attach_generator_files``. `params` can also be turned - into executable arguments by calling ``Model.params_to_args`` + configuration files supplied at runtime to the ``Application`` through + ``Application.attach_generator_files``. `params` can also be turned + into executable arguments by calling ``Application.params_to_args`` - By default, ``Model`` instances will be executed in the - exp_path/model_name directory if no `path` argument is supplied. - If a ``Model`` instance is passed to ``Experiment.generate``, + By default, ``Application`` instances will be executed in the + exp_path/application_name directory if no `path` argument is supplied. + If an ``Application`` instance is passed to ``Experiment.generate``, a directory within the ``Experiment`` directory will be created - to house the input and output files from the ``Model``. + to house the input and output files from the ``Application``. - Example initialization of a ``Model`` instance + Example initialization of an ``Application`` instance .. highlight:: python .. code-block:: python from smartsim import Experiment run_settings = exp.create_run_settings("python", "run_pytorch_model.py") - model = exp.create_model("pytorch_model", run_settings) + application = exp.create_application("pytorch_model", run_settings) - # adding parameters to a model + # adding parameters to an application run_settings = exp.create_run_settings("python", "run_pytorch_model.py") train_params = { "batch": 32, "epoch": 10, "lr": 0.001 } - model = exp.create_model("pytorch_model", run_settings, params=train_params) - model.attach_generator_files(to_configure="./train.cfg") - exp.generate(model) + application = exp.create_application("pytorch_model", run_settings, params=train_params) + application.attach_generator_files(to_configure="./train.cfg") + exp.generate(application) - New in 0.4.0, ``Model`` instances can be colocated with an - Orchestrator database shard through ``Model.colocate_db``. This + New in 0.4.0, ``Application`` instances can be colocated with an + Orchestrator database shard through ``Application.colocate_db``. This will launch a single ``Orchestrator`` instance on each compute host used by the (possibly distributed) application. This is useful for performant online inference or processing at runtime. - New in 0.4.2, ``Model`` instances can now be colocated with + New in 0.4.2, ``Application`` instances can now be colocated with an Orchestrator database over either TCP or UDS using the - ``Model.colocate_db_tcp`` or ``Model.colocate_db_uds`` method - respectively. The original ``Model.colocate_db`` method is now - deprecated, but remains as an alias for ``Model.colocate_db_tcp`` + ``Application.colocate_db_tcp`` or ``Application.colocate_db_uds`` method + respectively.
The original ``Application.colocate_db`` method is now + deprecated, but remains as an alias for ``Application.colocate_db_tcp`` for backward compatibility. - :param name: name of the ``Model`` + :param name: name of the ``Application`` :param exe: executable to run :param exe_args: executable arguments - :param run_settings: defines how ``Model`` should be run - :param params: ``Model`` parameters for writing into configuration files - :param path: path to where the ``Model`` should be executed at runtime + :param run_settings: defines how ``Application`` should be run + :param params: ``Application`` parameters for writing into configuration files + :param path: path to where the ``Application`` should be executed at runtime :param enable_key_prefixing: If True, data sent to the ``Orchestrator`` - using SmartRedis from this ``Model`` will - be prefixed with the ``Model`` name. - :param batch_settings: Settings to run ``Model`` individually as a batch job. + using SmartRedis from this ``Application`` will + be prefixed with the ``Application`` name. + :param batch_settings: Settings to run ``Application`` individually as a batch job. :raises SmartSimError: if initialization fails - :return: the created ``Model`` + :return: the created ``Application`` """ if name is None: raise AttributeError("Entity has no name. Please set name attribute.") @@ -607,7 +607,7 @@ def create_model( params = {} try: - new_model = Model( + new_application = Application( name=name, exe=exe, exe_args=exe_args, @@ -617,8 +617,8 @@ def create_model( batch_settings=batch_settings, ) if enable_key_prefixing: - new_model.enable_key_prefixing() - return new_model + new_application.enable_key_prefixing() + return new_application except SmartSimError as e: logger.error(e) raise @@ -758,12 +758,12 @@ def create_database( When launched, ``Orchestrator`` can be used to communicate data between Fortran, Python, C, and C++ applications. - Machine Learning models in Pytorch, Tensorflow, and ONNX (i.e. scikit-learn) + Machine Learning models in PyTorch, TensorFlow, and ONNX (i.e. scikit-learn) can also be stored within the ``Orchestrator`` database where they can be called remotely and executed on CPU or GPU where the database is hosted. - To enable a SmartSim ``Model`` to communicate with the database + To enable a SmartSim ``Application`` to communicate with the database the workload must utilize the SmartRedis clients.
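The colocation path referenced above reduces to a short, hedged sketch; the port, CPU count, and interface are placeholder values, and the assertion relies on the ``colocated`` property exercised in the tests further below.

.. code-block:: python

    from smartsim import Experiment

    # Illustrative sketch: colocate an unsharded database with a single
    # Application over loopback TCP (port/interface values are placeholders).
    exp = Experiment("colo_demo", launcher="local")
    settings = exp.create_run_settings("python", "sim.py")
    colo_application = exp.create_application("colo_app", settings)
    colo_application.colocate_db_tcp(port=6780, db_cpus=1, ifname="lo")
    assert colo_application.colocated  # only this Application can reach the db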
For more information on the database, and SmartRedis clients see the documentation at https://www.craylabs.org/docs/smartredis.html @@ -935,8 +935,8 @@ def _launch_summary(self, manifest: Manifest) -> None: summary += f"Experiment: {self.name}\n" summary += f"Experiment Path: {self.exp_path}\n" summary += f"Launcher: {self._launcher}\n" - if manifest.models: - summary += f"Models: {len(manifest.models)}\n" + if manifest.applications: + summary += f"Applications: {len(manifest.applications)}\n" if self._control.orchestrator_active: summary += "Database Status: active\n" @@ -950,12 +950,14 @@ def _launch_summary(self, manifest: Manifest) -> None: logger.info(summary) def _create_entity_dir(self, start_manifest: Manifest) -> None: - def create_entity_dir(entity: t.Union[Orchestrator, Model, Ensemble]) -> None: + def create_entity_dir( + entity: t.Union[Orchestrator, Application, Ensemble] + ) -> None: if not os.path.isdir(entity.path): os.makedirs(entity.path) - for model in start_manifest.models: - create_entity_dir(model) + for application in start_manifest.applications: + create_entity_dir(application) for orch in start_manifest.dbs: create_entity_dir(orch) @@ -963,7 +965,7 @@ def create_entity_dir(entity: t.Union[Orchestrator, Model, Ensemble]) -> None: for ensemble in start_manifest.ensembles: create_entity_dir(ensemble) - for member in ensemble.models: + for member in ensemble.applications: create_entity_dir(member) def __str__(self) -> str: diff --git a/smartsim/settings/alpsSettings.py b/smartsim/settings/alpsSettings.py index 74c6e1a6ce..9e22a42b4f 100644 --- a/smartsim/settings/alpsSettings.py +++ b/smartsim/settings/alpsSettings.py @@ -64,7 +64,7 @@ def make_mpmd(self, settings: RunSettings) -> None: """ if self.colocated_db_settings: raise SSUnsupportedError( - "Colocated models cannot be run as a mpmd workload" + "Colocated applications cannot be run as a mpmd workload" ) if self.container: raise SSUnsupportedError( diff --git a/smartsim/settings/base.py b/smartsim/settings/base.py index 417ba6c609..ed87e223a0 100644 --- a/smartsim/settings/base.py +++ b/smartsim/settings/base.py @@ -53,7 +53,7 @@ def __init__( container: t.Optional[Container] = None, **_kwargs: t.Any, ) -> None: - """Run parameters for a ``Model`` + """Run parameters for an ``Application`` The base ``RunSettings`` class should only be used with the `local` launcher on single node, workstations, or laptops.
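The MPMD guard repeated across the settings classes here can be exercised end to end with a sketch like the following; the launcher and all values are illustrative, and it assumes colocation marks ``colocated_db_settings`` on the run settings, as the guards above imply.

.. code-block:: python

    from copy import deepcopy

    from smartsim import Experiment
    from smartsim.error import SSUnsupportedError

    # Hypothetical repro of the guard: once a database is colocated,
    # make_mpmd refuses to combine settings into an MPMD workload.
    exp = Experiment("mpmd_demo", launcher="slurm")
    settings = exp.create_run_settings("python", "sim.py")
    application = exp.create_application("app", settings)
    application.colocate_db_tcp(port=6780, db_cpus=1, ifname="lo")
    try:
        settings.make_mpmd(deepcopy(settings))
    except SSUnsupportedError as err:
        print(err)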
diff --git a/smartsim/settings/lsfSettings.py b/smartsim/settings/lsfSettings.py index 7e49fe8d87..c9a93c40f8 100644 --- a/smartsim/settings/lsfSettings.py +++ b/smartsim/settings/lsfSettings.py @@ -197,7 +197,7 @@ def make_mpmd(self, settings: RunSettings) -> None: """ if self.colocated_db_settings: raise SSUnsupportedError( - "Colocated models cannot be run as a mpmd workload" + "Colocated applications cannot be run as a mpmd workload" ) self.mpmd.append(settings) diff --git a/smartsim/settings/mpiSettings.py b/smartsim/settings/mpiSettings.py index 76180071f2..fd7909ec6e 100644 --- a/smartsim/settings/mpiSettings.py +++ b/smartsim/settings/mpiSettings.py @@ -87,13 +87,13 @@ def make_mpmd(self, settings: RunSettings) -> None: """Make a mpmd workload by combining two ``mpirun`` commands This connects the two settings to be executed with a single - Model instance + Application instance :param settings: MpirunSettings instance """ if self.colocated_db_settings: raise SSUnsupportedError( - "Colocated models cannot be run as a mpmd workload" + "Colocated applications cannot be run as a mpmd workload" ) self.mpmd.append(settings) diff --git a/smartsim/settings/slurmSettings.py b/smartsim/settings/slurmSettings.py index 83833897ca..983f5329f4 100644 --- a/smartsim/settings/slurmSettings.py +++ b/smartsim/settings/slurmSettings.py @@ -80,13 +80,13 @@ def make_mpmd(self, settings: RunSettings) -> None: """Make a mpmd workload by combining two ``srun`` commands This connects the two settings to be executed with a single - Model instance + Application instance :param settings: SrunSettings instance """ if self.colocated_db_settings: raise SSUnsupportedError( - "Colocated models cannot be run as a mpmd workload" + "Colocated applications cannot be run as a mpmd workload" ) if self.container: raise SSUnsupportedError( diff --git a/tests/_legacy/backends/test_dataloader.py b/tests/_legacy/backends/test_dataloader.py index de4bf6d8e3..95016f4aaa 100644 --- a/tests/_legacy/backends/test_dataloader.py +++ b/tests/_legacy/backends/test_dataloader.py @@ -218,7 +218,7 @@ def create_trainer_torch(experiment: Experiment, filedir, wlmutils): args=["training_service_torch.py"], ) - trainer = experiment.create_model("trainer", run_settings=run_settings) + trainer = experiment.create_application("trainer", run_settings=run_settings) trainer.attach_generator_files( to_copy=[osp.join(filedir, "training_service_torch.py")] diff --git a/tests/_legacy/backends/test_dbmodel.py b/tests/_legacy/backends/test_dbmodel.py index 6155b6884c..9d12126ab4 100644 --- a/tests/_legacy/backends/test_dbmodel.py +++ b/tests/_legacy/backends/test_dbmodel.py @@ -165,7 +165,7 @@ def test_tf_db_model( run_settings.set_tasks(1) # Create Model - smartsim_model = wlm_experiment.create_model("smartsim_model", run_settings) + smartsim_model = wlm_experiment.create_application("smartsim_model", run_settings) # Create database db = prepare_db(single_db).orchestrator @@ -236,7 +236,7 @@ def test_pt_db_model( run_settings.set_tasks(1) # Create Model - smartsim_model = wlm_experiment.create_model("smartsim_model", run_settings) + smartsim_model = wlm_experiment.create_application("smartsim_model", run_settings) # Create database db = prepare_db(single_db).orchestrator @@ -299,7 +299,7 @@ def test_db_model_ensemble( ) # Create Model - smartsim_model = wlm_experiment.create_model("smartsim_model", run_settings) + smartsim_model = wlm_experiment.create_application("smartsim_model", run_settings) # Create database db =
prepare_db(single_db).orchestrator @@ -336,7 +336,7 @@ def test_db_model_ensemble( ) # Add new ensemble member - smartsim_ensemble.add_model(smartsim_model) + smartsim_ensemble.add_application(smartsim_model) # Add the second ML model to the newly added entity. This is # because the test script runs both ML models for all entities. @@ -391,7 +391,7 @@ def test_colocated_db_model_tf(fileutils, test_dir, wlmutils, mlutils): colo_settings.set_tasks(1) # Create colocated Model - colo_model = exp.create_model("colocated_model", colo_settings) + colo_model = exp.create_application("colocated_model", colo_settings) colo_model.colocate_db_tcp( port=test_port, db_cpus=1, debug=True, ifname=test_interface ) @@ -463,7 +463,7 @@ def test_colocated_db_model_pytorch(fileutils, test_dir, wlmutils, mlutils): colo_settings.set_tasks(1) # Create colocated SmartSim Model - colo_model = exp.create_model("colocated_model", colo_settings) + colo_model = exp.create_application("colocated_model", colo_settings) colo_model.colocate_db_tcp( port=test_port, db_cpus=1, debug=True, ifname=test_interface ) @@ -530,7 +530,7 @@ def test_colocated_db_model_ensemble(fileutils, test_dir, wlmutils, mlutils): ) # Create a third model with a colocated database - colo_model = exp.create_model("colocated_model", colo_settings) + colo_model = exp.create_application("colocated_model", colo_settings) colo_model.colocate_db_tcp( port=test_port, db_cpus=1, debug=True, ifname=test_interface ) @@ -572,7 +572,7 @@ def test_colocated_db_model_ensemble(fileutils, test_dir, wlmutils, mlutils): ) # Add a new model to the ensemble - colo_ensemble.add_model(colo_model) + colo_ensemble.add_application(colo_model) # Add the ML model to SmartSim Model just added to the ensemble colo_model.add_ml_model( @@ -631,7 +631,7 @@ def test_colocated_db_model_ensemble_reordered(fileutils, test_dir, wlmutils, ml ) # Create colocated SmartSim Model - colo_model = exp.create_model("colocated_model", colo_settings) + colo_model = exp.create_application("colocated_model", colo_settings) # Create and save ML model to filesystem model_file, inputs, outputs = save_tf_cnn(test_dir, "model1.pb") @@ -669,7 +669,7 @@ def test_colocated_db_model_ensemble_reordered(fileutils, test_dir, wlmutils, ml entity.disable_key_prefixing() # Add another ensemble member - colo_ensemble.add_model(colo_model) + colo_ensemble.add_application(colo_model) # Colocate a database with the new ensemble member colo_model.colocate_db_tcp( @@ -728,7 +728,7 @@ def test_colocated_db_model_errors(fileutils, test_dir, wlmutils, mlutils): colo_settings.set_tasks(1) # Create colocated SmartSim Model - colo_model = exp.create_model("colocated_model", colo_settings) + colo_model = exp.create_application("colocated_model", colo_settings) colo_model.colocate_db_tcp( port=test_port, db_cpus=1, debug=True, ifname=test_interface ) @@ -805,7 +805,7 @@ def test_colocated_db_model_errors(fileutils, test_dir, wlmutils, mlutils): ) with pytest.raises(SSUnsupportedError): - colo_ensemble.add_model(colo_model) + colo_ensemble.add_application(colo_model) @pytest.mark.skipif(not should_run_tf, reason="Test needs TensorFlow to run") @@ -862,7 +862,7 @@ def test_db_model_ensemble_duplicate(fileutils, test_dir, wlmutils, mlutils): ) # Create Model - smartsim_model = exp.create_model("smartsim_model", run_settings) + smartsim_model = exp.create_application("smartsim_model", run_settings) # Create and save ML model to filesystem model, inputs, outputs = create_tf_cnn() @@ -906,7 +906,7 @@ def 
test_db_model_ensemble_duplicate(fileutils, test_dir, wlmutils, mlutils): outputs=outputs2, ) - # Attempt to add a duplicate ML model to Ensemble via Ensemble.add_model() + # Attempt to add a duplicate ML model to Ensemble via Ensemble.add_application() with pytest.raises(SSUnsupportedError) as ex: - smartsim_ensemble.add_model(smartsim_model) + smartsim_ensemble.add_application(smartsim_model) assert ex.value.args[0] == 'An ML Model with name "cnn" already exists' diff --git a/tests/_legacy/backends/test_dbscript.py b/tests/_legacy/backends/test_dbscript.py index 2c04bf5db0..66b71baca2 100644 --- a/tests/_legacy/backends/test_dbscript.py +++ b/tests/_legacy/backends/test_dbscript.py @@ -73,19 +73,21 @@ def test_db_script(wlm_experiment, prepare_db, single_db, fileutils, mlutils): run_settings.set_nodes(1) run_settings.set_tasks(1) - # Create the SmartSim Model - smartsim_model = wlm_experiment.create_model("smartsim_model", run_settings) + # Create the SmartSim Application + smartsim_application = wlm_experiment.create_application( + "smartsim_application", run_settings + ) # Create the SmartSim database db = prepare_db(single_db).orchestrator wlm_experiment.reconnect_orchestrator(db.checkpoint_file) - wlm_experiment.generate(smartsim_model) + wlm_experiment.generate(smartsim_application) # Define the torch script string torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n" # Add the script via file - smartsim_model.add_script( + smartsim_application.add_script( "test_script1", script_path=torch_script, device=test_device, @@ -94,7 +96,7 @@ def test_db_script(wlm_experiment, prepare_db, single_db, fileutils, mlutils): ) # Add script via string - smartsim_model.add_script( + smartsim_application.add_script( "test_script2", script=torch_script_str, device=test_device, @@ -103,7 +105,7 @@ def test_db_script(wlm_experiment, prepare_db, single_db, fileutils, mlutils): ) # Add script function - smartsim_model.add_function( + smartsim_application.add_function( "test_func", function=timestwo, device=test_device, @@ -112,11 +114,11 @@ def test_db_script(wlm_experiment, prepare_db, single_db, fileutils, mlutils): ) # Assert we have all three scripts - assert len(smartsim_model._db_scripts) == 3 + assert len(smartsim_application._db_scripts) == 3 # Launch and check successful completion - wlm_experiment.start(smartsim_model, block=True) - statuses = wlm_experiment.get_status(smartsim_model) + wlm_experiment.start(smartsim_application, block=True) + statuses = wlm_experiment.get_status(smartsim_application) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) @@ -144,13 +146,15 @@ def test_db_script_ensemble(wlm_experiment, prepare_db, single_db, fileutils, ml db = prepare_db(single_db).orchestrator wlm_experiment.reconnect_orchestrator(db.checkpoint_file) - # Create Ensemble with two identical models + # Create Ensemble with two identical applications ensemble = wlm_experiment.create_ensemble( "dbscript_ensemble", run_settings=run_settings, replicas=2 ) - # Create SmartSim model - smartsim_model = wlm_experiment.create_model("smartsim_model", run_settings) + # Create SmartSim application + smartsim_application = wlm_experiment.create_application( + "smartsim_application", run_settings + ) # Create the script string torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n" @@ -185,8 +189,8 @@ def test_db_script_ensemble(wlm_experiment, prepare_db, single_db, fileutils, ml ) # Add an additional ensemble member and attach a script to the new member - 
ensemble.add_model(smartsim_model) - smartsim_model.add_script( + ensemble.add_application(smartsim_application) + smartsim_application.add_script( "test_script2", script=torch_script_str, device=test_device, @@ -232,9 +236,9 @@ def test_colocated_db_script(fileutils, test_dir, wlmutils, mlutils): colo_settings.set_nodes(1) colo_settings.set_tasks(1) - # Create model with colocated database - colo_model = exp.create_model("colocated_model", colo_settings) - colo_model.colocate_db_tcp( + # Create application with colocated database + colo_application = exp.create_application("colocated_application", colo_settings) + colo_application.colocate_db_tcp( port=test_port, db_cpus=1, debug=True, ifname=test_interface ) @@ -242,7 +246,7 @@ def test_colocated_db_script(fileutils, test_dir, wlmutils, mlutils): torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n" # Add script via file - colo_model.add_script( + colo_application.add_script( "test_script1", script_path=torch_script, device=test_device, @@ -250,7 +254,7 @@ def test_colocated_db_script(fileutils, test_dir, wlmutils, mlutils): first_device=0, ) # Add script via string - colo_model.add_script( + colo_application.add_script( "test_script2", script=torch_script_str, device=test_device, @@ -259,19 +263,19 @@ def test_colocated_db_script(fileutils, test_dir, wlmutils, mlutils): ) # Assert we have added both models - assert len(colo_model._db_scripts) == 2 + assert len(colo_application._db_scripts) == 2 - exp.generate(colo_model) + exp.generate(colo_application) - for db_script in colo_model._db_scripts: + for db_script in colo_application._db_scripts: logger.debug(db_script) try: - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) finally: - exp.stop(colo_model) + exp.stop(colo_application) @pytest.mark.skipif(not should_run, reason="Test needs Torch to run") @@ -301,13 +305,13 @@ def test_colocated_db_script_ensemble(fileutils, test_dir, wlmutils, mlutils): colo_settings.set_nodes(1) colo_settings.set_tasks(1) - # Create SmartSim Ensemble with two identical models + # Create SmartSim Ensemble with two identical applications colo_ensemble = exp.create_ensemble( "colocated_ensemble", run_settings=colo_settings, replicas=2 ) - # Create a SmartSim model - colo_model = exp.create_model("colocated_model", colo_settings) + # Create a SmartSim application + colo_application = exp.create_application("colocated_application", colo_settings) # Colocate a db with each ensemble entity and add a script # to each entity via file @@ -328,15 +332,15 @@ def test_colocated_db_script_ensemble(fileutils, test_dir, wlmutils, mlutils): first_device=0, ) - # Colocate a db with the non-ensemble Model - colo_model.colocate_db_tcp( + # Colocate a db with the non-ensemble Application + colo_application.colocate_db_tcp( port=test_port + len(colo_ensemble), db_cpus=1, debug=True, ifname=test_interface, ) - # Add a script to the non-ensemble model + # Add a script to the non-ensemble application torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n" colo_ensemble.add_script( "test_script2", @@ -346,11 +350,11 @@ def test_colocated_db_script_ensemble(fileutils, test_dir, wlmutils, mlutils): first_device=0, ) - # Add the third SmartSim model to the ensemble - colo_ensemble.add_model(colo_model) + # Add the third SmartSim application to the ensemble + 
colo_ensemble.add_application(colo_application) # Add another script via file to the entire ensemble - colo_model.add_script( + colo_application.add_script( "test_script1", script_path=torch_script, device=test_device, @@ -358,9 +362,9 @@ def test_colocated_db_script_ensemble(fileutils, test_dir, wlmutils, mlutils): first_device=0, ) - # Assert we have added one model to the ensemble + # Assert we have added one script to the ensemble assert len(colo_ensemble._db_scripts) == 1 - # Assert we have added both models to each entity + # Assert we have added both scripts to each entity assert all([len(entity._db_scripts) == 2 for entity in colo_ensemble]) exp.generate(colo_ensemble) @@ -400,13 +404,13 @@ def test_colocated_db_script_ensemble_reordered(fileutils, test_dir, wlmutils, m colo_settings.set_nodes(1) colo_settings.set_tasks(1) - # Create Ensemble with two identical SmartSim Model + # Create Ensemble with two identical SmartSim Application colo_ensemble = exp.create_ensemble( "colocated_ensemble", run_settings=colo_settings, replicas=2 ) - # Create an additional SmartSim Model entity - colo_model = exp.create_model("colocated_model", colo_settings) + # Create an additional SmartSim Application entity + colo_application = exp.create_application("colocated_application", colo_settings) # Add a script via string to the ensemble members torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n" @@ -437,18 +441,18 @@ def test_colocated_db_script_ensemble_reordered(fileutils, test_dir, wlmutils, m first_device=0, ) - # Add a colocated database to the non-ensemble SmartSim Model - colo_model.colocate_db_tcp( + # Add a colocated database to the non-ensemble SmartSim Application + colo_application.colocate_db_tcp( port=test_port + len(colo_ensemble), db_cpus=1, debug=True, ifname=test_interface, ) - # Add the non-ensemble SmartSim Model to the Ensemble + # Add the non-ensemble SmartSim Application to the Ensemble # and then add a script via file - colo_ensemble.add_model(colo_model) - colo_model.add_script( + colo_ensemble.add_application(colo_application) + colo_application.add_script( "test_script1", script_path=torch_script, device=test_device, @@ -456,9 +460,9 @@ def test_colocated_db_script_ensemble_reordered(fileutils, test_dir, wlmutils, m first_device=0, ) - # Assert we have added one model to the ensemble + # Assert we have added one script to the ensemble assert len(colo_ensemble._db_scripts) == 1 - # Assert we have added both models to each entity + # Assert we have added both scripts to each entity assert all([len(entity._db_scripts) == 2 for entity in colo_ensemble]) exp.generate(colo_ensemble) @@ -496,9 +500,9 @@ def test_db_script_errors(fileutils, test_dir, wlmutils, mlutils): colo_settings.set_nodes(1) colo_settings.set_tasks(1) - # Create a SmartSim model with a colocated database - colo_model = exp.create_model("colocated_model", colo_settings) - colo_model.colocate_db_tcp( + # Create a SmartSim application with a colocated database + colo_application = exp.create_application("colocated_application", colo_settings) + colo_application.colocate_db_tcp( port=test_port, db_cpus=1, debug=True, @@ -508,7 +512,7 @@ def test_db_script_errors(fileutils, test_dir, wlmutils, mlutils): # Check that an error is raised for adding in-memory # function when using colocated deployment with pytest.raises(SSUnsupportedError): - colo_model.add_function( + colo_application.add_function( "test_func", function=timestwo, device=test_device, @@ -516,7 +520,7 @@ def
test_db_script_errors(fileutils, test_dir, wlmutils, mlutils): first_device=0, ) - # Create ensemble with two identical SmartSim Model entities + # Create ensemble with two identical SmartSim Application entities colo_settings = exp.create_run_settings(exe=sys.executable, exe_args=test_script) colo_ensemble = exp.create_ensemble( "colocated_ensemble", run_settings=colo_settings, replicas=2 @@ -542,7 +546,7 @@ def test_db_script_errors(fileutils, test_dir, wlmutils, mlutils): first_device=0, ) - # Create an ensemble with two identical SmartSim Model entities + # Create an ensemble with two identical SmartSim Application entities colo_settings = exp.create_run_settings(exe=sys.executable, exe_args=test_script) colo_ensemble = exp.create_ensemble( "colocated_ensemble", run_settings=colo_settings, replicas=2 @@ -573,7 +577,7 @@ def test_db_script_errors(fileutils, test_dir, wlmutils, mlutils): # a colocated database to an Ensemble that has # an in-memory script with pytest.raises(SSUnsupportedError): - colo_ensemble.add_model(colo_model) + colo_ensemble.add_application(colo_application) def test_inconsistent_params_db_script(fileutils): @@ -631,15 +635,17 @@ def test_db_script_ensemble_duplicate(fileutils, test_dir, wlmutils, mlutils): run_settings.set_nodes(1) run_settings.set_tasks(1) - # Create Ensemble with two identical models + # Create Ensemble with two identical applications ensemble = exp.create_ensemble( "dbscript_ensemble", run_settings=run_settings, replicas=2 ) - # Create SmartSim model - smartsim_model = exp.create_model("smartsim_model", run_settings) - # Create 2nd SmartSim model - smartsim_model_2 = exp.create_model("smartsim_model_2", run_settings) + # Create SmartSim application + smartsim_application = exp.create_application("smartsim_application", run_settings) + # Create 2nd SmartSim application + smartsim_application_2 = exp.create_application( + "smartsim_application_2", run_settings + ) # Create the script string torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n" @@ -683,8 +689,8 @@ def test_db_script_ensemble_duplicate(fileutils, test_dir, wlmutils, mlutils): ) assert ex.value.args[0] == 'A Script with name "test_func" already exists' - # Add a script with a non-unique name to a SmartSim Model - smartsim_model.add_script( + # Add a script with a non-unique name to a SmartSim application + smartsim_application.add_script( "test_script1", script_path=torch_script, device=test_device, @@ -693,11 +699,11 @@ def test_db_script_ensemble_duplicate(fileutils, test_dir, wlmutils, mlutils): ) with pytest.raises(SSUnsupportedError) as ex: - ensemble.add_model(smartsim_model) + ensemble.add_application(smartsim_application) assert ex.value.args[0] == 'A Script with name "test_script1" already exists' - # Add a function with a non-unique name to a SmartSim Model - smartsim_model_2.add_function( + # Add a function with a non-unique name to a SmartSim Application + smartsim_application_2.add_function( "test_func", function=timestwo, device=test_device, @@ -706,5 +712,5 @@ def test_db_script_ensemble_duplicate(fileutils, test_dir, wlmutils, mlutils): ) with pytest.raises(SSUnsupportedError) as ex: - ensemble.add_model(smartsim_model_2) + ensemble.add_application(smartsim_application_2) assert ex.value.args[0] == 'A Script with name "test_func" already exists' diff --git a/tests/_legacy/backends/test_onnx.py b/tests/_legacy/backends/test_onnx.py index 29771bb1ca..dd666e567d 100644 --- a/tests/_legacy/backends/test_onnx.py +++ b/tests/_legacy/backends/test_onnx.py @@ -83,7 
+83,7 @@ def test_sklearn_onnx(wlm_experiment, prepare_db, single_db, mlutils, wlmutils): ) if wlmutils.get_test_launcher() != "local": run_settings.set_tasks(1) - model = wlm_experiment.create_model("onnx_models", run_settings) + model = wlm_experiment.create_application("onnx_models", run_settings) script_dir = os.path.dirname(os.path.abspath(__file__)) script_path = Path(script_dir, "run_sklearn_onnx.py").resolve() diff --git a/tests/_legacy/backends/test_tf.py b/tests/_legacy/backends/test_tf.py index adf0e9daaf..3d94f20085 100644 --- a/tests/_legacy/backends/test_tf.py +++ b/tests/_legacy/backends/test_tf.py @@ -70,7 +70,7 @@ def test_keras_model(wlm_experiment, prepare_db, single_db, mlutils, wlmutils): if wlmutils.get_test_launcher() != "local": run_settings.set_tasks(1) - model = wlm_experiment.create_model("tf_script", run_settings) + model = wlm_experiment.create_application("tf_script", run_settings) script_dir = os.path.dirname(os.path.abspath(__file__)) script_path = Path(script_dir, "run_tf.py").resolve() diff --git a/tests/_legacy/backends/test_torch.py b/tests/_legacy/backends/test_torch.py index c995f76ca4..8008fa7199 100644 --- a/tests/_legacy/backends/test_torch.py +++ b/tests/_legacy/backends/test_torch.py @@ -71,7 +71,7 @@ def test_torch_model_and_script( ) if wlmutils.get_test_launcher() != "local": run_settings.set_tasks(1) - model = wlm_experiment.create_model("torch_script", run_settings) + model = wlm_experiment.create_application("torch_script", run_settings) script_dir = os.path.dirname(os.path.abspath(__file__)) script_path = Path(script_dir, "run_torch.py").resolve() diff --git a/tests/_legacy/full_wlm/test_generic_batch_launch.py b/tests/_legacy/full_wlm/test_generic_batch_launch.py index 4681f5d1ab..dcada256a4 100644 --- a/tests/_legacy/full_wlm/test_generic_batch_launch.py +++ b/tests/_legacy/full_wlm/test_generic_batch_launch.py @@ -51,10 +51,10 @@ def add_batch_resources(wlmutils, batch_settings): batch_settings.set_resource(key, value) -def test_batch_model(fileutils, test_dir, wlmutils): - """Test the launch of a manually construced batch model""" +def test_batch_application(fileutils, test_dir, wlmutils): + """Test the launch of a manually constructed batch application""" - exp_name = "test-batch-model" + exp_name = "test-batch-application" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") @@ -63,13 +63,16 @@ def test_batch_model(fileutils, test_dir, wlmutils): batch_settings.set_account(wlmutils.get_test_account()) add_batch_resources(wlmutils, batch_settings) run_settings = wlmutils.get_run_settings("python", f"{script} --time=5") - model = exp.create_model( - "model", path=test_dir, run_settings=run_settings, batch_settings=batch_settings + application = exp.create_application( + "application", + path=test_dir, + run_settings=run_settings, + batch_settings=batch_settings, ) - exp.generate(model) - exp.start(model, block=True) - statuses = exp.get_status(model) + exp.generate(application) + exp.start(application, block=True) + statuses = exp.get_status(application) assert len(statuses) == 1 assert statuses[0] == SmartSimStatus.STATUS_COMPLETED @@ -82,16 +85,16 @@ def test_batch_ensemble(fileutils, test_dir, wlmutils): script = fileutils.get_test_conf_path("sleep.py") settings = wlmutils.get_run_settings("python", f"{script} --time=5") - M1 = exp.create_model("m1", path=test_dir, run_settings=settings) - M2 = exp.create_model("m2", path=test_dir, run_settings=settings)
+ M1 = exp.create_application("m1", path=test_dir, run_settings=settings) + M2 = exp.create_application("m2", path=test_dir, run_settings=settings) batch = exp.create_batch_settings(nodes=1, time="00:01:00") add_batch_resources(wlmutils, batch) batch.set_account(wlmutils.get_test_account()) ensemble = exp.create_ensemble("batch-ens", batch_settings=batch) - ensemble.add_model(M1) - ensemble.add_model(M2) + ensemble.add_application(M1) + ensemble.add_application(M2) exp.generate(ensemble) exp.start(ensemble, block=True) diff --git a/tests/_legacy/full_wlm/test_mpmd.py b/tests/_legacy/full_wlm/test_mpmd.py index 0167a8f083..8195de116c 100644 --- a/tests/_legacy/full_wlm/test_mpmd.py +++ b/tests/_legacy/full_wlm/test_mpmd.py @@ -38,7 +38,7 @@ def test_mpmd(fileutils, test_dir, wlmutils): - """Run an MPMD model twice + """Run an MPMD application twice and check that it always gets executed the same way. All MPMD-compatible run commands which do not @@ -87,13 +87,13 @@ def prune_commands(launcher): settings.make_mpmd(deepcopy(settings)) settings.make_mpmd(deepcopy(settings)) - mpmd_model = exp.create_model( + mpmd_application = exp.create_application( f"mpmd-{run_command}", path=test_dir, run_settings=settings ) - exp.start(mpmd_model, block=True) - statuses = exp.get_status(mpmd_model) + exp.start(mpmd_application, block=True) + statuses = exp.get_status(mpmd_application) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) - exp.start(mpmd_model, block=True) - statuses = exp.get_status(mpmd_model) + exp.start(mpmd_application, block=True) + statuses = exp.get_status(mpmd_application) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) diff --git a/tests/_legacy/full_wlm/test_symlinking.py b/tests/_legacy/full_wlm/test_symlinking.py index c5b5b90bab..b122c41726 100644 --- a/tests/_legacy/full_wlm/test_symlinking.py +++ b/tests/_legacy/full_wlm/test_symlinking.py @@ -36,23 +36,29 @@ pytestmark = pytest.mark.skip(reason="Not testing WLM integrations") -def test_batch_model_and_ensemble(test_dir, wlmutils): +def test_batch_application_and_ensemble(test_dir, wlmutils): exp_name = "test-batch" launcher = wlmutils.get_test_launcher() exp = Experiment(exp_name, launcher=launcher, exp_path=test_dir) rs = exp.create_run_settings("echo", ["spam", "eggs"]) bs = exp.create_batch_settings() - test_model = exp.create_model( - "test_model", path=test_dir, run_settings=rs, batch_settings=bs + test_application = exp.create_application( + "test_application", path=test_dir, run_settings=rs, batch_settings=bs ) - exp.generate(test_model) - exp.start(test_model, block=True) + exp.generate(test_application) + exp.start(test_application, block=True) - assert pathlib.Path(test_model.path).exists() - _should_be_symlinked(pathlib.Path(test_model.path, f"{test_model.name}.out"), True) - _should_be_symlinked(pathlib.Path(test_model.path, f"{test_model.name}.err"), False) - _should_not_be_symlinked(pathlib.Path(test_model.path, f"{test_model.name}.sh")) + assert pathlib.Path(test_application.path).exists() + _should_be_symlinked( + pathlib.Path(test_application.path, f"{test_application.name}.out"), True + ) + _should_be_symlinked( + pathlib.Path(test_application.path, f"{test_application.name}.err"), False + ) + _should_not_be_symlinked( + pathlib.Path(test_application.path, f"{test_application.name}.sh") + ) test_ensemble = exp.create_ensemble( "test_ensemble", params={}, batch_settings=bs, run_settings=rs, replicas=3 @@ -61,7 +67,7 @@ def 
test_batch_model_and_ensemble(test_dir, wlmutils): exp.start(test_ensemble, block=True) assert pathlib.Path(test_ensemble.path).exists() - for i in range(len(test_ensemble.models)): + for i in range(len(test_ensemble.applications)): _should_be_symlinked( pathlib.Path( test_ensemble.path, @@ -94,7 +100,7 @@ def test_batch_ensemble_symlinks(test_dir, wlmutils): exp.generate(test_ensemble) exp.start(test_ensemble, block=True) - for i in range(len(test_ensemble.models)): + for i in range(len(test_ensemble.applications)): _should_be_symlinked( pathlib.Path( test_ensemble.path, @@ -115,23 +121,29 @@ def test_batch_ensemble_symlinks(test_dir, wlmutils): _should_not_be_symlinked(pathlib.Path(exp.exp_path, "smartsim_params.txt")) -def test_batch_model_symlinks(test_dir, wlmutils): - exp_name = "test-batch-model" +def test_batch_application_symlinks(test_dir, wlmutils): + exp_name = "test-batch-application" launcher = wlmutils.get_test_launcher() exp = Experiment(exp_name, launcher=launcher, exp_path=test_dir) rs = exp.create_run_settings("echo", ["spam", "eggs"]) bs = exp.create_batch_settings() - test_model = exp.create_model( - "test_model", path=test_dir, run_settings=rs, batch_settings=bs + test_application = exp.create_application( + "test_application", path=test_dir, run_settings=rs, batch_settings=bs ) - exp.generate(test_model) - exp.start(test_model, block=True) + exp.generate(test_application) + exp.start(test_application, block=True) - assert pathlib.Path(test_model.path).exists() + assert pathlib.Path(test_application.path).exists() - _should_be_symlinked(pathlib.Path(test_model.path, f"{test_model.name}.out"), True) - _should_be_symlinked(pathlib.Path(test_model.path, f"{test_model.name}.err"), False) - _should_not_be_symlinked(pathlib.Path(test_model.path, f"{test_model.name}.sh")) + _should_be_symlinked( + pathlib.Path(test_application.path, f"{test_application.name}.out"), True + ) + _should_be_symlinked( + pathlib.Path(test_application.path, f"{test_application.name}.err"), False + ) + _should_not_be_symlinked( + pathlib.Path(test_application.path, f"{test_application.name}.sh") + ) def test_batch_orchestrator_symlinks(test_dir, wlmutils): diff --git a/tests/_legacy/on_wlm/test_base_settings_on_wlm.py b/tests/_legacy/on_wlm/test_base_settings_on_wlm.py index 77bebd524c..cbdf659d30 100644 --- a/tests/_legacy/on_wlm/test_base_settings_on_wlm.py +++ b/tests/_legacy/on_wlm/test_base_settings_on_wlm.py @@ -32,7 +32,7 @@ from smartsim.status import SmartSimStatus """ -Test the launch and stop of models and ensembles using base +Test the launch and stop of applications and ensembles using base RunSettings while on WLM. 
""" @@ -41,34 +41,34 @@ pytestmark = pytest.mark.skip(reason="Not testing WLM integrations") -def test_model_on_wlm(fileutils, test_dir, wlmutils): - exp_name = "test-base-settings-model-launch" +def test_application_on_wlm(fileutils, test_dir, wlmutils): + exp_name = "test-base-settings-application-launch" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") settings1 = wlmutils.get_base_run_settings("python", f"{script} --time=5") settings2 = wlmutils.get_base_run_settings("python", f"{script} --time=5") - M1 = exp.create_model("m1", path=test_dir, run_settings=settings1) - M2 = exp.create_model("m2", path=test_dir, run_settings=settings2) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings1) + M2 = exp.create_application("m2", path=test_dir, run_settings=settings2) - # launch models twice to show that they can also be restarted + # launch applications twice to show that they can also be restarted for _ in range(2): exp.start(M1, M2, block=True) statuses = exp.get_status(M1, M2) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) -def test_model_stop_on_wlm(fileutils, test_dir, wlmutils): - exp_name = "test-base-settings-model-stop" +def test_application_stop_on_wlm(fileutils, test_dir, wlmutils): + exp_name = "test-base-settings-application-stop" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") settings1 = wlmutils.get_base_run_settings("python", f"{script} --time=5") settings2 = wlmutils.get_base_run_settings("python", f"{script} --time=5") - M1 = exp.create_model("m1", path=test_dir, run_settings=settings1) - M2 = exp.create_model("m2", path=test_dir, run_settings=settings2) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings1) + M2 = exp.create_application("m2", path=test_dir, run_settings=settings2) - # stop launched models + # stop launched applications exp.start(M1, M2, block=False) time.sleep(2) exp.stop(M1, M2) diff --git a/tests/_legacy/on_wlm/test_colocated_model.py b/tests/_legacy/on_wlm/test_colocated_model.py index 97a47542d7..a615c91da4 100644 --- a/tests/_legacy/on_wlm/test_colocated_model.py +++ b/tests/_legacy/on_wlm/test_colocated_model.py @@ -29,7 +29,7 @@ import pytest from smartsim import Experiment -from smartsim.entity import Model +from smartsim.entity import Application from smartsim.status import SmartSimStatus if sys.platform == "darwin": @@ -47,35 +47,37 @@ @pytest.mark.parametrize("db_type", supported_dbs) -def test_launch_colocated_model_defaults(fileutils, test_dir, coloutils, db_type): - """Test the launch of a model with a colocated database and local launcher""" +def test_launch_colocated_application_defaults(fileutils, test_dir, coloutils, db_type): + """Test the launch of a application with a colocated database and local launcher""" db_args = {"debug": DEBUG_DB} - exp = Experiment("colocated_model_defaults", launcher=launcher, exp_path=test_dir) - colo_model = coloutils.setup_test_colo( + exp = Experiment( + "colocated_application_defaults", launcher=launcher, exp_path=test_dir + ) + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, on_wlm=True ) - exp.generate(colo_model) - assert colo_model.run_settings.colocated_db_settings["custom_pinning"] == "0" - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + exp.generate(colo_application) + 
assert colo_application.run_settings.colocated_db_settings["custom_pinning"] == "0" + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all( stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses ), f"Statuses: {statuses}" - # test restarting the colocated model - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + # test restarting the colocated application + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all( stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses ), f"Statuses: {statuses}" @pytest.mark.parametrize("db_type", supported_dbs) -def test_colocated_model_disable_pinning(fileutils, test_dir, coloutils, db_type): +def test_colocated_application_disable_pinning(fileutils, test_dir, coloutils, db_type): exp = Experiment( - "colocated_model_pinning_auto_1cpu", launcher=launcher, exp_path=test_dir + "colocated_application_pinning_auto_1cpu", launcher=launcher, exp_path=test_dir ) db_args = { "db_cpus": 1, @@ -84,22 +86,24 @@ def test_colocated_model_disable_pinning(fileutils, test_dir, coloutils, db_type } # Check to make sure that the CPU mask was correctly generated - colo_model = coloutils.setup_test_colo( + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, on_wlm=True ) - assert colo_model.run_settings.colocated_db_settings["custom_pinning"] is None - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + assert colo_application.run_settings.colocated_db_settings["custom_pinning"] is None + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all( stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses ), f"Statuses: {statuses}" @pytest.mark.parametrize("db_type", supported_dbs) -def test_colocated_model_pinning_auto_2cpu(fileutils, test_dir, coloutils, db_type): +def test_colocated_application_pinning_auto_2cpu( + fileutils, test_dir, coloutils, db_type +): exp = Experiment( - "colocated_model_pinning_auto_2cpu", + "colocated_application_pinning_auto_2cpu", launcher=launcher, exp_path=test_dir, ) @@ -107,88 +111,97 @@ def test_colocated_model_pinning_auto_2cpu(fileutils, test_dir, coloutils, db_ty db_args = {"db_cpus": 2, "debug": DEBUG_DB} # Check to make sure that the CPU mask was correctly generated - colo_model = coloutils.setup_test_colo( + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, on_wlm=True ) - assert colo_model.run_settings.colocated_db_settings["custom_pinning"] == "0,1" - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + assert ( + colo_application.run_settings.colocated_db_settings["custom_pinning"] == "0,1" + ) + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all( stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses ), f"Statuses: {statuses}" @pytest.mark.parametrize("db_type", supported_dbs) -def test_colocated_model_pinning_range(fileutils, test_dir, coloutils, db_type): +def test_colocated_application_pinning_range(fileutils, test_dir, coloutils, db_type): # Check to make sure that the CPU mask was correctly generated # Assume that there are at least 4 cpus on the node exp = Experiment( - "colocated_model_pinning_manual", + 
"colocated_application_pinning_manual", launcher=launcher, exp_path=test_dir, ) db_args = {"db_cpus": 4, "custom_pinning": range(4), "debug": DEBUG_DB} - colo_model = coloutils.setup_test_colo( + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, on_wlm=True ) - assert colo_model.run_settings.colocated_db_settings["custom_pinning"] == "0,1,2,3" - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + assert ( + colo_application.run_settings.colocated_db_settings["custom_pinning"] + == "0,1,2,3" + ) + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all( stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses ), f"Statuses: {statuses}" @pytest.mark.parametrize("db_type", supported_dbs) -def test_colocated_model_pinning_list(fileutils, test_dir, coloutils, db_type): +def test_colocated_application_pinning_list(fileutils, test_dir, coloutils, db_type): # Check to make sure that the CPU mask was correctly generated # note we presume that this has more than 2 CPUs on the supercomputer node exp = Experiment( - "colocated_model_pinning_manual", + "colocated_application_pinning_manual", launcher=launcher, exp_path=test_dir, ) db_args = {"db_cpus": 2, "custom_pinning": [0, 2]} - colo_model = coloutils.setup_test_colo( + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, on_wlm=True ) - assert colo_model.run_settings.colocated_db_settings["custom_pinning"] == "0,2" - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + assert ( + colo_application.run_settings.colocated_db_settings["custom_pinning"] == "0,2" + ) + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all( stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses ), f"Statuses: {statuses}" @pytest.mark.parametrize("db_type", supported_dbs) -def test_colocated_model_pinning_mixed(fileutils, test_dir, coloutils, db_type): +def test_colocated_application_pinning_mixed(fileutils, test_dir, coloutils, db_type): # Check to make sure that the CPU mask was correctly generated # note we presume that this at least 4 CPUs on the supercomputer node exp = Experiment( - "colocated_model_pinning_manual", + "colocated_application_pinning_manual", launcher=launcher, exp_path=test_dir, ) db_args = {"db_cpus": 2, "custom_pinning": [range(2), 3]} - colo_model = coloutils.setup_test_colo( + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, on_wlm=True ) - assert colo_model.run_settings.colocated_db_settings["custom_pinning"] == "0,1,3" - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + assert ( + colo_application.run_settings.colocated_db_settings["custom_pinning"] == "0,1,3" + ) + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all( stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses ), f"Statuses: {statuses}" diff --git a/tests/_legacy/on_wlm/test_containers_wlm.py b/tests/_legacy/on_wlm/test_containers_wlm.py index 21f1e1c5e1..50b35dde1a 100644 --- a/tests/_legacy/on_wlm/test_containers_wlm.py +++ b/tests/_legacy/on_wlm/test_containers_wlm.py @@ -44,7 +44,7 @@ def 
test_singularity_wlm_smartredis(fileutils, test_dir, wlmutils): """Run two processes, each process puts a tensor on the DB, then accesses the other process's tensor. - Finally, the tensor is used to run a model. + Finally, the tensor is used to run an application. Note: This is a containerized port of test_smartredis.py for WLM system """ @@ -87,7 +87,7 @@ def test_singularity_wlm_smartredis(fileutils, test_dir, wlmutils): exp.generate(ensemble) - # start the models + # start the applications exp.start(ensemble, summary=False) # get and confirm statuses diff --git a/tests/_legacy/on_wlm/test_dragon.py b/tests/_legacy/on_wlm/test_dragon.py index a05d381415..cf7160a2a7 100644 --- a/tests/_legacy/on_wlm/test_dragon.py +++ b/tests/_legacy/on_wlm/test_dragon.py @@ -42,7 +42,7 @@ def test_dragon_global_path(global_dragon_teardown, wlmutils, test_dir, monkeypa launcher=wlmutils.get_test_launcher(), ) rs = exp.create_run_settings(exe="sleep", exe_args=["1"]) - model = exp.create_model("sleep", run_settings=rs) + model = exp.create_application("sleep", run_settings=rs) exp.generate(model) exp.start(model, block=True) @@ -63,7 +63,7 @@ def test_dragon_exp_path(global_dragon_teardown, wlmutils, test_dir, monkeypatch launcher=wlmutils.get_test_launcher(), ) rs = exp.create_run_settings(exe="sleep", exe_args=["1"]) - model = exp.create_model("sleep", run_settings=rs) + model = exp.create_application("sleep", run_settings=rs) exp.generate(model) exp.start(model, block=True) @@ -82,7 +82,7 @@ def test_dragon_cannot_honor(wlmutils, test_dir): ) rs = exp.create_run_settings(exe="sleep", exe_args=["1"]) rs.set_nodes(100) - model = exp.create_model("sleep", run_settings=rs) + model = exp.create_application("sleep", run_settings=rs) exp.generate(model) exp.start(model, block=True) diff --git a/tests/_legacy/on_wlm/test_launch_errors.py b/tests/_legacy/on_wlm/test_launch_errors.py index 2498a5a91a..dc11e84780 100644 --- a/tests/_legacy/on_wlm/test_launch_errors.py +++ b/tests/_legacy/on_wlm/test_launch_errors.py @@ -38,7 +38,7 @@ def test_failed_status(fileutils, test_dir, wlmutils): - """Test when a failure occurs deep into model execution""" + """Test when a failure occurs deep into application execution""" exp_name = "test-report-failure" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) @@ -48,12 +48,14 @@ def test_failed_status(fileutils, test_dir, wlmutils): "python", f"{script} --time=7", run_comamnd="auto" ) - model = exp.create_model("bad-model", path=test_dir, run_settings=settings) + application = exp.create_application( + "bad-application", path=test_dir, run_settings=settings + ) - exp.start(model, block=False) - while not exp.finished(model): + exp.start(application, block=False) + while not exp.finished(application): time.sleep(2) - stat = exp.get_status(model) + stat = exp.get_status(application) assert len(stat) == 1 assert stat[0] == SmartSimStatus.STATUS_FAILED @@ -79,7 +81,9 @@ def test_bad_run_command_args(fileutils, test_dir, wlmutils): "python", f"{script} --time=5", run_args={"badarg": "badvalue"} ) - model = exp.create_model("bad-model", path=test_dir, run_settings=settings) + application = exp.create_application( + "bad-application", path=test_dir, run_settings=settings + ) with pytest.raises(SmartSimError): - exp.start(model) + exp.start(application) diff --git a/tests/_legacy/on_wlm/test_launch_ompi_lsf.py b/tests/_legacy/on_wlm/test_launch_ompi_lsf.py index 51c82e4184..c2806a1b0d 100644 --- a/tests/_legacy/on_wlm/test_launch_ompi_lsf.py +++
b/tests/_legacy/on_wlm/test_launch_ompi_lsf.py @@ -47,7 +47,9 @@ def test_launch_openmpi_lsf(fileutils, test_dir, wlmutils): settings.set_cpus_per_task(1) settings.set_tasks(1) - model = exp.create_model("ompi-model", path=test_dir, run_settings=settings) - exp.start(model, block=True) - statuses = exp.get_status(model) + application = exp.create_application( + "ompi-application", path=test_dir, run_settings=settings + ) + exp.start(application, block=True) + statuses = exp.get_status(application) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) diff --git a/tests/_legacy/on_wlm/test_local_step.py b/tests/_legacy/on_wlm/test_local_step.py index 8f7d823b8b..00c76bb331 100644 --- a/tests/_legacy/on_wlm/test_local_step.py +++ b/tests/_legacy/on_wlm/test_local_step.py @@ -61,9 +61,9 @@ def test_local_env_pass_implicit(fileutils, test_dir) -> None: # NOTE: not passing env_args into run_settings here, relying on --export=ALL default settings = RunSettings(exe_name, exe_args, run_command="srun", run_args=run_args) app_name = "echo_app" - app = exp.create_model(app_name, settings) + app = exp.create_application(app_name, settings) - # generate the experiment structure and start the model + # generate the experiment structure and start the application exp.generate(app, overwrite=True) exp.start(app, block=True, summary=False) @@ -100,9 +100,9 @@ def test_local_env_pass_explicit(fileutils, test_dir) -> None: exe_name, exe_args, run_command="srun", run_args=run_args, env_vars=env_vars ) app_name = "echo_app" - app = exp.create_model(app_name, settings) + app = exp.create_application(app_name, settings) - # generate the experiment structure and start the model + # generate the experiment structure and start the application exp.generate(app, overwrite=True) exp.start(app, block=True, summary=False) diff --git a/tests/_legacy/on_wlm/test_preview_wlm.py b/tests/_legacy/on_wlm/test_preview_wlm.py index 78da30c9af..77cd938e62 100644 --- a/tests/_legacy/on_wlm/test_preview_wlm.py +++ b/tests/_legacy/on_wlm/test_preview_wlm.py @@ -126,8 +126,8 @@ def test_preview_model_on_wlm(fileutils, test_dir, wlmutils): script = fileutils.get_test_conf_path("sleep.py") settings1 = wlmutils.get_base_run_settings("python", f"{script} --time=5") settings2 = wlmutils.get_base_run_settings("python", f"{script} --time=5") - M1 = exp.create_model("m1", path=test_dir, run_settings=settings1) - M2 = exp.create_model("m2", path=test_dir, run_settings=settings2) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings1) + M2 = exp.create_application("m2", path=test_dir, run_settings=settings2) preview_manifest = Manifest(M1, M2) @@ -158,7 +158,7 @@ def test_preview_batch_model(fileutils, test_dir, wlmutils): batch_settings.set_account(wlmutils.get_test_account()) add_batch_resources(wlmutils, batch_settings) run_settings = wlmutils.get_run_settings("python", f"{script} --time=5") - model = exp.create_model( + model = exp.create_application( "model", path=test_dir, run_settings=run_settings, batch_settings=batch_settings ) model.set_path(test_dir) @@ -187,8 +187,8 @@ def test_preview_batch_ensemble(fileutils, test_dir, wlmutils): script = fileutils.get_test_conf_path("sleep.py") settings = wlmutils.get_run_settings("python", f"{script} --time=5") - M1 = exp.create_model("m1", path=test_dir, run_settings=settings) - M2 = exp.create_model("m2", path=test_dir, run_settings=settings) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings) + M2 = exp.create_application("m2", 
path=test_dir, run_settings=settings) batch = exp.create_batch_settings(nodes=1, time="00:01:00") add_batch_resources(wlmutils, batch) @@ -235,11 +235,11 @@ def test_preview_launch_command(test_dir, wlmutils, choose_host): rs1 = RunSettings("bash", "multi_tags_template.sh") rs2 = exp.create_run_settings("echo", ["spam", "eggs"]) - hello_world_model = exp.create_model( + hello_world_model = exp.create_application( "echo-hello", run_settings=rs1, params=model_params ) - spam_eggs_model = exp.create_model("echo-spam", run_settings=rs2) + spam_eggs_model = exp.create_application("echo-spam", run_settings=rs2) # setup ensemble parameter space learning_rate = list(np.linspace(0.01, 0.5)) @@ -288,7 +288,7 @@ def test_preview_batch_launch_command(fileutils, test_dir, wlmutils): batch_settings.set_account(wlmutils.get_test_account()) add_batch_resources(wlmutils, batch_settings) run_settings = wlmutils.get_run_settings("python", f"{script} --time=5") - model = exp.create_model( + model = exp.create_application( "model", path=test_dir, run_settings=run_settings, batch_settings=batch_settings ) model.set_path(test_dir) @@ -342,7 +342,7 @@ def test_ensemble_batch(test_dir, wlmutils): exp.generate(ensemble, overwrite=True) rs2 = exp.create_run_settings("echo", ["spam", "eggs"]) # Create model - ml_model = exp.create_model("tf_training", rs2) + ml_model = exp.create_application("tf_training", rs2) for sim in ensemble.entities: ml_model.register_incoming_entity(sim) @@ -380,8 +380,8 @@ def test_preview_ensemble_db_script(wlmutils, test_dir): model_settings_2 = exp.create_run_settings(exe="python", exe_args="params.py") model_settings_3 = exp.create_run_settings(exe="python", exe_args="params.py") # Initialize a Model object - model_instance = exp.create_model("model_name", model_settings) - model_instance_2 = exp.create_model("model_name_2", model_settings_2) + model_instance = exp.create_application("model_name", model_settings) + model_instance_2 = exp.create_application("model_name_2", model_settings_2) batch = exp.create_batch_settings(time="24:00:00", account="test") ensemble = exp.create_ensemble( "ensemble", batch_settings=batch, run_settings=model_settings_3, replicas=2 diff --git a/tests/_legacy/on_wlm/test_restart.py b/tests/_legacy/on_wlm/test_restart.py index 0116c10d39..0b8de3fd51 100644 --- a/tests/_legacy/on_wlm/test_restart.py +++ b/tests/_legacy/on_wlm/test_restart.py @@ -44,8 +44,8 @@ def test_restart(fileutils, test_dir, wlmutils): settings = exp.create_run_settings("python", f"{script} --time=5") settings.set_tasks(1) - M1 = exp.create_model("m1", path=test_dir, run_settings=settings) - M2 = exp.create_model("m2", path=test_dir, run_settings=deepcopy(settings)) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings) + M2 = exp.create_application("m2", path=test_dir, run_settings=deepcopy(settings)) exp.start(M1, M2, block=True) statuses = exp.get_status(M1, M2) diff --git a/tests/_legacy/on_wlm/test_simple_base_settings_on_wlm.py b/tests/_legacy/on_wlm/test_simple_base_settings_on_wlm.py index caa55da3ed..8453e85275 100644 --- a/tests/_legacy/on_wlm/test_simple_base_settings_on_wlm.py +++ b/tests/_legacy/on_wlm/test_simple_base_settings_on_wlm.py @@ -33,7 +33,7 @@ from smartsim.status import SmartSimStatus """ -Test the launch and stop of simple models and ensembles that use base +Test the launch and stop of simple applications and ensembles that use base RunSettings while on WLM that do not include a run command These tests will execute code (very light scripts) on 
the head node @@ -49,37 +49,37 @@ pytestmark = pytest.mark.skip(reason="Not testing WLM integrations") -def test_simple_model_on_wlm(fileutils, test_dir, wlmutils): +def test_simple_application_on_wlm(fileutils, test_dir, wlmutils): launcher = wlmutils.get_test_launcher() if launcher not in ["pbs", "slurm", "lsf"]: pytest.skip("Test only runs on systems with LSF, PBSPro, or Slurm as WLM") - exp_name = "test-simplebase-settings-model-launch" + exp_name = "test-simplebase-settings-application-launch" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") settings = RunSettings("python", exe_args=f"{script} --time=5") - M = exp.create_model("m", path=test_dir, run_settings=settings) + M = exp.create_application("m", path=test_dir, run_settings=settings) - # launch model twice to show that it can also be restarted + # launch application twice to show that it can also be restarted for _ in range(2): exp.start(M, block=True) assert exp.get_status(M)[0] == SmartSimStatus.STATUS_COMPLETED -def test_simple_model_stop_on_wlm(fileutils, test_dir, wlmutils): +def test_simple_application_stop_on_wlm(fileutils, test_dir, wlmutils): launcher = wlmutils.get_test_launcher() if launcher not in ["pbs", "slurm", "lsf"]: pytest.skip("Test only runs on systems with LSF, PBSPro, or Slurm as WLM") - exp_name = "test-simplebase-settings-model-stop" + exp_name = "test-simplebase-settings-application-stop" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") settings = RunSettings("python", exe_args=f"{script} --time=5") - M = exp.create_model("m", path=test_dir, run_settings=settings) + M = exp.create_application("m", path=test_dir, run_settings=settings) - # stop launched model + # stop launched application exp.start(M, block=False) time.sleep(2) exp.stop(M) diff --git a/tests/_legacy/on_wlm/test_simple_entity_launch.py b/tests/_legacy/on_wlm/test_simple_entity_launch.py index 28ddf92f74..6cdd7d7a1f 100644 --- a/tests/_legacy/on_wlm/test_simple_entity_launch.py +++ b/tests/_legacy/on_wlm/test_simple_entity_launch.py @@ -49,16 +49,16 @@ pytestmark = pytest.mark.skip(reason="Not testing WLM integrations") -def test_models(fileutils, test_dir, wlmutils): - exp_name = "test-models-launch" +def test_applications(fileutils, test_dir, wlmutils): + exp_name = "test-applications-launch" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") settings = exp.create_run_settings("python", f"{script} --time=5") settings.set_tasks(1) - M1 = exp.create_model("m1", path=test_dir, run_settings=settings) - M2 = exp.create_model("m2", path=test_dir, run_settings=deepcopy(settings)) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings) + M2 = exp.create_application("m2", path=test_dir, run_settings=deepcopy(settings)) exp.start(M1, M2, block=True) statuses = exp.get_status(M1, M2) @@ -76,7 +76,7 @@ def test_multinode_app(mpi_app_path, test_dir, wlmutils): settings = exp.create_run_settings(str(mpi_app_path), []) settings.set_nodes(3) - model = exp.create_model("mpi_app", run_settings=settings) + model = exp.create_application("mpi_app", run_settings=settings) exp.generate(model) exp.start(model, block=True) @@ -125,8 +125,10 @@ def test_summary(fileutils, test_dir, wlmutils): bad_settings = exp.create_run_settings("python", f"{bad} --time=6") bad_settings.set_tasks(1) - 
sleep_exp = exp.create_model("sleep", path=test_dir, run_settings=sleep_settings) - bad = exp.create_model("bad", path=test_dir, run_settings=bad_settings) + sleep_exp = exp.create_application( + "sleep", path=test_dir, run_settings=sleep_settings + ) + bad = exp.create_application("bad", path=test_dir, run_settings=bad_settings) # start and poll exp.start(sleep_exp, bad) diff --git a/tests/_legacy/on_wlm/test_stop.py b/tests/_legacy/on_wlm/test_stop.py index abc7441bb2..ef4a3bf343 100644 --- a/tests/_legacy/on_wlm/test_stop.py +++ b/tests/_legacy/on_wlm/test_stop.py @@ -44,13 +44,13 @@ def test_stop_entity(fileutils, test_dir, wlmutils): - exp_name = "test-launch-stop-model" + exp_name = "test-launch-stop-application" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") settings = exp.create_run_settings("python", f"{script} --time=10") settings.set_tasks(1) - M1 = exp.create_model("m1", path=test_dir, run_settings=settings) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings) exp.start(M1, block=False) time.sleep(5) diff --git a/tests/_legacy/test_collector_manager.py b/tests/_legacy/test_collector_manager.py index 56add1ef7d..7cc475afea 100644 --- a/tests/_legacy/test_collector_manager.py +++ b/tests/_legacy/test_collector_manager.py @@ -337,8 +337,8 @@ async def snooze() -> None: @pytest.mark.parametrize( "e_type,telemetry_on", [ - pytest.param("model", False, id="models"), - pytest.param("model", True, id="models, telemetry enabled"), + pytest.param("application", False, id="applications"), + pytest.param("application", True, id="applications, telemetry enabled"), pytest.param("ensemble", False, id="ensemble"), pytest.param("ensemble", True, id="ensemble, telemetry enabled"), pytest.param("orchestrator", False, id="orchestrator"), @@ -371,7 +371,7 @@ async def test_collector_manager_find_nondb( async def test_collector_manager_find_db(mock_entity: MockCollectorEntityFunc) -> None: """Ensure that the manifest allows individually enabling a given collector""" entity: JobEntity = mock_entity( - port=1234, name="entity1", type="model", telemetry_on=True + port=1234, name="entity1", type="application", telemetry_on=True ) manager = CollectorManager() @@ -457,7 +457,7 @@ async def test_collector_manager_find_entity_unmapped( ) -> None: """Ensure that an entity type that is not mapped results in no collectors""" entity: JobEntity = mock_entity( - port=1234, name="entity1", type="model", telemetry_on=True + port=1234, name="entity1", type="application", telemetry_on=True ) manager = CollectorManager() diff --git a/tests/_legacy/test_colo_model_local.py b/tests/_legacy/test_colo_model_local.py index fe347ee309..d4d8c97f38 100644 --- a/tests/_legacy/test_colo_model_local.py +++ b/tests/_legacy/test_colo_model_local.py @@ -29,7 +29,7 @@ import pytest from smartsim import Experiment -from smartsim.entity import Model +from smartsim.entity import Application from smartsim.error import SSUnsupportedError from smartsim.status import SmartSimStatus @@ -50,7 +50,9 @@ def test_macosx_warning(fileutils, test_dir, coloutils): db_args = {"custom_pinning": [1]} db_type = "uds" # Test is insensitive to choice of db - exp = Experiment("colocated_model_defaults", launcher="local", exp_path=test_dir) + exp = Experiment( + "colocated_application_defaults", launcher="local", exp_path=test_dir + ) with pytest.warns( RuntimeWarning, match="CPU pinning is not supported on MacOSX. 
Ignoring pinning specification.", @@ -68,7 +70,9 @@ def test_unsupported_limit_app(fileutils, test_dir, coloutils): db_args = {"limit_app_cpus": True} db_type = "uds" # Test is insensitive to choice of db - exp = Experiment("colocated_model_defaults", launcher="local", exp_path=test_dir) + exp = Experiment( + "colocated_application_defaults", launcher="local", exp_path=test_dir + ) with pytest.raises(SSUnsupportedError): coloutils.setup_test_colo( fileutils, @@ -85,7 +89,9 @@ def test_unsupported_custom_pinning(fileutils, test_dir, coloutils, custom_pinni db_type = "uds" # Test is insensitive to choice of db db_args = {"custom_pinning": custom_pinning} - exp = Experiment("colocated_model_defaults", launcher="local", exp_path=test_dir) + exp = Experiment( + "colocated_application_defaults", launcher="local", exp_path=test_dir + ) with pytest.raises(TypeError): coloutils.setup_test_colo( fileutils, @@ -110,19 +116,21 @@ def test_unsupported_custom_pinning(fileutils, test_dir, coloutils, custom_pinni ], ) def test_create_pinning_string(pin_list, num_cpus, expected): - assert Model._create_pinning_string(pin_list, num_cpus) == expected + assert Application._create_pinning_string(pin_list, num_cpus) == expected @pytest.mark.parametrize("db_type", supported_dbs) -def test_launch_colocated_model_defaults( +def test_launch_colocated_application_defaults( fileutils, test_dir, coloutils, db_type, launcher="local" ): - """Test the launch of a model with a colocated database and local launcher""" + """Test the launch of a application with a colocated database and local launcher""" db_args = {} - exp = Experiment("colocated_model_defaults", launcher=launcher, exp_path=test_dir) - colo_model = coloutils.setup_test_colo( + exp = Experiment( + "colocated_application_defaults", launcher=launcher, exp_path=test_dir + ) + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, @@ -135,38 +143,39 @@ def test_launch_colocated_model_defaults( else: true_pinning = "0" assert ( - colo_model.run_settings.colocated_db_settings["custom_pinning"] == true_pinning + colo_application.run_settings.colocated_db_settings["custom_pinning"] + == true_pinning ) - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all(stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses) - # test restarting the colocated model - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + # test restarting the colocated application + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all( stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses ), f"Statuses {statuses}" @pytest.mark.parametrize("db_type", supported_dbs) -def test_launch_multiple_colocated_models( +def test_launch_multiple_colocated_applications( fileutils, test_dir, coloutils, wlmutils, db_type, launcher="local" ): - """Test the concurrent launch of two models with a colocated database and local launcher""" + """Test the concurrent launch of two applications with a colocated database and local launcher""" db_args = {} - exp = Experiment("multi_colo_models", launcher=launcher, exp_path=test_dir) - colo_models = [ + exp = Experiment("multi_colo_applications", launcher=launcher, exp_path=test_dir) + colo_applications = [ coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, - 
colo_model_name="colo0", + colo_application_name="colo0", port=wlmutils.get_test_port(), ), coloutils.setup_test_colo( @@ -175,53 +184,53 @@ def test_launch_multiple_colocated_models( exp, "send_data_local_smartredis.py", db_args, - colo_model_name="colo1", + colo_application_name="colo1", port=wlmutils.get_test_port() + 1, ), ] - exp.generate(*colo_models) - exp.start(*colo_models, block=True) - statuses = exp.get_status(*colo_models) + exp.generate(*colo_applications) + exp.start(*colo_applications, block=True) + statuses = exp.get_status(*colo_applications) assert all(stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses) - # test restarting the colocated model - exp.start(*colo_models, block=True) - statuses = exp.get_status(*colo_models) + # test restarting the colocated application + exp.start(*colo_applications, block=True) + statuses = exp.get_status(*colo_applications) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) @pytest.mark.parametrize("db_type", supported_dbs) -def test_colocated_model_disable_pinning( +def test_colocated_application_disable_pinning( fileutils, test_dir, coloutils, db_type, launcher="local" ): exp = Experiment( - "colocated_model_pinning_auto_1cpu", launcher=launcher, exp_path=test_dir + "colocated_application_pinning_auto_1cpu", launcher=launcher, exp_path=test_dir ) db_args = { "db_cpus": 1, "custom_pinning": [], } # Check to make sure that the CPU mask was correctly generated - colo_model = coloutils.setup_test_colo( + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, ) - assert colo_model.run_settings.colocated_db_settings["custom_pinning"] is None - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + assert colo_application.run_settings.colocated_db_settings["custom_pinning"] is None + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) @pytest.mark.parametrize("db_type", supported_dbs) -def test_colocated_model_pinning_auto_2cpu( +def test_colocated_application_pinning_auto_2cpu( fileutils, test_dir, coloutils, db_type, launcher="local" ): exp = Experiment( - "colocated_model_pinning_auto_2cpu", launcher=launcher, exp_path=test_dir + "colocated_application_pinning_auto_2cpu", launcher=launcher, exp_path=test_dir ) db_args = { @@ -229,7 +238,7 @@ def test_colocated_model_pinning_auto_2cpu( } # Check to make sure that the CPU mask was correctly generated - colo_model = coloutils.setup_test_colo( + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, @@ -241,65 +250,68 @@ def test_colocated_model_pinning_auto_2cpu( else: true_pinning = "0,1" assert ( - colo_model.run_settings.colocated_db_settings["custom_pinning"] == true_pinning + colo_application.run_settings.colocated_db_settings["custom_pinning"] + == true_pinning ) - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) @pytest.mark.skipif(is_mac, reason="unsupported on MacOSX") @pytest.mark.parametrize("db_type", supported_dbs) -def test_colocated_model_pinning_range( +def test_colocated_application_pinning_range( fileutils, test_dir, coloutils, db_type, 
launcher="local" ): # Check to make sure that the CPU mask was correctly generated exp = Experiment( - "colocated_model_pinning_manual", launcher=launcher, exp_path=test_dir + "colocated_application_pinning_manual", launcher=launcher, exp_path=test_dir ) db_args = {"db_cpus": 2, "custom_pinning": range(2)} - colo_model = coloutils.setup_test_colo( + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, ) - assert colo_model.run_settings.colocated_db_settings["custom_pinning"] == "0,1" - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + assert ( + colo_application.run_settings.colocated_db_settings["custom_pinning"] == "0,1" + ) + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) @pytest.mark.skipif(is_mac, reason="unsupported on MacOSX") @pytest.mark.parametrize("db_type", supported_dbs) -def test_colocated_model_pinning_list( +def test_colocated_application_pinning_list( fileutils, test_dir, coloutils, db_type, launcher="local" ): # Check to make sure that the CPU mask was correctly generated exp = Experiment( - "colocated_model_pinning_manual", launcher=launcher, exp_path=test_dir + "colocated_application_pinning_manual", launcher=launcher, exp_path=test_dir ) db_args = {"db_cpus": 1, "custom_pinning": [1]} - colo_model = coloutils.setup_test_colo( + colo_application = coloutils.setup_test_colo( fileutils, db_type, exp, "send_data_local_smartredis.py", db_args, ) - assert colo_model.run_settings.colocated_db_settings["custom_pinning"] == "1" - exp.generate(colo_model) - exp.start(colo_model, block=True) - statuses = exp.get_status(colo_model) + assert colo_application.run_settings.colocated_db_settings["custom_pinning"] == "1" + exp.generate(colo_application) + exp.start(colo_application, block=True) + statuses = exp.get_status(colo_application) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) @@ -308,7 +320,7 @@ def test_colo_uds_verifies_socket_file_name(test_dir, launcher="local"): colo_settings = exp.create_run_settings(exe=sys.executable, exe_args=["--version"]) - colo_model = exp.create_model("wrong_uds_socket_name", colo_settings) + colo_application = exp.create_application("wrong_uds_socket_name", colo_settings) with pytest.raises(ValueError): - colo_model.colocate_db_uds(unix_socket="this is not a valid name!") + colo_application.colocate_db_uds(unix_socket="this is not a valid name!") diff --git a/tests/_legacy/test_colo_model_lsf.py b/tests/_legacy/test_colo_model_lsf.py index 5e1c449cca..afa843ef2e 100644 --- a/tests/_legacy/test_colo_model_lsf.py +++ b/tests/_legacy/test_colo_model_lsf.py @@ -30,7 +30,7 @@ import smartsim.settings.base from smartsim import Experiment -from smartsim.entity import Model +from smartsim.entity import Application from smartsim.settings.lsfSettings import JsrunSettings # The tests in this file belong to the group_a group @@ -59,7 +59,7 @@ def test_jsrun_prep(fileutils, coloutils, monkeypatch): db_args = {"custom_pinning": [1]} db_type = "uds" # Test is insensitive to choice of db - exp = Experiment("colocated_model_lsf", launcher="lsf") + exp = Experiment("colocated_application_lsf", launcher="lsf") with pytest.raises(ExpectationMet, match="mock._prep_colocated_db") as ex: run_settings = JsrunSettings("foo") @@ -83,11 +83,11 @@ def test_non_js_run_prep(fileutils, 
coloutils, monkeypatch): db_args = {"custom_pinning": [1]} db_type = "tcp" # Test is insensitive to choice of db - exp = Experiment("colocated_model_lsf", launcher="lsf") + exp = Experiment("colocated_application_lsf", launcher="lsf") run_settings = smartsim.settings.base.RunSettings("foo") - colo_model: Model = coloutils.setup_test_colo( + colo_application: Application = coloutils.setup_test_colo( fileutils, db_type, exp, @@ -96,7 +96,7 @@ def test_non_js_run_prep(fileutils, coloutils, monkeypatch): colo_settings=run_settings, ) - assert colo_model + assert colo_application @pytest.mark.parametrize( @@ -128,12 +128,12 @@ def test_jsrun_prep_cpu_per_flag_set_check( db_args = {"custom_pinning": [1]} db_type = "uds" # Test is insensitive to choice of db - exp = Experiment("colocated_model_lsf", launcher="lsf") + exp = Experiment("colocated_application_lsf", launcher="lsf") run_args = {run_arg_key: test_value} run_settings = JsrunSettings("foo", run_args=run_args) - colo_model: Model = coloutils.setup_test_colo( + colo_application: Application = coloutils.setup_test_colo( fileutils, db_type, exp, @@ -142,7 +142,7 @@ def test_jsrun_prep_cpu_per_flag_set_check( colo_settings=run_settings, ) - assert colo_model.run_settings.run_args[exp_run_arg_key] == exp_value + assert colo_application.run_settings.run_args[exp_run_arg_key] == exp_value @pytest.mark.parametrize( @@ -174,12 +174,12 @@ def test_jsrun_prep_db_cpu_override( db_args = {"custom_pinning": [1], "db_cpus": 3} db_type = "tcp" # Test is insensitive to choice of db - exp = Experiment("colocated_model_lsf", launcher="lsf") + exp = Experiment("colocated_application_lsf", launcher="lsf") run_args = {run_arg_key: test_value} run_settings = JsrunSettings("foo", run_args=run_args) - colo_model: Model = coloutils.setup_test_colo( + colo_application: Application = coloutils.setup_test_colo( fileutils, db_type, exp, @@ -188,7 +188,7 @@ def test_jsrun_prep_db_cpu_override( colo_settings=run_settings, ) - assert colo_model.run_settings.run_args[exp_run_arg_key] == exp_value + assert colo_application.run_settings.run_args[exp_run_arg_key] == exp_value @pytest.mark.parametrize( @@ -218,12 +218,12 @@ def test_jsrun_prep_db_cpu_replacement( db_args = {"custom_pinning": [1], "db_cpus": 8} db_type = "uds" # Test is insensitive to choice of db - exp = Experiment("colocated_model_lsf", launcher="lsf") + exp = Experiment("colocated_application_lsf", launcher="lsf") run_args = {run_arg_key: test_value} run_settings = JsrunSettings("foo", run_args=run_args) - colo_model: Model = coloutils.setup_test_colo( + colo_application: Application = coloutils.setup_test_colo( fileutils, db_type, exp, @@ -232,7 +232,7 @@ def test_jsrun_prep_db_cpu_replacement( colo_settings=run_settings, ) - assert colo_model.run_settings.run_args[exp_run_arg_key] == exp_value + assert colo_application.run_settings.run_args[exp_run_arg_key] == exp_value @pytest.mark.parametrize( @@ -268,12 +268,12 @@ def test_jsrun_prep_rs_per_host( db_args = {"custom_pinning": [1]} db_type = "tcp" # Test is insensitive to choice of db - exp = Experiment("colocated_model_lsf", launcher="lsf") + exp = Experiment("colocated_application_lsf", launcher="lsf") run_args = {run_arg_key: test_value} run_settings = JsrunSettings("foo", run_args=run_args) - colo_model: Model = coloutils.setup_test_colo( + colo_application: Application = coloutils.setup_test_colo( fileutils, db_type, exp, @@ -283,4 +283,6 @@ def test_jsrun_prep_rs_per_host( ) # NOTE: _prep_colocated_db sets this to a string & not an integer - assert 
str(colo_model.run_settings.run_args[exp_run_arg_key]) == str(exp_value) + assert str(colo_application.run_settings.run_args[exp_run_arg_key]) == str( + exp_value + ) diff --git a/tests/_legacy/test_containers.py b/tests/_legacy/test_containers.py index 5d0f933fff..8957f223dd 100644 --- a/tests/_legacy/test_containers.py +++ b/tests/_legacy/test_containers.py @@ -99,16 +99,16 @@ def test_singularity_basic(fileutils, test_dir): run_settings = exp.create_run_settings( "python3", "sleep.py --time=3", container=container ) - model = exp.create_model("singularity_basic", run_settings) + application = exp.create_application("singularity_basic", run_settings) script = fileutils.get_test_conf_path("sleep.py") - model.attach_generator_files(to_copy=[script]) - exp.generate(model) + application.attach_generator_files(to_copy=[script]) + exp.generate(application) - exp.start(model, summary=False) + exp.start(application, summary=False) # get and confirm status - stat = exp.get_status(model)[0] + stat = exp.get_status(application)[0] assert stat == SmartSimStatus.STATUS_COMPLETED print(exp.summary()) @@ -127,15 +127,15 @@ def test_singularity_args(fileutils, test_dir): run_settings = exp.create_run_settings( "python3", "test/check_dirs.py", container=container ) - model = exp.create_model("singularity_args", run_settings) + application = exp.create_application("singularity_args", run_settings) script = fileutils.get_test_conf_path("check_dirs.py") - model.attach_generator_files(to_copy=[script]) - exp.generate(model) + application.attach_generator_files(to_copy=[script]) + exp.generate(application) - exp.start(model, summary=False) + exp.start(application, summary=False) # get and confirm status - stat = exp.get_status(model)[0] + stat = exp.get_status(application)[0] assert stat == SmartSimStatus.STATUS_COMPLETED print(exp.summary()) @@ -145,7 +145,7 @@ def test_singularity_args(fileutils, test_dir): def test_singularity_smartredis(local_experiment, prepare_db, local_db, fileutils): """Run two processes, each process puts a tensor on the DB, then accesses the other process's tensor. - Finally, the tensor is used to run a model. + Finally, the tensor is used to run an application.
Note: This is a containerized port of test_smartredis.py """ @@ -175,7 +175,7 @@ def test_singularity_smartredis(local_experiment, prepare_db, local_db, fileutil local_experiment.generate(ensemble) - # start the models + # start the applications local_experiment.start(ensemble, summary=False) # get and confirm statuses diff --git a/tests/_legacy/test_controller_errors.py b/tests/_legacy/test_controller_errors.py index 2d623cdd1a..0f16c4c6f9 100644 --- a/tests/_legacy/test_controller_errors.py +++ b/tests/_legacy/test_controller_errors.py @@ -31,7 +31,7 @@ from smartsim._core.launcher.step import Step from smartsim._core.launcher.step.dragonStep import DragonStep from smartsim.database import Orchestrator -from smartsim.entity import Model +from smartsim.entity import Application from smartsim.entity.ensemble import Ensemble from smartsim.error import SmartSimError, SSUnsupportedError from smartsim.error.errors import SSUnsupportedError @@ -41,10 +41,14 @@ pytestmark = pytest.mark.group_a entity_settings = SrunSettings("echo", ["spam", "eggs"]) -model_dup_setting = RunSettings("echo", ["spam_1", "eggs_2"]) -model = Model("model_name", run_settings=entity_settings, params={}, path="") -# Model entity slightly different but with same name -model_2 = Model("model_name", run_settings=model_dup_setting, params={}, path="") +application_dup_setting = RunSettings("echo", ["spam_1", "eggs_2"]) +application = Application( + "application_name", run_settings=entity_settings, params={}, path="" +) +# Application entity slightly different but with same name +application_2 = Application( + "application_name", run_settings=application_dup_setting, params={}, path="" +) ens = Ensemble("ensemble_name", params={}, run_settings=entity_settings, replicas=2) # Ensemble entity slightly different but with same name ens_2 = Ensemble("ensemble_name", params={}, run_settings=entity_settings, replicas=3) @@ -67,12 +71,12 @@ def test_finished_entity_wrong_type(): def test_finished_not_found(): - """Ask if model is finished that hasnt been launched by this experiment""" + """Ask if an application is finished that hasn't been launched by this experiment""" rs = RunSettings("python") - model = Model("hello", {}, "./", rs) + application = Application("hello", {}, "./", rs) cont = Controller(launcher="local") with pytest.raises(ValueError): - cont.finished(model) + cont.finished(application) def test_entity_status_wrong_type(): @@ -136,7 +140,7 @@ def get_launch_cmd(self): "entity", [ pytest.param(ens, id="Ensemble_running"), - pytest.param(model, id="Model_running"), + pytest.param(application, id="Application_running"), pytest.param(orc, id="Orch_running"), ], ) @@ -156,10 +160,13 @@ def test_duplicate_running_entity(test_dir, wlmutils, entity): @pytest.mark.parametrize( "entity", - [pytest.param(ens, id="Ensemble_running"), pytest.param(model, id="Model_running")], + [ + pytest.param(ens, id="Ensemble_running"), + pytest.param(application, id="Application_running"), + ], ) def test_restarting_entity(test_dir, wlmutils, entity): - """Validate restarting a completed Model/Ensemble job""" + """Validate restarting a completed Application/Ensemble job""" step_settings = RunSettings("echo") test_launcher = wlmutils.get_test_launcher() step = MockStep("mock-step", test_dir, step_settings) @@ -188,11 +195,11 @@ def test_restarting_orch(test_dir, wlmutils): "entity,entity_2", [ pytest.param(ens, ens_2, id="Ensemble_running"), - pytest.param(model, model_2, id="Model_running"), + pytest.param(application, application_2,
id="Application_running"), ], ) def test_starting_entity(test_dir, wlmutils, entity, entity_2): - """Test launching a job of Model/Ensemble with same name in completed""" + """Test launching a job of Application/Ensemble with same name in completed""" step_settings = RunSettings("echo") step = MockStep("mock-step", test_dir, step_settings) test_launcher = wlmutils.get_test_launcher() diff --git a/tests/_legacy/test_ensemble.py b/tests/_legacy/test_ensemble.py index 0632eee16f..86146c8e47 100644 --- a/tests/_legacy/test_ensemble.py +++ b/tests/_legacy/test_ensemble.py @@ -30,7 +30,7 @@ import pytest from smartsim import Experiment -from smartsim.entity import Ensemble, Model +from smartsim.entity import Application, Ensemble from smartsim.error import EntityExistsError, SSUnsupportedError, UserStrategyError from smartsim.settings import RunSettings @@ -49,7 +49,7 @@ # ---- helpers ------------------------------------------------------ -def step_values(param_names, param_values, n_models=0): +def step_values(param_names, param_values, n_applications=0): permutations = [] for p in zip(*param_values): permutations.append(dict(zip(param_names, p))) @@ -58,13 +58,13 @@ def step_values(param_names, param_values, n_models=0): # bad permutation strategy that doesn't return # a list of dictionaries -def bad_strategy(names, values, n_models=0): +def bad_strategy(names, values, n_applications=0): return -1 # test bad perm strategy that returns a list but of lists # not dictionaries -def bad_strategy_2(names, values, n_models=0): +def bad_strategy_2(names, values, n_applications=0): return [values] @@ -88,11 +88,11 @@ def test_step(): ensemble = Ensemble("step", params, run_settings=rs, perm_strat="step") assert len(ensemble) == 2 - model_1_params = {"h": "5", "g": "7"} - assert ensemble.entities[0].params == model_1_params + application_1_params = {"h": "5", "g": "7"} + assert ensemble.entities[0].params == application_1_params - model_2_params = {"h": "6", "g": "8"} - assert ensemble.entities[1].params == model_2_params + application_2_params = {"h": "6", "g": "8"} + assert ensemble.entities[1].params == application_2_params def test_random(): @@ -104,7 +104,7 @@ def test_random(): params, run_settings=rs, perm_strat="random", - n_models=len(random_ints), + n_applications=len(random_ints), ) assert len(ensemble) == len(random_ints) assigned_params = [m.params["h"] for m in ensemble.entities] @@ -115,7 +115,7 @@ def test_random(): params, run_settings=rs, perm_strat="random", - n_models=len(random_ints) - 1, + n_applications=len(random_ints) - 1, ) assert len(ensemble) == len(random_ints) - 1 assigned_params = [m.params["h"] for m in ensemble.entities] @@ -128,14 +128,14 @@ def test_user_strategy(): ensemble = Ensemble("step", params, run_settings=rs, perm_strat=step_values) assert len(ensemble) == 2 - model_1_params = {"h": "5", "g": "7"} - assert ensemble.entities[0].params == model_1_params + application_1_params = {"h": "5", "g": "7"} + assert ensemble.entities[0].params == application_1_params - model_2_params = {"h": "6", "g": "8"} - assert ensemble.entities[1].params == model_2_params + application_2_params = {"h": "6", "g": "8"} + assert ensemble.entities[1].params == application_2_params -# ----- Model arguments ------------------------------------- +# ----- Application arguments ------------------------------------- def test_arg_params(): @@ -161,9 +161,9 @@ def test_arg_params(): assert ensemble.entities[1].run_settings.exe_args == exe_args_1 -def test_arg_and_model_params_step(): +def 
test_arg_and_application_params_step(): """Test parameterized exe arguments combined with - model parameters and step strategy + application parameters and step strategy """ params = {"H": [5, 6], "g_param": ["a", "b"], "h": [5, 6], "g": [7, 8]} @@ -185,16 +185,16 @@ exe_args_1 = rs_orig_args + ["-H", "6", "--g_param=b"] assert ensemble.entities[1].run_settings.exe_args == exe_args_1 - model_1_params = {"H": "5", "g_param": "a", "h": "5", "g": "7"} - assert ensemble.entities[0].params == model_1_params + application_1_params = {"H": "5", "g_param": "a", "h": "5", "g": "7"} + assert ensemble.entities[0].params == application_1_params - model_2_params = {"H": "6", "g_param": "b", "h": "6", "g": "8"} - assert ensemble.entities[1].params == model_2_params + application_2_params = {"H": "6", "g_param": "b", "h": "6", "g": "8"} + assert ensemble.entities[1].params == application_2_params -def test_arg_and_model_params_all_perms(): +def test_arg_and_application_params_all_perms(): """Test parameterized exe arguments combined with - model parameters and all_perm strategy + application parameters and all_perm strategy """ params = {"h": [5, 6], "g_param": ["a", "b"]} @@ -218,14 +218,14 @@ assert ensemble.entities[1].run_settings.exe_args == exe_args_1 assert ensemble.entities[3].run_settings.exe_args == exe_args_1 - model_0_params = {"g_param": "a", "h": "5"} - assert ensemble.entities[0].params == model_0_params - model_1_params = {"g_param": "b", "h": "5"} - assert ensemble.entities[1].params == model_1_params - model_2_params = {"g_param": "a", "h": "6"} - assert ensemble.entities[2].params == model_2_params - model_3_params = {"g_param": "b", "h": "6"} - assert ensemble.entities[3].params == model_3_params + application_0_params = {"g_param": "a", "h": "5"} + assert ensemble.entities[0].params == application_0_params + application_1_params = {"g_param": "b", "h": "5"} + assert ensemble.entities[1].params == application_1_params + application_2_params = {"g_param": "a", "h": "6"} + assert ensemble.entities[2].params == application_2_params + application_3_params = {"g_param": "b", "h": "6"} + assert ensemble.entities[3].params == application_3_params # ----- Error Handling -------------------------------------- @@ -258,41 +258,41 @@ def test_incorrect_param_type(): e = Ensemble("ensemble", params, run_settings=rs) -def test_add_model_type(): +def test_add_application_type(): params = {"h": 5} e = Ensemble("ensemble", params, run_settings=rs) with pytest.raises(TypeError): - # should be a Model not string - e.add_model("model") + # should be an Application, not a string + e.add_application("application") -def test_add_existing_model(): +def test_add_existing_application(): params_1 = {"h": 5} params_2 = {"z": 6} - model_1 = Model("identical_name", params_1, "", rs) - model_2 = Model("identical_name", params_2, "", rs) + application_1 = Application("identical_name", params_1, "", rs) + application_2 = Application("identical_name", params_2, "", rs) e = Ensemble("ensemble", params_1, run_settings=rs) - e.add_model(model_1) + e.add_application(application_1) with pytest.raises(EntityExistsError): - e.add_model(model_2) + e.add_application(application_2) # ----- Other -------------------------------------- -def test_models_property(): +def test_applications_property(): params = {"h": [5, 6, 7, 8]} e = Ensemble("test", params, run_settings=rs) - models = e.models - assert models == [model for model in e] + applications = 
e.applications + assert applications == [application for application in e] def test_key_prefixing(): params_1 = {"h": [5, 6, 7, 8]} params_2 = {"z": 6} e = Ensemble("test", params_1, run_settings=rs) - model = Model("model", params_2, "", rs) - e.add_model(model) + application = Application("application", params_2, "", rs) + e.add_application(application) assert e.query_key_prefixing() == False e.enable_key_prefixing() assert e.query_key_prefixing() == True diff --git a/tests/_legacy/test_experiment.py b/tests/_legacy/test_experiment.py index 4bae09e68a..3b4c856e09 100644 --- a/tests/_legacy/test_experiment.py +++ b/tests/_legacy/test_experiment.py @@ -36,7 +36,7 @@ from smartsim._core.config.config import Config from smartsim._core.utils import serialize from smartsim.database import Orchestrator -from smartsim.entity import Model +from smartsim.entity import Application from smartsim.error import SmartSimError from smartsim.error.errors import SSUnsupportedError from smartsim.settings import RunSettings @@ -50,23 +50,23 @@ pytestmark = pytest.mark.slow_tests -def test_model_prefix(test_dir: str) -> None: +def test_application_prefix(test_dir: str) -> None: exp_name = "test_prefix" exp = Experiment(exp_name) - model = exp.create_model( - "model", + application = exp.create_application( + "application", path=test_dir, run_settings=RunSettings("python"), enable_key_prefixing=True, ) - assert model._key_prefixing_enabled == True + assert application._key_prefixing_enabled == True -def test_model_no_name(): - exp = Experiment("test_model_no_name") +def test_application_no_name(): + exp = Experiment("test_application_no_name") with pytest.raises(AttributeError): - _ = exp.create_model(name=None, run_settings=RunSettings("python")) + _ = exp.create_application(name=None, run_settings=RunSettings("python")) def test_ensemble_no_name(): @@ -91,17 +91,17 @@ def test_stop_type() -> None: """Wrong argument type given to stop""" exp = Experiment("name") with pytest.raises(TypeError): - exp.stop("model") + exp.stop("application") -def test_finished_new_model() -> None: - # finished should fail as this model hasn't been +def test_finished_new_application() -> None: + # finished should fail as this application hasn't been # launched yet. 
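The `n_models` -> `n_applications` rename in the permutation-strategy tests above also changes the signature that user-supplied strategies must accept. A minimal sketch of a custom strategy under the renamed API, mirroring the `step_values` helper in this diff (the ensemble name and script are illustrative):

from smartsim.entity import Ensemble
from smartsim.settings import RunSettings

def pairwise(param_names, param_values, n_applications=0):
    # pair the i-th value of every parameter, like the built-in "step" strategy
    return [dict(zip(param_names, values)) for values in zip(*param_values)]

rs = RunSettings("python", "sleep.py")
params = {"h": [5, 6], "g": [7, 8]}
ensemble = Ensemble("paired", params, run_settings=rs, perm_strat=pairwise)

# per the assertions above, parameter values are stringified on each member
assert ensemble.entities[0].params == {"h": "5", "g": "7"}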
- model = Model("name", {}, "./", RunSettings("python")) + application = Application("name", {}, "./", RunSettings("python")) exp = Experiment("test") with pytest.raises(ValueError): - exp.finished(model) + exp.finished(application) def test_status_typeerror() -> None: @@ -111,9 +111,9 @@ def test_status_typeerror() -> None: def test_status_pre_launch() -> None: - model = Model("name", {}, "./", RunSettings("python")) + application = Application("name", {}, "./", RunSettings("python")) exp = Experiment("test") - assert exp.get_status(model)[0] == SmartSimStatus.STATUS_NEVER_STARTED + assert exp.get_status(application)[0] == SmartSimStatus.STATUS_NEVER_STARTED def test_bad_ensemble_init_no_rs(test_dir: str) -> None: @@ -140,7 +140,9 @@ def test_bad_ensemble_init_no_rs_bs(test_dir: str) -> None: def test_stop_entity(test_dir: str) -> None: exp_name = "test_stop_entity" exp = Experiment(exp_name, exp_path=test_dir) - m = exp.create_model("model", path=test_dir, run_settings=RunSettings("sleep", "5")) + m = exp.create_application( + "application", path=test_dir, run_settings=RunSettings("sleep", "5") + ) exp.start(m, block=False) assert exp.finished(m) == False exp.stop(m) @@ -151,19 +153,19 @@ def test_poll(test_dir: str) -> None: # Ensure that a SmartSimError is not raised exp_name = "test_exp_poll" exp = Experiment(exp_name, exp_path=test_dir) - model = exp.create_model( - "model", path=test_dir, run_settings=RunSettings("sleep", "5") + application = exp.create_application( + "application", path=test_dir, run_settings=RunSettings("sleep", "5") ) - exp.start(model, block=False) + exp.start(application, block=False) exp.poll(interval=1) - exp.stop(model) + exp.stop(application) def test_summary(test_dir: str) -> None: exp_name = "test_exp_summary" exp = Experiment(exp_name, exp_path=test_dir) - m = exp.create_model( - "model", path=test_dir, run_settings=RunSettings("echo", "Hello") + m = exp.create_application( + "application", path=test_dir, run_settings=RunSettings("echo", "Hello") ) exp.start(m) summary_str = exp.summary(style="plain") @@ -267,20 +269,20 @@ def test_default_orch_path( assert db.path == str(orch_path) -def test_default_model_path( +def test_default_application_path( monkeypatch: pytest.MonkeyPatch, test_dir: str, wlmutils: "conftest.WLMUtils" ) -> None: - """Ensure the default file structure is created for Model""" + """Ensure the default file structure is created for Application""" - exp_name = "default-model-path" + exp_name = "default-application-path" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) monkeypatch.setattr(exp._control, "start", lambda *a, **kw: ...) 
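A quick sketch of the pre-launch behavior these renamed tests pin down: a never-started `Application` answers status queries with `STATUS_NEVER_STARTED`, while `finished()` raises. Assembled from the calls shown in this diff (names are illustrative):

import pytest

from smartsim import Experiment
from smartsim.entity import Application
from smartsim.settings import RunSettings
from smartsim.status import SmartSimStatus

exp = Experiment("status-demo")
application = Application("name", {}, "./", RunSettings("python"))

# a status query is allowed before launch...
assert exp.get_status(application)[0] == SmartSimStatus.STATUS_NEVER_STARTED

# ...but finished() refuses to answer for an unlaunched entity
with pytest.raises(ValueError):
    exp.finished(application)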
settings = exp.create_run_settings(exe="echo", exe_args="hello") - model = exp.create_model(name="model_name", run_settings=settings) - exp.start(model) - model_path = pathlib.Path(test_dir) / model.name - assert model_path.exists() - assert model.path == str(model_path) + application = exp.create_application(name="application_name", run_settings=settings) + exp.start(application) + application_path = pathlib.Path(test_dir) / application.name + assert application_path.exists() + assert application.path == str(application_path) def test_default_ensemble_path( @@ -299,7 +301,7 @@ def test_default_ensemble_path( ensemble_path = pathlib.Path(test_dir) / ensemble.name assert ensemble_path.exists() assert ensemble.path == str(ensemble_path) - for member in ensemble.models: + for member in ensemble.applications: member_path = ensemble_path / member.name assert member_path.exists() assert member.path == str(ensemble_path / member.name) @@ -325,23 +327,23 @@ def test_user_orch_path( shutil.rmtree(orch_path) -def test_default_model_with_path( +def test_default_application_with_path( monkeypatch: pytest.MonkeyPatch, test_dir: str, wlmutils: "conftest.WLMUtils" ) -> None: - """Ensure a relative path is used to created Model folder""" + """Ensure a relative path is used to create the Application folder""" exp_name = "default-ensemble-path" exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher(), exp_path=test_dir) monkeypatch.setattr(exp._control, "start", lambda *a, **kw: ...) settings = exp.create_run_settings(exe="echo", exe_args="hello") - model = exp.create_model( - name="model_name", run_settings=settings, path="./testing_folder1234" + application = exp.create_application( + name="application_name", run_settings=settings, path="./testing_folder1234" ) - exp.start(model) - model_path = pathlib.Path(osp.abspath("./testing_folder1234")) - assert model_path.exists() - assert model.path == str(model_path) - shutil.rmtree(model_path) + exp.start(application) + application_path = pathlib.Path(osp.abspath("./testing_folder1234")) + assert application_path.exists() + assert application.path == str(application_path) + shutil.rmtree(application_path) def test_default_ensemble_with_path( @@ -363,7 +365,7 @@ def test_default_ensemble_with_path( ensemble_path = pathlib.Path(osp.abspath("./testing_folder1234")) assert ensemble_path.exists() assert ensemble.path == str(ensemble_path) - for member in ensemble.models: + for member in ensemble.applications: member_path = ensemble_path / member.name assert member_path.exists() assert member.path == str(member_path) diff --git a/tests/_legacy/test_generator.py b/tests/_legacy/test_generator.py index fd9a5b8363..b17db06fdf 100644 --- a/tests/_legacy/test_generator.py +++ b/tests/_legacy/test_generator.py @@ -119,9 +119,9 @@ def test_ensemble_overwrite_error(fileutils, test_dir): def test_full_exp(fileutils, test_dir, wlmutils): exp = Experiment("gen-test", test_dir, launcher="local") - model = exp.create_model("model", run_settings=rs) + application = exp.create_application("application", run_settings=rs) script = fileutils.get_test_conf_path("sleep.py") - model.attach_generator_files(to_copy=script) + application.attach_generator_files(to_copy=script) orc = Orchestrator(wlmutils.get_test_port()) params = {"THERMO": [10, 20, 30], "STEPS": [10, 20, 30]} @@ -129,7 +129,7 @@ def test_full_exp(fileutils, test_dir, wlmutils): config = get_gen_file(fileutils, "in.atm") ensemble.attach_generator_files(to_configure=config) - exp.generate(orc, ensemble, model) + 
exp.generate(orc, ensemble, application) # test for ensemble assert osp.isdir(osp.join(test_dir, "test_ens/")) @@ -139,13 +139,13 @@ # test for orc dir assert osp.isdir(osp.join(test_dir, orc.name)) - # test for model file - assert osp.isdir(osp.join(test_dir, "model")) - assert osp.isfile(osp.join(test_dir, "model/sleep.py")) + # test for application file + assert osp.isdir(osp.join(test_dir, "application")) + assert osp.isfile(osp.join(test_dir, "application/sleep.py")) def test_dir_files(fileutils, test_dir): - """test the generate of models with files that + """test the generation of applications with files that are directories with subdirectories and files """ @@ -160,10 +160,10 @@ assert osp.isdir(osp.join(test_dir, "dir_test/")) for i in range(9): - model_path = osp.join(test_dir, "dir_test/dir_test_" + str(i)) - assert osp.isdir(model_path) - assert osp.isdir(osp.join(model_path, "test_dir_1")) - assert osp.isfile(osp.join(model_path, "test.in")) + application_path = osp.join(test_dir, "dir_test/dir_test_" + str(i)) + assert osp.isdir(application_path) + assert osp.isdir(osp.join(application_path, "test_dir_1")) + assert osp.isfile(osp.join(application_path, "test.in")) def test_print_files(fileutils, test_dir, capsys): @@ -189,10 +189,10 @@ expected_out = ( tabulate( [ - [model.name, "No file attached to this model."] - for model in ensemble.models + [application.name, "No file attached to this application."] + for application in ensemble.applications ], - headers=["Model name", "Files"], + headers=["Application name", "Files"], tablefmt="grid", ) + "\n" @@ -206,10 +206,10 @@ expected_out = ( tabulate( [ - [model.name, "No file attached to this entity."] - for model in ensemble.models + [application.name, "No file attached to this entity."] + for application in ensemble.applications ], - headers=["Model name", "Files"], + headers=["Application name", "Files"], tablefmt="grid", ) + "\n" @@ -230,12 +230,14 @@ tablefmt="grid", ) - assert all(str(model.files) == expected_out for model in ensemble.models) + assert all( + str(application.files) == expected_out for application in ensemble.applications + ) expected_out_multi = ( tabulate( - [[model.name, expected_out] for model in ensemble.models], - headers=["Model name", "Files"], + [[application.name, expected_out] for application in ensemble.applications], + headers=["Application name", "Files"], tablefmt="grid", ) + "\n" @@ -250,17 +252,17 @@ def test_multiple_tags(fileutils, test_dir): """Test substitution of multiple tagged parameters on same line""" exp = Experiment("test-multiple-tags", test_dir) - model_params = {"port": 6379, "password": "unbreakable_password"} - model_settings = RunSettings("bash", "multi_tags_template.sh") - parameterized_model = exp.create_model( - "multi-tags", run_settings=model_settings, params=model_params + application_params = {"port": 6379, "password": "unbreakable_password"} + application_settings = RunSettings("bash", "multi_tags_template.sh") + parameterized_application = exp.create_application( + "multi-tags", run_settings=application_settings, params=application_params ) config = get_gen_file(fileutils, "multi_tags_template.sh") - parameterized_model.attach_generator_files(to_configure=[config]) - exp.generate(parameterized_model, overwrite=True) - 
exp.start(parameterized_model, block=True) + parameterized_application.attach_generator_files(to_configure=[config]) + exp.generate(parameterized_application, overwrite=True) + exp.start(parameterized_application, block=True) - with open(osp.join(parameterized_model.path, "multi-tags.out")) as f: + with open(osp.join(parameterized_application.path, "multi-tags.out")) as f: log_content = f.read() assert "My two parameters are 6379 and unbreakable_password, OK?" in log_content @@ -303,7 +305,7 @@ def not_header(line): def test_config_dir(fileutils, test_dir): - """Test the generation and configuration of models with + """Test the generation and configuration of applications with tagged files that are directories with subdirectories and files """ exp = Experiment("config-dir", launcher="local") diff --git a/tests/_legacy/test_interrupt.py b/tests/_legacy/test_interrupt.py index c38ae02251..700f2dd4a4 100644 --- a/tests/_legacy/test_interrupt.py +++ b/tests/_legacy/test_interrupt.py @@ -46,15 +46,15 @@ def keyboard_interrupt(pid): def test_interrupt_blocked_jobs(test_dir): """ - Launches and polls a model and an ensemble with two more models. + Launches and polls an application and an ensemble with two more applications. Once polling starts, the SIGINT signal is sent to the main thread, and consequently, all running jobs are killed. """ exp_name = "test_interrupt_blocked_jobs" exp = Experiment(exp_name, exp_path=test_dir) - model = exp.create_model( - "interrupt_blocked_model", + application = exp.create_application( + "interrupt_blocked_application", path=test_dir, run_settings=RunSettings("sleep", "100"), ) @@ -71,7 +71,7 @@ def test_interrupt_blocked_jobs(test_dir): keyboard_interrupt_thread.start() with pytest.raises(KeyboardInterrupt): - exp.start(model, ensemble, block=True, kill_on_interrupt=True) + exp.start(application, ensemble, block=True, kill_on_interrupt=True) time.sleep(2) # allow time for jobs to be stopped active_jobs = exp._control._jobs.jobs @@ -83,8 +83,8 @@ def test_interrupt_multi_experiment_unblocked_jobs(test_dir): """ - Starts two Experiments, each having one model - and an ensemble with two more models. Since + Starts two Experiments, each having one application + and an ensemble with two more applications. Since blocking is False, the main thread sleeps until the SIGINT signal is sent, resulting in both Experiment's running jobs to be killed. 
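The interrupt tests in this file hinge on `kill_on_interrupt=True`, which turns a SIGINT received during `start()` into a teardown of every launched job before the `KeyboardInterrupt` surfaces. A hedged usage sketch (the sleep command stands in for a real workload):

from smartsim import Experiment
from smartsim.settings import RunSettings

exp = Experiment("interrupt-demo")
application = exp.create_application(
    "long_job", run_settings=RunSettings("sleep", "100")
)

try:
    # Ctrl-C while blocking here stops all running jobs first
    exp.start(application, block=True, kill_on_interrupt=True)
except KeyboardInterrupt:
    pass  # running jobs were already stopped by the launcher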
@@ -94,8 +94,8 @@ def test_interrupt_multi_experiment_unblocked_jobs(test_dir): experiments = [Experiment(exp_names[i], exp_path=test_dir) for i in range(2)] jobs_per_experiment = [0] * len(experiments) for i, experiment in enumerate(experiments): - model = experiment.create_model( - "interrupt_model_" + str(i), + application = experiment.create_application( + "interrupt_application_" + str(i), path=test_dir, run_settings=RunSettings("sleep", "100"), ) @@ -114,7 +114,7 @@ def test_interrupt_multi_experiment_unblocked_jobs(test_dir): with pytest.raises(KeyboardInterrupt): for experiment in experiments: - experiment.start(model, ensemble, block=False, kill_on_interrupt=True) + experiment.start(application, ensemble, block=False, kill_on_interrupt=True) keyboard_interrupt_thread.join() # since jobs aren't blocked, wait for SIGINT time.sleep(2) # allow time for jobs to be stopped diff --git a/tests/_legacy/test_launch_errors.py b/tests/_legacy/test_launch_errors.py index 21b3184e5e..e67115ce37 100644 --- a/tests/_legacy/test_launch_errors.py +++ b/tests/_legacy/test_launch_errors.py @@ -41,7 +41,7 @@ def test_unsupported_run_settings(test_dir): exp_name = "test-unsupported-run-settings" exp = Experiment(exp_name, launcher="slurm", exp_path=test_dir) bad_settings = JsrunSettings("echo", "hello") - model = exp.create_model("bad_rs", bad_settings) + model = exp.create_application("bad_rs", bad_settings) with pytest.raises(SSUnsupportedError): exp.start(model) @@ -54,7 +54,7 @@ def test_model_failure(fileutils, test_dir): script = fileutils.get_test_conf_path("bad.py") settings = RunSettings("python", f"{script} --time=3") - M1 = exp.create_model("m1", path=test_dir, run_settings=settings) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings) exp.start(M1, block=True) statuses = exp.get_status(M1) diff --git a/tests/_legacy/test_local_launch.py b/tests/_legacy/test_local_launch.py index 85687e0142..6f72c8c082 100644 --- a/tests/_legacy/test_local_launch.py +++ b/tests/_legacy/test_local_launch.py @@ -38,15 +38,15 @@ """ -def test_models(fileutils, test_dir): - exp_name = "test-models-local-launch" +def test_applications(fileutils, test_dir): + exp_name = "test-applications-local-launch" exp = Experiment(exp_name, launcher="local", exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") settings = exp.create_run_settings("python", f"{script} --time=3") - M1 = exp.create_model("m1", path=test_dir, run_settings=settings) - M2 = exp.create_model("m2", path=test_dir, run_settings=settings) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings) + M2 = exp.create_application("m2", path=test_dir, run_settings=settings) exp.start(M1, M2, block=True, summary=True) statuses = exp.get_status(M1, M2) diff --git a/tests/_legacy/test_local_multi_run.py b/tests/_legacy/test_local_multi_run.py index a2c1d70ee9..d22bc6d352 100644 --- a/tests/_legacy/test_local_multi_run.py +++ b/tests/_legacy/test_local_multi_run.py @@ -38,21 +38,21 @@ """ -def test_models(fileutils, test_dir): - exp_name = "test-models-local-launch" +def test_applications(fileutils, test_dir): + exp_name = "test-applications-local-launch" exp = Experiment(exp_name, launcher="local", exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") settings = exp.create_run_settings("python", f"{script} --time=5") - M1 = exp.create_model("m1", path=test_dir, run_settings=settings) - M2 = exp.create_model("m2", path=test_dir, run_settings=settings) + M1 = exp.create_application("m1", 
path=test_dir, run_settings=settings) + M2 = exp.create_application("m2", path=test_dir, run_settings=settings) exp.start(M1, block=False) statuses = exp.get_status(M1) assert all([stat != SmartSimStatus.STATUS_FAILED for stat in statuses]) - # start another while first model is running + # start another while first application is running exp.start(M2, block=True) statuses = exp.get_status(M1, M2) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) diff --git a/tests/_legacy/test_local_restart.py b/tests/_legacy/test_local_restart.py index 2556c55977..e62c17c2c7 100644 --- a/tests/_legacy/test_local_restart.py +++ b/tests/_legacy/test_local_restart.py @@ -34,24 +34,24 @@ """ -Test restarting ensembles and models. +Test restarting ensembles and applications. """ def test_restart(fileutils, test_dir): - exp_name = "test-models-local-restart" + exp_name = "test-applications-local-restart" exp = Experiment(exp_name, launcher="local", exp_path=test_dir) script = fileutils.get_test_conf_path("sleep.py") settings = exp.create_run_settings("python", f"{script} --time=3") - M1 = exp.create_model("m1", path=test_dir, run_settings=settings) + M1 = exp.create_application("m1", path=test_dir, run_settings=settings) exp.start(M1, block=True) statuses = exp.get_status(M1) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) - # restart the model + # restart the application exp.start(M1, block=True) statuses = exp.get_status(M1) assert all([stat == SmartSimStatus.STATUS_COMPLETED for stat in statuses]) diff --git a/tests/_legacy/test_logs.py b/tests/_legacy/test_logs.py index a187baa2a3..42c3335760 100644 --- a/tests/_legacy/test_logs.py +++ b/tests/_legacy/test_logs.py @@ -204,7 +204,7 @@ def thrower(_self): sleep_rs.set_nodes(1) sleep_rs.set_tasks(1) - sleep = exp.create_model("SleepModel", sleep_rs) + sleep = exp.create_application("SleepModel", sleep_rs) exp.generate(sleep) exp.start(sleep, block=True) except Exception as ex: diff --git a/tests/_legacy/test_manifest.py b/tests/_legacy/test_manifest.py index c26868ebb8..fccc1a7b25 100644 --- a/tests/_legacy/test_manifest.py +++ b/tests/_legacy/test_manifest.py @@ -54,8 +54,8 @@ rs = RunSettings("python", "sleep.py") exp = Experiment("util-test", launcher="local") -model = exp.create_model("model_1", run_settings=rs) -model_2 = exp.create_model("model_1", run_settings=rs) +application = exp.create_application("application_1", run_settings=rs) +application_2 = exp.create_application("application_1", run_settings=rs) ensemble = exp.create_ensemble("ensemble", run_settings=rs, replicas=1) orc = Orchestrator() @@ -67,9 +67,9 @@ def test_separate(): - manifest = Manifest(model, ensemble, orc) - assert manifest.models[0] == model - assert len(manifest.models) == 1 + manifest = Manifest(application, ensemble, orc) + assert manifest.applications[0] == application + assert len(manifest.applications) == 1 assert manifest.ensembles[0] == ensemble assert len(manifest.ensembles) == 1 assert manifest.dbs[0] == orc @@ -82,7 +82,7 @@ def test_separate_type(): def test_name_collision(): with pytest.raises(SmartSimError): - _ = Manifest(model, model_2) + _ = Manifest(application, application_2) def test_catch_empty_ensemble(): @@ -109,9 +109,13 @@ class Person: "patch, has_db_objects", [ pytest.param((), False, id="No DB Objects"), - pytest.param((model, "_db_models", [db_model]), True, id="Model w/ DB Model"), pytest.param( - (model, "_db_scripts", [db_script]), True, id="Model w/ DB Script" + (application, "_db_models", 
[db_model]), True, id="Application w/ DB Model" + ), + pytest.param( + (application, "_db_scripts", [db_script]), + True, + id="Application w/ DB Script", ), pytest.param( (ensemble, "_db_models", [db_model]), True, id="Ensemble w/ DB Model" @@ -134,34 +138,34 @@ class Person: def test_manifest_detects_db_objects(monkeypatch, patch, has_db_objects): if patch: monkeypatch.setattr(*patch) - assert Manifest(model, ensemble).has_db_objects == has_db_objects + assert Manifest(application, ensemble).has_db_objects == has_db_objects def test_launched_manifest_transform_data(): - models = [(model, 1), (model_2, 2)] + applications = [(application, 1), (application_2, 2)] ensembles = [(ensemble, [(m, i) for i, m in enumerate(ensemble.entities)])] dbs = [(orc, [(n, i) for i, n in enumerate(orc.entities)])] launched = LaunchedManifest( metadata=LaunchedManifestMetadata("name", "path", "launcher", "run_id"), - models=models, + applications=applications, ensembles=ensembles, databases=dbs, ) transformed = launched.map(lambda x: str(x)) - assert transformed.models == tuple((m, str(i)) for m, i in models) + assert transformed.applications == tuple((m, str(i)) for m, i in applications) assert transformed.ensembles[0][1] == tuple((m, str(i)) for m, i in ensembles[0][1]) assert transformed.databases[0][1] == tuple((n, str(i)) for n, i in dbs[0][1]) def test_launched_manifest_builder_correctly_maps_data(): lmb = LaunchedManifestBuilder("name", "path", "launcher name", str(uuid4())) - lmb.add_model(model, 1) - lmb.add_model(model_2, 1) + lmb.add_application(application, 1) + lmb.add_application(application_2, 1) lmb.add_ensemble(ensemble, [i for i in range(len(ensemble.entities))]) lmb.add_database(orc, [i for i in range(len(orc.entities))]) manifest = lmb.finalize() - assert len(manifest.models) == 2 + assert len(manifest.applications) == 2 assert len(manifest.ensembles) == 1 assert len(manifest.databases) == 1 diff --git a/tests/_legacy/test_model.py b/tests/_legacy/test_model.py index 64a68b2992..74888a52b7 100644 --- a/tests/_legacy/test_model.py +++ b/tests/_legacy/test_model.py @@ -31,7 +31,7 @@ from smartsim import Experiment from smartsim._core.control.manifest import LaunchedManifestBuilder from smartsim._core.launcher.step import SbatchStep, SrunStep -from smartsim.entity import Ensemble, Model +from smartsim.entity import Application, Ensemble from smartsim.error import EntityExistsError, SSUnsupportedError from smartsim.settings import RunSettings, SbatchSettings, SrunSettings from smartsim.settings.mpiSettings import _BaseMPISettings @@ -44,7 +44,7 @@ def test_register_incoming_entity_preexists(): exp = Experiment("experiment", launcher="local") rs = RunSettings("python", exe_args="sleep.py") ensemble = exp.create_ensemble(name="ensemble", replicas=1, run_settings=rs) - m = exp.create_model("model", run_settings=rs) + m = exp.create_application("application", run_settings=rs) m.register_incoming_entity(ensemble["ensemble_0"]) assert len(m.incoming_entities) == 1 with pytest.raises(EntityExistsError): @@ -54,36 +54,38 @@ def test_register_incoming_entity_preexists(): def test_disable_key_prefixing(): exp = Experiment("experiment", launcher="local") rs = RunSettings("python", exe_args="sleep.py") - m = exp.create_model("model", run_settings=rs) + m = exp.create_application("application", run_settings=rs) m.disable_key_prefixing() assert m.query_key_prefixing() == False -def test_catch_colo_mpmd_model(): +def test_catch_colo_mpmd_application(): exp = Experiment("experiment", launcher="local") rs = 
_BaseMPISettings("python", exe_args="sleep.py", fail_if_missing_exec=False) - # make it an mpmd model + # make it an mpmd application rs_2 = _BaseMPISettings("python", exe_args="sleep.py", fail_if_missing_exec=False) rs.make_mpmd(rs_2) - model = exp.create_model("bad_colo_model", rs) + application = exp.create_application("bad_colo_application", rs) # make it colocated which should raise and error with pytest.raises(SSUnsupportedError): - model.colocate_db() + application.colocate_db() -def test_attach_batch_settings_to_model(): +def test_attach_batch_settings_to_application(): exp = Experiment("experiment", launcher="slurm") bs = SbatchSettings() rs = SrunSettings("python", exe_args="sleep.py") - model_wo_bs = exp.create_model("test_model", run_settings=rs) - assert model_wo_bs.batch_settings is None + application_wo_bs = exp.create_application("test_application", run_settings=rs) + assert application_wo_bs.batch_settings is None - model_w_bs = exp.create_model("test_model_2", run_settings=rs, batch_settings=bs) - assert isinstance(model_w_bs.batch_settings, SbatchSettings) + application_w_bs = exp.create_application( + "test_application_2", run_settings=rs, batch_settings=bs + ) + assert isinstance(application_w_bs.batch_settings, SbatchSettings) @pytest.fixture @@ -116,53 +118,57 @@ def launch_step_nop(self, step, entity): return _monkeypatch_exp_controller -def test_model_with_batch_settings_makes_batch_step( +def test_application_with_batch_settings_makes_batch_step( monkeypatch_exp_controller, test_dir ): exp = Experiment("experiment", launcher="slurm", exp_path=test_dir) bs = SbatchSettings() rs = SrunSettings("python", exe_args="sleep.py") - model = exp.create_model("test_model", run_settings=rs, batch_settings=bs) + application = exp.create_application( + "test_application", run_settings=rs, batch_settings=bs + ) entity_steps = monkeypatch_exp_controller(exp) - exp.start(model) + exp.start(application) assert len(entity_steps) == 1 step, entity = entity_steps[0] - assert isinstance(entity, Model) + assert isinstance(entity, Application) assert isinstance(step, SbatchStep) -def test_model_without_batch_settings_makes_run_step( +def test_application_without_batch_settings_makes_run_step( monkeypatch, monkeypatch_exp_controller, test_dir ): exp = Experiment("experiment", launcher="slurm", exp_path=test_dir) rs = SrunSettings("python", exe_args="sleep.py") - model = exp.create_model("test_model", run_settings=rs) + application = exp.create_application("test_application", run_settings=rs) # pretend we are in an allocation to not raise alloc err monkeypatch.setenv("SLURM_JOB_ID", "12345") entity_steps = monkeypatch_exp_controller(exp) - exp.start(model) + exp.start(application) assert len(entity_steps) == 1 step, entity = entity_steps[0] - assert isinstance(entity, Model) + assert isinstance(entity, Application) assert isinstance(step, SrunStep) -def test_models_batch_settings_are_ignored_in_ensemble( +def test_applications_batch_settings_are_ignored_in_ensemble( monkeypatch_exp_controller, test_dir ): exp = Experiment("experiment", launcher="slurm", exp_path=test_dir) bs_1 = SbatchSettings(nodes=5) rs = SrunSettings("python", exe_args="sleep.py") - model = exp.create_model("test_model", run_settings=rs, batch_settings=bs_1) + application = exp.create_application( + "test_application", run_settings=rs, batch_settings=bs_1 + ) bs_2 = SbatchSettings(nodes=10) ens = exp.create_ensemble("test_ensemble", batch_settings=bs_2) - ens.add_model(model) + ens.add_application(application) 
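These batch-step tests distinguish three launch shapes: an application with its own batch settings becomes an `SbatchStep`, one without becomes a bare `SrunStep`, and a member added to a batched ensemble has its own batch settings ignored in favor of the ensemble's. A sketch of that setup, assuming a Slurm launcher (names are illustrative):

from smartsim import Experiment
from smartsim.settings import SbatchSettings, SrunSettings

exp = Experiment("batch-demo", launcher="slurm")
rs = SrunSettings("python", exe_args="sleep.py")

# launched as its own batch job (SbatchStep)
batched = exp.create_application(
    "batched", run_settings=rs, batch_settings=SbatchSettings(nodes=5)
)

# launched as a plain run step (SrunStep) inside an existing allocation
bare = exp.create_application("bare", run_settings=rs)

# the member's nodes=5 is ignored; the ensemble's sbatch (nodes=10)
# wraps the member's srun step
ens = exp.create_ensemble("ens", batch_settings=SbatchSettings(nodes=10))
ens.add_application(batched)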
entity_steps = monkeypatch_exp_controller(exp) exp.start(ens) @@ -174,5 +180,7 @@ def test_models_batch_settings_are_ignored_in_ensemble( assert step.batch_settings.batch_args["nodes"] == "10" assert len(step.step_cmds) == 1 step_cmd = step.step_cmds[0] - assert any("srun" in tok for tok in step_cmd) # call the model using run settings + assert any( + "srun" in tok for tok in step_cmd + ) # call the application using run settings assert not any("sbatch" in tok for tok in step_cmd) # no sbatch in sbatch diff --git a/tests/_legacy/test_modelwriter.py b/tests/_legacy/test_modelwriter.py index a857d7c5f0..9aab51e619 100644 --- a/tests/_legacy/test_modelwriter.py +++ b/tests/_legacy/test_modelwriter.py @@ -31,7 +31,7 @@ import pytest -from smartsim._core.generation.modelwriter import ModelWriter +from smartsim._core.generation.modelwriter import ApplicationWriter from smartsim.error.errors import ParameterWriterError, SmartSimError from smartsim.settings import RunSettings @@ -62,9 +62,9 @@ def test_write_easy_configs(fileutils, test_dir): dir_util.copy_tree(conf_path, test_dir) assert path.isdir(test_dir) - # init modelwriter - writer = ModelWriter() - writer.configure_tagged_model_files(glob(test_dir + "/*"), param_dict) + # init ApplicationWriter + writer = ApplicationWriter() + writer.configure_tagged_application_files(glob(test_dir + "/*"), param_dict) written_files = sorted(glob(test_dir + "/*")) correct_files = sorted(glob(correct_path + "*")) @@ -90,11 +90,11 @@ def test_write_med_configs(fileutils, test_dir): dir_util.copy_tree(conf_path, test_dir) assert path.isdir(test_dir) - # init modelwriter - writer = ModelWriter() + # init ApplicationWriter + writer = ApplicationWriter() writer.set_tag(writer.tag, "(;.+;)") assert writer.regex == "(;.+;)" - writer.configure_tagged_model_files(glob(test_dir + "/*"), param_dict) + writer.configure_tagged_application_files(glob(test_dir + "/*"), param_dict) written_files = sorted(glob(test_dir + "/*")) correct_files = sorted(glob(correct_path + "*")) @@ -122,10 +122,10 @@ def test_write_new_tag_configs(fileutils, test_dir): dir_util.copy_tree(conf_path, test_dir) assert path.isdir(test_dir) - # init modelwriter - writer = ModelWriter() + # init ApplicationWriter + writer = ApplicationWriter() writer.set_tag("@") - writer.configure_tagged_model_files(glob(test_dir + "/*"), param_dict) + writer.configure_tagged_application_files(glob(test_dir + "/*"), param_dict) written_files = sorted(glob(test_dir + "/*")) correct_files = sorted(glob(correct_path + "*")) @@ -135,13 +135,13 @@ def test_write_new_tag_configs(fileutils, test_dir): def test_mw_error_1(): - writer = ModelWriter() + writer = ApplicationWriter() with pytest.raises(ParameterWriterError): - writer.configure_tagged_model_files("[not/a/path]", {"5": 10}) + writer.configure_tagged_application_files("[not/a/path]", {"5": 10}) def test_mw_error_2(): - writer = ModelWriter() + writer = ApplicationWriter() with pytest.raises(ParameterWriterError): writer._write_changes("[not/a/path]") @@ -157,9 +157,9 @@ def test_write_mw_error_3(fileutils, test_dir): dir_util.copy_tree(conf_path, test_dir) assert path.isdir(test_dir) - # init modelwriter - writer = ModelWriter() + # init ApplicationWriter + writer = ApplicationWriter() with pytest.raises(SmartSimError): - writer.configure_tagged_model_files( + writer.configure_tagged_application_files( glob(test_dir + "/*"), param_dict, make_missing_tags_fatal=True ) diff --git a/tests/_legacy/test_multidb.py b/tests/_legacy/test_multidb.py index 
81f21856af..0cc89fed6e 100644 --- a/tests/_legacy/test_multidb.py +++ b/tests/_legacy/test_multidb.py @@ -152,7 +152,7 @@ def test_db_identifier_colo_then_standard( colo_settings.set_tasks_per_node(1) # Create the SmartSim Model - smartsim_model = exp.create_model("colocated_model", colo_settings) + smartsim_model = exp.create_application("colocated_model", colo_settings) db_args = { "port": test_port, @@ -324,7 +324,7 @@ def test_multidb_colo_once(fileutils, test_dir, wlmutils, coloutils, db_type): run_settings.set_tasks_per_node(1) # Create the SmartSim Model - smartsim_model = exp.create_model("smartsim_model", run_settings) + smartsim_model = exp.create_application("smartsim_model", run_settings) db_args = { "port": test_port + 1, diff --git a/tests/_legacy/test_output_files.py b/tests/_legacy/test_output_files.py index f3830051c8..65f080804a 100644 --- a/tests/_legacy/test_output_files.py +++ b/tests/_legacy/test_output_files.py @@ -35,7 +35,7 @@ from smartsim._core.launcher.step import Step from smartsim.database.orchestrator import Orchestrator from smartsim.entity.ensemble import Ensemble -from smartsim.entity.model import Model +from smartsim.entity.model import Application from smartsim.settings.base import RunSettings from smartsim.settings.slurmSettings import SbatchSettings, SrunSettings @@ -51,46 +51,68 @@ ens = Ensemble("ens", params={}, run_settings=rs, batch_settings=bs, replicas=3) orc = Orchestrator(db_nodes=3, batch=True, launcher="slurm", run_command="srun") -model = Model("test_model", params={}, path="", run_settings=rs) -batch_model = Model( - "batch_test_model", params={}, path="", run_settings=batch_rs, batch_settings=bs +application = Application("test_application", params={}, path="", run_settings=rs) +batch_application = Application( + "batch_test_application", + params={}, + path="", + run_settings=batch_rs, + batch_settings=bs, ) -anon_batch_model = _AnonymousBatchJob(batch_model) +anon_batch_application = _AnonymousBatchJob(batch_application) -def test_mutated_model_output(test_dir): - exp_name = "test-mutated-model-output" +def test_mutated_application_output(test_dir): + exp_name = "test-mutated-application-output" exp = Experiment(exp_name, launcher="local", exp_path=test_dir) - test_model = exp.create_model("test_model", path=test_dir, run_settings=rs) - exp.generate(test_model) - exp.start(test_model, block=True) - - assert pathlib.Path(test_model.path).exists() - assert pathlib.Path(test_model.path, f"{test_model.name}.out").is_symlink() - assert pathlib.Path(test_model.path, f"{test_model.name}.err").is_symlink() - - with open(pathlib.Path(test_model.path, f"{test_model.name}.out"), "r") as file: + test_application = exp.create_application( + "test_application", path=test_dir, run_settings=rs + ) + exp.generate(test_application) + exp.start(test_application, block=True) + + assert pathlib.Path(test_application.path).exists() + assert pathlib.Path( + test_application.path, f"{test_application.name}.out" + ).is_symlink() + assert pathlib.Path( + test_application.path, f"{test_application.name}.err" + ).is_symlink() + + with open( + pathlib.Path(test_application.path, f"{test_application.name}.out"), "r" + ) as file: log_contents = file.read() assert "spam eggs" in log_contents - first_link = os.readlink(pathlib.Path(test_model.path, f"{test_model.name}.out")) - - test_model.run_settings.exe_args = ["hello", "world"] - exp.generate(test_model, overwrite=True) - exp.start(test_model, block=True) - - assert pathlib.Path(test_model.path).exists() - assert 
pathlib.Path(test_model.path, f"{test_model.name}.out").is_symlink() - assert pathlib.Path(test_model.path, f"{test_model.name}.err").is_symlink() - - with open(pathlib.Path(test_model.path, f"{test_model.name}.out"), "r") as file: + first_link = os.readlink( + pathlib.Path(test_application.path, f"{test_application.name}.out") + ) + + test_application.run_settings.exe_args = ["hello", "world"] + exp.generate(test_application, overwrite=True) + exp.start(test_application, block=True) + + assert pathlib.Path(test_application.path).exists() + assert pathlib.Path( + test_application.path, f"{test_application.name}.out" + ).is_symlink() + assert pathlib.Path( + test_application.path, f"{test_application.name}.err" + ).is_symlink() + + with open( + pathlib.Path(test_application.path, f"{test_application.name}.out"), "r" + ) as file: log_contents = file.read() assert "hello world" in log_contents - second_link = os.readlink(pathlib.Path(test_model.path, f"{test_model.name}.out")) + second_link = os.readlink( + pathlib.Path(test_application.path, f"{test_application.name}.out") + ) with open(first_link, "r") as file: first_historical_log = file.read() @@ -106,10 +128,10 @@ def test_mutated_model_output(test_dir): def test_get_output_files_with_create_job_step(test_dir): """Testing output files through _create_job_step""" exp_dir = pathlib.Path(test_dir) - status_dir = exp_dir / CONFIG.telemetry_subdir / model.type - step = controller._create_job_step(model, status_dir) - expected_out_path = status_dir / model.name / (model.name + ".out") - expected_err_path = status_dir / model.name / (model.name + ".err") + status_dir = exp_dir / CONFIG.telemetry_subdir / application.type + step = controller._create_job_step(application, status_dir) + expected_out_path = status_dir / application.name / (application.name + ".out") + expected_err_path = status_dir / application.name / (application.name + ".err") assert step.get_output_files() == (str(expected_out_path), str(expected_err_path)) @@ -137,20 +159,20 @@ def test_get_output_files_with_create_batch_job_step(entity, test_dir): ) -def test_model_get_output_files(test_dir): - """Testing model output files with manual step creation""" +def test_application_get_output_files(test_dir): + """Testing application output files with manual step creation""" exp_dir = pathlib.Path(test_dir) - step = Step(model.name, model.path, model.run_settings) + step = Step(application.name, application.path, application.run_settings) step.meta["status_dir"] = exp_dir / "output_dir" - expected_out_path = step.meta["status_dir"] / (model.name + ".out") - expected_err_path = step.meta["status_dir"] / (model.name + ".err") + expected_out_path = step.meta["status_dir"] / (application.name + ".out") + expected_err_path = step.meta["status_dir"] / (application.name + ".err") assert step.get_output_files() == (str(expected_out_path), str(expected_err_path)) def test_ensemble_get_output_files(test_dir): """Testing ensemble output files with manual step creation""" exp_dir = pathlib.Path(test_dir) - for member in ens.models: + for member in ens.applications: step = Step(member.name, member.path, member.run_settings) step.meta["status_dir"] = exp_dir / "output_dir" expected_out_path = step.meta["status_dir"] / (member.name + ".out") diff --git a/tests/_legacy/test_preview.py b/tests/_legacy/test_preview.py index 3c7bed6fe4..79dcd12062 100644 --- a/tests/_legacy/test_preview.py +++ b/tests/_legacy/test_preview.py @@ -290,11 +290,11 @@ def test_model_preview(test_dir, wlmutils): rs1 = 
RunSettings("bash", "multi_tags_template.sh") rs2 = exp.create_run_settings("echo", ["spam", "eggs"]) - hello_world_model = exp.create_model( + hello_world_model = exp.create_application( "echo-hello", run_settings=rs1, params=model_params ) - spam_eggs_model = exp.create_model("echo-spam", run_settings=rs2) + spam_eggs_model = exp.create_application("echo-spam", run_settings=rs2) preview_manifest = Manifest(hello_world_model, spam_eggs_model) @@ -333,8 +333,10 @@ def test_model_preview_properties(test_dir, wlmutils): se_param3 = "eggs" rs2 = exp.create_run_settings(se_param1, [se_param2, se_param3]) - hello_world_model = exp.create_model(hw_name, run_settings=rs1, params=model_params) - spam_eggs_model = exp.create_model(se_name, run_settings=rs2) + hello_world_model = exp.create_application( + hw_name, run_settings=rs1, params=model_params + ) + spam_eggs_model = exp.create_application(se_name, run_settings=rs2) preview_manifest = Manifest(hello_world_model, spam_eggs_model) @@ -385,7 +387,7 @@ def test_preview_model_tagged_files(fileutils, test_dir, wlmutils): model_params = {"port": 6379, "password": "unbreakable_password"} model_settings = RunSettings("bash", "multi_tags_template.sh") - hello_world_model = exp.create_model( + hello_world_model = exp.create_application( "echo-hello", run_settings=model_settings, params=model_params ) @@ -420,7 +422,7 @@ def test_model_key_prefixing(test_dir, wlmutils): db = exp.create_database(port=6780, interface="lo") exp.generate(db, overwrite=True) rs1 = exp.create_run_settings("echo", ["hello", "world"]) - model = exp.create_model("model_test", run_settings=rs1) + model = exp.create_application("model_test", run_settings=rs1) # enable key prefixing on model model.enable_key_prefixing() @@ -491,8 +493,8 @@ def test_preview_models_and_ensembles(test_dir, wlmutils): hw_name = "echo-hello" se_name = "echo-spam" ens_name = "echo-ensemble" - hello_world_model = exp.create_model(hw_name, run_settings=rs1) - spam_eggs_model = exp.create_model(se_name, run_settings=rs2) + hello_world_model = exp.create_application(hw_name, run_settings=rs1) + spam_eggs_model = exp.create_application(se_name, run_settings=rs2) hello_ensemble = exp.create_ensemble(ens_name, run_settings=rs1, replicas=3) exp.generate(hello_world_model, spam_eggs_model, hello_ensemble) @@ -530,7 +532,7 @@ def test_ensemble_preview_client_configuration(test_dir, wlmutils): exp.generate(ensemble, overwrite=True) rs2 = exp.create_run_settings("echo", ["spam", "eggs"]) # Create model - ml_model = exp.create_model("tf_training", rs2) + ml_model = exp.create_application("tf_training", rs2) for sim in ensemble.entities: ml_model.register_incoming_entity(sim) @@ -575,7 +577,7 @@ def test_ensemble_preview_client_configuration_multidb(test_dir, wlmutils): exp.generate(ensemble, overwrite=True) rs2 = exp.create_run_settings("echo", ["spam", "eggs"]) # Create model - ml_model = exp.create_model("tf_training", rs2) + ml_model = exp.create_application("tf_training", rs2) for sim in ensemble.entities: ml_model.register_incoming_entity(sim) exp.generate(ml_model, overwrite=True) @@ -674,7 +676,7 @@ def test_preview_colocated_db_model_ensemble(fileutils, test_dir, wlmutils, mlut ) # Create colocated SmartSim Model - colo_model = exp.create_model("colocated_model", colo_settings) + colo_model = exp.create_application("colocated_model", colo_settings) # Create and save ML model to filesystem content = "empty test" @@ -794,7 +796,7 @@ def test_preview_colocated_db_script_ensemble(fileutils, test_dir, wlmutils, 
mlu ) # Create a SmartSim model - colo_model = exp.create_model("colocated_model", colo_settings) + colo_model = exp.create_application("colocated_model", colo_settings) # Colocate a db with each ensemble entity and add a script # to each entity via file @@ -1050,8 +1052,8 @@ def test_verbosity_info_ensemble(test_dir, wlmutils): hw_name = "echo-hello" se_name = "echo-spam" ens_name = "echo-ensemble" - hello_world_model = exp.create_model(hw_name, run_settings=rs1) - spam_eggs_model = exp.create_model(se_name, run_settings=rs2) + hello_world_model = exp.create_application(hw_name, run_settings=rs1) + spam_eggs_model = exp.create_application(se_name, run_settings=rs2) hello_ensemble = exp.create_ensemble(ens_name, run_settings=rs1, replicas=3) exp.generate(hello_world_model, spam_eggs_model, hello_ensemble) @@ -1092,7 +1094,7 @@ def test_verbosity_info_colocated_db_model_ensemble( ) # Create colocated SmartSim Model - colo_model = exp.create_model("colocated_model", colo_settings) + colo_model = exp.create_application("colocated_model", colo_settings) # Create and save ML model to filesystem content = "empty test" @@ -1209,7 +1211,7 @@ def test_verbosity_info_ensemble(test_dir, wlmutils): exp.generate(ensemble, overwrite=True) rs2 = exp.create_run_settings("echo", ["spam", "eggs"]) # Create model - ml_model = exp.create_model("tf_training", rs2) + ml_model = exp.create_application("tf_training", rs2) for sim in ensemble.entities: ml_model.register_incoming_entity(sim) @@ -1277,8 +1279,8 @@ def test_preview_colocated_db_singular_model(wlmutils, test_dir): rs = exp.create_run_settings("sleep", ["100"]) - model_1 = exp.create_model("model_1", run_settings=rs) - model_2 = exp.create_model("model_2", run_settings=rs) + model_1 = exp.create_application("model_1", run_settings=rs) + model_2 = exp.create_application("model_2", run_settings=rs) model_1.colocate_db() @@ -1307,7 +1309,7 @@ def test_preview_db_script(wlmutils, test_dir): model_settings = exp.create_run_settings(exe="python", exe_args="params.py") # Initialize a Model object - model_instance = exp.create_model("model_name", model_settings) + model_instance = exp.create_application("model_name", model_settings) model_instance.colocate_db_tcp() # TorchScript string diff --git a/tests/_legacy/test_serialize.py b/tests/_legacy/test_serialize.py index b2dc0b7a70..a8c9cf1d9c 100644 --- a/tests/_legacy/test_serialize.py +++ b/tests/_legacy/test_serialize.py @@ -123,25 +123,25 @@ def test_started_entities_are_serialized(test_dir, manifest_json): rs1 = exp.create_run_settings("echo", ["hello", "world"]) rs2 = exp.create_run_settings("echo", ["spam", "eggs"]) - hello_world_model = exp.create_model("echo-hello", run_settings=rs1) - spam_eggs_model = exp.create_model("echo-spam", run_settings=rs2) + hello_world_application = exp.create_application("echo-hello", run_settings=rs1) + spam_eggs_application = exp.create_application("echo-spam", run_settings=rs2) hello_ensemble = exp.create_ensemble("echo-ensemble", run_settings=rs1, replicas=3) - exp.generate(hello_world_model, spam_eggs_model, hello_ensemble) - exp.start(hello_world_model, spam_eggs_model, block=False) + exp.generate(hello_world_application, spam_eggs_application, hello_ensemble) + exp.start(hello_world_application, spam_eggs_application, block=False) exp.start(hello_ensemble, block=False) try: with open(manifest_json, "r") as f: manifest = json.load(f) assert len(manifest["runs"]) == 2 - assert len(manifest["runs"][0]["model"]) == 2 + assert len(manifest["runs"][0]["application"]) == 2 
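The manifest assertions in this serialization test imply the on-disk layout: each entry in `runs` lists started applications under an `"application"` key and ensembles under `"ensemble"`, with ensemble members nested under `"applications"`. A hedged sketch of walking that structure (the file path is illustrative; the tests obtain it from a fixture):

import json

with open("manifest.json") as f:
    manifest = json.load(f)

for run in manifest["runs"]:
    print(len(run["application"]), "application(s),",
          len(run["ensemble"]), "ensemble(s)")
    for ensemble in run["ensemble"]:
        print("  members:", len(ensemble["applications"]))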
assert len(manifest["runs"][0]["ensemble"]) == 0 - assert len(manifest["runs"][1]["model"]) == 0 + assert len(manifest["runs"][1]["application"]) == 0 assert len(manifest["runs"][1]["ensemble"]) == 1 - assert len(manifest["runs"][1]["ensemble"][0]["models"]) == 3 + assert len(manifest["runs"][1]["ensemble"][0]["applications"]) == 3 finally: - exp.stop(hello_world_model, spam_eggs_model, hello_ensemble) + exp.stop(hello_world_application, spam_eggs_application, hello_ensemble) def test_serialzed_database_does_not_break_if_using_a_non_standard_install(monkeypatch): diff --git a/tests/_legacy/test_smartredis.py b/tests/_legacy/test_smartredis.py index 6f7b199340..2b7d789185 100644 --- a/tests/_legacy/test_smartredis.py +++ b/tests/_legacy/test_smartredis.py @@ -30,7 +30,7 @@ from smartsim import Experiment from smartsim._core.utils import installed_redisai_backends from smartsim.database import Orchestrator -from smartsim.entity import Ensemble, Model +from smartsim.entity import Application, Ensemble from smartsim.status import SmartSimStatus # The tests in this file belong to the group_b group @@ -63,7 +63,7 @@ def test_exchange(local_experiment, local_db, prepare_db, fileutils): """Run two processes, each process puts a tensor on the DB, then accesses the other process's tensor. - Finally, the tensor is used to run a model. + Finally, the tensor is used to run an application. """ db = prepare_db(local_db).orchestrator @@ -87,7 +87,7 @@ def test_exchange(local_experiment, local_db, prepare_db, fileutils): local_experiment.generate(ensemble) - # start the models + # start the applications local_experiment.start(ensemble, summary=False) # get and confirm statuses @@ -99,7 +99,7 @@ def test_consumer(local_experiment, local_db, prepare_db, fileutils): """Run three processes, each one of the first two processes puts a tensor on the DB; the third process accesses the tensors put by the two producers. - Finally, the tensor is used to run a model by each producer + Finally, the tensor is used to run an application by each producer and the consumer accesses the two results. 
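For context on what the producer/consumer scripts in these tests do against the database: SmartSim injects the connection address into each launched entity (the `SSDB` environment variable), and with key prefixing enabled, tensor keys are namespaced per producer. A hedged sketch of the SmartRedis side, assuming the standard `smartredis` client API:

import numpy as np
from smartredis import Client

# the address is read from the SSDB environment variable SmartSim sets
client = Client(cluster=False)

# with key prefixing enabled, this lands under "<entity_name>.tensor"
client.put_tensor("tensor", np.array([1.0, 2.0, 3.0]))

# read a registered producer's tensor by switching the data source
client.set_data_source("producer_0")
result = client.get_tensor("tensor")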
""" @@ -113,10 +113,10 @@ def test_consumer(local_experiment, local_db, prepare_db, fileutils): name="producer", params=params, run_settings=rs_prod, perm_strat="step" ) - consumer = Model( + consumer = Application( "consumer", params={}, path=ensemble.path, run_settings=rs_consumer ) - ensemble.add_model(consumer) + ensemble.add_application(consumer) ensemble.register_incoming_entity(ensemble["producer_0"]) ensemble.register_incoming_entity(ensemble["producer_1"]) @@ -126,7 +126,7 @@ def test_consumer(local_experiment, local_db, prepare_db, fileutils): local_experiment.generate(ensemble) - # start the models + # start the applications local_experiment.start(ensemble, summary=False) # get and confirm statuses diff --git a/tests/_legacy/test_symlinking.py b/tests/_legacy/test_symlinking.py index 2b70e3e9f9..622b960b2d 100644 --- a/tests/_legacy/test_symlinking.py +++ b/tests/_legacy/test_symlinking.py @@ -34,7 +34,7 @@ from smartsim._core.control.controller import Controller, _AnonymousBatchJob from smartsim.database.orchestrator import Orchestrator from smartsim.entity.ensemble import Ensemble -from smartsim.entity.model import Model +from smartsim.entity.model import Application from smartsim.settings.base import RunSettings from smartsim.settings.slurmSettings import SbatchSettings, SrunSettings @@ -50,22 +50,26 @@ ens = Ensemble("ens", params={}, run_settings=rs, batch_settings=bs, replicas=3) orc = Orchestrator(db_nodes=3, batch=True, launcher="slurm", run_command="srun") -model = Model("test_model", params={}, path="", run_settings=rs) -batch_model = Model( - "batch_test_model", params={}, path="", run_settings=batch_rs, batch_settings=bs +application = Application("test_application", params={}, path="", run_settings=rs) +batch_application = Application( + "batch_test_application", + params={}, + path="", + run_settings=batch_rs, + batch_settings=bs, ) -anon_batch_model = _AnonymousBatchJob(batch_model) +anon_batch_application = _AnonymousBatchJob(batch_application) @pytest.mark.parametrize( "entity", - [pytest.param(ens, id="ensemble"), pytest.param(model, id="model")], + [pytest.param(ens, id="ensemble"), pytest.param(application, id="application")], ) def test_symlink(test_dir, entity): """Test symlinking historical output files""" entity.path = test_dir if entity.type == Ensemble: - for member in ens.models: + for member in ens.applications: symlink_with_create_job_step(test_dir, member) else: symlink_with_create_job_step(test_dir, entity) @@ -93,7 +97,7 @@ def symlink_with_create_job_step(test_dir, entity): [ pytest.param(ens, id="ensemble"), pytest.param(orc, id="orchestrator"), - pytest.param(anon_batch_model, id="model"), + pytest.param(anon_batch_application, id="application"), ], ) def test_batch_symlink(entity, test_dir): @@ -116,31 +120,35 @@ def test_batch_symlink(entity, test_dir): def test_symlink_error(test_dir): """Ensure FileNotFoundError is thrown""" - bad_model = Model( - "bad_model", + bad_application = Application( + "bad_application", params={}, path=pathlib.Path(test_dir, "badpath"), run_settings=RunSettings("echo"), ) - telem_dir = pathlib.Path(test_dir, "bad_model_telemetry") - bad_step = controller._create_job_step(bad_model, telem_dir) + telem_dir = pathlib.Path(test_dir, "bad_application_telemetry") + bad_step = controller._create_job_step(bad_application, telem_dir) with pytest.raises(FileNotFoundError): - controller.symlink_output_files(bad_step, bad_model) + controller.symlink_output_files(bad_step, bad_application) -def 
test_failed_model_launch_symlinks(test_dir): +def test_failed_application_launch_symlinks(test_dir): exp_name = "failed-exp" exp = Experiment(exp_name, exp_path=test_dir) - test_model = exp.create_model( - "test_model", run_settings=batch_rs, batch_settings=bs + test_application = exp.create_application( + "test_application", run_settings=batch_rs, batch_settings=bs ) - exp.generate(test_model) + exp.generate(test_application) with pytest.raises(TypeError): - exp.start(test_model) + exp.start(test_application) - _should_not_be_symlinked(pathlib.Path(test_model.path)) - assert not pathlib.Path(test_model.path, f"{test_model.name}.out").is_symlink() - assert not pathlib.Path(test_model.path, f"{test_model.name}.err").is_symlink() + _should_not_be_symlinked(pathlib.Path(test_application.path)) + assert not pathlib.Path( + test_application.path, f"{test_application.name}.out" + ).is_symlink() + assert not pathlib.Path( + test_application.path, f"{test_application.name}.err" + ).is_symlink() def test_failed_ensemble_launch_symlinks(test_dir): @@ -161,7 +169,7 @@ def test_failed_ensemble_launch_symlinks(test_dir): test_ensemble.path, f"{test_ensemble.name}.err" ).is_symlink() - for i in range(len(test_ensemble.models)): + for i in range(len(test_ensemble.applications)): assert not pathlib.Path( test_ensemble.path, f"{test_ensemble.name}_{i}", @@ -184,7 +192,7 @@ def test_non_batch_ensemble_symlinks(test_dir): exp.generate(test_ensemble) exp.start(test_ensemble, block=True) - for i in range(len(test_ensemble.models)): + for i in range(len(test_ensemble.applications)): _should_be_symlinked( pathlib.Path( test_ensemble.path, @@ -205,19 +213,25 @@ def test_non_batch_ensemble_symlinks(test_dir): _should_not_be_symlinked(pathlib.Path(exp.exp_path, "smartsim_params.txt")) -def test_non_batch_model_symlinks(test_dir): - exp_name = "test-non-batch-model" +def test_non_batch_application_symlinks(test_dir): + exp_name = "test-non-batch-application" exp = Experiment(exp_name, exp_path=test_dir) rs = RunSettings("echo", ["spam", "eggs"]) - test_model = exp.create_model("test_model", path=test_dir, run_settings=rs) - exp.generate(test_model) - exp.start(test_model, block=True) + test_application = exp.create_application( + "test_application", path=test_dir, run_settings=rs + ) + exp.generate(test_application) + exp.start(test_application, block=True) - assert pathlib.Path(test_model.path).exists() + assert pathlib.Path(test_application.path).exists() - _should_be_symlinked(pathlib.Path(test_model.path, f"{test_model.name}.out"), True) - _should_be_symlinked(pathlib.Path(test_model.path, f"{test_model.name}.err"), False) + _should_be_symlinked( + pathlib.Path(test_application.path, f"{test_application.name}.out"), True + ) + _should_be_symlinked( + pathlib.Path(test_application.path, f"{test_application.name}.err"), False + ) _should_not_be_symlinked(pathlib.Path(exp.exp_path, "smartsim_params.txt")) diff --git a/tests/_legacy/test_telemetry_monitor.py b/tests/_legacy/test_telemetry_monitor.py index c1bfe27199..e0b1228209 100644 --- a/tests/_legacy/test_telemetry_monitor.py +++ b/tests/_legacy/test_telemetry_monitor.py @@ -296,14 +296,14 @@ def test_load_manifest(fileutils: FileUtils, test_dir: str, config: cfg.Config): assert manifest.launcher == "Slurm" assert len(manifest.runs) == 6 - assert len(manifest.runs[0].models) == 1 - assert len(manifest.runs[2].models) == 8 # 8 models in ensemble + assert len(manifest.runs[0].applications) == 1 + assert len(manifest.runs[2].applications) == 8 # 8 applications in 
ensemble assert len(manifest.runs[0].orchestrators) == 0 assert len(manifest.runs[1].orchestrators) == 3 # 3 shards in db -def test_load_manifest_colo_model(fileutils: FileUtils): - """Ensure that the runtime manifest loads correctly when containing a colocated model""" +def test_load_manifest_colo_application(fileutils: FileUtils): + """Ensure that the runtime manifest loads correctly when containing a colocated application""" # NOTE: for regeneration, this manifest can use `test_telemetry_colo` sample_manifest_path = fileutils.get_test_conf_path("telemetry/colocatedmodel.json") sample_manifest = pathlib.Path(sample_manifest_path) @@ -315,11 +315,11 @@ def test_load_manifest_colo_model(fileutils: FileUtils): assert manifest.launcher == "Slurm" assert len(manifest.runs) == 1 - assert len(manifest.runs[0].models) == 1 + assert len(manifest.runs[0].applications) == 1 -def test_load_manifest_serial_models(fileutils: FileUtils): - """Ensure that the runtime manifest loads correctly when containing multiple models""" +def test_load_manifest_serial_applications(fileutils: FileUtils): + """Ensure that the runtime manifest loads correctly when containing multiple applications""" # NOTE: for regeneration, this manifest can use `test_telemetry_colo` sample_manifest_path = fileutils.get_test_conf_path("telemetry/serialmodels.json") sample_manifest = pathlib.Path(sample_manifest_path) @@ -331,11 +331,11 @@ def test_load_manifest_serial_models(fileutils: FileUtils): assert manifest.launcher == "Slurm" assert len(manifest.runs) == 1 - assert len(manifest.runs[0].models) == 5 + assert len(manifest.runs[0].applications) == 5 -def test_load_manifest_db_and_models(fileutils: FileUtils): - """Ensure that the runtime manifest loads correctly when containing models & +def test_load_manifest_db_and_applications(fileutils: FileUtils): + """Ensure that the runtime manifest loads correctly when containing applications & orchestrator across 2 separate runs""" # NOTE: for regeneration, this manifest can use `test_telemetry_colo` sample_manifest_path = fileutils.get_test_conf_path("telemetry/db_and_model.json") @@ -349,7 +349,7 @@ def test_load_manifest_db_and_models(fileutils: FileUtils): assert len(manifest.runs) == 2 assert len(manifest.runs[0].orchestrators) == 1 - assert len(manifest.runs[1].models) == 1 + assert len(manifest.runs[1].applications) == 1 # verify collector paths from manifest are deserialized to collector config assert manifest.runs[0].orchestrators[0].collectors["client"] @@ -358,8 +358,8 @@ def test_load_manifest_db_and_models(fileutils: FileUtils): assert not manifest.runs[0].orchestrators[0].collectors["client_count"] -def test_load_manifest_db_and_models_1run(fileutils: FileUtils): - """Ensure that the runtime manifest loads correctly when containing models & +def test_load_manifest_db_and_applications_1run(fileutils: FileUtils): + """Ensure that the runtime manifest loads correctly when containing applications & orchestrator in a single run""" # NOTE: for regeneration, this manifest can use `test_telemetry_colo` sample_manifest_path = fileutils.get_test_conf_path( @@ -375,13 +375,13 @@ def test_load_manifest_db_and_models_1run(fileutils: FileUtils): assert len(manifest.runs) == 1 assert len(manifest.runs[0].orchestrators) == 1 - assert len(manifest.runs[0].models) == 1 + assert len(manifest.runs[0].applications) == 1 @pytest.mark.parametrize( ["task_id", "step_id", "etype", "exp_isorch", "exp_ismanaged"], [ - pytest.param("123", "", "model", False, False, id="unmanaged, non-orch"), + 
pytest.param("123", "", "application", False, False, id="unmanaged, non-orch"), pytest.param("456", "123", "ensemble", False, True, id="managed, non-orch"), pytest.param("789", "987", "orchestrator", True, True, id="managed, orch"), pytest.param("987", "", "orchestrator", True, False, id="unmanaged, orch"), @@ -411,8 +411,8 @@ def test_persistable_computed_properties( def test_deserialize_ensemble(fileutils: FileUtils): - """Ensure that the children of ensembles (models) are correctly - placed in the models collection""" + """Ensure that the children of ensembles (applications) are correctly + placed in the applications collection""" sample_manifest_path = fileutils.get_test_conf_path("telemetry/ensembles.json") sample_manifest = pathlib.Path(sample_manifest_path) assert sample_manifest.exists() @@ -424,7 +424,7 @@ def test_deserialize_ensemble(fileutils: FileUtils): # NOTE: no longer returning ensembles, only children... # assert len(manifest.runs[0].ensembles) == 1 - assert len(manifest.runs[0].models) == 8 + assert len(manifest.runs[0].applications) == 8 def test_shutdown_conditions__no_monitored_jobs(test_dir: str): @@ -611,12 +611,12 @@ def is_alive(self) -> bool: assert observer.stop_count == 1 -def test_telemetry_single_model(fileutils, test_dir, wlmutils, config): +def test_telemetry_single_application(fileutils, test_dir, wlmutils, config): """Test that it is possible to create_database then colocate_db_uds/colocate_db_tcp with unique db_identifiers""" # Set experiment name - exp_name = "telemetry_single_model" + exp_name = "telemetry_single_application" # Retrieve parameters from testing environment test_launcher = wlmutils.get_test_launcher() @@ -630,11 +630,11 @@ def test_telemetry_single_model(fileutils, test_dir, wlmutils, config): app_settings.set_nodes(1) app_settings.set_tasks_per_node(1) - # Create the SmartSim Model - smartsim_model = exp.create_model("perroquet", app_settings) - exp.generate(smartsim_model) - exp.start(smartsim_model, block=True) - assert exp.get_status(smartsim_model)[0] == SmartSimStatus.STATUS_COMPLETED + # Create the SmartSim Aapplication + smartsim_application = exp.create_application("perroquet", app_settings) + exp.generate(smartsim_application) + exp.start(smartsim_application, block=True) + assert exp.get_status(smartsim_application)[0] == SmartSimStatus.STATUS_COMPLETED telemetry_output_path = pathlib.Path(test_dir) / config.telemetry_subdir start_events = list(telemetry_output_path.rglob("start.json")) @@ -644,7 +644,7 @@ def test_telemetry_single_model(fileutils, test_dir, wlmutils, config): assert len(stop_events) == 1 -def test_telemetry_single_model_nonblocking( +def test_telemetry_single_application_nonblocking( fileutils, test_dir, wlmutils, monkeypatch, config ): """Ensure that the telemetry monitor logs exist when the experiment @@ -653,7 +653,7 @@ def test_telemetry_single_model_nonblocking( ctx.setattr(cfg.Config, "telemetry_frequency", 1) # Set experiment name - exp_name = "test_telemetry_single_model_nonblocking" + exp_name = "test_telemetry_single_application_nonblocking" # Retrieve parameters from testing environment test_launcher = wlmutils.get_test_launcher() @@ -667,15 +667,17 @@ def test_telemetry_single_model_nonblocking( app_settings.set_nodes(1) app_settings.set_tasks_per_node(1) - # Create the SmartSim Model - smartsim_model = exp.create_model("perroquet", app_settings) - exp.generate(smartsim_model) - exp.start(smartsim_model) + # Create the SmartSim Application + smartsim_application = 
exp.create_application("perroquet", app_settings) + exp.generate(smartsim_application) + exp.start(smartsim_application) telemetry_output_path = pathlib.Path(test_dir) / config.telemetry_subdir snooze_blocking(telemetry_output_path, max_delay=10, post_data_delay=1) - assert exp.get_status(smartsim_model)[0] == SmartSimStatus.STATUS_COMPLETED + assert ( + exp.get_status(smartsim_application)[0] == SmartSimStatus.STATUS_COMPLETED + ) start_events = list(telemetry_output_path.rglob("start.json")) stop_events = list(telemetry_output_path.rglob("stop.json")) @@ -684,15 +686,17 @@ def test_telemetry_single_model_nonblocking( assert len(stop_events) == 1 -def test_telemetry_serial_models(fileutils, test_dir, wlmutils, monkeypatch, config): +def test_telemetry_serial_applications( + fileutils, test_dir, wlmutils, monkeypatch, config +): """ - Test telemetry with models being run in serial (one after each other) + Test telemetry with applications being run in serial (one after each other) """ with monkeypatch.context() as ctx: ctx.setattr(cfg.Config, "telemetry_frequency", 1) # Set experiment name - exp_name = "telemetry_serial_models" + exp_name = "telemetry_serial_applications" # Retrieve parameters from testing environment test_launcher = wlmutils.get_test_launcher() @@ -706,16 +710,16 @@ def test_telemetry_serial_models(fileutils, test_dir, wlmutils, monkeypatch, con app_settings.set_nodes(1) app_settings.set_tasks_per_node(1) - # Create the SmartSim Model - smartsim_models = [ - exp.create_model(f"perroquet_{i}", app_settings) for i in range(5) + # Create the SmartSim Aapplication + smartsim_applications = [ + exp.create_application(f"perroquet_{i}", app_settings) for i in range(5) ] - exp.generate(*smartsim_models) - exp.start(*smartsim_models, block=True) + exp.generate(*smartsim_applications) + exp.start(*smartsim_applications, block=True) assert all( [ status == SmartSimStatus.STATUS_COMPLETED - for status in exp.get_status(*smartsim_models) + for status in exp.get_status(*smartsim_applications) ] ) @@ -727,18 +731,18 @@ def test_telemetry_serial_models(fileutils, test_dir, wlmutils, monkeypatch, con assert len(stop_events) == 5 -def test_telemetry_serial_models_nonblocking( +def test_telemetry_serial_applications_nonblocking( fileutils, test_dir, wlmutils, monkeypatch, config ): """ - Test telemetry with models being run in serial (one after each other) + Test telemetry with applications being run in serial (one after each other) in a non-blocking experiment """ with monkeypatch.context() as ctx: ctx.setattr(cfg.Config, "telemetry_frequency", 1) # Set experiment name - exp_name = "telemetry_serial_models" + exp_name = "telemetry_serial_applications" # Retrieve parameters from testing environment test_launcher = wlmutils.get_test_launcher() @@ -752,12 +756,12 @@ def test_telemetry_serial_models_nonblocking( app_settings.set_nodes(1) app_settings.set_tasks_per_node(1) - # Create the SmartSim Model - smartsim_models = [ - exp.create_model(f"perroquet_{i}", app_settings) for i in range(5) + # Create the SmartSim Aapplication + smartsim_applications = [ + exp.create_application(f"perroquet_{i}", app_settings) for i in range(5) ] - exp.generate(*smartsim_models) - exp.start(*smartsim_models) + exp.generate(*smartsim_applications) + exp.start(*smartsim_applications) telemetry_output_path = pathlib.Path(test_dir) / config.telemetry_subdir snooze_blocking(telemetry_output_path, max_delay=10, post_data_delay=1) @@ -765,7 +769,7 @@ def test_telemetry_serial_models_nonblocking( assert all( [ status 
== SmartSimStatus.STATUS_COMPLETED - for status in exp.get_status(*smartsim_models) + for status in exp.get_status(*smartsim_applications) ] ) @@ -862,16 +866,18 @@ def test_telemetry_db_only_without_generate(test_dir, wlmutils, monkeypatch, con assert len(stop_events) == 1 -def test_telemetry_db_and_model(fileutils, test_dir, wlmutils, monkeypatch, config): +def test_telemetry_db_and_application( + fileutils, test_dir, wlmutils, monkeypatch, config +): """ - Test telemetry with only a database and a model running + Test telemetry with only a database and an application running """ with monkeypatch.context() as ctx: ctx.setattr(cfg.Config, "telemetry_frequency", 1) # Set experiment name - exp_name = "telemetry_db_and_model" + exp_name = "telemetry_db_and_application" # Retrieve parameters from testing environment test_launcher = wlmutils.get_test_launcher() @@ -893,10 +899,10 @@ def test_telemetry_db_and_model(fileutils, test_dir, wlmutils, monkeypatch, conf app_settings.set_nodes(1) app_settings.set_tasks_per_node(1) - # Create the SmartSim Model - smartsim_model = exp.create_model("perroquet", app_settings) - exp.generate(smartsim_model) - exp.start(smartsim_model, block=True) + # Create the SmartSim Application + smartsim_application = exp.create_application("perroquet", app_settings) + exp.generate(smartsim_application) + exp.start(smartsim_application, block=True) finally: exp.stop(orc) @@ -904,7 +910,9 @@ def test_telemetry_db_and_model(fileutils, test_dir, wlmutils, monkeypatch, conf snooze_blocking(telemetry_output_path, max_delay=10, post_data_delay=1) assert exp.get_status(orc)[0] == SmartSimStatus.STATUS_CANCELLED - assert exp.get_status(smartsim_model)[0] == SmartSimStatus.STATUS_COMPLETED + assert ( + exp.get_status(smartsim_application)[0] == SmartSimStatus.STATUS_COMPLETED + ) start_events = list(telemetry_output_path.rglob("database/**/start.json")) stop_events = list(telemetry_output_path.rglob("database/**/stop.json")) @@ -912,8 +920,8 @@ def test_telemetry_db_and_model(fileutils, test_dir, wlmutils, monkeypatch, conf assert len(start_events) == 1 assert len(stop_events) == 1 - start_events = list(telemetry_output_path.rglob("model/**/start.json")) - stop_events = list(telemetry_output_path.rglob("model/**/stop.json")) + start_events = list(telemetry_output_path.rglob("application/**/start.json")) + stop_events = list(telemetry_output_path.rglob("application/**/stop.json")) assert len(start_events) == 1 assert len(stop_events) == 1 @@ -961,7 +969,7 @@ def test_telemetry_ensemble(fileutils, test_dir, wlmutils, monkeypatch, config): def test_telemetry_colo(fileutils, test_dir, wlmutils, coloutils, monkeypatch, config): """ - Test telemetry with only a colocated model running + Test telemetry with only a colocated application running """ with monkeypatch.context() as ctx: @@ -976,7 +984,7 @@ def test_telemetry_colo(fileutils, test_dir, wlmutils, coloutils, monkeypatch, c # Create SmartSim Experiment exp = Experiment(exp_name, launcher=test_launcher, exp_path=test_dir) - smartsim_model = coloutils.setup_test_colo( + smartsim_application = coloutils.setup_test_colo( fileutils, "uds", exp, @@ -984,12 +992,12 @@ def test_telemetry_colo(fileutils, test_dir, wlmutils, coloutils, monkeypatch, c {}, ) - exp.generate(smartsim_model) - exp.start(smartsim_model, block=True) + exp.generate(smartsim_application) + exp.start(smartsim_application, block=True) assert all( [ status == SmartSimStatus.STATUS_COMPLETED - for status in exp.get_status(smartsim_model) + for status in
exp.get_status(smartsim_application) ] ) @@ -1039,10 +1047,10 @@ def test_telemetry_autoshutdown( exp = Experiment(exp_name, launcher=test_launcher, exp_path=test_dir) rs = RunSettings("python", exe_args=["sleep.py", "1"]) - model = exp.create_model("model", run_settings=rs) + application = exp.create_application("application", run_settings=rs) start_time = get_ts_ms() - exp.start(model, block=True) + exp.start(application, block=True) telemetry_output_path = pathlib.Path(test_dir) / config.telemetry_subdir empty_mani = list(telemetry_output_path.rglob("manifest.json")) @@ -1197,7 +1205,7 @@ def test_multistart_experiment( rs_m = exp.create_run_settings("echo", ["hello", "world"], run_command=run_command) rs_m.set_nodes(1) rs_m.set_tasks(1) - model = exp.create_model("my-model", run_settings=rs_m) + application = exp.create_application("my-application", run_settings=rs_m) db = exp.create_database( db_nodes=1, @@ -1205,13 +1213,13 @@ def test_multistart_experiment( interface=wlmutils.get_test_interface(), ) - exp.generate(db, ens, model, overwrite=True) + exp.generate(db, ens, application, overwrite=True) with monkeypatch.context() as ctx: ctx.setattr(cfg.Config, "telemetry_frequency", 1) ctx.setattr(cfg.Config, "telemetry_cooldown", 45) - exp.start(model, block=False) + exp.start(application, block=False) # track PID to see that telmon cooldown avoids restarting process tm_pid = exp._control._telemetry_monitor.pid diff --git a/tests/temp_tests/model_tests.py b/tests/temp_tests/model_tests.py index 4d9369fe61..021cfb2c23 100644 --- a/tests/temp_tests/model_tests.py +++ b/tests/temp_tests/model_tests.py @@ -1,60 +1,60 @@ from smartsim import Experiment from smartsim.database import Orchestrator -from smartsim.entity import Ensemble, Model +from smartsim.entity import Application, Ensemble from smartsim.settings import RunSettings, SrunSettings from smartsim.status import SmartSimStatus -def test_model_constructor(): +def test_application_constructor(): run_settings = RunSettings() - model = Model( + application = Application( name="testing", run_settings=run_settings, exe="echo", exe_args=["hello"], params={}, ) - assert model.exe == ["/usr/bin/echo"] - assert model.exe_args == ["hello"] + assert application.exe == ["/usr/bin/echo"] + assert application.exe_args == ["hello"] -def test_model_add_exe_args(): +def test_application_add_exe_args(): run_settings = SrunSettings() - model = Model( + application = Application( name="testing", run_settings=run_settings, exe="echo", exe_args=["hello"], params={}, ) - model.add_exe_args("there") - assert model.exe_args == ["hello", "there"] - model.add_exe_args(["how", "are", "you"]) - assert model.exe_args == ["hello", "there", "how", "are", "you"] + application.add_exe_args("there") + assert application.exe_args == ["hello", "there"] + application.add_exe_args(["how", "are", "you"]) + assert application.exe_args == ["hello", "there", "how", "are", "you"] -def test_create_model(): +def test_create_application(): run_settings = SrunSettings() exp = Experiment("exp") - model = exp.create_model( - name="model", run_settings=run_settings, exe="echo", exe_args=["hello"] + application = exp.create_application( + name="application", run_settings=run_settings, exe="echo", exe_args=["hello"] ) - assert model.exe == ["/usr/bin/echo"] - assert model.exe_args == ["hello"] + assert application.exe == ["/usr/bin/echo"] + assert application.exe_args == ["hello"] -def test_start_a_model(): +def test_start_an_application(): exp = Experiment("exp") run_settings =
SrunSettings() - model = Model( + application = Application( name="testing", exe="echo", run_settings=run_settings, exe_args=["hello"], params={}, ) - assert model.exe == ["/usr/bin/echo"] - assert model.exe_args == ["hello"] - exp.start(model) - model_status = exp.get_status(model)[0] - assert model_status != SmartSimStatus.STATUS_FAILED + assert application.exe == ["/usr/bin/echo"] + assert application.exe_args == ["hello"] + exp.start(application) + application_status = exp.get_status(application)[0] + assert application_status != SmartSimStatus.STATUS_FAILED diff --git a/tests/temp_tests/steps_tests.py b/tests/temp_tests/steps_tests.py index b41f53a50e..2237a57824 100644 --- a/tests/temp_tests/steps_tests.py +++ b/tests/temp_tests/steps_tests.py @@ -12,7 +12,7 @@ SbatchStep, SrunStep, ) -from smartsim.entity import Model +from smartsim.entity import Application from smartsim.settings import ( AprunSettings, BsubBatchSettings, @@ -55,7 +55,7 @@ def test_instantiate_run_settings(settings_type, step_type): run_settings = settings_type() run_settings.in_batch = True - model = Model( + model = Application( exe="echo", exe_args="hello", name="model_name", run_settings=run_settings ) jobStep = step_type(entity=model, run_settings=model.run_settings) @@ -85,7 +85,7 @@ def test_instantiate_run_settings(settings_type, step_type): def test_instantiate_mpi_run_settings(settings_type, step_type): run_settings = settings_type(fail_if_missing_exec=False) run_settings.in_batch = True - model = Model( + model = Application( exe="echo", exe_args="hello", name="model_name", run_settings=run_settings ) jobStep = step_type(entity=model, run_settings=model.run_settings) @@ -124,7 +124,7 @@ def test_instantiate_batch_settings(settings_type, batch_settings_type, step_typ run_settings = settings_type() run_settings.in_batch = True batch_settings = batch_settings_type() - model = Model( + model = Application( exe="echo", exe_args="hello", name="model_name", diff --git a/tests/test_configs/generator_files/log_params/dir_test/dir_test_0/smartsim_params.txt b/tests/test_configs/generator_files/log_params/dir_test/dir_test_0/smartsim_params.txt index 373cec87e0..d29f0741f4 100644 --- a/tests/test_configs/generator_files/log_params/dir_test/dir_test_0/smartsim_params.txt +++ b/tests/test_configs/generator_files/log_params/dir_test/dir_test_0/smartsim_params.txt @@ -1,4 +1,4 @@ -Model name: dir_test_0 +Application name: dir_test_0 File name Parameters -------------------------- --------------- dir_test/dir_test_0/in.atm Name Value diff --git a/tests/test_configs/generator_files/log_params/dir_test/dir_test_1/smartsim_params.txt b/tests/test_configs/generator_files/log_params/dir_test/dir_test_1/smartsim_params.txt index e45ebb6bf7..86cc2151b8 100644 --- a/tests/test_configs/generator_files/log_params/dir_test/dir_test_1/smartsim_params.txt +++ b/tests/test_configs/generator_files/log_params/dir_test/dir_test_1/smartsim_params.txt @@ -1,4 +1,4 @@ -Model name: dir_test_1 +Application name: dir_test_1 File name Parameters -------------------------- --------------- dir_test/dir_test_1/in.atm Name Value diff --git a/tests/test_configs/generator_files/log_params/dir_test/dir_test_2/smartsim_params.txt b/tests/test_configs/generator_files/log_params/dir_test/dir_test_2/smartsim_params.txt index 081dc56c67..ef4ea24736 100644 --- a/tests/test_configs/generator_files/log_params/dir_test/dir_test_2/smartsim_params.txt +++ b/tests/test_configs/generator_files/log_params/dir_test/dir_test_2/smartsim_params.txt @@ -1,4 +1,4 @@ 
-Model name: dir_test_2 +Application name: dir_test_2 File name Parameters -------------------------- --------------- dir_test/dir_test_2/in.atm Name Value diff --git a/tests/test_configs/generator_files/log_params/dir_test/dir_test_3/smartsim_params.txt b/tests/test_configs/generator_files/log_params/dir_test/dir_test_3/smartsim_params.txt index 3403f7c714..496e12e3bd 100644 --- a/tests/test_configs/generator_files/log_params/dir_test/dir_test_3/smartsim_params.txt +++ b/tests/test_configs/generator_files/log_params/dir_test/dir_test_3/smartsim_params.txt @@ -1,4 +1,4 @@ -Model name: dir_test_3 +Application name: dir_test_3 File name Parameters -------------------------- --------------- dir_test/dir_test_3/in.atm Name Value diff --git a/tests/test_configs/generator_files/log_params/smartsim_params.txt b/tests/test_configs/generator_files/log_params/smartsim_params.txt index 6ac92049fe..d3dcc5aac6 100644 --- a/tests/test_configs/generator_files/log_params/smartsim_params.txt +++ b/tests/test_configs/generator_files/log_params/smartsim_params.txt @@ -1,5 +1,5 @@ Generation start date and time: 08/09/2023 18:22:44 -Model name: dir_test_0 +Application name: dir_test_0 File name Parameters -------------------------- --------------- dir_test/dir_test_0/in.atm Name Value @@ -7,7 +7,7 @@ dir_test/dir_test_0/in.atm Name Value THERMO 10 STEPS 10 -Model name: dir_test_1 +Application name: dir_test_1 File name Parameters -------------------------- --------------- dir_test/dir_test_1/in.atm Name Value @@ -15,7 +15,7 @@ dir_test/dir_test_1/in.atm Name Value THERMO 10 STEPS 20 -Model name: dir_test_2 +Application name: dir_test_2 File name Parameters -------------------------- --------------- dir_test/dir_test_2/in.atm Name Value @@ -23,7 +23,7 @@ dir_test/dir_test_2/in.atm Name Value THERMO 20 STEPS 10 -Model name: dir_test_3 +Application name: dir_test_3 File name Parameters -------------------------- --------------- dir_test/dir_test_3/in.atm Name Value diff --git a/tests/test_configs/send_data.py b/tests/test_configs/send_data.py index f9b9440c47..7c8cc7c25b 100644 --- a/tests/test_configs/send_data.py +++ b/tests/test_configs/send_data.py @@ -42,7 +42,7 @@ def send_data(key): if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--iters", type=int, default=10) - parser.add_argument("--name", type=str, default="model") + parser.add_argument("--name", type=str, default="application") args = parser.parse_args() # send data in iterations diff --git a/tests/test_configs/telemetry/colocatedmodel.json b/tests/test_configs/telemetry/colocatedmodel.json index f3e93ac762..8ecec1c766 100644 --- a/tests/test_configs/telemetry/colocatedmodel.json +++ b/tests/test_configs/telemetry/colocatedmodel.json @@ -12,10 +12,10 @@ { "run_id": "002816b", "timestamp": 1699037041106269774, - "model": [ + "application": [ { - "name": "colocated_model", - "path": "/tmp/my-exp/colocated_model", + "name": "colocated_application", + "path": "/tmp/my-exp/colocated_application", "exe_args": [ "/path/to/my/script.py" ], @@ -53,7 +53,7 @@ "models": [] }, "telemetry_metadata": { - "status_dir": "/tmp/my-exp/.smartsim/telemetry/telemetry_ensemble/002816b/model/colocated_model", + "status_dir": "/tmp/my-exp/.smartsim/telemetry/telemetry_ensemble/002816b/application/colocated_application", "step_id": "4139111.21", "task_id": "21529", "managed": true @@ -66,4 +66,4 @@ "ensemble": [] } ] -} +} \ No newline at end of file diff --git a/tests/test_configs/telemetry/db_and_model.json 
b/tests/test_configs/telemetry/db_and_model.json index 36edc74868..62656a30ad 100644 --- a/tests/test_configs/telemetry/db_and_model.json +++ b/tests/test_configs/telemetry/db_and_model.json @@ -12,7 +12,7 @@ { "run_id": "2ca19ad", "timestamp": 1699038647234488933, - "model": [], + "application": [], "orchestrator": [ { "name": "orchestrator", @@ -47,7 +47,7 @@ { "run_id": "4b5507a", "timestamp": 1699038661491043211, - "model": [ + "application": [ { "name": "perroquet", "path": "/tmp/my-exp/perroquet", @@ -86,4 +86,4 @@ "ensemble": [] } ] -} +} \ No newline at end of file diff --git a/tests/test_configs/telemetry/db_and_model_1run.json b/tests/test_configs/telemetry/db_and_model_1run.json index 44e32bfe40..cbce377994 100644 --- a/tests/test_configs/telemetry/db_and_model_1run.json +++ b/tests/test_configs/telemetry/db_and_model_1run.json @@ -12,7 +12,7 @@ { "run_id": "4b5507a", "timestamp": 1699038661491043211, - "model": [ + "application": [ { "name": "perroquet", "path": "/tmp/my-exp/perroquet", @@ -76,4 +76,4 @@ "ensemble": [] } ] -} +} \ No newline at end of file diff --git a/tests/test_configs/telemetry/ensembles.json b/tests/test_configs/telemetry/ensembles.json index 67e53ca096..4f340e7e07 100644 --- a/tests/test_configs/telemetry/ensembles.json +++ b/tests/test_configs/telemetry/ensembles.json @@ -12,7 +12,7 @@ { "run_id": "d041b90", "timestamp": 1698679830384608928, - "model": [], + "application": [], "orchestrator": [], "ensemble": [ { @@ -32,7 +32,7 @@ ] }, "batch_settings": {}, - "models": [ + "applications": [ { "name": "my-ens_0", "path": "/home/someuser/code/ss", @@ -326,4 +326,4 @@ ] } ] -} +} \ No newline at end of file diff --git a/tests/test_configs/telemetry/serialmodels.json b/tests/test_configs/telemetry/serialmodels.json index 40337ecebe..77dddcc1ec 100644 --- a/tests/test_configs/telemetry/serialmodels.json +++ b/tests/test_configs/telemetry/serialmodels.json @@ -12,7 +12,7 @@ { "run_id": "8c0fbb1", "timestamp": 1699037881502730708, - "model": [ + "application": [ { "name": "perroquet_0", "path": "/tmp/my-exp/perroquet_0", @@ -183,4 +183,4 @@ "ensemble": [] } ] -} +} \ No newline at end of file diff --git a/tests/test_configs/telemetry/telemetry.json b/tests/test_configs/telemetry/telemetry.json index 916f5922b4..348bffd495 100644 --- a/tests/test_configs/telemetry/telemetry.json +++ b/tests/test_configs/telemetry/telemetry.json @@ -8,10 +8,10 @@ { "run_id": "d999ad89-020f-4e6a-b834-dbd88658ce84", "timestamp": 1697824072792854287, - "model": [ + "application": [ { - "name": "my-model", - "path": "/path/to/my-exp/my-model", + "name": "my-application", + "path": "/path/to/my-exp/my-application", "exe_args": [ "hello", "world" @@ -74,7 +74,7 @@ { "run_id": "fd3cd1a8-cb8f-4f61-b847-73a8eb0881fa", "timestamp": 1697824102122439975, - "model": [], + "application": [], "orchestrator": [ { "name": "orchestrator", @@ -136,7 +136,7 @@ { "run_id": "d65ae1df-cb5e-45d9-ab09-6fa641755997", "timestamp": 1697824127962219505, - "model": [], + "application": [], "orchestrator": [], "ensemble": [ { @@ -156,7 +156,7 @@ ] }, "batch_settings": {}, - "models": [ + "applications": [ { "name": "my-ens_0", "path": "/path/to/my-exp/my-ens/my-ens_0", @@ -476,10 +476,10 @@ { "run_id": "e41f8e17-c4b2-441d-adf9-707443ee2c72", "timestamp": 1697835227560376025, - "model": [ + "application": [ { - "name": "my-model", - "path": "/path/to/my-exp/my-model", + "name": "my-application", + "path": "/path/to/my-exp/my-application", "exe_args": [ "hello", "world" @@ -542,7 +542,7 @@ { "run_id": 
"b33a5d27-6822-4795-8e0e-cfea18551fa4", "timestamp": 1697835261956135240, - "model": [], + "application": [], "orchestrator": [ { "name": "orchestrator", @@ -604,7 +604,7 @@ { "run_id": "45772df2-fd80-43fd-adf0-d5e319870182", "timestamp": 1697835287798613875, - "model": [], + "application": [], "orchestrator": [], "ensemble": [ { @@ -624,7 +624,7 @@ ] }, "batch_settings": {}, - "models": [ + "applications": [ { "name": "my-ens_0", "path": "/path/to/my-exp/my-ens/my-ens_0", @@ -942,4 +942,4 @@ ] } ] -} +} \ No newline at end of file