Skip to content
This repository was archived by the owner on Mar 21, 2024. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ gets uploaded to AzureML, by skipping all test folders.
- ([#572](https://github.com/microsoft/InnerEye-DeepLearning/pull/572)) Updated to new version of hi-ml package

### Fixed
- ([#593](https://github.com/microsoft/InnerEye-DeepLearning/pull/593)) Bug fix for hi-ml 0.1.11 issue (#130): empty mount point is turned into ".", which fails the AML job
- ([#587](https://github.com/microsoft/InnerEye-DeepLearning/pull/587)) Bug fix for regression in AzureML's handling of environments: upgrade to hi-ml 0.1.11
- ([#537](https://github.com/microsoft/InnerEye-DeepLearning/pull/537)) Print warning if inference is disabled but comparison requested.
- ([#567](https://github.com/microsoft/InnerEye-DeepLearning/pull/567)) Fix Pillow version.
Expand Down
4 changes: 3 additions & 1 deletion InnerEye/Azure/azure_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,9 @@ def create_dataset_configs(azure_config: AzureConfig,
for i, (dataset_id, mount_point) in enumerate(zip(all_azure_dataset_ids, all_dataset_mountpoints)):
if dataset_id:
datasets.append(DatasetConfig(name=dataset_id,
target_folder=mount_point,
# Workaround for a bug in hi-ml 0.1.11: mount_point=="" creates invalid jobs,
# setting to None works.
target_folder=mount_point or None,
use_mounting=azure_config.use_dataset_mount,
datastore=azure_config.azureml_datastore))
elif mount_point:
Expand Down
1 change: 0 additions & 1 deletion InnerEye/Common/fixed_paths.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ def repository_root_directory(path: Optional[PathOrString] = None) -> Path:
DEFAULT_RESULT_IMAGE_NAME = "segmentation.nii.gz"
# Default filename if scoring produces a zipped DICOM-RT file.
DEFAULT_RESULT_ZIP_DICOM_NAME = "segmentation.dcm.zip"
DEFAULT_AML_LOGS_DIR = "azureml-logs"

DEFAULT_LOGS_DIR_NAME = "logs"

Expand Down
1 change: 0 additions & 1 deletion InnerEye/ML/visualizers/plot_cross_validation.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,6 @@
RUN_DICTIONARY_NAME = "RunDictionary.txt"

MAX_STRUCTURES_PER_PLOT = 7
DRIVER_LOG_BASENAME = "70_driver_log.txt"
RUN_RECOVERY_ID_KEY = 'run_recovery_id'
WILCOXON_RESULTS_FILE = "CrossValidationWilcoxonSignedRankTestResults.txt"
MANN_WHITNEY_RESULTS_FILE = "CrossValidationMannWhitneyTestResults.txt"
Expand Down
18 changes: 12 additions & 6 deletions Tests/AfterTraining/test_after_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
from InnerEye.Common import common_util, fixed_paths, fixed_paths_for_tests
from InnerEye.Common.common_util import BEST_EPOCH_FOLDER_NAME, CROSSVAL_RESULTS_FOLDER, ENSEMBLE_SPLIT_NAME, \
get_best_epoch_results_path
from InnerEye.Common.fixed_paths import (DEFAULT_AML_LOGS_DIR, DEFAULT_RESULT_IMAGE_NAME, DEFAULT_RESULT_ZIP_DICOM_NAME,
from InnerEye.Common.fixed_paths import (DEFAULT_RESULT_IMAGE_NAME, DEFAULT_RESULT_ZIP_DICOM_NAME,
PYTHON_ENVIRONMENT_NAME, repository_root_directory)
from InnerEye.Common.fixed_paths_for_tests import full_ml_test_data_path
from InnerEye.Common.output_directories import OutputFolderForTests
Expand All @@ -50,7 +50,7 @@
from InnerEye.Scripts import submit_for_inference
from Tests.ML.util import assert_nifti_content, get_default_azure_config, get_default_workspace, get_nifti_shape

FALLBACK_SINGLE_RUN = "refs_pull_545_merge:refs_pull_545_merge_1626538212_d2b07afd"
FALLBACK_SINGLE_RUN = "refs_pull_593_merge_1637188926_7ba554ba"
FALLBACK_ENSEMBLE_RUN = "refs_pull_545_merge:HD_caea82ae-9603-48ba-8280-7d2bc6272411"
FALLBACK_2NODE_RUN = "refs_pull_545_merge:refs_pull_545_merge_1626538178_9f3023b2"
FALLBACK_CV_GLAUCOMA = "refs_pull_545_merge:HD_72ecc647-07c3-4353-a538-620346114ebd"
Expand Down Expand Up @@ -200,10 +200,16 @@ def test_check_dataset_mountpoint(test_output_dirs: OutputFolderForTests) -> Non
"""
run = get_most_recent_run(fallback_run_id_for_local_execution=FALLBACK_SINGLE_RUN)
files = run.get_file_names()
driver_log = f"{DEFAULT_AML_LOGS_DIR}/70_driver_log.txt"
assert driver_log in files
downloaded = test_output_dirs.root_dir / "70_driver_log.txt"
run.download_file(driver_log, output_file_path=str(downloaded))

# Account for old and new job runtime: log files live in different places
driver_log_files = ["azureml-logs/70_driver_log.txt", "user_logs/std_log.txt"]
downloaded = test_output_dirs.root_dir / "driver_log.txt"
for f in driver_log_files:
if f in files:
run.download_file(f, output_file_path=str(downloaded))
break
else:
raise ValueError("The run does not contain any of the driver log files")
logs = downloaded.read_text()
expected_mountpoint = BasicModel2Epochs().dataset_mountpoint
assert f"local_dataset : {expected_mountpoint}" in logs
Expand Down