From 71ffa9d1e6701e165a3ed218a4005f8fd07ee0c2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fernando=20P=C3=A9rez-Garc=C3=ADa?=
Date: Thu, 3 Mar 2022 14:00:46 +0000
Subject: [PATCH 1/3] Pad patches if they are smaller than input to aggregator

Update CHANGELOG

Fix linting errors

Ignore mypy error

Fix linting error
---
 CHANGELOG.md                       | 1 +
 InnerEye/ML/pipelines/inference.py | 6 ++++++
 2 files changed, 7 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6c0d3520f..da0069cf9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -90,6 +90,7 @@ gets uploaded to AzureML, by skipping all test folders.
 - ([#632](https://github.com/microsoft/InnerEye-DeepLearning/pull/632)) Nifti test data is no longer stored in Git LFS

 ### Fixed
+- ([#681](https://github.com/microsoft/InnerEye-DeepLearning/pull/681)) Pad model outputs if they are smaller than the inputs.
 - ([#659](https://github.com/microsoft/InnerEye-DeepLearning/pull/659)) Fix caching and checkpointing for TCGA CRCk dataset.
 - ([#649](https://github.com/microsoft/InnerEye-DeepLearning/pull/649)) Fix for the _convert_to_tensor_if_necessary method so that PIL.Image as well as np.array get converted to torch.Tensor.
 - ([#606](https://github.com/microsoft/InnerEye-DeepLearning/pull/606)) Bug fix: registered models do not include the hi-ml submodule
diff --git a/InnerEye/ML/pipelines/inference.py b/InnerEye/ML/pipelines/inference.py
index bfe3755f2..229db44d4 100644
--- a/InnerEye/ML/pipelines/inference.py
+++ b/InnerEye/ML/pipelines/inference.py
@@ -285,6 +285,12 @@ def predict_whole_image(self, image_channels: np.ndarray,
             locations = patches_batch[tio.LOCATION]
             # perform the forward pass
             patches_posteriors = self.model(input_tensor).detach()
+            # crop or -most likely- pad posteriors if they are smaller than the input
+            input_shape = input_tensor.shape[-3:]
+            if input_shape != patches_posteriors.shape[-3:]:
+                crop_pad = tio.CropOrPad(input_shape)
+                cropped_patches = [crop_pad(patch) for patch in patches_posteriors]  # type: ignore
+                patches_posteriors = torch.stack(cropped_patches)  # type: ignore
             # collect the predictions over each of the batches
             aggregator.add_batch(patches_posteriors, locations)
         posteriors = aggregator.get_output_tensor().numpy()

From 858b5f0e67ce2959350a97c134b123e3ad857c78 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fernando=20P=C3=A9rez-Garc=C3=ADa?=
Date: Thu, 3 Mar 2022 17:07:19 +0000
Subject: [PATCH 2/3] Use PyTorch to pad tensors that might be on GPU

---
 InnerEye/ML/pipelines/inference.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/InnerEye/ML/pipelines/inference.py b/InnerEye/ML/pipelines/inference.py
index 229db44d4..815722b79 100644
--- a/InnerEye/ML/pipelines/inference.py
+++ b/InnerEye/ML/pipelines/inference.py
@@ -285,12 +285,14 @@ def predict_whole_image(self, image_channels: np.ndarray,
             locations = patches_batch[tio.LOCATION]
             # perform the forward pass
             patches_posteriors = self.model(input_tensor).detach()
-            # crop or -most likely- pad posteriors if they are smaller than the input
+            # pad posteriors if they are smaller than the input
             input_shape = input_tensor.shape[-3:]
-            if input_shape != patches_posteriors.shape[-3:]:
-                crop_pad = tio.CropOrPad(input_shape)
-                cropped_patches = [crop_pad(patch) for patch in patches_posteriors]  # type: ignore
-                patches_posteriors = torch.stack(cropped_patches)  # type: ignore
+            patches_posteriors_shape = patches_posteriors.shape[-3:]
+            if input_shape != patches_posteriors_shape:
+                difference = np.array(input_shape) - np.array(patches_posteriors_shape)
+                assert not np.any(difference % 2)  # the differences in shape are expected to be even
+                padding = np.repeat(difference // 2, 2)
+                patches_posteriors = torch.nn.functional.pad(patches_posteriors, padding)
             # collect the predictions over each of the batches
             aggregator.add_batch(patches_posteriors, locations)
         posteriors = aggregator.get_output_tensor().numpy()

From 39fc27a0491029b678c8bafac396255d68d0cfe8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fernando=20P=C3=A9rez-Garc=C3=ADa?=
Date: Thu, 3 Mar 2022 17:21:17 +0000
Subject: [PATCH 3/3] Fix type of argument to F.pad

---
 InnerEye/ML/pipelines/inference.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/InnerEye/ML/pipelines/inference.py b/InnerEye/ML/pipelines/inference.py
index 815722b79..5b397c6e0 100644
--- a/InnerEye/ML/pipelines/inference.py
+++ b/InnerEye/ML/pipelines/inference.py
@@ -291,7 +291,7 @@ def predict_whole_image(self, image_channels: np.ndarray,
             if input_shape != patches_posteriors_shape:
                 difference = np.array(input_shape) - np.array(patches_posteriors_shape)
                 assert not np.any(difference % 2)  # the differences in shape are expected to be even
-                padding = np.repeat(difference // 2, 2)
+                padding = tuple(np.repeat(difference // 2, 2))
                 patches_posteriors = torch.nn.functional.pad(patches_posteriors, padding)
             # collect the predictions over each of the batches
             aggregator.add_batch(patches_posteriors, locations)
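
For context, below is a minimal standalone sketch of the padding step that the final patch arrives at. It is not repository code: the tensor shapes and variable values are illustrative, and it adds an explicit per-dimension reversal because torch.nn.functional.pad consumes padding amounts starting from the last dimension (this only matters when the shape difference varies across dimensions).

# Standalone sketch, not InnerEye code; shapes below are made up for illustration.
import numpy as np
import torch
import torch.nn.functional as F

input_tensor = torch.zeros(2, 1, 64, 64, 64)         # hypothetical batch of input patches
patches_posteriors = torch.zeros(2, 2, 48, 48, 56)   # hypothetical, smaller model output

input_shape = input_tensor.shape[-3:]
output_shape = patches_posteriors.shape[-3:]
if input_shape != output_shape:
    difference = np.array(input_shape) - np.array(output_shape)
    assert not np.any(difference % 2)  # expect an even (symmetric) difference per dimension
    # F.pad takes (left, right) pairs starting from the last dimension,
    # so reverse the per-dimension halves before repeating them into pairs.
    half = difference[::-1] // 2
    padding = tuple(int(p) for p in np.repeat(half, 2))
    patches_posteriors = F.pad(patches_posteriors, padding)

assert patches_posteriors.shape[-3:] == input_shape  # output now matches the input patch size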