From 5942b7202aa95f4068d9cb1f5688060cb25208d4 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 20 Dec 2022 15:59:50 +0000
Subject: [PATCH] correct a crash in img2img under particular circumstances

When using the inpainting model, the following sequence of events would
cause a predictable crash:

1. Use the unified canvas to outcrop a portion of the image.
2. Accept the outcropped image and import it into img2img.
3. Try any img2img operation.

This closes #1596.

The crash was:

```
operands could not be broadcast together with shapes (320,512) (512,576)

Traceback (most recent call last):
  File "/data/lstein/InvokeAI/backend/invoke_ai_web_server.py", line 1125, in generate_images
    self.generate.prompt2image(
  File "/data/lstein/InvokeAI/ldm/generate.py", line 492, in prompt2image
    results = generator.generate(
  File "/data/lstein/InvokeAI/ldm/invoke/generator/base.py", line 98, in generate
    image = make_image(x_T)
  File "/data/lstein/InvokeAI/ldm/invoke/generator/omnibus.py", line 138, in make_image
    return self.sample_to_image(samples)
  File "/data/lstein/InvokeAI/ldm/invoke/generator/omnibus.py", line 173, in sample_to_image
    corrected_result = super(Img2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
  File "/data/lstein/InvokeAI/ldm/invoke/generator/base.py", line 148, in repaste_and_color_correct
    mask_pixels = init_a_pixels * init_mask_pixels > 0
ValueError: operands could not be broadcast together with shapes (320,512) (512,576)
```

The error occurred because the outcropping operation left the image and
its mask with different sizes. The underlying cause appears to be that
different code paths are followed in the `inpaint` and `omnibus`
modules. Since omnibus will be obsoleted by diffusers, I have chosen to
work around the problem rather than track it down to its source.

The only ill effect is that color correction will not be applied to the
first image created by `img2img` after applying the outcrop and
immediately importing it into the img2img canvas. Since the inpainting
model has less of a color drift problem than the standard model, this
is unlikely to be problematic.
---
 ldm/invoke/generator/omnibus.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/ldm/invoke/generator/omnibus.py b/ldm/invoke/generator/omnibus.py
index 7eaefee0c63..d6b5b164ab8 100644
--- a/ldm/invoke/generator/omnibus.py
+++ b/ldm/invoke/generator/omnibus.py
@@ -167,6 +167,8 @@ def sample_to_image(self, samples)->Image.Image:
         if self.pil_image is None or self.pil_mask is None:
             return gen_result
+        if self.pil_image.size != self.pil_mask.size:
+            return gen_result
         corrected_result = super(Img2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
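
For readers reproducing the failure, the error comes straight from NumPy once
the outcropped image and the original-size mask are converted to arrays of
different shapes. The sketch below is illustrative only and is not taken from
the InvokeAI sources; the image and mask sizes are assumptions derived from
the array shapes in the traceback above. It shows the failing elementwise
product and the same size guard that the patch adds.

```
import numpy as np
from PIL import Image

# PIL sizes are (width, height), so a 512x320 image becomes a (320, 512)
# array and a 576x512 mask becomes a (512, 576) array -- the shapes from
# the traceback. Both images here are hypothetical placeholders.
image = Image.new("L", (512, 320))  # outcropped init image
mask = Image.new("L", (576, 512))   # mask still at its pre-outcrop size

if image.size != mask.size:
    # Same guard as the patch: skip color correction when sizes disagree.
    print("image/mask size mismatch; skipping color correction")
else:
    # With mismatched shapes this elementwise product would raise:
    #   ValueError: operands could not be broadcast together with shapes (320,512) (512,576)
    mask_pixels = np.asarray(image) * np.asarray(mask) > 0
```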