@@ -101,16 +101,21 @@ def super_resolution(self, image, steps=100, target_scale=2, half_attention=Fals
         down_sample_rate = target_scale / 4
         wd = width_og * down_sample_rate
         hd = height_og * down_sample_rate
-        width_downsampled_pre = int(wd)
-        height_downsampled_pre = int(hd)
+        width_downsampled_pre = int(np.ceil(wd))
+        height_downsampled_pre = int(np.ceil(hd))

         if down_sample_rate != 1:
             print(
                 f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
             im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
         else:
             print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
-        logs = self.run(model["model"], im_og, diffusion_steps, eta)
+
+        # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
+        pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
+        im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
+
+        logs = self.run(model["model"], im_padded, diffusion_steps, eta)

         sample = logs["sample"]
         sample = sample.detach().cpu()
@@ -120,6 +125,9 @@ def super_resolution(self, image, steps=100, target_scale=2, half_attention=Fals
         sample = np.transpose(sample, (0, 2, 3, 1))
         a = Image.fromarray(sample[0])

+        # remove padding
+        a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))
+
         del model
         gc.collect()
         torch.cuda.empty_cache()
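
For reference, a minimal self-contained sketch of the pad-to-multiple-of-64 arithmetic introduced above and the matching crop after 4x upscaling. It assumes a PIL RGB image; demo_pad_and_crop is a hypothetical helper, and the LANCZOS resize merely stands in for the diffusion model's 4x output so the shapes can be checked in isolation.

import numpy as np
from PIL import Image


def demo_pad_and_crop(im_og: Image.Image) -> Image.Image:
    # Pad the right/bottom edges so both dimensions become multiples of 64
    # (at least 128 px, because of the max((2, 2), ...) floor), repeating edge pixels.
    pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
    im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))

    # Stand-in for the model: the real code upscales im_padded by 4x via self.run(...).
    upscaled = im_padded.resize((im_padded.width * 4, im_padded.height * 4), Image.LANCZOS)

    # Crop back to 4x the original (unpadded) size, discarding the padded border.
    return upscaled.crop((0, 0) + tuple(np.array(im_og.size) * 4))


if __name__ == "__main__":
    img = Image.fromarray(np.zeros((90, 130, 3), dtype=np.uint8))  # 130x90 RGB
    print(demo_pad_and_crop(img).size)  # (520, 360), i.e. exactly 4x the input size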