diff --git a/examples/research_projects/instructpix2pix_lora/train_instruct_pix2pix_lora.py b/examples/research_projects/instructpix2pix_lora/train_instruct_pix2pix_lora.py
index 06079fe9ed41..0f158131ea40 100644
--- a/examples/research_projects/instructpix2pix_lora/train_instruct_pix2pix_lora.py
+++ b/examples/research_projects/instructpix2pix_lora/train_instruct_pix2pix_lora.py
@@ -788,7 +788,7 @@ def preprocess_images(examples):
         )
         # We need to ensure that the original and the edited images undergo the same
         # augmentation transforms.
-        images = np.concatenate([original_images, edited_images])
+        images = np.stack([original_images, edited_images])
         images = torch.tensor(images)
         images = 2 * (images / 255) - 1
         return train_transforms(images)
@@ -799,7 +799,7 @@ def preprocess_train(examples):
-        # Since the original and edited images were concatenated before
+        # Since the original and edited images were stacked before
         # applying the transformations, we need to separate them and reshape
         # them accordingly.
-        original_images, edited_images = preprocessed_images.chunk(2)
+        original_images, edited_images = preprocessed_images
         original_images = original_images.reshape(-1, 3, args.resolution, args.resolution)
         edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
 
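
Note on the change: np.stack keeps the two batches on a new leading axis of
size 2, so plain tuple unpacking (which iterates over dim 0) recovers the two
halves directly, instead of concatenating along axis 0 and splitting back with
chunk(2). Either layout keeps both halves inside a single tensor, so
train_transforms applies the same augmentations to the original and edited
images, as the in-code comment requires. Below is a minimal standalone sketch
of the equivalence, assuming the upstream preprocessing yields (B*3, H, W)
float arrays; the toy shapes and zeros/ones data are hypothetical, not taken
from the script:

import numpy as np
import torch

B, H, W = 4, 8, 8  # hypothetical toy sizes; the script uses args.resolution
original_images = np.zeros((B * 3, H, W), dtype=np.float32)
edited_images = np.ones((B * 3, H, W), dtype=np.float32)

# Old path: concatenate along axis 0 -> (2*B*3, H, W), then split with chunk(2).
concatenated = torch.tensor(np.concatenate([original_images, edited_images]))
orig_a, edit_a = concatenated.chunk(2)  # two (B*3, H, W) tensors

# New path: stack onto a leading axis -> (2, B*3, H, W); unpacking a tensor
# iterates over dim 0, yielding the same two (B*3, H, W) tensors.
stacked = torch.tensor(np.stack([original_images, edited_images]))
orig_b, edit_b = stacked

assert torch.equal(orig_a, orig_b) and torch.equal(edit_a, edit_b)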