Skip to content

Commit 28097ba

Browse files
fix readme (#1862)
* unify cmd line, fix readme * handle reading errors
1 parent 5ec5b96 commit 28097ba

File tree

2 files changed

+45
-29
lines changed

2 files changed

+45
-29
lines changed

demos/python_demos/image_translation_demo/README.md

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -25,18 +25,18 @@ python3 cocosnet_demo.py -h
2525
The command yields the following usage message:
2626

2727
```
28-
usage: image_translation_demo.py [-h] -t TRANSLATION_MODEL
29-
[-s SEGMENTATION_MODEL] [-ii INPUT_IMAGES]
28+
usage: image_translation_demo.py [-h] -m_trn TRANSLATION_MODEL
29+
[-m_seg SEGMENTATION_MODEL] [-ii INPUT_IMAGES]
3030
[-is INPUT_SEMANTICS] -ri REFERENCE_IMAGES
3131
[-rs REFERENCE_SEMANTICS] -o OUTPUT_DIR
3232
[-d DEVICE]
3333
3434
Options:
3535
-h, --help Show this help message and exit.
36-
-t TRANSLATION_MODEL, --translation_model TRANSLATION_MODEL
36+
-m_trn TRANSLATION_MODEL, --translation_model TRANSLATION_MODEL
3737
Required. Path to an .xml file with a trained
3838
translation model
39-
-s SEGMENTATION_MODEL, --segmentation_model SEGMENTATION_MODEL
39+
-m_seg SEGMENTATION_MODEL, --segmentation_model SEGMENTATION_MODEL
4040
Optional. Path to an .xml file with a trained
4141
semantic segmentation model
4242
-ii INPUT_IMAGES, --input_images INPUT_IMAGES
@@ -69,35 +69,35 @@ To run the demo, you can use public or pre-trained models. You can download the
6969
7070
There are two ways to use this demo:
7171

72-
1. To use only translation model.
73-
You can use the following command run demo on CPU using CoCosNet as translation model:
72+
1. Run with a segmentation model in addition to the translation model. You should use only models trained on the ADE20k dataset. Example: [hrnet-v2-c1-segmentation](../../../models/public/hrnet-v2-c1-segmentation/hrnet-v2-c1-segmentation.md).
73+
In this case, only the input and reference images are required, without any masks.
74+
Segmentation masks will be generated by the segmentation model.
75+
76+
You can use the following command to run the demo on CPU using the cocosnet and hrnet-v2-c1-segmentation models:
7477

7578
```
76-
python3 cocosnet_demo.py \
79+
python3 image_translation_demo.py \
7780
-d CPU \
78-
-t <path_to_translation_model>/CoCosNet.xml \
79-
-is <path_to_semantic_mask_of_image>/input_mask.png \
80-
-ri <path_to_exemplar_image>/reference_image.jpg \
81-
-rs <path_to_exemplar_semantic>/reference_mask.png
81+
-m_trn <path_to_translation_model>/cocosnet.xml \
82+
-m_seg <path_to_segmentation_model>/hrnet-v2-c1-segmentation.xml \
83+
-ii <path_to_input_image>/input_image.jpg \
84+
-ri <path_to_exemplar_image>/reference_image.jpg
8285
```
8386

84-
> **NOTE**: For segmentation masks you should use mask (with shape: [height x width]) that specifies class for each pixel. Number of classes is 151 (from ADE20k), where '0' - background class.
85-
86-
2. To use the segmentation model in addition to translation. You should use only models trained on ADE20k dataset. Example: [hrnet-v2-c1-segmentation](../../../models/public/hrnet-v2-c1-segmentation/hrnet-v2-c1-segmentation.md).
87-
In this case user have to set input image and reference image without any masks.
88-
Segmentation masks will be generated via segmentation model.
89-
90-
You can use the following command run demo on CPU using CoCosNet as translation model:
87+
2. Run with only the translation model.
88+
You can use the following command to run the demo on CPU using cocosnet as the translation model:
9189

9290
```
93-
python3 cocosnet_demo.py \
91+
python3 image_translation_demo.py \
9492
-d CPU \
95-
-t <path_to_translation_model>/CoCosNet.xml \
96-
-s <path_to_segmentation_model>/Seg.xml \
97-
-ii <path_to_input_image>/input_image.jpg \
98-
-ri <path_to_exemplar_image>/reference_image.jpg
93+
-m_trn <path_to_translation_model>/cocosnet.xml \
94+
-is <path_to_semantic_mask_of_image>/input_mask.png \
95+
-ri <path_to_exemplar_image>/reference_image.jpg \
96+
-rs <path_to_exemplar_semantic>/reference_mask.png
9997
```
10098

99+
> **NOTE**: For segmentation masks, you should use a mask (with shape [height x width]) that specifies the class for each pixel. The number of classes is 151 (from ADE20k), where '0' is the background class.
100+
101101
## Demo Output
102102

103103
The results of the demo processing are saved to a folder that is specified by the parameter `output_dir`.

demos/python_demos/image_translation_demo/image_translation_demo.py

Lines changed: 22 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -30,10 +30,10 @@ def build_argparser():
3030
args = parser.add_argument_group('Options')
3131
args.add_argument('-h', '--help', action='help', default=SUPPRESS,
3232
help='Show this help message and exit.')
33-
args.add_argument("-t", "--translation_model",
33+
args.add_argument("-m_trn", "--translation_model",
3434
help="Required. Path to an .xml file with a trained translation model",
3535
required=True, type=str)
36-
args.add_argument("-s", "--segmentation_model",
36+
args.add_argument("-m_seg", "--segmentation_model",
3737
help="Optional. Path to an .xml file with a trained semantic segmentation model",
3838
type=str)
3939
args.add_argument("-ii", "--input_images",
@@ -104,13 +104,29 @@ def main():
104104
samples = [number_of_objects * [''], input_semantics, reference_images, reference_semantics]
105105
for input_img, input_sem, ref_img, ref_sem in zip(*samples):
106106
if use_seg:
107-
input_sem = get_mask_from_image(cv2.imread(input_img), seg_model)
108-
ref_sem = get_mask_from_image(cv2.imread(ref_img), seg_model)
107+
in_img = cv2.imread(input_img)
108+
if in_img is None:
109+
raise IOError('Image {} cannot be read'.format(input_img))
110+
input_sem = get_mask_from_image(in_img, seg_model)
111+
r_img = cv2.imread(ref_img)
112+
if r_img is None:
113+
raise IOError('Image {} cannot be read'.format(ref_img))
114+
ref_sem = get_mask_from_image(r_img, seg_model)
109115
else:
110-
input_sem = cv2.imread(input_sem, cv2.IMREAD_GRAYSCALE)
116+
input_sem_file = input_sem
117+
input_sem = cv2.imread(input_sem_file, cv2.IMREAD_GRAYSCALE)
118+
if input_sem is None:
119+
raise IOError('Image {} cannot be read'.format(input_sem_file))
120+
ref_sem_file = ref_sem
111121
ref_sem = cv2.imread(ref_sem, cv2.IMREAD_GRAYSCALE)
122+
if ref_sem is None:
123+
raise IOError('Image {} cannot be read'.format(ref_sem_file))
112124
input_sem = preprocess_semantics(input_sem, input_size=gan_model.input_semantic_size)
113-
ref_img = preprocess_image(cv2.imread(ref_img), input_size=gan_model.input_image_size)
125+
ref_img_file = ref_img
126+
ref_img = cv2.imread(ref_img_file)
127+
if ref_img is None:
128+
raise IOError('Image {} cannot be read'.format(ref_img_file))
129+
ref_img = preprocess_image(ref_img, input_size=gan_model.input_image_size)
114130
ref_sem = preprocess_semantics(ref_sem, input_size=gan_model.input_semantic_size)
115131
input_dict = {
116132
'input_semantics': input_sem,

0 commit comments

Comments
 (0)