Skip to content

Commit d645db9

Browse files
committed
convert range(len()) to enumerate()
1 parent 8137691 commit d645db9

File tree

4 files changed

+35
-64
lines changed

4 files changed

+35
-64
lines changed

tensorlayer/files.py

Lines changed: 30 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -570,22 +570,24 @@ def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False
570570

571571
filename = 'mirflickr25k.zip'
572572
url = 'http://press.liacs.nl/mirflickr/mirflickr25k/'
573-
## download dataset
573+
574+
# download dataset
574575
if folder_exists(path + "/mirflickr") is False:
575576
logging.info("[*] Flickr25k is nonexistent in {}".format(path))
576577
maybe_download_and_extract(filename, path, url, extract=True)
577578
del_file(path + '/' + filename)
578-
## return images by the given tag.
579+
580+
# return images by the given tag.
579581
# 1. image path list
580582
folder_imgs = path + "/mirflickr"
581583
path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
582584
path_imgs.sort(key=natural_keys)
583-
# logging.info(path_imgs[0:10])
585+
584586
# 2. tag path list
585587
folder_tags = path + "/mirflickr/meta/tags"
586588
path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False)
587589
path_tags.sort(key=natural_keys)
588-
# logging.info(path_tags[0:10])
590+
589591
# 3. select images
590592
if tag is None:
591593
logging.info("[Flickr25k] reading all images")
@@ -643,7 +645,8 @@ def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printab
643645
]
644646
tag_zip = 'tags.zip'
645647
url = 'http://press.liacs.nl/mirflickr/mirflickr1m/'
646-
## download dataset
648+
649+
# download dataset
647650
for image_zip in images_zip[0:size]:
648651
image_folder = image_zip.split(".")[0]
649652
# logging.info(path+"/"+image_folder)
@@ -655,50 +658,46 @@ def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printab
655658
os.system("mv {} {}".format(path + '/images', path + '/' + image_folder))
656659
else:
657660
logging.info("[Flickr1M] {} exists in {}".format(image_folder, path))
658-
## download tag
661+
662+
# download tag
659663
if folder_exists(path + "/tags") is False:
660664
logging.info("[Flickr1M] tag files is nonexistent in {}".format(path))
661665
maybe_download_and_extract(tag_zip, path, url, extract=True)
662666
del_file(path + '/' + tag_zip)
663667
else:
664668
logging.info("[Flickr1M] tags exists in {}".format(path))
665669

666-
## 1. image path list
670+
# 1. image path list
667671
images_list = []
668672
images_folder_list = []
669673
for i in range(0, size):
670-
# images_folder_list += load_folder_list(path=path + '/images%d' % i)
671674
images_folder_list += load_folder_list(path=os.path.join(path, 'images%d' % i))
672675
images_folder_list.sort(key=lambda s: int(s.split('/')[-1])) # folder/images/ddd
673-
# logging.info(images_folder_list)
674-
# exit()
676+
675677
for folder in images_folder_list[0:size * 10]:
676678
tmp = load_file_list(path=folder, regx='\\.jpg', printable=False)
677679
tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.jpg
678-
# logging.info(tmp[0::570])
679680
images_list.extend([folder + '/' + x for x in tmp])
680-
# logging.info('IM', len(images_list), images_list[0::6000])
681-
## 2. tag path list
681+
682+
# 2. tag path list
682683
tag_list = []
683684
tag_folder_list = load_folder_list(path + "/tags")
684685
tag_folder_list.sort(key=lambda s: int(s.split('/')[-1])) # folder/images/ddd
685686

686687
for folder in tag_folder_list[0:size * 10]:
687-
# logging.info(folder)
688688
tmp = load_file_list(path=folder, regx='\\.txt', printable=False)
689689
tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.txt
690690
tmp = [folder + '/' + s for s in tmp]
691691
tag_list += tmp
692-
# logging.info('T', len(tag_list), tag_list[0::6000])
693-
# exit()
694-
## 3. select images
692+
693+
# 3. select images
695694
logging.info("[Flickr1M] searching tag: {}".format(tag))
696695
select_images_list = []
697-
for idx in range(0, len(tag_list)):
696+
for idx, _val in enumerate(tag_list):
698697
tags = read_file(tag_list[idx]).split('\n')
699698
if tag in tags:
700699
select_images_list.append(images_list[idx])
701-
# logging.info(idx, tags, tag_list[idx], images_list[idx])
700+
702701
logging.info("[Flickr1M] reading images with tag: {}".format(tag))
703702
images = visualize.read_images(select_images_list, '', n_threads=n_threads, printable=printable)
704703
return images
@@ -737,7 +736,7 @@ def load_image_from_folder(path):
737736
im_test_B = load_image_from_folder(os.path.join(path, filename, "testB"))
738737

739738
def if_2d_to_3d(images): # [h, w] --> [h, w, 3]
740-
for i in range(len(images)):
739+
for i, _v in enumerate(images):
741740
if len(images[i].shape) == 2:
742741
images[i] = images[i][:, :, np.newaxis]
743742
images[i] = np.tile(images[i], (1, 1, 3))
@@ -821,7 +820,7 @@ def load_celebA_dataset(path='data'):
821820
os.rename(os.path.join(path, zip_dir), image_path)
822821

823822
data_files = load_file_list(path=image_path, regx='\\.jpg', printable=False)
824-
for i in range(len(data_files)):
823+
for i, _v in enumerate(data_files):
825824
data_files[i] = os.path.join(image_path, data_files[i])
826825
return data_files
827826

@@ -930,7 +929,6 @@ def _recursive_parse_xml_to_dict(xml):
930929
from lxml import etree # pip install lxml
931930
import xml.etree.ElementTree as ET
932931

933-
##
934932
if dataset == "2012":
935933
url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/"
936934
tar_filename = "VOCtrainval_11-May-2012.tar"
@@ -967,7 +965,7 @@ def _recursive_parse_xml_to_dict(xml):
967965
else:
968966
raise Exception("Please set the dataset aug to 2012, 2012test or 2007.")
969967

970-
##======== download dataset
968+
# download dataset
971969
if dataset != "2012test":
972970
from sys import platform as _platform
973971
if folder_exists(os.path.join(path, extracted_filename)) is False:
@@ -990,7 +988,7 @@ def _recursive_parse_xml_to_dict(xml):
990988
else:
991989
os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007test".format(path, path))
992990
del_folder(os.path.join(path, 'VOCdevkit'))
993-
##======== object classes(labels) NOTE: YOU CAN CUSTOMIZE THIS LIST
991+
# object classes(labels) NOTE: YOU CAN CUSTOMIZE THIS LIST
994992
classes = [
995993
"aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
996994
"pottedplant", "sheep", "sofa", "train", "tvmonitor"
@@ -1005,7 +1003,7 @@ def _recursive_parse_xml_to_dict(xml):
10051003
classes_dict = utils.list_string_to_dict(classes)
10061004
logging.info("[VOC] object classes {}".format(classes_dict))
10071005

1008-
##======== 1. image path list
1006+
# 1. image path list
10091007
# folder_imgs = path+"/"+extracted_filename+"/JPEGImages/"
10101008
folder_imgs = os.path.join(path, extracted_filename, "JPEGImages")
10111009
imgs_file_list = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
@@ -1033,7 +1031,7 @@ def _recursive_parse_xml_to_dict(xml):
10331031
else:
10341032
imgs_semseg_file_list = []
10351033
imgs_insseg_file_list = []
1036-
##======== 4. annotations for bounding box and object class
1034+
# 4. annotations for bounding box and object class
10371035
# folder_ann = path+"/"+extracted_filename+"/Annotations/"
10381036
folder_ann = os.path.join(path, extracted_filename, "Annotations")
10391037
imgs_ann_file_list = load_file_list(path=folder_ann, regx='\\.xml', printable=False)
@@ -1053,7 +1051,7 @@ def _recursive_parse_xml_to_dict(xml):
10531051
imgs_file_list = imgs_file_list_new
10541052
logging.info("[VOC] keep %d images" % len(imgs_file_list_new))
10551053

1056-
##======== parse XML annotations
1054+
# parse XML annotations
10571055
def convert(size, box):
10581056
dw = 1. / size[0]
10591057
dh = 1. / size[1]
@@ -1065,7 +1063,7 @@ def convert(size, box):
10651063
w = w * dw
10661064
y = y * dh
10671065
h = h * dh
1068-
return (x, y, w, h)
1066+
return x, y, w, h
10691067

10701068
def convert_annotation(file_name):
10711069
"""Given VOC2012 XML Annotations, returns number of objects and info."""
@@ -1078,8 +1076,6 @@ def convert_annotation(file_name):
10781076
h = int(size.find('height').text)
10791077
n_objs = 0
10801078

1081-
# logging.info(file_name, w, h, size)
1082-
# exit()
10831079
for obj in root.iter('object'):
10841080
if dataset != "2012test":
10851081
difficult = obj.find('difficult').text
@@ -1094,7 +1090,7 @@ def convert_annotation(file_name):
10941090
xmlbox = obj.find('bndbox')
10951091
b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
10961092
bb = convert((w, h), b)
1097-
# out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
1093+
10981094
out_file += str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n'
10991095
n_objs += 1
11001096
if cls in "person":
@@ -1117,7 +1113,6 @@ def convert_annotation(file_name):
11171113
objs_info_list = [] # Darknet Format list of string
11181114
objs_info_dicts = {}
11191115
for idx, ann_file in enumerate(imgs_ann_file_list):
1120-
# logging.info(ann_file)
11211116
n_objs, objs_info = convert_annotation(ann_file)
11221117
n_objs_list.append(n_objs)
11231118
objs_info_list.append(objs_info)
@@ -1132,7 +1127,6 @@ def convert_annotation(file_name):
11321127
n_objs_list, objs_info_list, objs_info_dicts
11331128

11341129

1135-
## Load and save network list npz
11361130
def save_npz(save_list=None, name='model.npz', sess=None):
11371131
"""Input parameters and the file name, save parameters into .npz file. Use tl.utils.load_npz() to restore.
11381132
@@ -1169,7 +1163,6 @@ def save_npz(save_list=None, name='model.npz', sess=None):
11691163
- `Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`__
11701164
11711165
"""
1172-
## save params into a list
11731166
if save_list is None:
11741167
save_list = []
11751168

@@ -1187,13 +1180,6 @@ def save_npz(save_list=None, name='model.npz', sess=None):
11871180
del save_list_var
11881181
logging.info("[*] %s saved" % name)
11891182

1190-
## save params into a dictionary
1191-
# rename_dict = {}
1192-
# for k, value in enumerate(save_dict):
1193-
# rename_dict.update({'param'+str(k) : value.eval()})
1194-
# np.savez(name, **rename_dict)
1195-
# logging.info('Model is saved to: %s' % name)
1196-
11971183

11981184
def load_npz(path='', name='model.npz'):
11991185
"""Load the parameters of a Model saved by tl.files.save_npz().
@@ -1219,23 +1205,8 @@ def load_npz(path='', name='model.npz'):
12191205
- `Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`__
12201206
12211207
"""
1222-
## if save_npz save params into a dictionary
1223-
# d = np.load( path+name )
1224-
# params = []
1225-
# logging.info('Load Model')
1226-
# for key, val in sorted( d.items() ):
1227-
# params.append(val)
1228-
# logging.info('Loading %s, %s' % (key, str(val.shape)))
1229-
# return params
1230-
## if save_npz save params into a list
12311208
d = np.load(path + name)
1232-
# for val in sorted( d.items() ):
1233-
# params = val
1234-
# return params
12351209
return d['params']
1236-
# logging.info(d.items()[0][1]['params'])
1237-
# exit()
1238-
# return d.items()[0][1]['params']
12391210

12401211

12411212
def assign_params(sess, params, network):
@@ -1523,7 +1494,7 @@ def load_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list
15231494
else:
15241495
ckpt_file = os.path.join(save_dir, mode_name)
15251496

1526-
if var_list == []:
1497+
if not var_list:
15271498
var_list = tf.global_variables()
15281499

15291500
logging.info("[*] load %s n_params: %d" % (ckpt_file, len(var_list)))
@@ -1565,12 +1536,12 @@ def load_npy_to_any(path='', name='file.npy'):
15651536
file_path = os.path.join(path, name)
15661537
try:
15671538
npy = np.load(file_path).item()
1568-
except:
1539+
except Exception:
15691540
npy = np.load(file_path)
15701541
finally:
15711542
try:
15721543
return npy
1573-
except:
1544+
except Exception:
15741545
logging.info("[!] Fail to load %s" % file_path)
15751546
exit()
15761547

tensorlayer/layers/stack.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ def unstack_layer(layer, num=None, axis=0, name='unstack'):
8080
else:
8181
whole_name = name
8282

83-
for i in range(len(outputs)):
83+
for i, _v in enumerate(outputs):
8484
n = Layer(None, name=whole_name + str(i))
8585
n.outputs = outputs[i]
8686
n.all_layers = list(layer.all_layers)

tensorlayer/prepro.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2165,7 +2165,7 @@ def parse_darknet_ann_str_to_list(annotations):
21652165
for a in annotations:
21662166
a = a.split()
21672167
if len(a) == 5:
2168-
for i in range(len(a)):
2168+
for i, _v in enumerate(a):
21692169
if i == 0:
21702170
a[i] = int(a[i])
21712171
else:
@@ -3069,8 +3069,8 @@ def sequences_add_end_id_after_pad(sequences, end_id=888, pad_id=0):
30693069
# sequences_out[i].append(pad_id)
30703070
# # pad -- > end
30713071
# max_len = 0
3072-
for i in range(len(sequences)):
3073-
for j in range(len(sequences[i])):
3072+
for i, v in enumerate(sequences):
3073+
for j, _v2 in enumerate(v):
30743074
if sequences[i][j] == pad_id:
30753075
sequences_out[i][j] = end_id
30763076
# if j > max_len:

tensorlayer/visualize.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@ def draw_boxes_and_labels_to_image(image, classes, coords, scores, classes_list,
174174
imh, imw = image.shape[0:2]
175175
thick = int((imh + imw) // 430)
176176

177-
for i in range(len(coords)):
177+
for i, _v in enumerate(coords):
178178
if is_center:
179179
x, y, x2, y2 = prepro.obj_box_coord_centroid_to_upleft_butright(coords[i])
180180
else:

0 commit comments

Comments (0)