-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathdermnet.py
More file actions
164 lines (135 loc) · 4.93 KB
/
dermnet.py
File metadata and controls
164 lines (135 loc) · 4.93 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
'''
Written in Python 3.7
'''
import requests
from PIL import Image
from bs4 import BeautifulSoup
from io import BytesIO
import os
import shutil
# Base URL of the DermNet image archive; all scraped paths are joined onto it.
root = 'http://www.dermnet.com'
# Disease-category slugs exactly as they appear in DermNet /images/<slug> URLs.
type_name = ['Acne-and-Rosacea-Photos', 'Actinic-Keratosis-Basal-Cell-Carcinoma-and-other-Malignant-Lesions',
             'Atopic-Dermatitis-Photos', 'Bullous-Disease-Photos', 'Cellulitis-Impetigo-and-other-Bacterial-Infections',
             'Eczema-Photos', 'Exanthems-and-Drug-Eruptions', 'Hair-Loss-Photos-Alopecia-and-other-Hair-Diseases',
             'Herpes-HPV-and-other-STDs-Photos', 'Light-Diseases-and-Disorders-of-Pigmentation',
             'Lupus-and-other-Connective-Tissue-diseases', 'Melanoma-Skin-Cancer-Nevi-and-Moles', 'Nail-Fungus-and-other-Nail-Disease',
             'Poison-Ivy-Photos-and-other-Contact-Dermatitis', 'Psoriasis-pictures-Lichen-Planus-and-related-diseases',
             'Scabies-Lyme-Disease-and-other-Infestations-and-Bites', 'Seborrheic-Keratoses-and-other-Benign-Tumors',
             'Systemic-Disease', 'Tinea-Ringworm-Candidiasis-and-other-Fungal-Infections',
             'Urticaria-Hives', 'Vascular-Tumors', 'Vasculitis-Photos', 'Warts-Molluscum-and-other-Viral-Infections']
# Module-level accumulators filled in by main(), indexed in parallel with type_name:
#   type_LinksA[i]    - sub-category page links found under type_name[i]
#   type_PagesA[i]    - pagination page count for each of those sub-category links
#   type_SubLinksA[i] - full-size image URLs resolved for each sub-category link
type_LinksA = []
type_PagesA = []
type_SubLinksA = []
def get_max(link_):
    """Return the number of pagination pages for a DermNet listing page.

    Fetches ``link_``, looks for a ``<div class="pagination">`` block and
    reads the page number that appears immediately before the 'Next'
    control.  Pages without a pagination block have a single page, so the
    default is 1.

    Parameters:
        link_: absolute URL of a DermNet sub-category page.

    Returns:
        int: highest page number found, or 1 when no pagination exists.
    """
    response = requests.get(link_)
    soup = BeautifulSoup(response.text, "html5lib")
    max_ = 1
    for navi_ in soup.find_all("div", attrs={"class": "pagination"}):
        # Tracks the most recent numbered entry; initialised per block so a
        # 'Next' with no preceding number can never read an unbound name
        # (the original code could raise NameError here).
        last_ = None
        for child in navi_.children:
            try:
                text = child.contents[0]
            except (AttributeError, IndexError):
                # Plain text nodes / empty tags have no contents[0]; skip them
                # (the original bare `except:` hid all errors indiscriminately).
                continue
            if text == 'Next':
                # The entry just before 'Next' carries the highest page number.
                if last_ is not None:
                    max_ = int(last_)
                break
            last_ = text
    return int(max_)
'''
Util: /Photo/ -> Real Links
'''
def photo2links(PhotosL):
    """Resolve DermNet /photos/ listing pages into full-size image URLs.

    Each listing page embeds thumbnail images whose ``src`` contains the
    marker 'Thumb'; removing that marker yields the full-resolution URL.

    Parameters:
        PhotosL: iterable of listing-page URLs to fetch.

    Returns:
        list: de-duplicated full-size image URLs (order unspecified, as
        with the original list(set(...)) round-trip).
    """
    full_links = set()
    for url_ in PhotosL:
        soup = BeautifulSoup(requests.get(url_).text, "html5lib")
        for img in soup.find_all("img"):
            src = img.get("src")
            # img.get("src") returns None for tags without a src attribute;
            # the original `'Thumb' in link_` would raise TypeError on those.
            if src and 'Thumb' in src:
                full_links.add(src.replace('Thumb', ''))
    return list(full_links)
def download(url, image_path):
    """Stream the resource at ``url`` to ``image_path`` on disk.

    Best-effort: any failure is printed (exception, path, URL) and the
    function returns without raising, so a single bad image does not
    abort the whole crawl.

    Parameters:
        url: image URL to fetch.
        image_path: destination file path.
    """
    try:
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(image_path, 'wb') as f:
                # Ask urllib3 to decompress gzip/deflate so raw bytes are the image.
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
            # Report success only for an actual 200 + write; the original
            # try/else printed success even for non-200 responses, and its
            # Python 2 `print "..."` statement was a Python 3 syntax error.
            print("Successfully saved " + image_path)
    except Exception as e:
        print(e)
        print("Failed to save " + image_path)
        print(url)
'''
Main
'''
def main():
    """Crawl DermNet and mirror its image archive under ./DermNet/.

    Three phases:
      1. For every disease category in ``type_name``, collect the
         sub-category links found on its /images/ page.
      2. For every sub-category link, determine its page count, expand it
         into /photos/1..N listing URLs, and resolve those to full-size
         image URLs.
      3. Recreate the category/sub-category directory tree under
         ./DermNet/ and download every image into it.

    Side effects: appends into the module-level type_LinksA, type_PagesA
    and type_SubLinksA accumulators, creates directories, writes files,
    and prints progress.
    """
    # Phase 1: category pages -> sub-category links.
    for sub_ in type_name:
        SubLinks = []
        soup_page1 = BeautifulSoup(requests.get(root + '/images/' + sub_).text, "html5lib")
        for link in soup_page1.find_all('a'):
            id_ = link.get('href')
            # href may be missing (None); the original `'/images/' in id_`
            # raised TypeError in that case.
            if id_ and '/images/' in id_:
                SubLinks.append(root + id_)
        type_LinksA.append(SubLinks)
    # Phase 2: sub-category links -> full-size image URLs.
    attr = '/photos/'
    for type_ in type_LinksA:
        maxL_ = []
        thumbRLinks = []
        for link_ in type_:
            max_ = get_max(link_)
            maxL_.append(max_)
            print(link_)  # original `print link_` was Python 2 syntax
            PhotosL = [link_ + attr + str(page + 1) for page in range(max_)]
            print("Pages: ", len(PhotosL))
            realLinks = photo2links(PhotosL)
            print("Links: ", len(realLinks))
            thumbRLinks.append(realLinks)
        type_PagesA.append(maxL_)
        type_SubLinksA.append(thumbRLinks)
    # Phase 3: download into ./DermNet/<category>/<sub-category>/.
    dir_root = './DermNet/'
    os.makedirs(dir_root, exist_ok=True)
    # enumerate gives the position directly; the original list.index()
    # lookups would return the wrong slot if any entry were duplicated.
    for iloc, category in enumerate(type_name):
        dir_disease = dir_root + category + '/'
        os.makedirs(dir_disease, exist_ok=True)
        print("Disease: ", dir_disease)
        for iloc_sub, sub_ in enumerate(type_LinksA[iloc]):
            dir_sub_disease = dir_disease + sub_.split('/')[-1]
            os.makedirs(dir_sub_disease, exist_ok=True)
            for l_ in type_SubLinksA[iloc][iloc_sub]:
                img_path = dir_sub_disease + '/' + l_.split('/')[-1]
                download(l_, img_path)
if __name__ == "__main__":
    main()