@@ -36,10 +36,10 @@ def __init__(self, title, url):
         self.url = url
 
 
-def get_soup_session(url, cw=None):
+def get_soup_session(url, cw=None, win=None):
     print_ = get_print(cw)
     session = Session()
-    res = clf2.solve(url, session=session, cw=cw)
+    res = clf2.solve(url, session=session, cw=cw, win=win)
     print_('{} -> {}'.format(url, res['url']))
     if res['url'].rstrip('/') == 'https://welovemanga.one':
         raise errors.LoginRequired()
@@ -55,10 +55,9 @@ class Downloader_lhscan(Downloader):
         ]
     MAX_CORE = 16
     display_name = 'LHScan'
-    _soup = None
 
     def init(self):
-        self._soup, self.session = get_soup_session(self.url, self.cw)
+        self.soup, self.session = get_soup_session(self.url, self.cw)
         if not self.soup.find('ul', class_='manga-info'):
             raise errors.Invalid('{}: {}'.format(tr_('목록 주소를 입력해주세요'), self.url))
 
@@ -68,21 +67,6 @@ def fix_url(cls, url):
         url = url.replace('welovemanga.net', 'welovemanga.one') #4298
         return url
 
-    @property
-    def soup(self):
-        if self._soup is None:
-            for try_ in range(8):
-                try:
-                    html = downloader.read_html(self.url, session=self.session)
-                    break
-                except Exception as e:
-                    e_ = e
-                    print(e)
-            else:
-                raise e_
-            self._soup = Soup(html)
-        return self._soup
-
     @property
     def name(self):
         title = self.soup.find('ul', class_='manga-info').find('h3').text
@@ -115,10 +99,10 @@ def get_imgs_page(page, referer, session, cw=None):
         pass
     soup = Soup(html)
 
-    view = soup.find('div', class_='chapter-content')
-
-    if not view:
-        raise Exception('no chapter-content')
+    cid = re.find(r'''load_image\(([0-9]+)''', html)
+    if cid: #6186
+        url_api = urljoin(page.url, f'/app/manga/controllers/cont.listImg.php?cid={cid}')
+        soup = downloader.read_soup(url_api, page.url, session=session)
 
     imgs = []
     for img in soup.findAll('img', class_='chapter-img'):
@@ -140,9 +124,12 @@ def get_imgs_page(page, referer, session, cw=None):
             continue
         if '/uploads/lazy_loading.gif' in src:
             continue
+        if '/xstaff.jpg.pagespeed.ic.gPQ2SGcYaN.webp' in src:
+            continue
         src = src.replace('\n', '').replace('\r', '') #5238
-        if 'proxy.php?link=' not in src: #5351
-            src = 'https://welovekai.com/proxy.php?link=' + src #5238
+        #6105
+##        if 'proxy.php?link=' not in src: #5351
+##            src = 'https://welovekai.com/proxy.php?link=' + src #5238
         if not imgs:
             print_(src0)
             print_(src)
@@ -174,9 +161,8 @@ def get_pages(url, session, soup=None, cw=None):
 
 
 @page_selector.register('lhscan')
-@try_n(4)
-def f(url):
-    soup, session = get_soup_session(url)
+def f(url, win):
+    soup, session = get_soup_session(url, win=win)
     pages = get_pages(url, session, soup=soup)
     return pages
 
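
For context on the new `cid` branch in `get_imgs_page`: when the chapter page no longer carries a usable image list and instead calls `load_image(<cid>)` inline, the patch extracts that numeric id, fetches `/app/manga/controllers/cont.listImg.php?cid=<cid>`, and reads the `chapter-img` tags from that response. A rough standalone sketch of the same flow, using plain `requests`/`BeautifulSoup` in place of the project's `Session`, `re.find`, and `downloader.read_soup` helpers (the endpoint path and class name come from the diff; the attribute lookups and Referer handling are assumptions):

```python
import re
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup


def list_chapter_images(chapter_url):
    """Sketch of the patched get_imgs_page() fallback (not the project code)."""
    session = requests.Session()
    html = session.get(chapter_url).text
    soup = BeautifulSoup(html, 'html.parser')

    # New behaviour from the diff: pull the numeric chapter id out of the
    # inline load_image(...) call and query the image-list endpoint with it.
    m = re.search(r'load_image\((\d+)', html)
    if m:
        url_api = urljoin(chapter_url,
                          f'/app/manga/controllers/cont.listImg.php?cid={m.group(1)}')
        # The diff calls downloader.read_soup(url_api, page.url, session=session);
        # sending the chapter URL as Referer approximates that second argument.
        res = session.get(url_api, headers={'Referer': chapter_url})
        soup = BeautifulSoup(res.text, 'html.parser')

    srcs = []
    for img in soup.find_all('img', class_='chapter-img'):
        # 'data-src' vs 'src' is an assumption; the original builds src0 from
        # several attributes before cleaning it up.
        src = (img.get('data-src') or img.get('src') or '').strip()
        src = src.replace('\n', '').replace('\r', '')
        if not src or '/uploads/lazy_loading.gif' in src:
            continue  # skip empty/lazy-load placeholders, as the original loop does
        srcs.append(src)
    return srcs
```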