"""Extractor for mangahere.co."""

from base_comic import BaseComic, BaseChapter
from urllib.parse import urlparse, urljoin
import requests
import bs4 as bsoup
import re
import os
import shutil
from random import shuffle, uniform
from time import sleep


class MangaHereComic(BaseComic):
    """Comic extractor for mangahere.co."""

    def extract_chapters(self):
        """Extract the chapters of a comic (backbone)."""
        comic_name = self.name
        url = self.url
        urlscheme = urlparse(url)

        # Fetch the comic's index page and collect candidate chapter links
        r = requests.get(url, verify=self.verify_https)
        soup = bsoup.BeautifulSoup(r.text, 'html.parser')

        chapters = {}
        links = [link.get('href')
                 for link in soup.find_all('a')
                 if link.get('href') and
                 (comic_name in link.get('href')) and
                 ('manga' in link.get('href'))]

        for link in links:
            # Drop the trailing page component so the link points at the
            # chapter itself, then resolve it against the site root
            chapter_link = urljoin(urlscheme.scheme
                                   + "://" + urlscheme.netloc,
                                   '/'.join(link.split('/')[:-1]))
            matched_groups = re.search(r'c([\d \.]+)', chapter_link)
            if matched_groups:
                chapter_num = float(matched_groups.group(1))
                if chapter_num not in chapters:
                    chapters[chapter_num] = MangaHereChapter(
                        self, chapter_num, chapter_link)

        return chapters
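
# Illustrative sketch of the chapter-number parsing above; the URL shape is
# an assumption about mangahere.co links, not taken from this file:
#
#     >>> re.search(r'c([\d \.]+)', '/manga/example_comic/c012.5').group(1)
#     '012.5'
#     >>> float('012.5')
#     12.5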


class MangaHereChapter(BaseChapter):
    """Chapter extractor for mangahere.co."""

    def get_pages(self):
        """Obtain the list of pages in a manga chapter."""
        base_url = self.chapter_url
        max_retries = self.max_retries
        wait_retry_time = self.wait_time

        while True:
            # Fetch the chapter page and collect its javascript blocks
            r = requests.get(base_url, verify=self.verify_https)
            soup = bsoup.BeautifulSoup(r.text, 'html.parser')
            scripts = soup.find_all('script',
                                    attrs={'type': 'text/javascript'})

            # Look for the total page count in the inline javascript;
            # leave it at 0 (and retry below) if no script declares it
            total_pages = 0
            for script in scripts:
                if script.contents:
                    matched_groups = re.search(
                        r'var total_pages\s?=\s?(\d*)\s?;',
                        script.contents[0])
                    if matched_groups:
                        total_pages = int(matched_groups.group(1))
                        break

            if total_pages:
                # Build the per-page urls and shuffle them so downloads
                # do not hit the server in strict sequential order
                page_urls = ["%s/%d.html" % (self.chapter_url, i + 1)
                             for i in range(total_pages)]
                page_nums = [i + 1 for i in range(total_pages)]
                pages = list(zip(page_urls, page_nums))
                shuffle(pages)

                return True, pages

            elif max_retries > 0:
                # Randomized back-off before retrying; idea from
                # manga_downloader (which in turn took it from wget)
                sleep(uniform(0.5 * wait_retry_time, 1.5 * wait_retry_time))
                max_retries -= 1
            else:
                return False, None
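
    # Illustrative sketch of the page-count parsing above; the script text
    # is an assumption about mangahere.co pages, not taken from this file:
    #
    #     >>> re.search(r'var total_pages\s?=\s?(\d*)\s?;',
    #     ...           'var total_pages = 24;').group(1)
    #     '24'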

    def download_page(self, page):
        """Download an individual page of a manga chapter."""
        page_url, page_num = page
        filename = os.path.join(self.chapter_location,
                                '%03d.jpg' % page_num)

        max_retries = self.max_retries
        wait_retry_time = self.wait_time

        while True:
            # The page embeds the actual scan as <img id="image">
            r = requests.get(page_url, verify=self.verify_https)
            soup = bsoup.BeautifulSoup(r.text, 'html.parser')
            img = soup.find('img', attrs={'id': 'image'})
            if img:
                self.download_image(img.get('src'), filename)
                return True
            elif max_retries > 0:
                # Randomized back-off before retrying; idea from
                # manga_downloader (which in turn took it from wget)
                sleep(uniform(0.5 * wait_retry_time, 1.5 * wait_retry_time))
                max_retries -= 1
            else:
                # Give up and drop in a placeholder so the chapter still
                # has a file for this page number
                print("Failed download: Chapter-%g, page-%d"
                      % (self.chapter_num, page_num))
                shutil.copyfile(
                    os.path.join(os.path.dirname(
                        os.path.realpath(__file__)),
                        'no_image_available.png'),
                    filename)
                return False
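

if __name__ == '__main__':
    # Minimal usage sketch. The BaseComic constructor is not shown in this
    # file, so the keyword arguments below (name, url, verify_https) are
    # assumptions for illustration only.
    comic = MangaHereComic(
        name='example_comic',
        url='http://www.mangahere.co/manga/example_comic/',
        verify_https=True)
    chapters = comic.extract_chapters()
    for num in sorted(chapters):
        ok, pages = chapters[num].get_pages()
        if ok:
            for page in pages:
                chapters[num].download_page(page)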