Skip to content

Commit 775654a

Browse files
authored
Merge pull request #15 from Oshan96/dev
Dev Changes for v1.0.2
2 parents 5d8b6a1 + b82efc1 commit 775654a

File tree

8 files changed

+76
-15
lines changed

8 files changed

+76
-15
lines changed

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,8 @@ If this project is helpful to you and love my work and feel like showing love/ap
1313
|--- |--- |--- |--- |--- |--- |
1414
| [9Anime](https://9anime.to/) | Yes | Default only | No | 500-600MB | Will always work, provided token |
1515
| [4Anime](https://4anime.to/) | No | Default only | No | Around 150MB | Upon failure, visit 4anime website and restart anime downloader |
16-
| [AnimePahe](https://animepahe.com/) | No | 720p, 1080p | No | 720p: ~150MB, 1080p: ~200MB | Anime Downloader v1.0.1 upwards(v1.0.0 no longer works). Also download speed is capped by host |
17-
| [AnimeUltima](https://www.animeultima.to/) | No | 240p, 360p, 480p, 720p, 1080p | Yes | 1080p is around 1GB | - |
16+
| [AnimePahe](https://animepahe.com/) | No | 720p, 1080p | No | 720p: ~150MB, 1080p: ~200MB | 2captcha API key is needed to download from AnimePahe. Also download speed is capped by host |
17+
| [AnimeUltima](https://www.animeultima.to/) | No | 240p, 360p, 480p, 720p, 1080p | Yes | 1080p is around 1GB | AnimeUltima is having issues on their end. Will be supported again once they are back up |
1818

1919
## Download Anime Downloader [Windows]
2020
> Note : Currently only a Windows executable is provided (Linux and Mac users, go to [Build from source](#Building-from-source))

anime_downloader/extractors/base_extractor.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,13 @@
1+
import cloudscraper
12
class BaseExtractor:
23

34
def __init__(self, url, session):
45
self.url = url
56
self.session = session
67

78
def extract_page_content(self):
8-
video_page = self.session.get(self.url).content
9-
return video_page.decode('utf-8')
9+
video_page = self.session.get(self.url)
10+
return video_page.text
1011

1112
def extract_direct_url(self):
1213
raise NotImplementedError

anime_downloader/extractors/jwplayer_extractor.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,14 +10,19 @@ def __init__(self, url, session):
1010
def extract_sources(self):
1111
page_content = self.extract_page_content()
1212

13+
# print(page_content)
14+
1315
link_sources = [match.group(1) for match in
1416
re.finditer("{\s*file\s*:\s*[\"\']\s*([htps][^\"\']+)", page_content)]
1517

1618
return link_sources
1719

1820
def extract_direct_url(self):
21+
print("extracting direct stream links")
1922
direct_links = self.extract_sources()
2023

24+
# print(direct_links)
25+
2126
if len(direct_links) > 0:
2227
# return the first direct link
2328
return direct_links[0]
@@ -51,6 +56,7 @@ def get_resolution_link(self, master_url, resolution):
5156
return link
5257

5358
def extract_stream_link(self, resolution="720"):
59+
print("Extracting stream link")
5460
link = self.extract_direct_url()
5561

5662
print("Master Link : " + link)

anime_downloader/gui/GUI.py

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import queue
2+
import json
23
import cloudscraper
34
import PySimpleGUI as sg
45
from threading import Thread
@@ -40,6 +41,27 @@ def download(anime_url, names_url, start_epi, end_epi, is_filler, is_titles, tok
4041

4142
elif "animepahe.com" in anime_url:
4243
printer("INFO", "AnimePahe URL detected...", gui)
44+
api_key = ""
45+
try:
46+
with open("settings.json") as (json_file):
47+
data = json.load(json_file)
48+
api_key = data["api_key"]
49+
except:
50+
api_key = ""
51+
52+
if api_key != "" and api_key != "insert_2captcha_api_key":
53+
session = cloudscraper.create_scraper(
54+
recaptcha={
55+
'provider': '2captcha',
56+
'api_key': api_key
57+
}
58+
)
59+
60+
else:
61+
printer("ERROR", "You need 2captcha API key to download from AnimePahe!", gui)
62+
printer("ERROR", "Set 2captcha API key in 'settings.json' file to download from AnimePahe!", gui)
63+
return
64+
4365
scraper = AnimePaheScraper(anime_url, start_epi, end_epi, session, gui, resolution, is_filler)
4466

4567
else:

anime_downloader/scrapers/animepahe/animepahe_scraper.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import re
2+
from bs4 import BeautifulSoup
23
from util.Episode import Episode
34
from scrapers.base_scraper import BaseScraper
45
from util.Color import printer
@@ -16,9 +17,17 @@ def __init__(self, url, start_episode, end_episode, session, gui=None, resolutio
1617
self.end_page = 1
1718
self.extractor = KwikExtractor(session, gui)
1819

20+
self.__set_working_url()
1921
self.__set_anime_id()
2022
self.__set_start_end_page()
2123

24+
def __set_working_url(self):
25+
page = self.session.get(self.url).content
26+
soup_page = BeautifulSoup(page, "html.parser")
27+
og_url = soup_page.find("meta", attrs={"property": "og:url"})
28+
if og_url is not None:
29+
self.url = og_url["content"]
30+
2231
def __set_anime_id(self):
2332
page = self.session.get(self.url).text
2433
self.id = re.search("release&id=(.*)&l=", page).group(1)

anime_downloader/scrapers/animeultima/animeultima_scraper.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ class AnimeUltimaScraper(BaseScraper):
88

99
def __init__(self, url, start_episode, end_episode, session, gui=None, resolution="720", is_dub=False):
1010
super().__init__(url, start_episode, end_episode, session, gui)
11-
self.is_dub = False
11+
self.is_dub = is_dub
1212
self.resolution = resolution
1313
self.base_url = "https://www1.animeultima.to"
1414
self.extractor = JWPlayerExtractor(None, self.session)
@@ -37,6 +37,8 @@ def get_start_and_end_page(self, anime_id):
3737

3838
data = self.session.get("https://www1.animeultima.to/api/episodeList?animeId=" + anime_id).json()
3939

40+
# print("start end data")
41+
# print(data)
4042
last_page = data["last_page"]
4143
max_total_epis = last_page * 50
4244

@@ -75,25 +77,30 @@ def collect_episodes(self, anime_id, start_page, end_page):
7577
url = base_url + str(page_counter)
7678

7779
data = self.session.get(url).json()
80+
# print("data")
81+
# print(data)
82+
7883
has_dub = data["anime"]["hasDub"]
7984
epis = data["episodes"]
8085

8186
for epi in epis:
8287
epi_no = int(epi["episode_num"])
88+
# print(str(epi_no))
8389

8490
if epi_no < self.start_episode or epi_no > self.end_episode:
8591
continue
8692

8793
title = epi["title"]
8894
page_url = None
8995
if not self.is_dub:
96+
# print("sub")
9097
page_url = epi["urls"]["sub"]
9198
elif has_dub:
9299
page_url = epi["urls"]["dub"]
93100
else:
94101
print("Dubbed episodes not available")
95102

96-
if page_url:
103+
if page_url is not None:
97104
page_url = self.get_page_url(page_url)
98105

99106
episode = Episode(title, "Episode - " + str(epi_no))

anime_downloader/scrapers/nineanime/nineanime_scraper.py

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@ def __init__(self, url, start_episode, end_episode, session, gui=None, token=Non
1919
self.server_name = "Mp4upload"
2020
self.nine_anime_url = "https://9anime.to"
2121

22+
self.headers = {"origin": self.nine_anime_url, "referer": url, "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36 Edg/80.0.361.109"}
23+
2224
self.episodes_url = "https://9anime.to/ajax/film/servers/" + url.split(".")[2].split("/")[0]
2325

2426
if not token:
@@ -45,10 +47,14 @@ def __verify(self):
4547
"g-recaptcha-response": self.token
4648
}
4749

48-
self.session.post("https://9anime.to/waf-verify", data=payload)
50+
data = self.session.post("https://9anime.to/waf-verify", data=payload, headers=self.headers, allow_redirects=False)
51+
self.headers["cookie"] = data.headers["set-cookie"]
4952

5053
def __extract_page_urls(self):
51-
if self.token is None :
54+
d = self.session.get("https://9anime.to/waf-verify", headers=self.headers, allow_redirects=True)
55+
self.headers["cookie"] = d.headers["set-cookie"]
56+
57+
if self.token is None:
5258
if self.api_key != "" and self.api_key != "insert_2captcha_api_key":
5359
Color.printer("INFO", "Solving recaptcha...", self.gui)
5460

@@ -60,21 +66,29 @@ def __extract_page_urls(self):
6066
Color.printer("INFO", "Trying to continue ...", self.gui)
6167

6268
if self.token:
69+
# print(self.token)
6370
self.__verify()
6471
else:
6572
Color.printer("INFO", "No API key or token given, trying to continue...", self.gui)
6673

6774
Color.printer("INFO", "Extracting page URLs...", self.gui)
6875

69-
anime_page = self.session.get(self.url).content
76+
data = self.session.get(self.url, headers=self.headers)
77+
anime_page = data.content
78+
7079
soup_html = BeautifulSoup(anime_page, "html.parser")
7180

72-
try :
81+
try:
7382
self.ts_no = soup_html.find("html")["data-ts"]
7483

7584
eps_url = self.episodes_url + "?ts=" + self.ts_no
7685

77-
epi_data = self.session.get(eps_url).json()["html"]
86+
self.headers["referer"] = eps_url
87+
88+
resp = self.session.get(eps_url, headers=self.headers, allow_redirects=False)
89+
epi_data = resp.json()["html"]
90+
91+
# print(epi_data)
7892

7993
soup = BeautifulSoup(epi_data, "html.parser")
8094

@@ -119,7 +133,7 @@ def __extract_download_urls(self):
119133
continue
120134

121135
url = down_base + "ts=" + self.ts_no + "&id=" + episode.id + "&server=" + self.server_id
122-
target = self.session.get(url).json()["target"]
136+
target = self.session.get(url, headers=self.headers).json()["target"]
123137

124138
episode.page_url = target
125139

requirements.txt

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
1-
art==4.5
21
requests==2.22.0
3-
cloudscraper==1.2.30
2+
art==4.5
3+
cloudscraper==1.2.33
44
beautifulsoup4==4.8.2
5-
PySimpleGUI==4.16.0
5+
js2py==0.68
6+
PySimpleGUI==4.18.0
7+
polling==0.3.1

0 commit comments

Comments
 (0)