Skip to content
This repository was archived by the owner on May 2, 2026. It is now read-only.

Commit 74487a2

Browse files
committed
Revert "Delete viu_media/libs/provider directory"
This reverts commit 7e93e4e.
1 parent 5dd9b38 commit 74487a2

42 files changed

Lines changed: 3140 additions & 0 deletions

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

viu_media/libs/provider/__init__.py

Whitespace-only changes.

viu_media/libs/provider/anime/__init__.py

Whitespace-only changes.
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
import re

from .....core.constants import GRAPHQL_DIR

# Server names AllAnime is known to serve episodes from.
SERVERS_AVAILABLE = [
    "sharepoint",
    "dropbox",
    "gogoanime",
    "weTransfer",
    "wixmp",
    "Yt",
    "mp4-upload",
]

# AllAnime API endpoints and the headers its GraphQL API expects.
API_BASE_URL = "allanime.day"
API_GRAPHQL_REFERER = "https://allanime.to/"
API_GRAPHQL_ENDPOINT = f"https://api.{API_BASE_URL}/api/"
# Fixed: `API_GRAPHQL_HEADERS= {` was missing the space before `=` (PEP 8).
API_GRAPHQL_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Content-Type": "application/json",
    "Origin": f"{API_GRAPHQL_REFERER}",
}

# search constants — defaults applied when the caller does not override them.
DEFAULT_COUNTRY_OF_ORIGIN = "all"
DEFAULT_NSFW = True
DEFAULT_UNKNOWN = True
DEFAULT_PER_PAGE = 40
DEFAULT_PAGE = 1

# regex stuff — pulls the direct mp4 url out of an embed page's player markup.
MP4_SERVER_JUICY_STREAM_REGEX = re.compile(
    r"video/mp4\",src:\"(https?://.*/video\.mp4)\""
)

# graphql files bundled with the package.
_GQL_QUERIES = GRAPHQL_DIR / "allanime" / "queries"
SEARCH_GQL = _GQL_QUERIES / "search.gql"
ANIME_GQL = _GQL_QUERIES / "anime.gql"
EPISODE_GQL = _GQL_QUERIES / "episodes.gql"
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
# Public surface of the extractors subpackage: only the dispatch entrypoint.
from .extractor import extract_server

__all__ = ["extract_server"]
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
from ...types import EpisodeStream, Server
2+
from ..constants import API_BASE_URL
3+
from ..types import AllAnimeEpisode, AllAnimeSource
4+
from .base import BaseExtractor
5+
6+
7+
class AkExtractor(BaseExtractor):
    """Extractor for the "Ak" AllAnime source.

    Fetches the source's JSON endpoint and wraps every returned link in
    a Server.
    """

    @classmethod
    def extract(
        cls,
        url: str,
        client,
        episode_number: str,
        episode: AllAnimeEpisode,
        source: AllAnimeSource,
    ) -> Server:
        # The embed path contains "clock"; the JSON variant lives at
        # "clock.json" on the API host.
        response = client.get(
            f"https://{API_BASE_URL}{url.replace('clock', 'clock.json')}",
            timeout=10,
        )
        response.raise_for_status()
        streams = response.json()

        return Server(
            name="Ak",
            links=[
                # NOTE(review): assumes each entry of streams["links"] is a
                # plain url string and is 1080p — confirm against the API.
                EpisodeStream(link=link, quality="1080")
                for link in streams["links"]
            ],
            episode_title=episode["notes"],
            headers={"Referer": f"https://{API_BASE_URL}/"},
        )
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
from abc import ABC, abstractmethod
2+
3+
from httpx import Client
4+
5+
from ...types import Server
6+
from ..types import AllAnimeEpisode, AllAnimeSource
7+
8+
9+
class BaseExtractor(ABC):
    """Abstract interface every AllAnime server extractor implements."""

    @classmethod
    @abstractmethod
    def extract(
        cls,
        url: str,
        client: Client,
        episode_number: str,
        episode: AllAnimeEpisode,
        source: AllAnimeSource,
    ) -> Server | None:
        """Resolve *url* into a Server carrying playable episode streams.

        Implementations return None when no streams could be resolved.
        """
        pass
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
from ...types import EpisodeStream, Server
2+
from ..constants import API_BASE_URL
3+
from ..types import AllAnimeEpisode, AllAnimeSource
4+
from .base import BaseExtractor
5+
6+
7+
class SakExtractor(BaseExtractor):
    """Extractor for the "Sak" (dropbox-hosted) AllAnime source."""

    @classmethod
    def extract(
        cls,
        url,
        client,
        episode_number: str,
        episode: AllAnimeEpisode,
        source: AllAnimeSource,
    ) -> Server:
        # The JSON variant of the embed endpoint lives at "clock.json".
        endpoint = f"https://{API_BASE_URL}{url.replace('clock', 'clock.json')}"
        resp = client.get(endpoint, timeout=10)
        resp.raise_for_status()
        payload = resp.json()

        stream_links = [
            EpisodeStream(link=stream_url, quality="1080")
            for stream_url in payload["links"]
        ]
        return Server(
            name="dropbox",
            links=stream_links,
            episode_title=episode["notes"],
            headers={"Referer": f"https://{API_BASE_URL}/"},
        )
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
from httpx import Client
2+
3+
from ...types import Server
4+
from ..types import AllAnimeEpisode, AllAnimeSource
5+
from ..utils import debug_extractor, logger, one_digit_symmetric_xor
6+
from .ak import AkExtractor
7+
from .dropbox import SakExtractor
8+
from .filemoon import FmHlsExtractor, OkExtractor
9+
from .gogoanime import Lufmp4Extractor
10+
from .mp4_upload import Mp4Extractor
11+
from .sharepoint import Smp4Extractor
12+
from .streamsb import SsHlsExtractor
13+
from .vid_mp4 import VidMp4Extractor
14+
from .we_transfer import KirExtractor
15+
from .wixmp import DefaultExtractor
16+
from .yt_mp4 import YtExtractor
17+
18+
# Maps an AllAnime "sourceName" to the extractor class that handles it.
AVAILABLE_SOURCES = {
    "Sak": SakExtractor,
    "S-mp4": Smp4Extractor,
    "Luf-Mp4": Lufmp4Extractor,
    "Default": DefaultExtractor,
    "Yt-mp4": YtExtractor,
    "Kir": KirExtractor,
    "Mp4": Mp4Extractor,
}
# Recognised sources that extract_server deliberately skips.
OTHER_SOURCES = {
    "Ak": AkExtractor,
    "Vid-mp4": VidMp4Extractor,
    "Ok": OkExtractor,
    "Ss-Hls": SsHlsExtractor,
    "Fm-Hls": FmHlsExtractor,
}
34+
35+
36+
@debug_extractor
def extract_server(
    client: Client,
    episode_number: str,
    episode: AllAnimeEpisode,
    source: AllAnimeSource,
) -> Server | None:
    """Dispatch *source* to its matching extractor and return its Server.

    Returns None when the source has no url, is deliberately ignored, or
    is not a recognised source name.
    """
    url = source.get("sourceUrl")
    if not url:
        logger.debug(f"Url not found in source: {source}")
        return None

    # A leading "--" marks an encrypted url; decrypt it in place.
    if url.startswith("--"):
        url = one_digit_symmetric_xor(56, url[2:])

    logger.debug(f"Decrypting url for source: {source['sourceName']}")
    source_name = source["sourceName"]
    if source_name in OTHER_SOURCES:
        logger.debug(f"Found {source_name} but ignoring")
        return None

    extractor = AVAILABLE_SOURCES.get(source_name)
    if extractor is None:
        logger.debug(
            f"Found {source_name} but did not expect it, its time to scrape lol"
        )
        return None
    logger.debug(f"Found {source_name}")

    return extractor.extract(url, client, episode_number, episode, source)
Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
from ...types import EpisodeStream, Server
2+
from ..constants import API_BASE_URL, MP4_SERVER_JUICY_STREAM_REGEX
3+
from ..types import AllAnimeEpisode, AllAnimeSource
4+
from .base import BaseExtractor
5+
6+
7+
# TODO: requires decoding obfuscated js (filemoon)
class FmHlsExtractor(BaseExtractor):
    """Extractor for the "Fm-Hls" (filemoon) AllAnime source.

    Only works when the embed page exposes a direct mp4 url the stream
    regex can find; fully obfuscated players are not handled yet.
    """

    @classmethod
    def extract(
        cls,
        url,
        client,
        episode_number: str,
        episode: AllAnimeEpisode,
        source: AllAnimeSource,
    ) -> Server:
        response = client.get(
            f"https://{API_BASE_URL}{url.replace('clock', 'clock.json')}",
            timeout=10,
        )
        response.raise_for_status()

        # Strip whitespace so the stream regex can match across line breaks.
        embed_html = response.text.replace(" ", "").replace("\n", "")
        vid = MP4_SERVER_JUICY_STREAM_REGEX.search(embed_html)
        if not vid:
            # Was `raise Exception("")` — the empty message made failures
            # impossible to diagnose.
            raise Exception(
                f"Fm-Hls: no direct mp4 stream found in embed page for {url}"
            )
        # NOTE(review): name "dropbox" and the mp4upload Referer look
        # copy-pasted from another extractor — confirm the intended values.
        return Server(
            name="dropbox",
            links=[EpisodeStream(link=vid.group(1), quality="1080")],
            episode_title=episode["notes"],
            headers={"Referer": "https://www.mp4upload.com/"},
        )
34+
35+
36+
# TODO: requires decoding obfuscated js (filemoon)
class OkExtractor(BaseExtractor):
    """Extractor for the "Ok" AllAnime source.

    Only works when the embed page exposes a direct mp4 url the stream
    regex can find; fully obfuscated players are not handled yet.
    """

    @classmethod
    def extract(
        cls,
        url,
        client,
        episode_number: str,
        episode: AllAnimeEpisode,
        source: AllAnimeSource,
    ) -> Server:
        response = client.get(
            f"https://{API_BASE_URL}{url.replace('clock', 'clock.json')}",
            timeout=10,
        )
        response.raise_for_status()

        # Strip whitespace so the stream regex can match across line breaks.
        embed_html = response.text.replace(" ", "").replace("\n", "")
        vid = MP4_SERVER_JUICY_STREAM_REGEX.search(embed_html)
        if not vid:
            # Was `raise Exception("")` — the empty message made failures
            # impossible to diagnose.
            raise Exception(
                f"Ok: no direct mp4 stream found in embed page for {url}"
            )
        # NOTE(review): name "dropbox" and the mp4upload Referer look
        # copy-pasted from another extractor — confirm the intended values.
        return Server(
            name="dropbox",
            links=[EpisodeStream(link=vid.group(1), quality="1080")],
            episode_title=episode["notes"],
            headers={"Referer": "https://www.mp4upload.com/"},
        )

0 commit comments

Comments
 (0)