@@ -55,45 +55,54 @@ def get_session_and_csrf() -> dict:
5555 logging .info (f"CSRF Token: { csrf_token } " )
5656 return session_id , csrf_token
5757
def title_search(query: str) -> int:
    """
    Function to perform an anime search using a provided title.

    Parameters:
        - query (str): The query to search for.

    Returns:
        - int: A number containing the length of media search manager
          (0 if the search request itself fails).
    """
    search_url = f"{site_constant.FULL_URL}/search"
    console.print(f"[cyan]Search url: [yellow]{search_url}")

    # Make the GET request.
    # The keyword is passed via `params` so httpx URL-encodes it; interpolating
    # it raw into the URL breaks on spaces, '&', '#' or non-ASCII queries.
    try:
        response = httpx.get(
            search_url,
            params={'keyword': query},
            headers={'User-Agent': get_userAgent()}
        )

    except Exception as e:
        console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
        return 0

    # Create soup instance
    soup = BeautifulSoup(response.text, 'html.parser')

    # Collect data from soup: each result is an <a class="poster"> card.
    for element in soup.find_all('a', class_='poster'):
        try:
            title = element.find('img').get('alt')
            url = f"{site_constant.FULL_URL}{element.get('href')}"

            # The optional <div class="status"> badge carries dub / format flags.
            status_div = element.find('div', class_='status')
            is_dubbed = False
            anime_type = 'TV'

            if status_div:
                if status_div.find('div', class_='dub'):
                    is_dubbed = True

                if status_div.find('div', class_='movie'):
                    anime_type = 'Movie'
                elif status_div.find('div', class_='ona'):
                    anime_type = 'ONA'

            media_search_manager.add_media({
                'name': title,
                'type': anime_type,
                'DUB': is_dubbed,
                'url': url
            })

        except Exception as e:
            # console.print for consistency with the other messages above.
            console.print(f"Error parsing a film entry: {e}")

    # NOTE(review): success-path return reconstructed from the docstring
    # contract (the error path above returns 0) — confirm against the
    # original file, whose tail is cut off in this view.
    return media_search_manager.get_length()
0 commit comments