@@ -2,7 +2,7 @@
 import argparse
 import bs4 as bsoup
 import requests
-from collections import defaultdict
+from collections import defaultdict, OrderedDict
 import shutil
 import os
 import re
@@ -49,9 +49,10 @@ def set_download_chapters(self, potential_keys=None):
         keys = list(self.all_chapters.keys())
 
         # Sort keys to make it ascending order and make it a new dict
-        keys.sort()
-        self.chapters_to_download = {key: self.all_chapters[key]
-                                     for key in keys}
+        unsorted_chapters = {key: self.all_chapters[key]
+                             for key in keys}
+        self.chapters_to_download = OrderedDict(
+            sorted(unsorted_chapters.items(), key=lambda t: t[0]))
         # Print downloading chapters
         print("Downloading the below chapters:")
         print(keys)
@@ -77,7 +78,7 @@ def manga_extract_chapters(self, url):
         r = requests.get(url)
         soup = bsoup.BeautifulSoup(r.text, 'html.parser')
 
-        chapters = defaultdict(defaultdict)
+        chapters = defaultdict(Chapter)
         links = [link.get('href')
                  for link in soup.find_all('a')
                  if link.get('href') and
@@ -103,7 +104,7 @@ def comic_extract_chapters(self, url):
         soup = bsoup.BeautifulSoup(r.text, 'html.parser')
         volume_num = 1
 
-        chapters = defaultdict(defaultdict)
+        chapters = defaultdict(Chapter)
         for link in soup.find_all('a'):
             if (comic in link.get('href')) and ('chapter' in link.get('href')):
                 chapter = link.get('href')
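
A note on the sorting change in set_download_chapters: OrderedDict preserves insertion order, so feeding it items sorted by key gives a mapping that iterates chapters in ascending order. A minimal sketch of the idea, with stand-in string values since the real chapter objects are not shown in this diff:

    from collections import OrderedDict

    all_chapters = {3: "ch-3", 1: "ch-1", 2: "ch-2"}
    unsorted_chapters = {key: all_chapters[key] for key in all_chapters}

    # sorted() orders the (key, value) pairs by key (t[0]); OrderedDict
    # then remembers that insertion order when iterated.
    chapters_to_download = OrderedDict(
        sorted(unsorted_chapters.items(), key=lambda t: t[0]))

    print(list(chapters_to_download))  # [1, 2, 3]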
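
Similarly, for the defaultdict(Chapter) change in both extract methods: defaultdict calls its factory with no arguments the first time a missing key is accessed, so this assumes Chapter can be constructed without arguments. A hedged sketch with a stand-in Chapter class, since its real definition is not part of this diff:

    from collections import defaultdict

    class Chapter:  # stand-in; the project's actual Chapter class is assumed
        def __init__(self):
            self.pages = []

    chapters = defaultdict(Chapter)
    # Accessing a missing key auto-creates a fresh Chapter instance,
    # instead of the nested defaultdict the old code produced.
    chapters['chapter-1'].pages.append('page-001.png')
    print(type(chapters['chapter-1']).__name__)  # Chapter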