|
16 | 16 | import aiohttp |
17 | 17 |
|
18 | 18 | requests.packages.urllib3.disable_warnings() |
19 | | -requests.timeout = 10 |
| 19 | +outtime = 10 |
20 | 20 |
|
21 | 21 | ua = [ |
22 | 22 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36,Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", |
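Reviewer's note: `requests.timeout = 10` only set an unused attribute on the module; the `requests` library honors a timeout solely when it is passed per call. The patch therefore keeps a plain variable (`outtime`) and threads it into each `requests.get`. A minimal sketch of that pattern, with a placeholder URL:

```python
import requests

outtime = 10  # seconds; requests applies a timeout only when passed per call

try:
    # example.com is a placeholder, not a URL from this project
    r = requests.get("https://example.com", timeout=outtime)
    print(r.status_code)
except requests.exceptions.Timeout:
    print("request timed out")
```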
@@ -54,7 +54,7 @@ def url(urllist, proxies, header_new): |
54 | 54 | newheader = json.loads(str(JSON_handle(header, header_new)).replace("'", "\"")) |
55 | 55 | try: |
56 | 56 | requests.packages.urllib3.disable_warnings() |
57 | | - r = requests.get(url=u, headers=newheader, allow_redirects=False, verify=False, proxies=proxies) |
| 57 | + r = requests.get(url=u, headers=newheader, timeout=outtime, allow_redirects=False, verify=False, proxies=proxies)
58 | 58 | sleep(int(float(sleeps))) |
59 | 59 | if r.status_code == 503: |
60 | 60 | sys.exit() |
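If finer control is ever needed, `requests` also accepts a `(connect, read)` tuple for `timeout`; the values below are illustrative, not taken from this patch:

```python
import requests

# 3.05 s to establish the connection, 10 s between bytes of the response
r = requests.get(
    "https://example.com",  # placeholder URL
    timeout=(3.05, 10),
    allow_redirects=False,
)
```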
@@ -185,7 +185,7 @@ def dump(urllist, proxies, header_new): |
185 | 185 | def download(url: str, fname: str, proxies: str, newheader): |
186 | 186 | # Fetch the url's data in streaming (stream) mode
187 | 187 | requests.packages.urllib3.disable_warnings() |
188 | | - resp = requests.get(url, headers=newheader, stream=True, verify=False, proxies=proxies) |
| 188 | + resp = requests.get(url, headers=newheader, timeout=outtime, stream=True, verify=False, proxies=proxies)
189 | 189 | # Get the file's length and initialize total to 0
190 | 190 | total = int(resp.headers.get('content-length', 0)) |
191 | 191 | # Open the file fname in the current directory (name supplied by the caller)
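One caveat on the streaming case: with `stream=True`, the read timeout bounds each chunk read, not the download as a whole, so a slow but steady transfer will not be interrupted. A rough sketch under that assumption, with a hypothetical URL and filename:

```python
import requests

# hypothetical URL and filename, for illustration only
resp = requests.get("https://example.com/file.bin", stream=True, timeout=10)
total = int(resp.headers.get("content-length", 0))  # may be 0 if the header is absent
done = 0
with open("file.bin", "wb") as f:
    for chunk in resp.iter_content(chunk_size=8192):  # each read is bounded by the timeout
        f.write(chunk)
        done += len(chunk)
```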
|