@@ -1,7 +1,6 @@
 # Python Standard Modules
 import sys
 import os
-import json
 import re
 import string
 import random
@@ -16,9 +15,8 @@
 OUTPUT = "AVAILABLE.txt"
 
 # Regex Patterns
-PLACEHOLDER = r"(%word%)"
 URLPATT = r"(^https?:\/\/[-.a-zA-Z0-9]+)"
-DOMAIN = r"\Ahttps?:\/\/([-a-zA-Z0-9]+)\.[a-zA-Z]+"
+DOMAIN = r"https?:\/\/(\w*)(?(1)\.|(?(1)\w*))"
 
 # Reads configuration file
 config = configparser.ConfigParser()
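The configuration block that follows `config = configparser.ConfigParser()` is elided from this diff, but the globals used further down (SITE, URL, PROXY, PROTOCOL, plus OUTPUT above) suggest it loads settings from an INI file. A minimal sketch of that step; the file name, section name, and key names here are assumptions rather than values taken from the commit:

```python
import configparser

# Hypothetical sketch of the elided config-loading step; the actual
# file name, section, and keys are not visible in this diff.
config = configparser.ConfigParser()
config.read("config.ini")                     # assumed file name
SITE = config["main"]["site"]                 # which URLS entry to hit
URL = config["main"]["url"]                   # custom URL used for SITE == 1
PROXY = config["main"].getboolean("proxy")    # whether to route through a proxy
PROTOCOL = config["main"]["protocol"]         # e.g. "http" or "https"
```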
@@ -34,30 +32,31 @@
 # Site URLs
 URLS = {
     1: URL,
-    2: "https://api.mojang.com/users/profiles/minecraft/%word%",
-    3: "https://api.twitter.com/i/users/username_available.json?username=%word%",
+    2: "https://api.mojang.com/users/profiles/minecraft/%s",
+    3: "https://api.twitter.com/i/users/username_available.json?username=%s",
     4: "https://instagram.com/accounts/web_create_ajax/attempt/",
-    5: "https://steamcommunity.com/id/%word%",
-    6: "https://steamcommunity.com/groups/%word%",
-    7: "https://soundcloud.com/%word%",
-    8: "https://passport.twitch.tv/usernames/%word%",
-    9: "https://mixer.com/api/v1/channels/%word%",
-    10: "https://github.com/%word%",
-    11: "https://about.me/%word%",
-    12: "https://youtube.com/%word%"
+    5: "https://steamcommunity.com/id/%s",
+    6: "https://steamcommunity.com/groups/%s",
+    7: "https://soundcloud.com/%s",
+    8: "https://passport.twitch.tv/usernames/%s",
+    9: "https://mixer.com/api/v1/channels/%s",
+    10: "https://github.com/%s",
+    11: "https://about.me/%s",
+    12: "https://youtube.com/%s"
 }
 
 # Proxy List
 proxyDict = {}
 
+s = requests.Session()
+
 def generate_pw(size=16, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase):
     return ''.join(random.choice(chars) for _ in range(size))
 
 def replace(word):
     # Finds and replaces matches of the name variable with the actual word to insert in URL
     if int(SITE) != 4:  # if not Instagram
-        x = re.sub(PLACEHOLDER, word, URLS[int(SITE)])
-        return x
+        return URLS[int(SITE)] % word
     else:
         print("instagram")
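The rewritten replace() above is the core of this commit: the URL table now uses printf-style %s markers instead of the %word% placeholder, so building a link is a plain % formatting operation rather than a re.sub() over the template. A small standalone comparison of the two approaches (the template and name are illustrative):

```python
import re

old_template = "https://github.com/%word%"
new_template = "https://github.com/%s"

# Old approach: regex-substitute the literal %word% placeholder.
link_old = re.sub(r"(%word%)", "octocat", old_template)

# New approach: printf-style formatting, no regex involved.
link_new = new_template % "octocat"

assert link_old == link_new == "https://github.com/octocat"
```

One behavioral difference worth noting: re.sub() silently returns the template unchanged when the placeholder is absent, while % formatting raises TypeError on a template with no conversion specifier. Since the Instagram URL (entry 4) contains no %s, the `if int(SITE) != 4` guard is load-bearing in the new version.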
@@ -146,7 +145,7 @@ def log_result(response, word, link, matches=None):
         manual(response, word, service)
 
 def get_cookie():
-    r = requests.get(URLS[int(SITE)])
+    r = s.get(URLS[int(SITE)])
     return r.cookies
 
 def ready_payload(word):
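get_cookie() is one of several call sites this commit moves from module-level requests.get() calls onto the shared requests.Session() created above. A session reuses the underlying TCP/TLS connection across the many per-word requests and persists cookies between calls; a minimal sketch of both effects (the URL is illustrative):

```python
import requests

s = requests.Session()

# Connection pooling: repeated requests to the same host reuse one
# TCP/TLS connection instead of re-handshaking for every word checked.
for name in ("alice", "bob"):
    r = s.get("https://example.com/%s" % name)
    print(name, r.status_code)

# Cookie persistence: cookies set by earlier responses are stored on
# the session and sent automatically on later requests.
print(s.cookies.get_dict())
```

A side effect to be aware of: because the session keeps its own cookie jar, the cookies that get_cookie() returns will also ride along automatically on every subsequent s.get() or s.post() made through the same session.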
@@ -155,7 +154,7 @@ def ready_payload(word):
             "username": word,
             "password": generate_pw(),
-            "first_name": "Croc"
+            "first_name": word
         }
     else:
         print("Wrong site!")
@@ -176,19 +175,19 @@ def send_get(words):
         link = replace(words[w])
         if PROXY:
             proxyDict[PROTOCOL] = get_proxy()
-            r = requests.get(link, proxies=proxyDict)
+            r = s.get(link, proxies=proxyDict)
         else:
-            r = requests.get(link)
+            r = s.get(link)
         log_result(r, words[w], link)
 
 def parse_page(words):
     for w in range(words.__len__()):
         link = replace(words[w])
         if PROXY:
             proxyDict[PROTOCOL] = get_proxy()
-            r = requests.get(link, proxies=proxyDict)
+            r = s.get(link, proxies=proxyDict)
         else:
-            r = requests.get(link)
+            r = s.get(link)
         page = r.content
         soup = BeautifulSoup(page, "html.parser")
         matches = []
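Both send_get() and parse_page() hand proxyDict straight to requests, so PROTOCOL has to be a scheme key ("http" or "https") and get_proxy() has to return a proxy URL for that scheme: requests selects a proxy by matching the request's URL scheme against the keys of the proxies mapping. A sketch with a placeholder proxy address:

```python
import requests

s = requests.Session()

# requests expects proxies as {scheme: proxy_url}; only requests whose
# URL scheme matches a key are routed through the corresponding proxy.
proxyDict = {"https": "http://203.0.113.10:8080"}  # placeholder address

r = s.get("https://example.com", proxies=proxyDict)
print(r.status_code)
```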
@@ -218,7 +217,13 @@ def send_post(words):
     link = URLS[int(SITE)]
     for w in range(words.__len__()):
         payload = ready_payload(words[w])
-        r = requests.post(URLS[int(SITE)], json=payload, headers=header, cookies=cookie)
+        r = None
+        if PROXY:
+            proxyDict[PROTOCOL] = get_proxy()
+            r = s.post(URLS[int(SITE)], data=payload, headers=header, cookies=cookie, proxies=proxyDict)
+        else:
+            r = s.post(URLS[int(SITE)], data=payload, headers=header, cookies=cookie)
+
         log_result(r, words[w], link)
 
 def main():
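Two things change in send_post(): the proxy branch now mirrors the GET paths, and the body goes out as data=payload instead of json=payload. With data=, requests form-encodes the dict and sends Content-Type: application/x-www-form-urlencoded rather than a JSON body, presumably closer to what a browser form submission against the signup-attempt endpoint looks like. The difference in miniature (httpbin.org used as a neutral echo endpoint):

```python
import requests

payload = {"username": "octocat", "password": "hunter2"}

# json=: serializes payload to a JSON body, Content-Type: application/json.
r1 = requests.post("https://httpbin.org/post", json=payload)

# data=: form-encodes payload, Content-Type: application/x-www-form-urlencoded.
r2 = requests.post("https://httpbin.org/post", data=payload)

print(r1.json()["headers"]["Content-Type"])  # application/json
print(r2.json()["headers"]["Content-Type"])  # application/x-www-form-urlencoded
```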