126 changes: 92 additions & 34 deletions DNSDumpsterAPI.py
@@ -1,7 +1,6 @@
"""
This is the (unofficial) Python API for the dnsdumpster.com website.
Using this code, you can retrieve subdomains for a given domain.

"""

from __future__ import print_function
@@ -14,7 +13,6 @@


class DNSDumpsterAPI(object):

"""DNSDumpsterAPI Main Handler"""

def __init__(self, verbose=False, session=None):
@@ -24,10 +22,33 @@ def __init__(self, verbose=False, session=None):
else:
self.session = session

# Add realistic headers to reduce blocking and keep them for all requests
self.session.headers.update({
"User-Agent": (
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/119.0 Safari/537.36"
),
"Referer": "https://dnsdumpster.com/"
})

def display_message(self, s):
if self.verbose:
print('[verbose] %s' % s)

def _empty_result(self, domain):
"""Return an empty structured result when DNSDumpster fails."""
return {
'domain': domain,
'dns_records': {
'dns': [],
'mx': [],
'txt': [],
'host': []
},
'image_data': None,
'xls_data': None
}

def retrieve_results(self, table):
res = []
trs = table.findAll('tr')
@@ -53,7 +74,7 @@ def retrieve_results(self, table):
'country': country,
'header': header}
res.append(data)
except:
except Exception:
pass
return res

@@ -63,61 +84,98 @@ def retrieve_txt_record(self, table):
res.append(td.text)
return res


def search(self, domain):
dnsdumpster_url = 'https://dnsdumpster.com/'

req = self.session.get(dnsdumpster_url)
# Initial GET to obtain the CSRF token and cookies
try:
req = self.session.get(dnsdumpster_url, timeout=20)
req.raise_for_status()
except requests.RequestException as e:
print("DNSDumpster GET failed: %s" % e, file=sys.stderr)
return self._empty_result(domain)

soup = BeautifulSoup(req.content, 'html.parser')
csrf_middleware = soup.findAll('input', attrs={'name': 'csrfmiddlewaretoken'})[0]['value']

# ---- Robust CSRF extraction (tolerant to HTML changes or block pages)
token_input = soup.find("input", attrs={"name": "csrfmiddlewaretoken"})
csrf_middleware = None
if token_input and token_input.get("value"):
csrf_middleware = token_input["value"]
else:
# Fallback: regex search in raw HTML
m = re.search(
r"name=['\"]csrfmiddlewaretoken['\"]\s+value=['\"]([^'\"]+)['\"]",
req.text
)
if m:
csrf_middleware = m.group(1)

if not csrf_middleware:
print(
"DNSDumpster: CSRF token not found (site may be blocking or serving a captcha).",
file=sys.stderr
)
return self._empty_result(domain)

self.display_message('Retrieved token: %s' % csrf_middleware)

# Prepare POST
cookies = {'csrftoken': csrf_middleware}
headers = {'Referer': dnsdumpster_url, 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'}
headers = {
'Referer': dnsdumpster_url,
'User-Agent': self.session.headers.get('User-Agent', 'Mozilla/5.0')
}
data = {'csrfmiddlewaretoken': csrf_middleware, 'targetip': domain, 'user': 'free'}
req = self.session.post(dnsdumpster_url, cookies=cookies, data=data, headers=headers)

if req.status_code != 200:
print(
"Unexpected status code from {url}: {code}".format(
url=dnsdumpster_url, code=req.status_code),
file=sys.stderr,
)
return []
try:
req = self.session.post(dnsdumpster_url, cookies=cookies, data=data, headers=headers, timeout=30)
req.raise_for_status()
except requests.RequestException as e:
print("DNSDumpster POST failed: %s" % e, file=sys.stderr)
return self._empty_result(domain)

if 'There was an error getting results' in req.content.decode('utf-8'):
if 'There was an error getting results' in req.content.decode('utf-8', errors='ignore'):
print("There was an error getting results", file=sys.stderr)
return []
return self._empty_result(domain)

soup = BeautifulSoup(req.content, 'html.parser')
tables = soup.findAll('table')

res = {}
res['domain'] = domain
res['dns_records'] = {}
res['dns_records']['dns'] = self.retrieve_results(tables[0])
res['dns_records']['mx'] = self.retrieve_results(tables[1])
res['dns_records']['txt'] = self.retrieve_txt_record(tables[2])
res['dns_records']['host'] = self.retrieve_results(tables[3])
if len(tables) < 4:
print("DNSDumpster: unexpected response format (tables missing).", file=sys.stderr)
return self._empty_result(domain)

res = {
'domain': domain,
'dns_records': {
'dns': self.retrieve_results(tables[0]),
'mx': self.retrieve_results(tables[1]),
'txt': self.retrieve_txt_record(tables[2]),
'host': self.retrieve_results(tables[3])
}
}

# Network mapping image
try:
tmp_url = 'https://dnsdumpster.com/static/map/{}.png'.format(domain)
image_data = base64.b64encode(self.session.get(tmp_url).content)
except:
tmp_url = f'https://dnsdumpster.com/static/map/{domain}.png'
image_data = base64.b64encode(self.session.get(tmp_url, timeout=20).content)
except Exception:
image_data = None
finally:
res['image_data'] = image_data

# XLS hosts.
# eg. tsebo.com-201606131255.xlsx
# XLS hosts
try:
pattern = r'/static/xls/' + domain + '-[0-9]{12}\.xlsx'
xls_url = re.findall(pattern, req.content.decode('utf-8'))[0]
xls_url = 'https://dnsdumpster.com' + xls_url
xls_data = base64.b64encode(self.session.get(xls_url).content)
pattern = rf'/static/xls/{re.escape(domain)}-[0-9]{{12}}\.xlsx'
m = re.findall(pattern, req.content.decode('utf-8', errors='ignore'))
if m:
xls_url = 'https://dnsdumpster.com' + m[0]
xls_data = base64.b64encode(self.session.get(xls_url, timeout=20).content)
else:
xls_data = None
except Exception as err:
print(err)
print(err, file=sys.stderr)
xls_data = None
finally:
res['xls_data'] = xls_data
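With this change, search() returns the same dict shape on every code path, so callers no longer need type checks before indexing. A minimal consumer sketch (class and result keys as defined in this diff; the target domain is just an example):

from DNSDumpsterAPI import DNSDumpsterAPI

api = DNSDumpsterAPI(verbose=True)
res = api.search("example.com")  # hypothetical target domain

# 'dns_records' and its four lists are always present, even when the
# request fails and _empty_result() is returned, so iteration is safe.
for entry in res['dns_records']['host']:
    print(entry['domain'], entry['ip'], entry['provider'])
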
89 changes: 56 additions & 33 deletions cloudfail.py
@@ -86,29 +86,33 @@ def dnsdumpster(target):

res = DNSDumpsterAPI(False).search(target)

if res['dns_records']['host']:
for entry in res['dns_records']['host']:
provider = str(entry['provider'])
if "Cloudflare" not in provider:
print_out(
Style.BRIGHT + Fore.WHITE + "[FOUND:HOST] " + Fore.GREEN + "{domain} {ip} {as} {provider} {country}".format(
**entry))

if res['dns_records']['dns']:
for entry in res['dns_records']['dns']:
provider = str(entry['provider'])
if "Cloudflare" not in provider:
print_out(
Style.BRIGHT + Fore.WHITE + "[FOUND:DNS] " + Fore.GREEN + "{domain} {ip} {as} {provider} {country}".format(
**entry))

if res['dns_records']['mx']:
for entry in res['dns_records']['mx']:
provider = str(entry['provider'])
if "Cloudflare" not in provider:
print_out(
Style.BRIGHT + Fore.WHITE + "[FOUND:MX] " + Fore.GREEN + "{ip} {as} {provider} {domain}".format(
**entry))
# tolerate missing/empty results
if isinstance(res, dict):
if res.get('dns_records', {}).get('host'):
for entry in res['dns_records']['host']:
provider = str(entry['provider'])
if "Cloudflare" not in provider:
print_out(
Style.BRIGHT + Fore.WHITE + "[FOUND:HOST] " + Fore.GREEN + "{domain} {ip} {as} {provider} {country}".format(
**entry))

if res.get('dns_records', {}).get('dns'):
for entry in res['dns_records']['dns']:
provider = str(entry['provider'])
if "Cloudflare" not in provider:
print_out(
Style.BRIGHT + Fore.WHITE + "[FOUND:DNS] " + Fore.GREEN + "{domain} {ip} {as} {provider} {country}".format(
**entry))

if res.get('dns_records', {}).get('mx'):
for entry in res['dns_records']['mx']:
provider = str(entry['provider'])
if "Cloudflare" not in provider:
print_out(
Style.BRIGHT + Fore.WHITE + "[FOUND:MX] " + Fore.GREEN + "{ip} {as} {provider} {domain}".format(
**entry))
else:
print_out(Fore.CYAN + "DNSDumpster returned no usable data, skipping.")


def crimeflare(target):
@@ -202,21 +206,36 @@ def subdomain_scan(target, subdomains):
subdomainsList = subdomains
else:
subdomainsList = "subdomains.txt"

try:
with open("data/" + subdomainsList, "r") as wordlist:
numOfLines = len(open("data/subdomains.txt").readlines())
numOfLinesInt = numOfLines
numOfLines = str(numOfLines)
file_path = "data/" + subdomainsList
with open(file_path, "r") as wordlist:
# read all non-empty lines once from the file we are about to scan
lines = [l for l in (ln.strip() for ln in wordlist) if l]
numOfLinesInt = len(lines)
numOfLines = str(numOfLinesInt)
print_out(Fore.CYAN + "Scanning " + numOfLines + " subdomains (" + subdomainsList + "), please wait...")
for word in wordlist:
# ensure step is never zero (avoid ZeroDivisionError when <100 lines)
step = max(1, int(float(numOfLinesInt) / 100.0))

for word in lines:
c += 1
if (c % int((float(numOfLinesInt) / 100.0))) == 0:
print_out(Fore.CYAN + str(round((c / float(numOfLinesInt)) * 100.0, 2)) + "% complete", '\r')
# step stays constant for the whole scan, which is fine for a progress display
if (c % step) == 0:
# print a friendly progress percentage; numOfLinesInt > 0 is guaranteed here, but guard anyway
pct = round((c / float(numOfLinesInt)) * 100.0, 2) if numOfLinesInt > 0 else 100.0
print_out(Fore.CYAN + str(pct) + "% complete", '\r')

subdomain = "{}.{}".format(word, target)  # lines were already stripped above
try:
target_http = requests.get("http://" + subdomain)
target_http = str(target_http.status_code)
# use a timeout on HTTP requests to avoid long hangs
try:
target_http_r = requests.get("http://" + subdomain, timeout=5)
target_http = str(target_http_r.status_code)
except requests.exceptions.RequestException:
target_http = "err"

ip = socket.gethostbyname(subdomain)
ifIpIsWithin = inCloudFlare(ip)

Expand All @@ -231,6 +250,10 @@ def subdomain_scan(target, subdomains):

except requests.exceptions.RequestException as e:
continue
except socket.gaierror:
# DNS did not resolve - skip
continue

if (i == 0):
print_out(Fore.CYAN + "Scanning finished, we did not find anything, sorry...")
else:
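For context on the step guard above: with fewer than 100 lines, the old expression int(float(numOfLinesInt) / 100.0) evaluated to 0 and c % 0 raised ZeroDivisionError. A standalone sketch of the fixed arithmetic:

total = 42  # e.g. a short custom wordlist
step = max(1, int(float(total) / 100.0))  # old code yielded 0 here and crashed
for c in range(1, total + 1):
    if c % step == 0:
        print(str(round((c / float(total)) * 100.0, 2)) + "% complete")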
@@ -265,7 +288,7 @@ def update():
| | | |/ _ \| | | |/ _` | |_ / _` | | |
| |___| | (_) | |_| | (_| | _| (_| | | |
\____|_|\___/ \__,_|\__,_|_| \__,_|_|_|
v1.0.5 by m0rtem
v1.0.6 by eMi

"""
