crawler.py
# This program starts at a given URL and crawls adjacent webpages,
# recording each (page, linked page) pair as an edge in nodes.csv.
import requests
from bs4 import BeautifulSoup as bsoup
import csv
import re
import time

http = 'http://'
https = 'https://'
nodes = []          # list of {"node1": source_url, "node2": linked_url} edges
init_url = input("Enter Initial URL Here: ")
urls = [init_url]   # frontier of URLs still to visit
visited = []        # URLs already processed


def crawl_page(soup, c_url):
    # Leftover helper: scans the page's links for unvisited absolute https
    # URLs, but currently returns without recording anything.
    link_elements = soup.select('a[href]')
    for link_element in link_elements:
        url = link_element['href']
        if re.search(r"https://www\.", url) and url not in visited:
            return


def get_html(url):
    # Fetch the raw HTML for a URL; return an empty string on any request error.
    try:
        return requests.get(url).content
    except Exception as e:
        print(e)
        return ''


while len(urls) != 0:
    time.sleep(1)  # be polite: pause between requests
    current_url = urls.pop()
    if 'gustavus.edu' not in current_url:
        continue
    visited.append(current_url)
    if len(nodes) >= 50000:
        break
    html = get_html(current_url)
    if html == '':
        continue
    soup = bsoup(html, "html.parser")
    crawl_page(soup, current_url)
    link_elements = soup.select("a[href]")
    check_dupes = []  # links already queued from this page
    for link_element in link_elements:
        url = link_element['href']
        if url not in visited:
            if http in url or https in url:
                if url not in check_dupes:
                    node = {"node1": current_url, "node2": url}
                    nodes.append(node)
                    urls.append(url)
                    check_dupes.append(url)
                    print(url)
    print(len(urls))

with open('nodes.csv', 'w', newline='') as csv_file:
    writer = csv.writer(csv_file)
    for node in nodes:
        writer.writerow(node.values())
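
# A minimal sketch of how the output could be checked (an added illustration,
# not part of the crawl itself): read the edge list back from nodes.csv and
# print each source -> target pair that was just written.
with open('nodes.csv', newline='') as csv_file:
    for row in csv.reader(csv_file):
        if len(row) == 2:
            print(row[0], '->', row[1])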