-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscraper.py
More file actions
49 lines (37 loc) · 1.67 KB
/
scraper.py
File metadata and controls
49 lines (37 loc) · 1.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import requests
from bs4 import BeautifulSoup
# Accumulator for scraped articles; each entry is a dict with
# 'title' and 'link' keys. Populated by scraper() and printed
# at the bottom of the script.
tips_and_resources = []
# Define the websites to scrape
websites = [
    'https://www.gamedeveloper.com/latest/news',
    'https://gamedev.expert/category/news/',
    'https://www.polygon.com/gaming'
]  # TODO implement general checking
# Make the request and parse the HTML
def _prepend_articles(headings):
    """Insert a {'title', 'link'} dict for each heading's first <a> at the
    FRONT of tips_and_resources (preserves the original newest-first order
    used for gamedev.expert and polygon)."""
    for heading in headings:
        anchor = heading.find('a')
        if anchor is None:  # guard against site layout changes
            continue
        tips_and_resources.insert(0, {'title': anchor.text, 'link': anchor['href']})


def scraper():
    """Scrape article titles and links from each site in ``websites`` into
    the module-level ``tips_and_resources`` list.

    Each site has a different HTML structure, so a per-site branch selects
    the right tags. A network or HTTP failure on one site is reported and
    skipped so the remaining sites are still scraped.
    """
    for url in websites:
        try:
            # timeout prevents the script hanging forever on a dead host;
            # raise_for_status surfaces 4xx/5xx responses as exceptions
            response = requests.get(url, timeout=10)
            response.raise_for_status()
        except requests.RequestException as exc:
            print(f"Skipping {url}: {exc}")
            continue
        soup = BeautifulSoup(response.content, 'html.parser')
        if "gamedeveloper" in url:  # Different HTML structures for each website
            for article in soup.find_all('div', class_='topic-content-article'):
                title_tag = article.find('span', class_='article-title')
                link_tag = article.find('a')
                if title_tag is None or link_tag is None:  # layout-change guard
                    continue
                # gamedeveloper links are site-relative; prefix the domain
                tips_and_resources.append({
                    'title': title_tag.text,
                    'link': 'https://gamedeveloper.com' + link_tag['href'],
                })
        elif "gamedev.expert" in url:
            _prepend_articles(soup.find_all('h2', class_='title'))
        elif "polygon" in url:
            _prepend_articles(soup.find_all('h2', class_='c-entry-box--compact__title'))
# Only run the scrape when executed as a script, not when imported.
if __name__ == "__main__":
    scraper()
    # Print every collected article: title, then its link.
    for item in tips_and_resources:
        print(f"Title: {item['title']}")
        print(f"\nLink: {item['link']}\n")