# functions.py
import requests
from bs4 import BeautifulSoup
import csv


# Function for retrieving all the HTML content of a page
def get_html(root, extension):
    url = root + extension
    # A timeout keeps a stalled request from hanging the whole scrape
    response = requests.get(url, timeout=30)
    return response.text


def scrape_data(html_content):
    soup = BeautifulSoup(html_content, 'html.parser')

    # Project
    box = soup.find(id="block-porto-content")
    if box is not None:
        inner_html = box.find('h1', class_='separator-bottom mt-5')
        if inner_html is not None:
            project = inner_html.get_text(strip=True, separator=',')
        else:
            project = "NA"
    else:
        project = "NA"
    # Description; guard the inner find as well, since the field item may be absent
    box = soup.find('div', class_="clearfix text-formatted field field--name-field-description field--type-text-long field--label-above")
    if box is not None:
        inner_html = box.find('div', class_='field__item')
        description = inner_html.get_text(strip=True) if inner_html is not None else "NA"
    else:
        description = "NA"
    # Start date; the slice drops a fixed-width trailing suffix from the site's date text
    box = soup.find('div', class_='views-field views-field-field-start-date')
    if box is not None:
        inner_html = box.find(class_='field-content')
        if inner_html is not None:
            start_date = inner_html.get_text(strip=True)[:-13]
        else:
            start_date = "NA"
    else:
        start_date = "NA"

    # End date; same fixed-width suffix stripping as above
    box = soup.find('div', class_='views-field views-field-field-date-of-completion')
    if box is not None:
        inner_html = box.find(class_='field-content')
        if inner_html is not None:
            end_date = inner_html.get_text(strip=True)[:-21]
        else:
            end_date = "NA"
    else:
        end_date = "NA"
    # Partners; guard the inner find as well
    box = soup.find('div', class_="clearfix text-formatted field field--name-field-partners field--type-text-long field--label-above")
    if box is not None:
        inner_html = box.find('div', class_='field__item')
        partners = inner_html.get_text(strip=True) if inner_html is not None else "NA"
    else:
        partners = "NA"
    # Contact info; guard each inner find so a missing field never raises
    box = soup.find(id="block-views-block-partnerships-contact-information-block-1")
    if box is not None:
        box_con_name = box.find('div', class_="views-field views-field-field-contact-name")
        if box_con_name is not None:
            inner_html = box_con_name.find('div', class_='field-content')
            contact_name = inner_html.get_text(strip=True) if inner_html is not None else "NA"
        else:
            contact_name = "NA"
        box_con_link = box.find('div', class_="views-field views-field-field-email")
        if box_con_link is not None:
            inner_html = box_con_link.find('div', class_='field-content')
            contact_link = inner_html.get_text(strip=True) if inner_html is not None else "NA"
        else:
            contact_link = "NA"
    else:
        contact_name = "NA"
        contact_link = "NA"
    # SDG codes; the slice drops the block's fixed-length label prefix
    sdg_block = soup.find(id="block-views-block-good-practices-block-7")
    if sdg_block is not None:
        sdg_list = sdg_block.get_text(strip=True, separator=',')[5:]
    else:
        sdg_list = "NA"

    # Info link; the slice drops the block's fixed-length label prefix
    info_block = soup.find(id="block-views-block-good-practices-block-5")
    if info_block is not None:
        info_link = info_block.get_text().strip()[20:]
    else:
        info_link = "NA"

    data = [project, description, start_date, end_date, partners, contact_name, contact_link, sdg_list, info_link]
    return data


# Function for retrieving the URL extensions for each project on the page
def get_links(root, extension):
    html = get_html(root, extension)
    soup = BeautifulSoup(html, 'html.parser')
    elements = soup.find_all('div', class_='views-field views-field-title')
    href_links = []
    for element in elements:
        # The elements are already parsed Tags; no need to re-parse them
        a = element.find('a')
        if a is not None and a.has_attr('href'):
            href_links.append(a['href'])
    return href_links


# Function for appending a row of data to the CSV file
def write_data(data):
    with open('WebScraping.csv', 'a+', newline='', encoding='UTF8') as file:
        writer = csv.writer(file)
        writer.writerow(data)
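

# Optional sketch (not part of the original script): write a labeled header row
# once before appending any data rows. The column names are assumptions that
# mirror the order returned by scrape_data plus the link appended in
# scrape_range. Note that 'w' mode truncates any existing WebScraping.csv.
def write_header():
    header = ['project', 'description', 'start_date', 'end_date', 'partners',
              'contact_name', 'contact_link', 'sdg_list', 'info_link', 'link']
    with open('WebScraping.csv', 'w', newline='', encoding='UTF8') as file:
        csv.writer(file).writerow(header)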


def scrape_range(browse_url, page_range, root):
    extensions = []
    # Collect the project URL extensions for every page in the range
    for page in page_range:
        extension = get_links(browse_url, str(page))
        extensions.extend(extension)
        print(f'Extensions on page {page} saved.')
    # Scrape each extension and append its data to the CSV file
    for ext in extensions:
        html_con = get_html(root, ext)
        data = scrape_data(html_con)
        link = root + ext
        data.append(link)
        write_data(data)
        print(f'Data for extension {ext} saved.')
    print(f'Data for pages {page_range} has been saved to the CSV file.')
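

# Minimal usage sketch. The URLs below are placeholders, not taken from the
# original script; substitute the real site root and listing path. get_links
# appends the page number to browse_url, so the listing URL should end at the
# page-number query parameter.
if __name__ == '__main__':
    ROOT = 'https://example.org'          # hypothetical site root
    BROWSE_URL = ROOT + '/browse?page='   # hypothetical listing URL prefix
    scrape_range(BROWSE_URL, range(0, 3), ROOT)  # scrape listing pages 0-2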