
Commit 3ce454f

Fix: line lengths
1 parent 4d9c08c commit 3ce454f

2 files changed: 20 additions, 11 deletions

web-scraping-bs4/job_search.py

Lines changed: 14 additions & 9 deletions
@@ -4,15 +4,16 @@
 
 
 def scrape_jobs(location=None):
-    """Scrapes Software Developer job postings from Monster, optionally by location.
+    """Scrapes Developer job postings from Monster, optionally by location.
 
     :param location: Where the job is located
     :type location: str
-    :return: all job postings from first page that match the search URL results
+    :return: all job postings from first page that match the search results
     :rtype: BeautifulSoup object
     """
     if location:
-        URL = f"https://www.monster.com/jobs/search/?q=Software-Developer&where={location}"
+        URL = f"https://www.monster.com/jobs/search/\
+?q=Software-Developer&where={location}"
     else:
         URL = f"https://www.monster.com/jobs/search/?q=Software-Developer"
     page = requests.get(URL)
@@ -23,7 +24,7 @@ def scrape_jobs(location=None):
 
 
 def filter_jobs_by_keyword(results, word):
-    """Filters job postings by word and prints the matching job title plus link.
+    """Filters job postings by word and prints matching job title plus link.
 
     :param results: Parsed HTML container with all job listings
     :type results: BeautifulSoup object
@@ -32,15 +33,18 @@ def filter_jobs_by_keyword(results, word):
     :return: None - just meant to print results
     :rtype: None
     """
-    filtered_jobs = results.find_all('h2', string=lambda text: word in text.lower())
+    filtered_jobs = results.find_all('h2',
+                                     string=lambda text: word in text.lower())
     for f_job in filtered_jobs:
         link = f_job.find('a')['href']
         print(f_job.text.strip())
         print(f"Apply here: {link}\n")
 
 
 def print_all_jobs(results):
-    """Print details (title, link, company name and location) of all jobs returned by the search.
+    """Print details of all jobs returned by the search.
+
+    The printed details are title, link, company name and location of the job.
 
     :param results: Parsed HTML container with all job listings
     :type results: BeautifulSoup object
@@ -56,7 +60,7 @@ def print_all_jobs(results):
         location_elem = job_elem.find('div', class_='location')
         if None in (title_elem, company_elem, location_elem):
             continue
-            # print(job_elem.prettify()) # to inspect the 'None' element further
+            # print(job_elem.prettify()) # to inspect the 'None' element
         print(title_elem.text.strip())
         link_elem = title_elem.find('a')
         print(link_elem['href'])
@@ -66,8 +70,9 @@ def print_all_jobs(results):
 
 
 # USE THE SCRIPT AS A COMMAND-LINE INTERFACE
-# --------------------------------------------------------------------------------
-my_parser = argparse.ArgumentParser(prog='jobs', description='Find Developer Jobs')
+# ----------------------------------------------------------------------------
+my_parser = argparse.ArgumentParser(prog='jobs',
+                                    description='Find Developer Jobs')
 my_parser.add_argument('-location',
                        metavar='location',
                        type=str,
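Aside (not part of the commit): another way to keep these URL lines under the length limit is to let requests build the query string from a params dict, which also URL-encodes the location value. The sketch below is only an illustration; build_search_page is a hypothetical helper, not something from this repo.

# Sketch only: build_search_page is hypothetical, not from this repo.
import requests

def build_search_page(location=None):
    """Fetch the Monster search page for Software Developer jobs."""
    base_url = "https://www.monster.com/jobs/search/"
    params = {"q": "Software-Developer"}
    if location:
        # requests encodes the query string, so the line stays short.
        params["where"] = location
    return requests.get(base_url, params=params)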

web-scraping-bs4/scrape_jobs.py

Lines changed: 6 additions & 2 deletions
@@ -2,14 +2,18 @@
 from bs4 import BeautifulSoup
 
 
-URL = 'https://www.monster.com/jobs/search/?q=Software-Developer&where=Australia'
+URL = 'https://www.monster.com/jobs/search/?q=Software-Developer\
+&where=Australia'
 page = requests.get(URL)
 
 soup = BeautifulSoup(page.content, 'html.parser')
 results = soup.find(id='ResultsContainer')
 
 # Look for Python jobs
-python_jobs = results.find_all('h2', string=lambda text: "python" in text.lower())
+python_jobs = results.find_all(
+    'h2',
+    string=lambda text: "python" in text.lower()
+)
 for p_job in python_jobs:
     link = p_job.find('a')['href']
     print(p_job.text.strip())
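Aside (not part of the commit): the trailing backslash here continues the string literal itself, so the continuation line must start at column 0 or the extra spaces become part of the URL. A minimal sketch of an equivalent split that tolerates normal indentation is implicit concatenation of adjacent string literals inside parentheses:

# Sketch only: adjacent string literals in parentheses are joined at compile
# time, so this URL value is identical to the single-line version.
URL = ('https://www.monster.com/jobs/search/'
       '?q=Software-Developer&where=Australia')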
