Skip to content

Commit a5aaa9e

Browse files
committed
Fix: format code using black
1 parent b598ef3 commit a5aaa9e

File tree

2 files changed

+31
-32
lines changed

2 files changed

+31
-32
lines changed

web-scraping-bs4/job_search.py

Lines changed: 21 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -18,8 +18,8 @@ def scrape_jobs(location=None):
1818
URL = f"https://www.monster.com/jobs/search/?q=Software-Developer"
1919
page = requests.get(URL)
2020

21-
soup = BeautifulSoup(page.content, 'html.parser')
22-
results = soup.find(id='ResultsContainer')
21+
soup = BeautifulSoup(page.content, "html.parser")
22+
results = soup.find(id="ResultsContainer")
2323
return results
2424

2525

@@ -33,10 +33,11 @@ def filter_jobs_by_keyword(results, word):
3333
:return: None - just meant to print results
3434
:rtype: None
3535
"""
36-
filtered_jobs = results.find_all('h2',
37-
string=lambda text: word in text.lower())
36+
filtered_jobs = results.find_all(
37+
"h2", string=lambda text: word in text.lower()
38+
)
3839
for f_job in filtered_jobs:
39-
link = f_job.find('a')['href']
40+
link = f_job.find("a")["href"]
4041
print(f_job.text.strip())
4142
print(f"Apply here: {link}\n")
4243

@@ -51,36 +52,35 @@ def print_all_jobs(results):
5152
:return: None - just meant to print results
5253
:rtype: None
5354
"""
54-
job_elems = results.find_all('section', class_='card-content')
55+
job_elems = results.find_all("section", class_="card-content")
5556

5657
for job_elem in job_elems:
5758
# keep in mind that each job_elem is another BeautifulSoup object!
58-
title_elem = job_elem.find('h2', class_='title')
59-
company_elem = job_elem.find('div', class_='company')
60-
location_elem = job_elem.find('div', class_='location')
59+
title_elem = job_elem.find("h2", class_="title")
60+
company_elem = job_elem.find("div", class_="company")
61+
location_elem = job_elem.find("div", class_="location")
6162
if None in (title_elem, company_elem, location_elem):
6263
continue
6364
# print(job_elem.prettify()) # to inspect the 'None' element
6465
print(title_elem.text.strip())
65-
link_elem = title_elem.find('a')
66-
print(link_elem['href'])
66+
link_elem = title_elem.find("a")
67+
print(link_elem["href"])
6768
print(company_elem.text.strip())
6869
print(location_elem.text.strip())
6970
print()
7071

7172

7273
# USE THE SCRIPT AS A COMMAND-LINE INTERFACE
7374
# ----------------------------------------------------------------------------
74-
my_parser = argparse.ArgumentParser(prog='jobs',
75-
description='Find Developer Jobs')
76-
my_parser.add_argument('-location',
77-
metavar='location',
78-
type=str,
79-
help='The location of the job')
80-
my_parser.add_argument('-word',
81-
metavar='word',
82-
type=str,
83-
help='What keyword to filter by')
75+
my_parser = argparse.ArgumentParser(
76+
prog="jobs", description="Find Developer Jobs"
77+
)
78+
my_parser.add_argument(
79+
"-location", metavar="location", type=str, help="The location of the job"
80+
)
81+
my_parser.add_argument(
82+
"-word", metavar="word", type=str, help="What keyword to filter by"
83+
)
8484

8585
args = my_parser.parse_args()
8686
location, keyword = args.location, args.word

web-scraping-bs4/scrape_jobs.py

Lines changed: 10 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -2,27 +2,26 @@
22
from bs4 import BeautifulSoup
33

44

5-
URL = 'https://www.monster.com/jobs/search/?q=Software-Developer\
6-
&where=Australia'
5+
URL = "https://www.monster.com/jobs/search/?q=Software-Developer\
6+
&where=Australia"
77
page = requests.get(URL)
88

9-
soup = BeautifulSoup(page.content, 'html.parser')
10-
results = soup.find(id='ResultsContainer')
9+
soup = BeautifulSoup(page.content, "html.parser")
10+
results = soup.find(id="ResultsContainer")
1111

1212
# Look for Python jobs
13-
python_jobs = results.find_all('h2',
14-
string=lambda t: "python" in t.lower())
13+
python_jobs = results.find_all("h2", string=lambda t: "python" in t.lower())
1514
for p_job in python_jobs:
16-
link = p_job.find('a')['href']
15+
link = p_job.find("a")["href"]
1716
print(p_job.text.strip())
1817
print(f"Apply here: {link}\n")
1918

2019
# Print out all available jobs from the scraped webpage
21-
job_elems = results.find_all('section', class_='card-content')
20+
job_elems = results.find_all("section", class_="card-content")
2221
for job_elem in job_elems:
23-
title_elem = job_elem.find('h2', class_='title')
24-
company_elem = job_elem.find('div', class_='company')
25-
location_elem = job_elem.find('div', class_='location')
22+
title_elem = job_elem.find("h2", class_="title")
23+
company_elem = job_elem.find("div", class_="company")
24+
location_elem = job_elem.find("div", class_="location")
2625
if None in (title_elem, company_elem, location_elem):
2726
continue
2827
print(title_elem.text.strip())

0 commit comments

Comments (0)