so.py
import requests
from bs4 import BeautifulSoup


def get_last_page(url):
    # Fetch the first results page and read the pagination links;
    # the second-to-last anchor holds the last page number
    # (the final anchor is the "next" button).
    result = requests.get(url)
    soup = BeautifulSoup(result.text, "html.parser")
    pages = soup.find("div", {"class": "s-pagination"}).find_all("a")
    last_page = pages[-2].get_text(strip=True)
    return int(last_page)


def extract_job(html):
    # Pull the title, company, location, and apply link out of one job card.
    title = html.find("h2", {"class": "mb4"}).find("a")["title"]
    company, location = html.find("h3", {"class": "mb4"}).find_all(
        "span", recursive=False)
    company = company.get_text(strip=True)
    location = location.get_text(strip=True)
    job_id = html["data-jobid"]
    return {
        "title": title,
        "company": company,
        "location": location,
        "apply_link": f"https://stackoverflow.com/jobs/{job_id}",
    }


def extract_jobs(last_page, url):
    # Walk every results page and collect one dict per job card.
    jobs = []
    for page in range(last_page):
        print(f"Searching jobs on stackoverflow.com... page: {page + 1}")
        result = requests.get(f"{url}&pg={page + 1}")
        soup = BeautifulSoup(result.text, "html.parser")
        job_cards = soup.find_all("div", {"class": "-job"})
        for card in job_cards:
            jobs.append(extract_job(card))
    return jobs


def get_jobs(word):
    # Build the Stack Overflow jobs URL from the user's search word
    # (passed in from Web_scraper).
    url = f"https://stackoverflow.com/jobs?q={word}&sort=i"
    last_page = get_last_page(url)  # find the last page for that URL
    jobs = extract_jobs(last_page, url)  # scrape jobs from every page
    return jobs
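

# A minimal usage sketch, not part of the original file: running the module
# directly scrapes listings for an illustrative search term and prints them.
# The "python" keyword and the output format below are assumptions.
if __name__ == "__main__":
    found_jobs = get_jobs("python")
    for job in found_jobs:
        print(f"{job['title']} at {job['company']} ({job['location']})")
        print(f"  apply: {job['apply_link']}")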