|
| 1 | +# Author: Asib Hossen |
| 2 | +# Date: May 21, 2024 |
| 3 | +# Description: This script scrapes job listings from https://www.devjobsscanner.com/ based on user input, displays the job details, and optionally saves them as CSV and/or TXT files. |
| 4 | +# Version: 1.1 |
| 5 | + |
| 6 | + |
import csv
import os
import re
import time
from urllib.parse import quote_plus

from bs4 import BeautifulSoup
from seleniumbase import Driver
| 13 | + |
def get_user_input():
    """
    Prompt the user for job title, remote job preference, number of jobs
    to scrape, sorting preference, and save option.

    Returns:
        tuple: A tuple containing job title (str), remote job preference (bool),
               number of jobs to scrape (int), save option (str), and
               sorting preference (str).
    """
    job = input("Enter the job title: ")
    remote = input("Do you want remote jobs only? (yes/no): ").lower() == 'yes'
    # Re-prompt until a valid positive integer is supplied, instead of
    # crashing with ValueError on non-numeric input.
    while True:
        try:
            num_jobs = int(input("Enter the number of jobs you want to scrape: "))
            if num_jobs > 0:
                break
            print("Please enter a positive number.")
        except ValueError:
            print("Please enter a valid integer.")
    sort_options = ['matches', 'newest', 'salary']
    print(f"Sort options: {sort_options}")
    sort_by = input("Enter the sorting preference (matches/newest/salary): ")
    # Fall back to the site's default relevance sort on an unknown option
    # rather than building a URL with a bogus sort parameter.
    if sort_by not in sort_options:
        print("Unrecognized sort option; defaulting to 'matches'.")
        sort_by = 'matches'
    save_option = input("Do you want to save the output as CSV, TXT, or both of them? (csv/txt/both): ").lower()
    return job, remote, num_jobs, save_option, sort_by
| 31 | + |
def construct_url(job, remote, sort_by):
    """
    Construct the devjobsscanner search URL from the query parameters.

    Args:
        job (str): The job title to search for.
        remote (bool): True if user wants remote jobs only, False otherwise.
        sort_by (str): The sorting preference ('matches', 'newest', 'salary').

    Returns:
        str: The constructed URL.
    """
    base_url = "https://www.devjobsscanner.com/search/"
    # URL-encode the job title so spaces and special characters
    # (e.g. "data engineer", "C++") yield a valid query string.
    search_params = f"?search={quote_plus(job)}"
    if remote is not None:
        search_params += f"&remote={str(remote).lower()}"
    if sort_by is not None:
        search_params += f"&sort={sort_by}"
    return base_url + search_params
| 52 | + |
def scrape_jobs(url, num_jobs):
    """
    Scrape job listings from the provided URL using SeleniumBase.

    Args:
        url (str): The URL to scrape job listings from.
        num_jobs (int): The maximum number of jobs to scrape.

    Returns:
        list: A list of dictionaries containing job details. May hold fewer
        than num_jobs entries if the site stops yielding results or an
        error interrupts scraping.
    """
    jobs = []
    driver = None
    try:
        driver = Driver(browser="Firefox", headless=False)
        driver.get(url)
        time.sleep(5)  # Initial wait for page load

        while len(jobs) < num_jobs:
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            job_divs = soup.find_all('div', class_='flex p-3 rounded group relative overflow-hidden')

            # Each pass re-parses the whole page, so skip the listings that
            # were already collected on earlier passes; iterating from the
            # top again would append duplicates.
            new_divs = job_divs[len(jobs):]
            if not new_divs:
                # Scrolling produced no additional listings: the site has
                # run out of results, so stop instead of looping forever.
                break

            for job_div in new_divs:
                if len(jobs) >= num_jobs:
                    break
                title = job_div.find('h2').text.strip()
                company = job_div.find('div', class_='jbs-dot-separeted-list').find('a').text.strip()
                tags = [tag.text.strip() for tag in job_div.find_all('a', class_='tag')]
                date_posted = job_div.find('span', class_='text-primary-text').text.strip()
                salary = job_div.find('span', class_='text-gray-text').text.strip()

                # Treat salary text without at least two consecutive digits
                # as unspecified.
                if not re.search(r'\d{2}', salary):
                    salary = "Not mentioned"

                job_url = job_div.find('a', class_='jbs-text-hover-link')['href']

                jobs.append({
                    'title': title,
                    'company': company,
                    'company_url': f"https://www.devjobsscanner.com/company/{company.lower()}",
                    'tags': tags,
                    'date_posted': date_posted,
                    'salary': salary,
                    'job_url': job_url
                })

            # Scroll down to trigger lazy-loading of more jobs.
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(5)  # Wait for new jobs to load

        return jobs[:num_jobs]
    except Exception as e:
        print("Error scraping jobs:", e)
        # Return whatever was collected before the failure rather than
        # discarding already-scraped jobs.
        return jobs[:num_jobs]
    finally:
        # Always release the browser, even when scraping fails midway;
        # the original leaked the driver on any exception.
        if driver is not None:
            driver.quit()
| 108 | + |
def display_jobs(jobs):
    """
    Print each scraped job's details to the console, separated by a rule.

    Args:
        jobs (list): A list of dictionaries containing job details.
    """
    rule = "-" * 40
    for listing in jobs:
        block = (
            f"Title: {listing['title']}",
            f"Company: {listing['company']}",
            f"Company URL: {listing['company_url']}",
            f"Tags: {', '.join(listing['tags'])}",
            f"Date Posted: {listing['date_posted']}",
            f"Salary: {listing['salary']}",
            f"Job URL: {listing['job_url']}",
            rule,
        )
        print("\n".join(block))
| 125 | + |
def save_as_csv(jobs, filename):
    """
    Save job details as a CSV file, creating the destination directory
    if needed.

    Args:
        jobs (list): A list of dictionaries containing job details.
        filename (str): The path of the CSV file to save.
    """
    if not jobs:
        # Guard: jobs[0].keys() below would raise IndexError on an empty list.
        print("No jobs to save as CSV.")
        return
    # Derive the directory from the actual target path instead of a
    # hard-coded "outputFiles" folder that may not match `filename`.
    output_dir = os.path.dirname(os.path.abspath(filename))
    os.makedirs(output_dir, exist_ok=True)
    keys = jobs[0].keys()
    try:
        with open(filename, 'w', newline='', encoding='utf-8') as output_file:
            dict_writer = csv.DictWriter(output_file, fieldnames=keys)
            dict_writer.writeheader()
            dict_writer.writerows(jobs)
    except IOError as e:
        print("Error saving as CSV:", e)
| 144 | + |
def save_as_txt(jobs, filename):
    """
    Save job details as a plain-text file, creating the destination
    directory if needed.

    Args:
        jobs (list): A list of dictionaries containing job details.
        filename (str): The path of the text file to save.
    """
    # Create the destination directory: unlike the CSV path, nothing else
    # guarantees it exists, so a txt-only save would otherwise fail with
    # an IOError that is only printed, producing no file.
    output_dir = os.path.dirname(os.path.abspath(filename))
    os.makedirs(output_dir, exist_ok=True)
    try:
        with open(filename, 'w', encoding='utf-8') as output_file:
            for job in jobs:
                output_file.write(f"Title: {job['title']}\n")
                output_file.write(f"Company: {job['company']}\n")
                output_file.write(f"Company URL: {job['company_url']}\n")
                output_file.write(f"Tags: {', '.join(job['tags'])}\n")
                output_file.write(f"Date Posted: {job['date_posted']}\n")
                output_file.write(f"Salary: {job['salary']}\n")
                output_file.write(f"Job URL: {job['job_url']}\n")
                output_file.write("-" * 40 + "\n")
    except IOError as e:
        print("Error saving as TXT:", e)
| 166 | + |
if __name__ == '__main__':
    job, remote, num_jobs, save_option, sort_by = get_user_input()
    url = construct_url(job, remote, sort_by)
    print(f"Scraping URL: {url}")
    jobs = scrape_jobs(url, num_jobs)
    if jobs:
        display_jobs(jobs)
        file_name = f"./outputFiles/{job}_jobs_remote_{str(remote).lower()}_sorted_by_{sort_by}"
        saved_formats = []
        if save_option in ('csv', 'both'):
            save_as_csv(jobs, f"{file_name}.csv")
            saved_formats.append('CSV')
        if save_option in ('txt', 'both'):
            save_as_txt(jobs, f"{file_name}.txt")
            saved_formats.append('TXT')
        # Only claim files were saved when a recognized option actually
        # wrote something; the original printed "Jobs saved as ..." for
        # any input, including unrecognized options that saved nothing.
        if saved_formats:
            print(f"Jobs saved as {' and '.join(saved_formats)} file(s).")
        else:
            print("No files saved (unrecognized save option).")
    else:
        print("No jobs found. Exiting.")
0 commit comments