Commit

log search updates, fix glassdoor (#120)

troy-conte committed Mar 4, 2024
1 parent f8a4ecc commit db01bc6

Showing 7 changed files with 133 additions and 124 deletions.
6 changes: 1 addition & 5 deletions README.md
@@ -167,8 +167,4 @@ persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
 - Waiting some time between scrapes (site-dependent).
 - Trying a VPN or proxy to change your IP address.
 
----
-
-
-
-
+---
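
The troubleshooting advice above (waiting between scrapes, switching IP via a proxy) can be combined in a small retry loop. A minimal sketch, assuming the library's top-level `scrape_jobs` entry point and its single-string `proxy` parameter; the proxy URL is a placeholder:

```python
import time

from jobspy import scrape_jobs

jobs = None
for attempt in range(3):
    jobs = scrape_jobs(
        site_name=["glassdoor"],
        search_term="software engineer",
        location="Dallas, TX",
        results_wanted=30,
        proxy="http://user:pass@proxy.example:8080",  # placeholder, not a real endpoint
    )
    if jobs is not None and len(jobs) > 0:
        break
    time.sleep(60)  # site-dependent pause between scrapes, per the advice above
```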
2 changes: 1 addition & 1 deletion src/jobspy/jobs/__init__.py
@@ -145,7 +145,7 @@ def glassdoor_domain_value(self):
         else:
             raise Exception(f"Glassdoor is not available for {self.name}")
 
-    def get_url(self):
+    def get_glassdoor_url(self):
         return f"https://{self.glassdoor_domain_value}/"
 
     @classmethod
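The rename from `get_url` to `get_glassdoor_url` makes the country-specific intent explicit. A quick sketch of the behavior, assuming the `Country` enum lives in `jobspy.jobs` and exposes a `USA` member:

```python
from jobspy.jobs import Country

# Builds the site root from the country's Glassdoor domain; per the code
# above, countries without a Glassdoor domain raise an Exception instead.
print(Country.USA.get_glassdoor_url())  # expected: https://www.glassdoor.com/
```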
216 changes: 116 additions & 100 deletions src/jobspy/scrapers/glassdoor/__init__.py
@@ -5,6 +5,8 @@
 This module contains routines to scrape Glassdoor.
 """
 import json
+import re
+
 import requests
 from typing import Optional
 from datetime import datetime, timedelta
@@ -42,6 +44,7 @@ def __init__(self, proxy: Optional[str] = None):
         self.session = None
         self.scraper_input = None
         self.jobs_per_page = 30
+        self.max_pages = 30
         self.seen_urls = set()
 
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
@@ -52,39 +55,40 @@ def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
         self.scraper_input = scraper_input
         self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
-        self.base_url = self.scraper_input.country.get_url()
+        self.base_url = self.scraper_input.country.get_glassdoor_url()
 
+        self.session = create_session(self.proxy, is_tls=True, has_retry=True)
+        token = self._get_csrf_token()
+        self.headers['gd-csrf-token'] = token if token else self.fallback_token
+
         location_id, location_type = self._get_location(
             scraper_input.location, scraper_input.is_remote
         )
+        if location_type is None:
+            logger.error('Glassdoor: location not parsed')
+            return JobResponse(jobs=[])
         all_jobs: list[JobPost] = []
         cursor = None
-        max_pages = 30
-        self.session = create_session(self.proxy, is_tls=False, has_retry=True)
-        self.session.get(self.base_url)
 
-        try:
-            for page in range(
-                1 + (scraper_input.offset // self.jobs_per_page),
-                min(
-                    (scraper_input.results_wanted // self.jobs_per_page) + 2,
-                    max_pages + 1,
-                ),
-            ):
-                try:
-                    jobs, cursor = self._fetch_jobs_page(
-                        scraper_input, location_id, location_type, page, cursor
-                    )
-                    all_jobs.extend(jobs)
-                    if len(all_jobs) >= scraper_input.results_wanted:
-                        all_jobs = all_jobs[: scraper_input.results_wanted]
-                        break
-                except Exception as e:
-                    raise GlassdoorException(str(e))
-        except Exception as e:
-            raise GlassdoorException(str(e))
+        for page in range(
+            1 + (scraper_input.offset // self.jobs_per_page),
+            min(
+                (scraper_input.results_wanted // self.jobs_per_page) + 2,
+                self.max_pages + 1,
+            ),
+        ):
+            logger.info(f'Glassdoor search page: {page}')
+            try:
+                jobs, cursor = self._fetch_jobs_page(
+                    scraper_input, location_id, location_type, page, cursor
+                )
+                all_jobs.extend(jobs)
+                if not jobs or len(all_jobs) >= scraper_input.results_wanted:
+                    all_jobs = all_jobs[: scraper_input.results_wanted]
+                    break
+            except Exception as e:
+                logger.error(f'Glassdoor: {str(e)}')
+                break
         return JobResponse(jobs=all_jobs)
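
The page window in the rewritten loop is plain arithmetic, so a worked example may help; the input values below are illustrative:

```python
jobs_per_page, max_pages = 30, 30  # values set in __init__ above
offset, results_wanted = 60, 100   # example scraper inputs

start = 1 + offset // jobs_per_page  # 3: skip the two pages covered by offset
stop = min(results_wanted // jobs_per_page + 2, max_pages + 1)  # min(5, 31) = 5
print(list(range(start, stop)))  # pages [3, 4]; the loop also breaks early
                                 # once enough jobs have accumulated
```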

    def _fetch_jobs_page(
@@ -98,27 +102,26 @@ def _fetch_jobs_page(
         """
         Scrapes a page of Glassdoor for jobs with scraper_input criteria
         """
+        jobs = []
         self.scraper_input = scraper_input
         try:
             payload = self._add_payload(
                 location_id, location_type, page_num, cursor
             )
             response = self.session.post(
-                f"{self.base_url}/graph", headers=self.headers, timeout=10, data=payload
+                f"{self.base_url}/graph", headers=self.headers, timeout_seconds=15, data=payload
             )
             if response.status_code != 200:
-                raise GlassdoorException(
-                    f"bad response status code: {response.status_code}"
-                )
+                raise GlassdoorException(f"bad response status code: {response.status_code}")
             res_json = response.json()[0]
             if "errors" in res_json:
                 raise ValueError("Error encountered in API response")
-        except Exception as e:
-            raise GlassdoorException(str(e))
+        except (requests.exceptions.ReadTimeout, GlassdoorException, ValueError, Exception) as e:
+            logger.error(f'Glassdoor: {str(e)}')
+            return jobs, None
 
         jobs_data = res_json["data"]["jobListings"]["jobListings"]
 
-        jobs = []
         with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
             future_to_job_data = {executor.submit(self._process_job, job): job for job in jobs_data}
             for future in as_completed(future_to_job_data):
@@ -133,6 +136,18 @@ def _fetch_jobs_page(
             res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
         )
 
+    def _get_csrf_token(self):
+        """
+        Fetches csrf token needed for API by visiting a generic page
+        """
+        res = self.session.get(f'{self.base_url}/Job/computer-science-jobs.htm', headers=self.headers)
+        pattern = r'"token":\s*"([^"]+)"'
+        matches = re.findall(pattern, res.text)
+        token = None
+        if matches:
+            token = matches[0]
+        return token
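
The new `_get_csrf_token` helper is a plain regex over the page HTML. A self-contained check of that pattern against a hand-made snippet (the embedded-JSON shape is an assumption inferred from the pattern itself):

```python
import re

sample = '..."token": "abc123xyz", "expires": 1709500000...'
matches = re.findall(r'"token":\s*"([^"]+)"', sample)
print(matches[0] if matches else None)  # -> abc123xyz
```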

     def _process_job(self, job_data):
         """
         Processes a single job and fetches its description.
@@ -217,7 +232,7 @@ def _get_location(self, location: str, is_remote: bool) -> (int, str):
             return "11047", "STATE"  # remote options
         url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
-        session = create_session(self.proxy, has_retry=True)
-        res = session.get(url)
+        res = self.session.get(url, headers=self.headers)
         if res.status_code != 200:
             if res.status_code == 429:
                 logger.error(f'429 Response - Blocked by Glassdoor for too many requests')
@@ -266,7 +281,74 @@ def _add_payload(
                 "fromage": fromage,
                 "sort": "date"
             },
-            "query": """
+            "query": self.query_template
+        }
+        if self.scraper_input.job_type:
+            payload["variables"]["filterParams"].append(
+                {"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
+            )
+        return json.dumps([payload])
+
+    @staticmethod
+    def parse_compensation(data: dict) -> Optional[Compensation]:
+        pay_period = data.get("payPeriod")
+        adjusted_pay = data.get("payPeriodAdjustedPay")
+        currency = data.get("payCurrency", "USD")
+        if not pay_period or not adjusted_pay:
+            return None
+
+        interval = None
+        if pay_period == "ANNUAL":
+            interval = CompensationInterval.YEARLY
+        elif pay_period:
+            interval = CompensationInterval.get_interval(pay_period)
+        min_amount = int(adjusted_pay.get("p10") // 1)
+        max_amount = int(adjusted_pay.get("p90") // 1)
+        return Compensation(
+            interval=interval,
+            min_amount=min_amount,
+            max_amount=max_amount,
+            currency=currency,
+        )
+
+    @staticmethod
+    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
+        for job_type in JobType:
+            if job_type_str in job_type.value:
+                return [job_type]
+
+    @staticmethod
+    def parse_location(location_name: str) -> Location | None:
+        if not location_name or location_name == "Remote":
+            return
+        city, _, state = location_name.partition(", ")
+        return Location(city=city, state=state)
+
+    @staticmethod
+    def get_cursor_for_page(pagination_cursors, page_num):
+        for cursor_data in pagination_cursors:
+            if cursor_data["pageNumber"] == page_num:
+                return cursor_data["cursor"]
+
+    fallback_token = "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok"
+    headers = {
+        "authority": "www.glassdoor.com",
+        "accept": "*/*",
+        "accept-language": "en-US,en;q=0.9",
+        "apollographql-client-name": "job-search-next",
+        "apollographql-client-version": "4.65.5",
+        "content-type": "application/json",
+        "origin": "https://www.glassdoor.com",
+        "referer": "https://www.glassdoor.com/",
+        "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+        "sec-ch-ua-mobile": "?0",
+        "sec-ch-ua-platform": '"macOS"',
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
+    }
+    query_template = """
         query JobSearchResultsQuery(
             $excludeJobListingIds: [Long!],
             $keyword: String,
@@ -431,70 +513,4 @@ def _add_payload(
             }
             __typename
         }
         """
-        }
-        if self.scraper_input.job_type:
-            payload["variables"]["filterParams"].append(
-                {"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
-            )
-        return json.dumps([payload])
-
-    @staticmethod
-    def parse_compensation(data: dict) -> Optional[Compensation]:
-        pay_period = data.get("payPeriod")
-        adjusted_pay = data.get("payPeriodAdjustedPay")
-        currency = data.get("payCurrency", "USD")
-        if not pay_period or not adjusted_pay:
-            return None
-
-        interval = None
-        if pay_period == "ANNUAL":
-            interval = CompensationInterval.YEARLY
-        elif pay_period:
-            interval = CompensationInterval.get_interval(pay_period)
-        min_amount = int(adjusted_pay.get("p10") // 1)
-        max_amount = int(adjusted_pay.get("p90") // 1)
-        return Compensation(
-            interval=interval,
-            min_amount=min_amount,
-            max_amount=max_amount,
-            currency=currency,
-        )
-
-    @staticmethod
-    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
-        for job_type in JobType:
-            if job_type_str in job_type.value:
-                return [job_type]
-
-    @staticmethod
-    def parse_location(location_name: str) -> Location | None:
-        if not location_name or location_name == "Remote":
-            return
-        city, _, state = location_name.partition(", ")
-        return Location(city=city, state=state)
-
-    @staticmethod
-    def get_cursor_for_page(pagination_cursors, page_num):
-        for cursor_data in pagination_cursors:
-            if cursor_data["pageNumber"] == page_num:
-                return cursor_data["cursor"]
-
-    headers = {
-        "authority": "www.glassdoor.com",
-        "accept": "*/*",
-        "accept-language": "en-US,en;q=0.9",
-        "apollographql-client-name": "job-search-next",
-        "apollographql-client-version": "4.65.5",
-        "content-type": "application/json",
-        "gd-csrf-token": "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok",
-        "origin": "https://www.glassdoor.com",
-        "referer": "https://www.glassdoor.com/",
-        "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
-        "sec-ch-ua-mobile": "?0",
-        "sec-ch-ua-platform": '"macOS"',
-        "sec-fetch-dest": "empty",
-        "sec-fetch-mode": "cors",
-        "sec-fetch-site": "same-origin",
-        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
-    }
8 changes: 3 additions & 5 deletions src/jobspy/scrapers/indeed/__init__.py
@@ -15,7 +15,6 @@
 from bs4.element import Tag
 from concurrent.futures import ThreadPoolExecutor, Future
 
-from ..exceptions import IndeedException
 from ..utils import (
     count_urgent_words,
     extract_emails_from_text,
@@ -63,8 +62,7 @@ def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         while len(self.seen_urls) < scraper_input.results_wanted:
             pages_to_process = math.ceil((scraper_input.results_wanted - len(self.seen_urls)) / self.jobs_per_page)
             new_jobs = False
-
-            with ThreadPoolExecutor(max_workers=10) as executor:
+            with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
                 futures: list[Future] = [
                     executor.submit(self._scrape_page, page + pages_processed)
                     for page in range(pages_to_process)
@@ -93,10 +91,11 @@ def _scrape_page(self, page: int=0) -> list[JobPost]:
         :param page:
         :return: jobs found on page, total number of jobs found for search
         """
+        logger.info(f'Indeed search page: {page + 1}')
         job_list = []
         domain = self.scraper_input.country.indeed_domain_value
         self.base_url = f"https://{domain}.indeed.com"
 
         try:
             session = create_session(self.proxy)
             response = session.get(
@@ -141,7 +140,6 @@ def _scrape_page(self, page: int=0) -> list[JobPost]:
            job_results: list[Future] = [
                executor.submit(self._process_job, job, job_detailed['job']) for job, job_detailed in zip(jobs, jobs_detailed)
            ]
-
            job_list = [result.result() for result in job_results if result.result()]
 
        return job_list
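The Indeed change swaps a hardcoded `max_workers=10` for a configurable `self.num_workers`. Reduced to a runnable sketch, the fan-out pattern looks like this (names are illustrative):

```python
from concurrent.futures import Future, ThreadPoolExecutor

def scrape_page(page: int) -> list[str]:
    return [f"job-{page}-{i}" for i in range(2)]  # stand-in for real page scraping

num_workers = 10  # would come from the scraper's configuration
with ThreadPoolExecutor(max_workers=num_workers) as executor:
    futures: list[Future] = [executor.submit(scrape_page, p) for p in range(3)]
    job_list = [job for f in futures for job in f.result()]
print(job_list)  # six stand-in jobs from three concurrently scraped pages
```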