feat: Ability to query by time posted for linkedin, indeed, glassdoor, ziprecruiter (#103)
VitaminB16 committed Feb 9, 2024
1 parent 2563c5c commit 91b137e
Showing 7 changed files with 53 additions and 23 deletions.
7 changes: 5 additions & 2 deletions README.md
@@ -29,18 +29,20 @@ _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/)
### Usage

```python
import csv
from jobspy import scrape_jobs

jobs = scrape_jobs(
site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
search_term="software engineer",
location="Dallas, TX",
results_wanted=10,
results_wanted=20,
hours_old=72, # (only LinkedIn is hour-specific; the others round up to days old)
country_indeed='USA' # only needed for indeed / glassdoor
)
print(f"Found {len(jobs)} jobs")
print(jobs.head())
jobs.to_csv("jobs.csv", index=False) # to_xlsx
jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False) # to_xlsx
```

### Output
@@ -73,6 +75,7 @@ Optional
├── linkedin_company_ids (list[int]): searches for linkedin jobs with specific company ids
├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
├── hours_old (int): filters jobs by the number of hours since the job was posted (all but LinkedIn round up to the next day)
```
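A minimal sketch of the day-rounding described for `hours_old` (a hypothetical helper, not part of the jobspy API, mirroring the `fromage` conversion the scrapers in this commit apply):

```python
def hours_to_fromage(hours_old: int | None) -> int | None:
    """Convert the hour-based filter to the day-based `fromage`/`days`
    parameter the non-LinkedIn boards accept (minimum of one day)."""
    if not hours_old:
        return None
    return max(hours_old // 24, 1)

assert hours_to_fromage(72) == 3  # three full days
assert hours_to_fromage(5) == 1   # less than a day still filters to one day
```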

### JobPost Schema
5 changes: 4 additions & 1 deletion src/jobspy/__init__.py
@@ -42,6 +42,8 @@ def scrape_jobs(
full_description: bool | None = False,
linkedin_company_ids: list[int] | None = None,
offset: int | None = 0,
hours_old: int | None = None,
**kwargs,
) -> pd.DataFrame:
"""
Simultaneously scrapes job data from multiple job sites.
@@ -84,6 +86,7 @@ def get_site_type():
results_wanted=results_wanted,
linkedin_company_ids=linkedin_company_ids,
offset=offset,
hours_old=hours_old
)

def scrape_site(site: Site) -> Tuple[str, JobResponse]:
@@ -189,4 +192,4 @@ def worker(site):
else:
jobs_formatted_df = pd.DataFrame()

return jobs_formatted_df
return jobs_formatted_df.sort_values(by='date_posted', ascending=False) if not jobs_formatted_df.empty else jobs_formatted_df
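A quick illustration (hypothetical sample data) of the newest-first ordering the changed return line applies; the `empty` guard matters because `sort_values` raises a `KeyError` on the column-less frame built in the `else` branch:

```python
import pandas as pd

df = pd.DataFrame({
    "title": ["older job", "newer job"],
    "date_posted": pd.to_datetime(["2024-02-01", "2024-02-08"]),
})
print(df.sort_values(by="date_posted", ascending=False))  # "newer job" first

# pd.DataFrame().sort_values(by="date_posted")  # would raise KeyError
```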
4 changes: 2 additions & 2 deletions src/jobspy/scrapers/__init__.py
@@ -23,12 +23,12 @@ class ScraperInput(BaseModel):
linkedin_company_ids: list[int] | None = None

results_wanted: int = 15
hours_old: int | None = None


class Scraper:
def __init__(self, site: Site, proxy: list[str] | None = None):
self.site = site
self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)

def scrape(self, scraper_input: ScraperInput) -> JobResponse:
...
def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...
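For context on the one-line `proxy` mapping in `Scraper.__init__` above (which the Indeed change below passes straight through to `requests`), a minimal sketch with a hypothetical proxy URL:

```python
proxy = "http://user:pass@10.10.1.10:3128"  # hypothetical proxy URL
proxies = {"http": proxy, "https": proxy} if proxy else None
# requests.post(url, headers=headers, json=payload, proxies=proxies)
```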
14 changes: 12 additions & 2 deletions src/jobspy/scrapers/glassdoor/__init__.py
@@ -100,7 +100,7 @@ def process_job(self, job_data):
location_type = job["header"].get("locationType", "")
age_in_days = job["header"].get("ageInDays")
is_remote, location = False, None
date_posted = (datetime.now() - timedelta(days=age_in_days)).date() if age_in_days else None
date_posted = (datetime.now() - timedelta(days=age_in_days)).date() if age_in_days is not None else None

if location_type == "S":
is_remote = True
@@ -258,18 +258,28 @@ def add_payload(
page_num: int,
cursor: str | None = None,
) -> str:
# `fromage` is the posting time filter in days
fromage = max(scraper_input.hours_old // 24, 1) if scraper_input.hours_old else None
filter_params = []
if scraper_input.easy_apply:
filter_params.append({"filterKey": "applicationType", "values": "1"})
if fromage:
filter_params.append({"filterKey": "fromAge", "values": str(fromage)})
payload = {
"operationName": "JobSearchResultsQuery",

"variables": {
"excludeJobListingIds": [],
"filterParams": [{"filterKey": "applicationType", "values": "1"}] if scraper_input.easy_apply else [],
"filterParams": filter_params,
"keyword": scraper_input.search_term,
"numJobsToShow": 30,
"locationType": location_type,
"locationId": int(location_id),
"parameterUrlInput": f"IL.0,12_I{location_type}{location_id}",
"pageNumber": page_num,
"pageCursor": cursor,
"fromage": fromage,
"sort": "date"
},
"query": "query JobSearchResultsQuery($excludeJobListingIds: [Long!], $keyword: String, $locationId: Int, $locationType: LocationTypeEnum, $numJobsToShow: Int!, $pageCursor: String, $pageNumber: Int, $filterParams: [FilterParams], $originalPageUrl: String, $seoFriendlyUrlInput: String, $parameterUrlInput: String, $seoUrl: Boolean) {\n jobListings(\n contextHolder: {searchParams: {excludeJobListingIds: $excludeJobListingIds, keyword: $keyword, locationId: $locationId, locationType: $locationType, numPerPage: $numJobsToShow, pageCursor: $pageCursor, pageNumber: $pageNumber, filterParams: $filterParams, originalPageUrl: $originalPageUrl, seoFriendlyUrlInput: $seoFriendlyUrlInput, parameterUrlInput: $parameterUrlInput, seoUrl: $seoUrl, searchType: SR}}\n ) {\n companyFilterOptions {\n id\n shortName\n __typename\n }\n filterOptions\n indeedCtk\n jobListings {\n ...JobView\n __typename\n }\n jobListingSeoLinks {\n linkItems {\n position\n url\n __typename\n }\n __typename\n }\n jobSearchTrackingKey\n jobsPageSeoData {\n pageMetaDescription\n pageTitle\n __typename\n }\n paginationCursors {\n cursor\n pageNumber\n __typename\n }\n indexablePageForSeo\n searchResultsMetadata {\n searchCriteria {\n implicitLocation {\n id\n localizedDisplayName\n type\n __typename\n }\n keyword\n location {\n id\n shortName\n localizedShortName\n localizedDisplayName\n type\n __typename\n }\n __typename\n }\n footerVO {\n countryMenu {\n childNavigationLinks {\n id\n link\n textKey\n __typename\n }\n __typename\n }\n __typename\n }\n helpCenterDomain\n helpCenterLocale\n jobAlert {\n jobAlertExists\n __typename\n }\n jobSerpFaq {\n questions {\n answer\n question\n __typename\n }\n __typename\n }\n jobSerpJobOutlook {\n occupation\n paragraph\n __typename\n }\n showMachineReadableJobs\n __typename\n }\n serpSeoLinksVO {\n relatedJobTitlesResults\n searchedJobTitle\n searchedKeyword\n searchedLocationIdAsString\n searchedLocationSeoName\n searchedLocationType\n topCityIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerNameResults\n topOccupationResults\n __typename\n }\n totalJobsCount\n __typename\n }\n}\n\nfragment JobView on JobListingSearchResult {\n jobview {\n header {\n adOrderId\n advertiserType\n adOrderSponsorshipLevel\n ageInDays\n divisionEmployerName\n easyApply\n employer {\n id\n name\n shortName\n __typename\n }\n employerNameFromSearch\n goc\n gocConfidence\n gocId\n jobCountryId\n jobLink\n jobResultTrackingKey\n jobTitleText\n locationName\n locationType\n locId\n needsCommission\n payCurrency\n payPeriod\n payPeriodAdjustedPay {\n p10\n p50\n p90\n __typename\n }\n rating\n salarySource\n savedJobId\n sponsored\n __typename\n }\n job {\n descriptionFragments\n importConfigId\n jobTitleId\n jobTitleText\n listingId\n __typename\n }\n jobListingAdminDetails {\n cpcVal\n importConfigId\n jobListingId\n jobSourceId\n userEligibleForAdminJobDetails\n __typename\n }\n overview {\n shortName\n squareLogoUrl\n __typename\n }\n __typename\n }\n __typename\n}\n",
}
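A worked example of the filter construction above, assuming hypothetical inputs `hours_old=48` and `easy_apply=True`:

```python
hours_old, easy_apply = 48, True  # hypothetical scraper_input values

fromage = max(hours_old // 24, 1) if hours_old else None  # -> 2
filter_params = []
if easy_apply:
    filter_params.append({"filterKey": "applicationType", "values": "1"})
if fromage:
    filter_params.append({"filterKey": "fromAge", "values": str(fromage)})

# filter_params == [{"filterKey": "applicationType", "values": "1"},
#                   {"filterKey": "fromAge", "values": "2"}]
```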
10 changes: 6 additions & 4 deletions src/jobspy/scrapers/indeed/__init__.py
@@ -363,12 +363,15 @@ def get_headers():

@staticmethod
def add_params(scraper_input: ScraperInput, page: int) -> dict[str, str | Any]:
# `fromage` is the posting time filter in days
fromage = max(scraper_input.hours_old // 24, 1) if scraper_input.hours_old else None
params = {
"q": scraper_input.search_term,
"l": scraper_input.location if scraper_input.location else scraper_input.country.value[0].split(',')[-1],
"filter": 0,
"start": scraper_input.offset + page * 10,
"sort": "date"
"sort": "date",
"fromage": fromage,
}
if scraper_input.distance:
params["radius"] = scraper_input.distance
@@ -405,8 +408,7 @@ def is_job_remote(job: dict, job_detailed: dict, description: str) -> bool:
)
return is_remote_in_attributes or is_remote_in_description or is_remote_in_location

@staticmethod
def get_job_details(job_keys: list[str]) -> dict:
def get_job_details(self, job_keys: list[str]) -> dict:
"""
Queries the GraphQL endpoint for detailed job information for the given job keys.
"""
@@ -478,7 +480,7 @@ def get_job_details(job_keys: list[str]) -> dict:
}}
"""
}
response = requests.post(url, headers=headers, json=payload)
response = requests.post(url, headers=headers, json=payload, proxies=self.proxy)
if response.status_code == 200:
return response.json()['data']['jobData']['results']
else:
33 changes: 21 additions & 12 deletions src/jobspy/scrapers/linkedin/__init__.py
@@ -59,6 +59,12 @@ def scrape(self, scraper_input: ScraperInput) -> JobResponse:
url_lock = Lock()
page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0

seconds_old = (
scraper_input.hours_old * 3600
if scraper_input.hours_old
else None
)

def job_type_code(job_type_enum):
mapping = {
JobType.FULL_TIME: "F",
@@ -85,7 +91,8 @@ def job_type_code(job_type_enum):
"pageNum": 0,
"start": page + scraper_input.offset,
"f_AL": "true" if scraper_input.easy_apply else None,
"f_C": ','.join(map(str, scraper_input.linkedin_company_ids)) if scraper_input.linkedin_company_ids else None
"f_C": ','.join(map(str, scraper_input.linkedin_company_ids)) if scraper_input.linkedin_company_ids else None,
"f_TPR": f"r{seconds_old}",
}

params = {k: v for k, v in params.items() if v is not None}
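A worked example of the seconds-granular filter built above: `hours_old=24` becomes `f_TPR=r86400`, which is why LinkedIn is the only hour-specific board in the README note:

```python
hours_old = 24                  # hypothetical input
seconds_old = hours_old * 3600  # 86400
params = {"f_TPR": f"r{seconds_old}" if seconds_old else None}
# -> {"f_TPR": "r86400"}; a None value is dropped by the dict
# comprehension above before the request is sent.
```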
@@ -101,7 +108,9 @@ def job_type_code(job_type_enum):
response.raise_for_status()

except requests.HTTPError as e:
raise LinkedInException(f"bad response status code: {e.response.status_code}")
raise LinkedInException(
f"bad response status code: {e.response.status_code}"
)
except ProxyError as e:
raise LinkedInException("bad proxy")
except Exception as e:
@@ -145,11 +154,11 @@ def process_job(self, job_card: Tag, job_url: str, full_descr: bool) -> Optional

compensation = None
if salary_tag:
salary_text = salary_tag.get_text(separator=' ').strip()
salary_values = [currency_parser(value) for value in salary_text.split('-')]
salary_text = salary_tag.get_text(separator=" ").strip()
salary_values = [currency_parser(value) for value in salary_text.split("-")]
salary_min = salary_values[0]
salary_max = salary_values[1]
currency = salary_text[0] if salary_text[0] != '$' else 'USD'
currency = salary_text[0] if salary_text[0] != "$" else "USD"

compensation = Compensation(
min_amount=int(salary_min),
@@ -294,17 +303,17 @@ def get_location(self, metadata_card: Optional[Tag]) -> Location:
@staticmethod
def headers() -> dict:
return {
'authority': 'www.linkedin.com',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'accept-language': 'en-US,en;q=0.9',
'cache-control': 'max-age=0',
'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
"authority": "www.linkedin.com",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"accept-language": "en-US,en;q=0.9",
"cache-control": "max-age=0",
"sec-ch-ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
# 'sec-ch-ua-mobile': '?0',
# 'sec-ch-ua-platform': '"macOS"',
# 'sec-fetch-dest': 'document',
# 'sec-fetch-mode': 'navigate',
# 'sec-fetch-site': 'none',
# 'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}
3 changes: 3 additions & 0 deletions src/jobspy/scrapers/ziprecruiter/__init__.py
@@ -165,6 +165,9 @@ def add_params(scraper_input) -> dict[str, str | Any]:
"search": scraper_input.search_term,
"location": scraper_input.location,
}
if scraper_input.hours_old:
params['days'] = max(scraper_input.hours_old // 24, 1)
job_type_value = None
if scraper_input.job_type:
if scraper_input.job_type.value == "fulltime":
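Taken together, a hedged end-to-end sketch of what one `hours_old` value becomes on each board, per the hunks above:

```python
hours_old = 72  # hypothetical input

per_site_filter = {
    "linkedin":      f"r{hours_old * 3600}",   # f_TPR, in seconds
    "indeed":        max(hours_old // 24, 1),  # fromage, in days
    "glassdoor":     max(hours_old // 24, 1),  # fromAge filterParam, in days
    "zip_recruiter": max(hours_old // 24, 1),  # days
}
# -> {'linkedin': 'r259200', 'indeed': 3, 'glassdoor': 3, 'zip_recruiter': 3}
```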
