-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathLinkedinJobScraper.py
73 lines (57 loc) · 2.32 KB
/
LinkedinJobScraper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
'''
LinkedIn job-posting scraper: logs in, searches a fixed list of job titles,
and appends the reported posting count per title to JobPostingsData.

TO DO:
1. Track data daily
2. Automatically run program?
Note: Job postings are in Canada
'''
import os, random, sys, time
from datetime import date
from urllib.parse import urlparse
from selenium import webdriver
from bs4 import BeautifulSoup

# Launch Chrome and open the LinkedIn login page.
# Requires chromedriver to be downloaded into chromedriver_win32/.
browser = webdriver.Chrome('chromedriver_win32/chromedriver.exe')
browser.get('https://www.linkedin.com/login?fromSignIn=true&trk=guest_homepage-basic_nav-header-signin')

# Read login info from config.txt: line 1 = username, line 2 = password.
# Use a context manager so the handle is always closed, and strip the trailing
# newline that readlines() leaves on each line -- sending a stray '\n' into the
# username field would submit the form before the password is typed.
with open('config.txt') as file:
    lines = file.readlines()
username = lines[0].strip()
password = lines[1].strip()

# Fill in the credentials and submit the login form.
# NOTE(review): find_element_by_id was removed in Selenium 4; if the installed
# selenium is >= 4, switch to browser.find_element(By.ID, ...) -- confirm version.
elementID = browser.find_element_by_id('username')
elementID.send_keys(username)
elementID = browser.find_element_by_id('password')
elementID.send_keys(password)
elementID.submit()
# Job titles to search for; one posting count per title is recorded per run.
job_list = ['computer support technician', 'digital marketer', 'front-end developer', 'security specialist', 'ml engineer', 'marketing analyst', 'hr analyst', 'software engineer', 'data scientist']
job_postings = {}
message_click = True  # collapse the messaging overlay only on the first search
today = date.today()  # one timestamp per run; hoisted out of the loop
for job in job_list:
    # Open the jobs search page and type the query.
    browser.get('https://www.linkedin.com/jobs/?showJobAlertsModal=false')
    # NOTE(review): this ember id is auto-generated and fragile -- verify it
    # still matches the search box after LinkedIn UI updates.
    jobID = browser.find_element_by_id('jobs-search-box-keyword-id-ember17')
    jobID.send_keys(job)
    if message_click:
        # Collapse the messaging overlay so it does not cover the results.
        messaging = browser.find_element_by_class_name('msg-overlay-bubble-header')
        messaging.click()
        message_click = False
    search = browser.find_element_by_class_name('jobs-search-box__submit-button')
    search.click()
    print("Scraping data for " + job)
    SCROLL_PAUSE_TIME = 5
    # Scroll to the bottom a few times so lazy-loaded results render.
    # (The original also tracked document.body.scrollHeight before/after each
    # scroll but never used it -- dead code removed.)
    for i in range(3):
        browser.execute_script('window.scrollTo(0, document.body.scrollHeight);')
        time.sleep(SCROLL_PAUSE_TIME)
    # Parse the total-results text, e.g. "1,234 results" -> 1234.
    soup = BeautifulSoup(browser.page_source, 'lxml')
    count_tag = soup.find('small', {'class': 'display-flex t-12 t-black--light t-normal'})
    if count_tag is None:
        # Layout changed or results did not load; skip this job instead of
        # crashing with AttributeError on None.
        print("Could not find result count for " + job)
        continue
    results = int(count_tag.get_text().strip().split()[0].replace(',', ''))
    job_postings[job] = results
    # Append one "<date> <job> <count>" record so data accumulates across runs;
    # the context manager guarantees the file is closed even on error.
    with open("JobPostingsData", "a") as data:
        data.write("%s %s %s\n" % (today, job, results))