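# bdjobs_crawler.py
#
# Crawls job postings on joblist.bdjobs.com and tallies how many advertised
# vacancies mention each of a fixed set of programming languages. The totals
# are printed as the crawl runs and written to jobs.json at the end.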
import json
import re
import time

import requests
from bs4 import BeautifulSoup
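# Vacancy tally per language, keyed by the keyword searched for in each posting.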
lang_dt = {'php': 0, 'python': 0, 'c++': 0, 'c#': 0, 'java': 0, 'ruby': 0, 'asp': 0, 'perl': 0}

def get_content(url, pdata=None):
    """Fetch a page; POST when form data is given, GET otherwise."""
    if pdata:
        r = requests.post(url, data=pdata)
    else:
        r = requests.get(url)
    # Flatten the markup onto one line so the pagination regex can match across
    # line breaks, and lowercase it so keyword matching is case-insensitive.
    content = r.text.replace("\r", "").replace("\n", "")
    return content.lower()

def parse_job_description(url):
    """Fetch one job posting and add its vacancy count to every language it mentions."""
    print("Parsing job description for", url)
    content = get_content(url)
    time.sleep(1)  # throttle requests so we do not hammer the site
    soup = BeautifulSoup(content, "html.parser")
    text = soup.find("div", attrs={"class": "job_detail_left_wrapper"}).text
    try:
        nov = int(soup.find("div", attrs={"class": "job_nov_d"}).text)
    except AttributeError:
        print("nov not found")
        nov = 1  # vacancy count missing; assume 1 vacancy
    except ValueError:
        print("nov value error")
        print(soup.find("div", attrs={"class": "job_nov_d"}).text)
        nov = 1  # vacancy count not a plain number; assume 1 vacancy
    print(nov)
    # A language counts once per posting, weighted by the number of vacancies.
    # \W on both sides keeps e.g. 'java' from matching inside 'javascript'.
    for lang in lang_dt:
        pattern_lang = re.compile(r'\W({})\W'.format(re.escape(lang)))
        if re.findall(pattern_lang, text):
            lang_dt[lang] += nov

def process_job_urls(content):
    """Find every job link on a listing page and crawl each posting."""
    soup = BeautifulSoup(content, "html.parser")
    job_list = soup.find_all("div", attrs={"class": "job_title_text"})
    for item in job_list:
        job_url = 'http://joblist.bdjobs.com/' + item.find('a')['href']
        parse_job_description(job_url)

def visit_next_page(content):
    """Follow the "Next" pagination link; return None on the last page."""
    print("visiting next page")
    # The "Next" link calls GoPage(n) in JavaScript; capture n, the next page number.
    pattern = re.compile(r'<li><a href="javascript:GoPage\((\d+)\)" class="prevnext">Next', re.IGNORECASE)
    result = re.findall(pattern, content)
    print(result)
    if len(result) == 0:
        print("next page not found")
        return None
    post_data = {}  # you have to find the post data; the captured page number belongs in it
    url = 'http://joblist.bdjobs.com/JobSearch.asp'
    content = get_content(url, post_data)
    return content
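
# NOTE: JobSearch.asp paginates via a form submission, and the exact POST fields
# are not known here. One way to discover them is to click "Next" in a browser
# with the network inspector open and mirror the fields that request sends,
# substituting the page number captured by visit_next_page.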

def main(url):
    content = get_content(url)
    while True:
        process_job_urls(content)
        content = visit_next_page(content)
        if content is None:
            print("Site Crawling Complete")
            break
        time.sleep(1)
        print(lang_dt)
        # Stop after one page for now: with the POST data above still empty,
        # looping would refetch the same search page forever.
        break

if __name__ == "__main__":
    print("Program started")
    url = 'http://joblist.bdjobs.com/'
    main(url)
    # Persist the tallies, then print a quick per-language summary.
    with open("jobs.json", "w") as fp:
        json.dump(lang_dt, fp, sort_keys=True)
    for item in lang_dt:
        print(item, lang_dt[item])
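
# Usage (assuming Python 3 with requests and beautifulsoup4 installed):
#   python bdjobs_crawler.py
# Prints progress while crawling, writes the totals to jobs.json, and ends
# with one "<language> <count>" line per tracked language.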