-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest.py
executable file
·93 lines (78 loc) · 2.89 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
#!../bin/python
import feedparser
from bs4 import BeautifulSoup
import urllib2
import codecs
import json
import string
import datetime
base_dir = 'out/'
def format_url(link, base):
    """Resolve a possibly-relative link against a base URL.

    Returns None when link is None, the link unchanged when it is
    already absolute, and base + '/' + link otherwise.
    """
    if link is None:
        return None
    # Bug fix: the original only recognized 'http://', so https:// links
    # were wrongly treated as relative and glued onto the base URL.
    if link.startswith(('http://', 'https://')):
        return link
    return base + '/' + link
def print_feed(feed):
entries = []
entries.extend(feed["items"])
sorted_entries = entries#sorted(entries, key=lambda entry: entry["date_parsed"], reverse=True)
if sorted_entries == []:
print "Not an RSS Feed"
for entry in sorted_entries:
#try:
# f = urllib2.urlopen(entry['link'])
# html = f.read()
# soup = BeautifulSoup(html)
# text = soup.get_text()
# links = [format_url(link.get('href'), entry['link']) for link in soup.find_all('a')]
#except urllib2.HTTPError as e:
# print "%s error: %s" % (entry['link'], str(e))
if entry['link'] is not None:
title = entry['title'].replace('/', '_')
f = codecs.open(base_dir + title + '.txt', encoding='utf-8', mode='w+')
f.write("%s\n\n%s\n\nlink: %s\n\n" % (entry['title'], entry['summary'], entry['link']))
if "comments" in entry:
f.write("comments: %s\n" % entry["comments"])
if "date" in entry:
f.write("date: %r\n" % entry["date"])
if "published" in entry:
f.write("published: %r\n" % entry["published"])
else:
f.write("published: %s\n" % str(datetime.datetime.now()))
f.write("keys: %r\n" % [x for x in entry.iterkeys()])
if 'http://' in entry['link']:
base = entry['link'][len('http://'):]
proto = 'http://'
elif 'https://' in entry['link']:
base = entry['link'][len('https://'):]
proto = 'https://'
else:
base = None
proto = None
splits = string.split(base, '/')
pr = urllib2.urlopen("http://josh-fowler.com/prapi/?url=%s" % (proto + splits[0]))
page_rank = pr.read()
f.write("pagerank: %s\n" % page_rank)
try:
fb = urllib2.urlopen("http://api.facebook.com/method/fql.query?query=select%20like_count%20from%20link_stat%20where%20url='" + entry['link'] +"'&format=json")
fb_text = fb.read()
likes = json.loads(fb_text)
if len(likes) == 1 and u'like_count' in likes[0]:
likes = likes[0][u'like_count']
f.write("# of likes: %s\n" % likes)
except ValueError as e:
print str(e)
#f.write("text: %s\n" % text)
#f.write("links: \n%r\n" % links)
f.close()
def parse_feeds(url_list):
feeds = [feedparser.parse(rss_url) for rss_url in url_list]
for feed in feeds:
print "Feed: %s" % feed['url']
print_feed(feed)
if __name__ == '__main__':
    # Smoke-test marker, then dump the two hard-coded feeds into base_dir.
    print "hello world"
    parse_feeds(["http://news.ycombinator.com/rss", "http://www.engadget.com/rss.xml"])
    #parse_feeds(["http://www.engadget.com/rss.xml"])