# Listing 1. aggregator.py
#!/usr/bin/python
import feedparser
import sys
# ---------------------------------------------------
# Open the personal feeds output file
aggregation_filename = "myfeeds.html"
max_title_chars = 60
try:
aggregation_file = open(aggregation_filename,"w")
aggregation_file.write("""
My news
""")
except IOError:
print "Error: cannot write '%s' " % \
aggregation_filename
exit
# ---------------------------------------------------
# Each non-blank line in feeds.txt is a feed source URL; collect them
# into feeds_list, logging each addition to stderr.
feeds_filename = "feeds.txt"
feeds_list = []
try:
    # `with` guarantees the file is closed even if a read fails.
    with open(feeds_filename, "r") as feeds_file:
        for line in feeds_file:
            # strip() already removes whitespace from both ends; the
            # original's extra .rstrip() was redundant.
            stripped_line = line.strip()
            if stripped_line:
                feeds_list.append(stripped_line)
                sys.stderr.write("Adding feed '" + stripped_line + "'\n")
except IOError:
    print("Error: cannot read '%s' " % feeds_filename)
    # The original bare `exit` was a no-op (builtin referenced, not
    # called); actually terminate so the loop below never runs.
    sys.exit(1)
# ---------------------------------------------------
# Iterate over feeds_list, grabbing the feed for each and appending a
# heading plus one link per entry to the aggregation file.
for feed_url in feeds_list:
    sys.stderr.write("Checking '%s'..." % feed_url)
    feed = feedparser.parse(feed_url)
    sys.stderr.write("done.\n")
    if not feed.entries:
        # Guard: the original indexed feed.entries[0] unconditionally and
        # would raise IndexError on an empty or unreachable feed.
        sys.stderr.write("\tNo entries for '%s'\n" % feed_url)
        continue
    # NOTE(review): heading tags were stripped from the listing;
    # reconstructed <h2>. The listing heads each section with the FIRST
    # ENTRY's title -- possibly feed.feed.title was intended; kept as
    # written. Confirm against the original listing.
    aggregation_file.write('<h2>%s</h2>\n' % feed.entries[0].title)
    # Iterate over each entry from this feed, logging it to stderr and
    # putting a link to it in the summary.
    for entry in feed.entries:
        sys.stderr.write("\tWrote: '%s'" % entry.title[0:max_title_chars])
        if len(entry.title) > max_title_chars:
            sys.stderr.write("...")  # mark titles truncated in the log
        sys.stderr.write("\n")
        # The original format string had one %s but a two-element argument
        # tuple (TypeError at runtime); the stripped anchor markup is
        # reconstructed here -- confirm against the original listing.
        aggregation_file.write(
            '<a href="%s">%s</a><br/>\n' % (entry.link, entry.title))
    aggregation_file.write('\n')
# ---------------------------------------------------
# Finish up with the HTML and release the file handle.
# NOTE(review): the closing tags were stripped from the listing's
# triple-quoted literal; reconstructed to match the reconstructed header.
aggregation_file.write("</body></html>\n")
aggregation_file.close()