Sat, 14 Jun 2008 12:29:11 -0400 <me@nikosapi.org>
[gpodder.git] / src / gpodder / libpodcasts.py
blobb5fe49ec1e015e4efd3e11e864d0ad6540add8fc
1 # -*- coding: utf-8 -*-
3 # gPodder - A media aggregator and podcast client
4 # Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
6 # gPodder is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 3 of the License, or
9 # (at your option) any later version.
11 # gPodder is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
22 # libpodcasts.py -- data classes for gpodder
23 # thomas perl <thp@perli.net> 20051029
25 # Contains code based on:
26 # liblocdbwriter.py (2006-01-09)
27 # liblocdbreader.py (2006-01-10)
30 import gtk
31 import gobject
32 import pango
34 import gpodder
35 from gpodder import util
36 from gpodder import opml
37 from gpodder import cache
38 from gpodder import services
39 from gpodder import draw
40 from gpodder import libtagupdate
41 from gpodder import dumbshelve
43 from gpodder.liblogger import log
44 from gpodder.libgpodder import gl
46 import os.path
47 import os
48 import glob
49 import shutil
50 import sys
51 import urllib
52 import urlparse
53 import time
54 import threading
55 import datetime
56 import md5
57 import xml.dom.minidom
59 from xml.sax import saxutils
# Protects concurrent access to the per-channel localdb files
# (used by podcastChannel.addDownloadedItem / delete_episode_by_url)
global_lock = threading.RLock()

# Status icon names for the episode list tree model.  The Maemo build
# uses the platform's themed icon names; the desktop build uses
# freedesktop icon names and GTK stock IDs.
if gpodder.interface == gpodder.MAEMO:
    ICON_AUDIO_FILE = 'gnome-mime-audio-mp3'
    ICON_VIDEO_FILE = 'gnome-mime-video-mp4'
    ICON_BITTORRENT = 'qgn_toolb_browser_web'
    ICON_DOWNLOADING = 'qgn_toolb_messagin_moveto'
    ICON_DELETED = 'qgn_toolb_gene_deletebutton'
    ICON_NEW = 'qgn_list_gene_favor'
else:
    ICON_AUDIO_FILE = 'audio-x-generic'
    ICON_VIDEO_FILE = 'video-x-generic'
    ICON_BITTORRENT = 'applications-internet'
    ICON_DOWNLOADING = gtk.STOCK_GO_DOWN
    ICON_DELETED = gtk.STOCK_DELETE
    ICON_NEW = gtk.STOCK_NEW
class ChannelSettings(object):
    """Persistent per-channel settings, keyed by channel URL.

    Settings dictionaries are stored in a dumbshelve whose keys are
    UTF-8 encoded channel URLs.
    """
    # Shared shelf for all channels' settings
    storage = dumbshelve.open_shelve(gl.channel_settings_file)

    @classmethod
    def get_settings_by_url( cls, url):
        """Return the stored settings dict for url, or {} if unknown."""
        if isinstance( url, unicode):
            url = url.encode('utf-8')
        # EAFP: one lookup instead of has_key() + indexing (has_key()
        # is also removed in Python 3)
        try:
            return cls.storage[url]
        except KeyError:
            return {}

    @classmethod
    def set_settings_by_url( cls, url, settings):
        """Store the settings dict for url and flush the shelf to disk."""
        if isinstance( url, unicode):
            url = url.encode('utf-8')
        log( 'Saving settings for %s', url)
        cls.storage[url] = settings
        cls.storage.sync()
class EpisodeURLMetainfo(object):
    """Cached per-episode metadata (e.g. length, pubdate), keyed by URL.

    On a cache miss the metadata is determined once via
    util.get_episode_info_from_url() and then stored in a dumbshelve,
    so later lookups do not hit the network again.
    """
    # Shared shelf caching the downloaded metadata
    storage = dumbshelve.open_shelve(gl.episode_metainfo_file)

    @classmethod
    def get_metadata_by_url(cls, url):
        """Return metadata for url, downloading and caching it on first use."""
        if isinstance(url, unicode):
            url = url.encode('utf-8')
        # EAFP: one lookup instead of has_key() + indexing (has_key()
        # is also removed in Python 3)
        try:
            return cls.storage[url]
        except KeyError:
            log('Trying to download metainfo for %s', url)
            result = util.get_episode_info_from_url(url, gl.config.http_proxy)
            cls.storage[url] = result
            cls.storage.sync()
            return result
class podcastChannel(list):
    """holds data for a complete channel

    A podcastChannel is a list of podcastItem objects plus channel
    metadata (title, link, description, cover image URL, ...).  State is
    persisted in three places: per-channel settings (ChannelSettings),
    the shared parsed-feed cache (dumbshelve via gpodder.cache.Cache)
    and a per-channel localdb "index.xml" listing downloaded episodes.
    """
    # Attribute names persisted via ChannelSettings
    SETTINGS = ('sync_to_devices', 'device_playlist_name','override_title','username','password')
    # Shared cache of rendered status icons for the episode list
    icon_cache = {}

    # Feed cache shared by all channels; fc adds conditional-fetch logic
    storage = dumbshelve.open_shelve(gl.feed_cache_file)
    fc = cache.Cache( storage)

    @classmethod
    def clear_cache(cls, urls_to_keep):
        """Drop cached feeds whose URL is no longer subscribed."""
        for url in cls.storage.keys():
            if url not in urls_to_keep:
                log('(podcastChannel) Removing old feed from cache: %s', url)
                del cls.storage[url]

    @classmethod
    def sync_cache(cls):
        """Flush the shared feed cache to disk."""
        cls.storage.sync()

    @classmethod
    def get_by_url(cls, url, force_update=False, offline=False, default_title=None, old_channel=None):
        """Fetch (possibly from cache) and parse the feed at url.

        Returns old_channel unchanged when the cache reports the feed
        has not been updated; otherwise builds a fresh podcastChannel
        from the feedparser result, limited to max_episodes_per_feed
        entries and sorted newest-first.
        """
        if isinstance( url, unicode):
            url = url.encode('utf-8')

        (updated, c) = cls.fc.fetch( url, force_update, offline)

        # If we have an old instance of this channel, and
        # feedcache says the feed hasn't changed, return old
        if not updated and old_channel:
            return old_channel

        channel = podcastChannel( url)
        channel.parse_error = c.get('bozo_exception', None)
        channel.load_settings()
        if hasattr(c.feed, 'title'):
            channel.title = c.feed.title
        elif default_title is not None:
            channel.title = default_title
        else:
            # Last resort: use the URL itself as the title
            channel.title = url
        if hasattr( c.feed, 'link'):
            channel.link = c.feed.link
        if hasattr( c.feed, 'subtitle'):
            channel.description = util.remove_html_tags(c.feed.subtitle)

        if hasattr(c.feed, 'updated_parsed') and c.feed.updated_parsed is not None:
            channel.pubDate = time.mktime(c.feed.updated_parsed)
        if hasattr( c.feed, 'image'):
            if c.feed.image.href:
                channel.image = c.feed.image.href

        # We can limit the maximum number of entries that gPodder will parse
        # via the "max_episodes_per_feed" configuration option.
        if len(c.entries) > gl.config.max_episodes_per_feed:
            log('Limiting number of episodes for %s to %d', channel.title, gl.config.max_episodes_per_feed)
        for entry in c.entries[:min(gl.config.max_episodes_per_feed, len(c.entries))]:
            episode = None

            try:
                episode = podcastItem.from_feedparser_entry( entry, channel)
            except:
                log( 'Cannot instantiate episode: %s. Skipping.', entry.get( 'id', '(no id available)'), sender = channel, traceback=True)

            # from_feedparser_entry() returns None for non-downloadable items
            if episode:
                channel.append( episode)

        channel.sort( reverse = True)

        return channel

    @staticmethod
    def create_from_dict(d, load_items=True, force_update=False, callback_error=None, offline=False, old_channel=None):
        """Create a channel from an OPML importer dict (must contain 'url').

        When load_items is True the feed is fetched and parsed; on
        failure (or when load_items is False) a metadata-only channel
        is returned instead.
        """
        if load_items:
            try:
                default_title = None
                if 'title' in d:
                    default_title = d['title']
                return podcastChannel.get_by_url(d['url'], force_update, offline, default_title, old_channel)
            except:
                callback_error and callback_error( _('Could not load channel feed from URL: %s') % d['url'])
                log( 'Cannot load podcastChannel from URL: %s', d['url'], traceback=True)

        # Fallback: bare channel built only from the dict's metadata
        c = podcastChannel()
        for key in ( 'url', 'title', 'description' ):
            if key in d:
                setattr( c, key, d[key])
        c.load_settings()

        return c

    def __init__( self, url = "", title = "", link = "", description = ""):
        self.url = url
        self.title = title
        self.link = link
        self.description = util.remove_html_tags( description)
        self.image = None
        self.pubDate = 0
        # Exception raised by feedparser for this feed (None if parsed OK)
        self.parse_error = None
        # Cached result for newest_pubdate_downloaded()
        self.newest_pubdate_cached = None

        # should this channel be synced to devices? (ex: iPod)
        self.sync_to_devices = True
        # to which playlist should be synced
        self.device_playlist_name = 'gPodder'
        # if set, this overrides the channel-provided title
        self.override_title = ''
        self.username = ''
        self.password = ''

        self.save_dir_size = 0
        self.__save_dir_size_set = False

        # Lazily-built gtk.ListStore of episodes (see tree_model property)
        self.__tree_model = None

    def request_save_dir_size(self):
        """Calculate the download folder size once (cached afterwards)."""
        if not self.__save_dir_size_set:
            self.update_save_dir_size()
        self.__save_dir_size_set = True

    def update_save_dir_size(self):
        """Re-calculate the total size of this channel's download folder."""
        self.save_dir_size = util.calculate_size(self.save_dir)

    def get_filename( self):
        """Return the MD5 sum of the channel URL"""
        return md5.new( self.url).hexdigest()
    filename = property(fget=get_filename)

    def get_title( self):
        # Precedence: user override, then feed-provided title, then URL
        if self.override_title:
            return self.override_title
        elif not self.__title.strip():
            return self.url
        else:
            return self.__title

    def set_title( self, value):
        self.__title = value.strip()
    title = property(fget=get_title,
                     fset=set_title)

    def set_custom_title( self, custom_title):
        """Set the title override (cleared when equal to the feed title)."""
        custom_title = custom_title.strip()

        if custom_title != self.__title:
            self.override_title = custom_title
        else:
            self.override_title = ''

    def load_downloaded_episodes( self):
        """Read the localdb (index.xml) listing downloaded episodes."""
        try:
            return LocalDBReader( self.url).read( self.index_file)
        except:
            # Missing or unreadable index: fall back to an empty copy
            return podcastChannel( self.url, self.title, self.link, self.description)

    def save_downloaded_episodes( self, channel):
        """Write the given channel's episodes to the localdb (best-effort)."""
        try:
            log( 'Setting localdb channel data => %s', self.index_file, sender = self)
            LocalDBWriter( self.index_file).write( channel)
        except:
            log( 'Error writing to localdb: %s', self.index_file, sender = self, traceback = True)

    def load_settings( self):
        """Apply the persisted per-channel settings to this instance."""
        settings = ChannelSettings.get_settings_by_url( self.url)

        for key in self.SETTINGS:
            if settings.has_key( key):
                setattr( self, key, settings[key])

    def save_settings( self):
        """Persist the attributes listed in SETTINGS."""
        settings = {}
        for key in self.SETTINGS:
            settings[key] = getattr( self, key)

        ChannelSettings.set_settings_by_url( self.url, settings)

    def reset_pubdate_cache(self):
        """Invalidate the cached newest_pubdate_downloaded() value."""
        self.newest_pubdate_cached = None

    def newest_pubdate_downloaded(self):
        """
        Returns the most recent pubDate value of all downloaded episodes, or
        None if the pubDate cannot be determined.

        This value is cached for speedup. You can call reset_pubdate_cache()
        to clear the cached value and re-calculate the newest pubDate.
        """
        if self.newest_pubdate_cached == 0:
            return 0

        elif self.newest_pubdate_cached is None:
            # Try DownloadHistory's entries first
            for episode in self:
                if gl.history_is_downloaded( episode.url):
                    self.newest_pubdate_cached = episode.pubDate
                    return episode.pubDate

            # If nothing found, do pubDate comparison
            pubdate = 0
            for episode in self.load_downloaded_episodes():
                pubdate = episode.newer_pubdate( pubdate)
            self.newest_pubdate_cached = pubdate

        return self.newest_pubdate_cached

    def episode_is_new(self, episode, last_pubdate=0):
        """Return True if episode counts as new (not fetched or in progress)."""
        if last_pubdate == 0:
            last_pubdate = self.newest_pubdate_downloaded()

        # episode is older than newest downloaded
        if episode.compare_pubdate(last_pubdate) < 0:
            return False

        # episode has been downloaded before
        if episode.is_downloaded() or gl.history_is_downloaded(episode.url):
            return False

        # download is currently in progress
        if services.download_status_manager.is_download_in_progress(episode.url):
            return False

        return True

    def get_new_episodes( self):
        """Return the list of episodes considered new for this channel."""
        last_pubdate = self.newest_pubdate_downloaded()

        if not last_pubdate:
            # Nothing downloaded yet: only consider the first few episodes
            # (gl.config.default_new) instead of the whole feed
            return [episode for episode in self[0:min(len(self),gl.config.default_new)] if self.episode_is_new(episode)]

        new_episodes = []
        for episode in self.get_all_episodes():
            if self.episode_is_new(episode, last_pubdate):
                new_episodes.append(episode)

        return new_episodes

    def can_sort_by_pubdate( self):
        """Return False if any episode lacks a parseable pubDate."""
        for episode in self:
            if episode.pubDate == 0:
                log('Episode %s has non-parseable pubDate. Sorting disabled.', episode.title)
                return False

        return True

    def update_m3u_playlist(self, downloaded_episodes=None):
        """(Re-)write this channel's .m3u playlist, if enabled in config."""
        if gl.config.create_m3u_playlists:
            if downloaded_episodes is None:
                downloaded_episodes = self.load_downloaded_episodes()
            fn = util.sanitize_filename(self.title)
            if len(fn) == 0:
                fn = os.path.basename(self.save_dir)
            m3u_filename = os.path.join(gl.downloaddir, fn+'.m3u')
            log('Writing playlist to %s', m3u_filename, sender=self)
            f = open(m3u_filename, 'w')
            f.write('#EXTM3U\n')
            for episode in sorted(downloaded_episodes):
                filename = episode.local_filename()
                # Prefer a path relative to the playlist's directory
                if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
                    filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
                f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.cute_pubdate()+')\n')
                f.write(filename+'\n')
            f.close()

    def addDownloadedItem( self, item):
        """Register a freshly-downloaded episode in the localdb.

        Returns True if the item was newly added, False if its URL was
        already present.
        """
        # no multithreaded access
        global_lock.acquire()

        downloaded_episodes = self.load_downloaded_episodes()
        already_in_list = item.url in [ episode.url for episode in downloaded_episodes ]

        # only append if not already in list
        if not already_in_list:
            downloaded_episodes.append( item)
            self.save_downloaded_episodes( downloaded_episodes)

            # Update metadata on file (if possible and wanted)
            if gl.config.update_tags and libtagupdate.tagging_supported():
                filename = item.local_filename()
                try:
                    libtagupdate.update_metadata_on_file(filename, title=item.title, artist=self.title)
                except:
                    log('Error while calling update_metadata_on_file() :(')

            # Update the cached newest_pubdate_downloaded() result.
            newest = self.newest_pubdate_downloaded()
            if newest is None:
                self.newest_pubdate_cached = item.pubDate
            else:
                self.newest_pubdate_cached = item.newer_pubdate(newest)

            gl.history_mark_downloaded(item.url)
            self.update_m3u_playlist(downloaded_episodes)

            # Torrent downloads: hand the .torrent over to the client
            if item.file_type() == 'torrent':
                torrent_filename = item.local_filename()
                destination_filename = util.torrent_filename( torrent_filename)
                gl.invoke_torrent(item.url, torrent_filename, destination_filename)

        global_lock.release()
        return not already_in_list

    def get_all_episodes( self):
        """Return downloaded + feed episodes, de-duplicated, newest first."""
        episodes = []
        added_urls = []
        added_guids = []

        # go through all episodes (both new and downloaded),
        # prefer already-downloaded (in localdb)
        for item in [] + self.load_downloaded_episodes() + self:
            # skip items with the same guid (if it has a guid)
            if item.guid and item.guid in added_guids:
                continue

            # skip items with the same download url
            if item.url in added_urls:
                continue

            episodes.append( item)
            added_urls.append( item.url)
            if item.guid:
                added_guids.append( item.guid)

        episodes.sort( reverse = True)

        return episodes

    def get_episode_stats( self):
        """Return (downloaded_count, has_new, unplayed_count)."""
        (downloaded, has_new, unplayed) = (0, False, 0)

        for episode in self.get_all_episodes():
            if episode.is_downloaded():
                downloaded += 1
                # "unplayed" only counts downloaded episodes
                if not episode.is_played():
                    unplayed += 1
            if not has_new and self.episode_is_new(episode):
                has_new = True

        return (downloaded, has_new, unplayed)

    def force_update_tree_model( self):
        """Drop the cached tree model; it is rebuilt on next access."""
        self.__tree_model = None

    def update_model( self):
        """Refresh the status-icon column of the cached tree model."""
        new_episodes = self.get_new_episodes()
        self.update_save_dir_size()

        iter = self.tree_model.get_iter_first()
        while iter is not None:
            self.iter_set_downloading_columns( self.tree_model, iter, new_episodes)
            iter = self.tree_model.iter_next( iter)

    @property
    def tree_model( self):
        # Lazily build the episode ListStore on first access
        if not self.__tree_model:
            log('Generating TreeModel for %s', self.url, sender = self)
            self.__tree_model = self.items_liststore()

        return self.__tree_model

    def iter_set_downloading_columns( self, model, iter, new_episodes = []):
        """Update the status icon (column 4) for the row at iter."""
        global ICON_AUDIO_FILE, ICON_VIDEO_FILE, ICON_BITTORRENT
        global ICON_DOWNLOADING, ICON_DELETED, ICON_NEW

        url = model.get_value( iter, 0)
        local_filename = model.get_value( iter, 8)
        # NOTE(review): "played" is the negated history flag here —
        # confirm gl.history_is_played() semantics before changing
        played = not gl.history_is_played(url)
        locked = gl.history_is_locked(url)

        if gl.config.episode_list_descriptions:
            icon_size = 32
        else:
            icon_size = 16

        if os.path.exists( local_filename):
            # Downloaded: pick the icon matching the file type
            file_type = util.file_type_by_extension( util.file_extension_from_url(url))
            if file_type == 'audio':
                status_icon = util.get_tree_icon(ICON_AUDIO_FILE, played, locked, self.icon_cache, icon_size)
            elif file_type == 'video':
                status_icon = util.get_tree_icon(ICON_VIDEO_FILE, played, locked, self.icon_cache, icon_size)
            elif file_type == 'torrent':
                status_icon = util.get_tree_icon(ICON_BITTORRENT, played, locked, self.icon_cache, icon_size)
            else:
                status_icon = util.get_tree_icon('unknown', played, locked, self.icon_cache, icon_size)
        elif services.download_status_manager.is_download_in_progress(url):
            status_icon = util.get_tree_icon(ICON_DOWNLOADING, icon_cache=self.icon_cache, icon_size=icon_size)
        elif gl.history_is_downloaded(url):
            status_icon = util.get_tree_icon(ICON_DELETED, icon_cache=self.icon_cache, icon_size=icon_size)
        elif url in [e.url for e in new_episodes]:
            status_icon = util.get_tree_icon(ICON_NEW, icon_cache=self.icon_cache, icon_size=icon_size)
        else:
            status_icon = None

        model.set( iter, 4, status_icon)

    def items_liststore( self):
        """
        Return a gtk.ListStore containing episodes for this channel
        """
        new_model = gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN, gtk.gdk.Pixbuf, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING)
        new_episodes = self.get_new_episodes()

        for item in self.get_all_episodes():
            if gl.config.episode_list_descriptions:
                description = '%s\n<small>%s</small>' % (saxutils.escape(item.title), saxutils.escape(item.one_line_description()))
            else:
                description = saxutils.escape(item.title)
            new_iter = new_model.append((item.url, item.title, gl.format_filesize(item.length, 1), True, None, item.cute_pubdate(), description, item.description, item.local_filename()))
            self.iter_set_downloading_columns( new_model, new_iter, new_episodes)

        self.update_save_dir_size()
        return new_model

    def find_episode( self, url):
        """Return the episode with the given URL, or None if not found."""
        for item in self.get_all_episodes():
            if url == item.url:
                return item

        return None

    def get_save_dir(self):
        # Per-channel download directory, named after the URL's MD5 sum
        save_dir = os.path.join(gl.downloaddir, self.filename, '')

        # Create save_dir if it does not yet exist
        if not util.make_directory( save_dir):
            log( 'Could not create save_dir: %s', save_dir, sender = self)

        return save_dir
    save_dir = property(fget=get_save_dir)

    def remove_downloaded( self):
        """Delete this channel's download folder (errors ignored)."""
        shutil.rmtree( self.save_dir, True)

    def get_index_file(self):
        # gets index xml filename for downloaded channels list
        return os.path.join( self.save_dir, 'index.xml')
    index_file = property(fget=get_index_file)

    def get_cover_file( self):
        # gets cover filename for cover download cache
        return os.path.join( self.save_dir, 'cover')
    cover_file = property(fget=get_cover_file)

    def delete_episode_by_url(self, url):
        """Delete the episode's file and remove it from the localdb."""
        global_lock.acquire()
        downloaded_episodes = self.load_downloaded_episodes()

        for episode in self.get_all_episodes():
            if episode.url == url:
                util.delete_file( episode.local_filename())
                if episode in downloaded_episodes:
                    downloaded_episodes.remove( episode)

        self.save_downloaded_episodes( downloaded_episodes)
        self.update_m3u_playlist(downloaded_episodes)
        global_lock.release()
class podcastItem(object):
    """holds data for one object in a channel"""

    @staticmethod
    def from_feedparser_entry( entry, channel):
        """Build a podcastItem from a feedparser entry.

        Returns None when the entry has neither a usable enclosure nor
        a direct link to a recognized media file type.
        """
        episode = podcastItem( channel)

        episode.title = entry.get( 'title', util.get_first_line( util.remove_html_tags( entry.get( 'summary', ''))))
        episode.link = entry.get( 'link', '')
        episode.description = util.remove_html_tags( entry.get( 'summary', entry.get( 'link', entry.get( 'title', ''))))
        episode.guid = entry.get( 'id', '')
        if entry.get( 'updated_parsed', None):
            episode.pubDate = time.mktime(entry.updated_parsed)

        if episode.title == '':
            log( 'Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender = episode)

        enclosure = None
        if hasattr(entry, 'enclosures') and len(entry.enclosures) > 0:
            enclosure = entry.enclosures[0]
            if len(entry.enclosures) > 1:
                # Prefer the first audio/video enclosure with a valid URL
                for e in entry.enclosures:
                    if hasattr( e, 'href') and hasattr( e, 'length') and hasattr( e, 'type') and (e.type.startswith('audio/') or e.type.startswith('video/')):
                        if util.normalize_feed_url(e.href) is not None:
                            log( 'Selected enclosure: %s', e.href, sender = episode)
                            enclosure = e
                            break
            episode.url = util.normalize_feed_url( enclosure.get( 'href', ''))
        elif hasattr(entry, 'link'):
            # No enclosure: accept a direct link if it looks like a media file
            extension = util.file_extension_from_url(entry.link)
            file_type = util.file_type_by_extension(extension)
            if file_type is not None:
                log('Adding episode with link to file type "%s".', file_type, sender=episode)
                episode.url = entry.link

        if not episode.url:
            # This item in the feed has no downloadable enclosure
            return None

        if not episode.pubDate:
            # Feed gave no date: fall back to cached URL metainfo
            metainfo = episode.get_metainfo()
            if 'pubdate' in metainfo:
                episode.pubDate = metainfo['pubdate']

        if hasattr( enclosure, 'length'):
            try:
                episode.length = int(enclosure.length)
            except:
                # Non-numeric length attribute in the feed
                episode.length = -1

        # For episodes with a small length amount, try to find it via HTTP HEAD
        if episode.length <= 100:
            metainfo = episode.get_metainfo()
            if 'length' in metainfo:
                episode.length = metainfo['length']

        if hasattr( enclosure, 'type'):
            episode.mimetype = enclosure.type

        if episode.title == '':
            # Still untitled: use the URL's basename (without extension)
            ( filename, extension ) = os.path.splitext( os.path.basename( episode.url))
            episode.title = filename

        return episode

    def __init__( self, channel):
        self.url = ''
        self.title = ''
        self.length = 0
        self.mimetype = 'application/octet-stream'
        self.guid = ''
        self.description = ''
        self.link = ''
        # The podcastChannel this episode belongs to
        self.channel = channel
        # Publication timestamp; 0 means "unknown"
        self.pubDate = 0

    def get_metainfo(self):
        """Return cached URL metadata (see EpisodeURLMetainfo)."""
        return EpisodeURLMetainfo.get_metadata_by_url(self.url)

    def is_played(self):
        """True if the episode is marked as played in the history."""
        return gl.history_is_played(self.url)

    def is_deleted(self):
        """True if the episode was downloaded before but the file is gone."""
        return gl.history_is_downloaded(self.url) and not self.is_downloaded()

    def age_in_days(self):
        """Age of the downloaded file, in days."""
        return util.file_age_in_days(self.local_filename())

    def is_old(self):
        """True if the downloaded file exceeds the configured old-age limit."""
        return self.age_in_days() > gl.config.episode_old_age

    def get_age_string(self):
        """Human-readable age of the downloaded file."""
        return util.file_age_to_string(self.age_in_days())
    age_prop = property(fget=get_age_string)

    def one_line_description( self):
        """Return the description collapsed into a single line."""
        lines = self.description.strip().splitlines()
        if not lines or lines[0] == '':
            return _('No description available')
        else:
            return ' '.join((l.strip() for l in lines if l.strip() != ''))

    def is_downloaded( self):
        """True if the episode's file exists locally."""
        return os.path.exists( self.local_filename())

    def is_locked(self):
        """True if the episode is protected from automatic deletion."""
        return gl.history_is_locked(self.url)

    def delete_from_disk(self):
        """Remove the episode's file via the owning channel (best-effort)."""
        try:
            self.channel.delete_episode_by_url(self.url)
        except:
            log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)

    def local_filename( self):
        """Return the local path where this episode is (or will be) stored."""
        ext = util.file_extension_from_url(self.url)

        # For compatibility with already-downloaded episodes,
        # we accept md5 filenames if they are downloaded now.
        md5_filename = os.path.join(self.channel.save_dir, md5.new(self.url).hexdigest()+ext)
        if os.path.exists(md5_filename) or not gl.config.experimental_file_naming:
            return md5_filename

        # If the md5 filename does not exist,
        # derive a human-readable name from the URL instead
        episode = util.file_extension_from_url(self.url, complete_filename=True)
        episode = util.sanitize_filename(episode)

        # If the episode filename looks suspicious,
        # we still return the md5 filename to be on
        # the safe side of the fence ;)
        if len(episode) == 0 or episode.startswith('redirect.'):
            return md5_filename
        filename = os.path.join(self.channel.save_dir, episode)
        return filename

    def sync_filename( self):
        """Display name used when syncing this episode to a device."""
        if gl.config.custom_sync_name_enabled:
            return util.object_string_formatter(gl.config.custom_sync_name, episode=self, channel=self.channel)
        else:
            return self.title

    def file_type( self):
        """Return the file type category derived from the URL's extension."""
        return util.file_type_by_extension( util.file_extension_from_url( self.url))

    @property
    def basename( self):
        # URL basename without its extension
        return os.path.splitext( os.path.basename( self.url))[0]

    @property
    def published( self):
        """Publication date as a YYYYMMDD string ('00000000' if unknown)."""
        try:
            return datetime.datetime.fromtimestamp(self.pubDate).strftime('%Y%m%d')
        except:
            log( 'Cannot format pubDate for "%s".', self.title, sender = self)
            return '00000000'

    def __cmp__( self, other):
        # Python 2 comparison protocol: order by pubDate, ties by title
        if self.pubDate == other.pubDate:
            return cmp(self.title, other.title)
        return self.pubDate - other.pubDate

    def compare_pubdate(self, pubdate):
        """Positive if self is newer than pubdate, negative if older."""
        return self.pubDate - pubdate

    def newer_pubdate(self, pubdate=0):
        """Return the newer of self.pubDate and the given pubdate."""
        if self.compare_pubdate(pubdate) > 0:
            return self.pubDate
        else:
            return pubdate

    def cute_pubdate( self):
        """Human-readable publication date."""
        result = util.format_date(self.pubDate)
        if result is None:
            return '(%s)' % _('unknown')
        else:
            return result
    pubdate_prop = property(fget=cute_pubdate)

    def calculate_filesize( self):
        """Set self.length from the size of the downloaded file."""
        try:
            self.length = os.path.getsize(self.local_filename())
        except:
            log( 'Could not get filesize for %s.', self.url)

    def get_filesize_string( self):
        """Human-readable file size."""
        return gl.format_filesize( self.length)
    filesize_prop = property(fget=get_filesize_string)

    def get_channel_title( self):
        return self.channel.title
    channel_prop = property(fget=get_channel_title)

    def get_played_string( self):
        if not self.is_played():
            return _('Unplayed')

        return ''
    played_prop = property(fget=get_played_string)

    def equals( self, other_item):
        """Episodes are considered equal when their URLs match."""
        if other_item is None:
            return False

        return self.url == other_item.url
def channels_to_model(channels, cover_cache=None, max_width=0, max_height=0):
    """Build the gtk.ListStore backing the channel list view.

    Columns: 0 = URL, 1 = title, 2 = description markup, 3 = pill
    pixbuf (unplayed/downloaded counts), 5 = cover pixbuf (left unset
    here to avoid blocking on downloads), 6 = parse error text.
    """
    model = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf, int, gtk.gdk.Pixbuf, str)

    for channel in channels:
        (num_downloaded, has_new, num_unplayed) = channel.get_episode_stats()

        row = model.append()
        model.set(row, 0, channel.url)
        model.set(row, 1, channel.title)

        escaped_title = saxutils.escape(channel.title)
        escaped_info = saxutils.escape(util.get_first_line(channel.description))

        # Channels with new episodes get a bold title
        if has_new:
            markup = '<span weight="bold">%s</span>' % escaped_title
        else:
            markup = escaped_title
        markup = '%s\n<small>%s</small>' % (markup, escaped_info)

        # Feed problems are signalled with red markup + error text
        if channel.parse_error is not None:
            markup = '<span foreground="#ff0000">%s</span>' % markup
            model.set(row, 6, channel.parse_error)

        model.set(row, 2, markup)

        if num_unplayed > 0 or num_downloaded > 0:
            model.set(row, 3, draw.draw_pill_pixbuf(str(num_unplayed), str(num_downloaded)))

    return model
def load_channels(load_items=True, force_update=False, callback_proc=None, callback_url=None, callback_error=None, offline=False, is_cancelled_cb=None, old_channels=None):
    """Load all subscribed channels from the OPML subscription file.

    Optionally re-uses previously-loaded channel objects (old_channels)
    when the cached feed is unchanged, reports progress via the
    callback_* hooks, and honors cancellation via is_cancelled_cb.
    Returns the channels sorted case-insensitively by title.
    """
    importer = opml.Importer(gl.channel_opml_file)

    # Map channel URL -> previously-loaded channel object
    if old_channels is None:
        old_channels = {}
    else:
        old_channels = dict((c.url, c) for c in old_channels)

    result = []
    urls_to_keep = []
    for position, item in enumerate(importer.items):
        if is_cancelled_cb is not None and is_cancelled_cb():
            # Cancelled: stop forcing updates and go offline for the
            # remaining feeds so loading finishes quickly
            force_update = False
            offline = True
        callback_proc and callback_proc(position, len(importer.items))
        callback_url and callback_url(item['url'])
        urls_to_keep.append(item['url'])
        old_channel = old_channels.get(item['url'], None)
        channel = podcastChannel.create_from_dict(item, load_items, force_update, callback_error, offline, old_channel)
        result.append(channel)

    # Drop cached feeds that are no longer subscribed, then flush
    podcastChannel.clear_cache(urls_to_keep)
    podcastChannel.sync_cache()

    result.sort(key=lambda channel: channel.title.lower())
    return result
def save_channels( channels):
    """Write the channel list back to the OPML subscription file."""
    return opml.Exporter(gl.channel_opml_file).write(channels)
class LocalDBReader( object):
    """Parse a channel's localdb index.xml into a podcastChannel."""

    def __init__( self, url):
        # URL of the channel whose localdb is being read
        self.url = url

    def get_text( self, nodelist):
        """Concatenate the text of all TEXT_NODE entries in nodelist."""
        return ''.join( [ node.data for node in nodelist if node.nodeType == node.TEXT_NODE ])

    def get_text_by_first_node( self, element, name):
        """Return the text content of the first <name> child of element."""
        return self.get_text( element.getElementsByTagName( name)[0].childNodes)

    def get_episode_from_element( self, channel, element):
        """Build a podcastItem from an <item> DOM element."""
        episode = podcastItem( channel)
        episode.title = self.get_text_by_first_node( element, 'title')
        episode.description = self.get_text_by_first_node( element, 'description')
        episode.url = self.get_text_by_first_node( element, 'url')
        episode.link = self.get_text_by_first_node( element, 'link')
        episode.guid = self.get_text_by_first_node( element, 'guid')
        try:
            episode.pubDate = int(self.get_text_by_first_node( element, 'pubDate'))
        except:
            log('Looks like you have an old pubDate in your LocalDB')
            episode.pubDate = self.get_text_by_first_node( element, 'pubDate')
            # NOTE(review): 'feedparser' is never imported in this module,
            # so this legacy-pubDate fallback raises NameError at runtime;
            # the caller (load_downloaded_episodes) then silently discards
            # the whole localdb.  Needs an "import feedparser" to work.
            episode.pubDate = time.mktime(feedparser._parse_date(episode.pubDate))
        episode.mimetype = self.get_text_by_first_node( element, 'mimetype')
        episode.calculate_filesize()
        return episode

    def load_and_clean( self, filename):
        """
        Clean-up a LocalDB XML file that could potentially contain
        "unbound prefix" XML elements (generated by the old print-based
        LocalDB code). The code removes those lines to make the new
        DOM parser happy.

        This should be removed in a future version.
        """
        lines = []
        for line in open(filename).read().split('\n'):
            if not line.startswith('<gpodder:info'):
                lines.append( line)

        return '\n'.join( lines)

    def read( self, filename):
        """Parse the localdb file and return the resulting podcastChannel."""
        doc = xml.dom.minidom.parseString( self.load_and_clean( filename))
        rss = doc.getElementsByTagName('rss')[0]

        channel_element = rss.getElementsByTagName('channel')[0]

        channel = podcastChannel( url = self.url)
        channel.title = self.get_text_by_first_node( channel_element, 'title')
        channel.description = self.get_text_by_first_node( channel_element, 'description')
        channel.link = self.get_text_by_first_node( channel_element, 'link')
        channel.load_settings()

        for episode_element in rss.getElementsByTagName('item'):
            episode = self.get_episode_from_element( channel, episode_element)
            channel.append( episode)

        return channel
class LocalDBWriter(object):
    """Serialize a channel's downloaded episodes to a localdb index.xml."""

    def __init__( self, filename):
        # Destination path of the index.xml file
        self.filename = filename

    def create_node( self, doc, name, content):
        """Return a new <name>content</name> element owned by doc."""
        element = doc.createElement( name)
        element.appendChild( doc.createTextNode( content))
        return element

    def create_item( self, doc, episode):
        """Return an <item> element describing the given episode."""
        item = doc.createElement( 'item')
        for tag, value in (('title', episode.title),
                           ('description', episode.description),
                           ('url', episode.url),
                           ('link', episode.link),
                           ('guid', episode.guid),
                           ('pubDate', str(episode.pubDate)),
                           ('mimetype', episode.mimetype)):
            item.appendChild( self.create_node( doc, tag, value))
        return item

    def write( self, channel):
        """Write channel's downloaded episodes to self.filename.

        Returns True on success, False if the file could not be written.
        """
        doc = xml.dom.minidom.Document()

        rss = doc.createElement( 'rss')
        rss.setAttribute( 'version', '1.0')
        doc.appendChild( rss)

        channel_element = doc.createElement( 'channel')
        channel_element.appendChild( self.create_node( doc, 'title', channel.title))
        channel_element.appendChild( self.create_node( doc, 'description', channel.description))
        channel_element.appendChild( self.create_node( doc, 'link', channel.link))
        rss.appendChild( channel_element)

        # Only persist episodes whose files are actually on disk
        for episode in channel:
            if episode.is_downloaded():
                rss.appendChild( self.create_item( doc, episode))

        try:
            fp = open( self.filename, 'w')
            fp.write( doc.toxml( encoding = 'utf-8'))
            fp.close()
        except:
            log( 'Could not open file for writing: %s', self.filename, sender = self)
            return False

        return True