Experimental support for gtkhtml2 for episode dialog (bug 162)
[gpodder.git] / src / gpodder / libpodcasts.py
blob3861a47b205dfd2aab9e3767b16ac999995078e1
1 # -*- coding: utf-8 -*-
3 # gPodder - A media aggregator and podcast client
4 # Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
6 # gPodder is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 3 of the License, or
9 # (at your option) any later version.
11 # gPodder is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
22 # libpodcasts.py -- data classes for gpodder
23 # thomas perl <thp@perli.net> 20051029
25 # Contains code based on:
26 # liblocdbwriter.py (2006-01-09)
27 # liblocdbreader.py (2006-01-10)
30 import gtk
31 import gobject
32 import pango
34 import gpodder
35 from gpodder import util
36 from gpodder import opml
37 from gpodder import cache
38 from gpodder import services
39 from gpodder import draw
40 from gpodder import libtagupdate
41 from gpodder import dumbshelve
43 from gpodder.liblogger import log
44 from gpodder.libgpodder import gl
45 from gpodder.dbsqlite import db
47 import os.path
48 import os
49 import glob
50 import shutil
51 import sys
52 import urllib
53 import urlparse
54 import time
55 import datetime
56 import rfc822
57 import md5
58 import xml.dom.minidom
59 import feedparser
61 from xml.sax import saxutils
# Status icon names for the episode list.  On Maemo we must use the
# device theme's own icon names; on the desktop we use freedesktop
# icon names and GTK stock items.
if gpodder.interface == gpodder.MAEMO:
    ICON_AUDIO_FILE = 'gnome-mime-audio-mp3'
    ICON_VIDEO_FILE = 'gnome-mime-video-mp4'
    ICON_BITTORRENT = 'qgn_toolb_browser_web'
    ICON_DOWNLOADING = 'qgn_toolb_messagin_moveto'
    ICON_DELETED = 'qgn_toolb_gene_deletebutton'
    ICON_NEW = 'qgn_list_gene_favor'
else:
    ICON_AUDIO_FILE = 'audio-x-generic'
    ICON_VIDEO_FILE = 'video-x-generic'
    ICON_BITTORRENT = 'applications-internet'
    ICON_DOWNLOADING = gtk.STOCK_GO_DOWN
    ICON_DELETED = gtk.STOCK_DELETE
    ICON_NEW = gtk.STOCK_ABOUT
81 class podcastChannel(object):
82 """holds data for a complete channel"""
83 SETTINGS = ('sync_to_devices', 'device_playlist_name','override_title','username','password')
84 icon_cache = {}
86 fc = cache.Cache()
88 @classmethod
89 def load(cls, url, create=True):
90 if isinstance(url, unicode):
91 url = url.encode('utf-8')
93 tmp = db.load_channels(factory=lambda d: cls.create_from_dict(d), url=url)
94 if len(tmp):
95 return tmp[0]
96 elif create:
97 tmp = podcastChannel(url)
98 if not tmp.update():
99 return None
100 tmp.save()
101 db.force_last_new(tmp)
102 return tmp
104 @staticmethod
105 def create_from_dict(d):
106 c = podcastChannel()
107 for key in d:
108 if hasattr(c, key):
109 setattr(c, key, d[key])
110 return c
112 def update(self):
113 (updated, c) = self.fc.fetch(self.url, self)
115 if c is None:
116 return False
118 if self.url != c.url:
119 log('Updating channel URL from %s to %s', self.url, c.url, sender=self)
120 self.url = c.url
122 # update the cover if it's not there
123 self.update_cover()
125 # If we have an old instance of this channel, and
126 # feedcache says the feed hasn't changed, return old
127 if not updated:
128 log('Channel %s is up to date', self.url)
129 return True
131 # Save etag and last-modified for later reuse
132 if c.headers.get('etag'):
133 self.etag = c.headers.get('etag')
134 if c.headers.get('last-modified'):
135 self.last_modified = c.headers.get('last-modified')
137 self.parse_error = c.get('bozo_exception', None)
139 if hasattr(c.feed, 'title'):
140 self.title = c.feed.title
141 else:
142 self.title = self.url
143 if hasattr( c.feed, 'link'):
144 self.link = c.feed.link
145 if hasattr( c.feed, 'subtitle'):
146 self.description = c.feed.subtitle
148 if hasattr(c.feed, 'updated_parsed') and c.feed.updated_parsed is not None:
149 self.pubDate = rfc822.mktime_tz(c.feed.updated_parsed+(0,))
150 else:
151 self.pubDate = time.time()
152 if hasattr( c.feed, 'image'):
153 if hasattr(c.feed.image, 'href') and c.feed.image.href:
154 old = self.image
155 self.image = c.feed.image.href
156 if old != self.image:
157 self.update_cover(force=True)
159 # Marked as bulk because we commit after importing episodes.
160 db.save_channel(self, bulk=True)
162 # Remove old episodes before adding the new ones. This helps
163 # deal with hyperactive channels, such as TV news, when there
164 # can be more new episodes than the user wants in the list.
165 # By cleaning up old episodes before receiving the new ones we
166 # ensure that the user doesn't miss any.
167 db.purge(gl.config.max_episodes_per_feed, self.id)
169 # Load all episodes to update them properly.
170 existing = self.get_all_episodes()
172 # We can limit the maximum number of entries that gPodder will parse
173 # via the "max_episodes_per_feed" configuration option.
174 if len(c.entries) > gl.config.max_episodes_per_feed:
175 log('Limiting number of episodes for %s to %d', self.title, gl.config.max_episodes_per_feed)
176 for entry in c.entries[:min(gl.config.max_episodes_per_feed, len(c.entries))]:
177 episode = None
179 try:
180 episode = podcastItem.from_feedparser_entry(entry, self)
181 except Exception, e:
182 log('Cannot instantiate episode "%s": %s. Skipping.', entry.get('id', '(no id available)'), e, sender=self, traceback=True)
184 if episode:
185 self.count_new += 1
187 for ex in existing:
188 if ex.guid == episode.guid:
189 for k in ('title', 'title', 'description', 'link', 'pubDate'):
190 setattr(ex, k, getattr(episode, k))
191 self.count_new -= 1
192 episode = ex
194 episode.save(bulk=True)
196 return True
198 def update_cover(self, force=False):
199 if self.cover_file is None or not os.path.exists(self.cover_file) or force:
200 if self.image is not None:
201 services.cover_downloader.request_cover(self)
203 def delete(self):
204 db.delete_channel(self)
206 def save(self):
207 db.save_channel(self)
209 def stat(self, state=None, is_played=None, is_locked=None):
210 return db.get_channel_stat(self.url, state=state, is_played=is_played, is_locked=is_locked)
212 def __init__( self, url = "", title = "", link = "", description = ""):
213 self.id = None
214 self.url = url
215 self.title = title
216 self.link = link
217 self.description = description
218 self.image = None
219 self.pubDate = 0
220 self.parse_error = None
221 self.newest_pubdate_cached = None
222 self.update_flag = False # channel is updating or to be updated
223 self.iter = None
225 # should this channel be synced to devices? (ex: iPod)
226 self.sync_to_devices = True
227 # to which playlist should be synced
228 self.device_playlist_name = 'gPodder'
229 # if set, this overrides the channel-provided title
230 self.override_title = ''
231 self.username = ''
232 self.password = ''
234 self.last_modified = None
235 self.etag = None
237 self.save_dir_size = 0
238 self.__save_dir_size_set = False
240 self.count_downloaded = 0
241 self.count_new = 0
242 self.count_unplayed = 0
244 def request_save_dir_size(self):
245 if not self.__save_dir_size_set:
246 self.update_save_dir_size()
247 self.__save_dir_size_set = True
249 def update_save_dir_size(self):
250 self.save_dir_size = util.calculate_size(self.save_dir)
252 def get_filename( self):
253 """Return the MD5 sum of the channel URL"""
254 return md5.new( self.url).hexdigest()
256 filename = property(fget=get_filename)
258 def get_title( self):
259 if self.override_title:
260 return self.override_title
261 elif not self.__title.strip():
262 return self.url
263 else:
264 return self.__title
266 def set_title( self, value):
267 self.__title = value.strip()
269 title = property(fget=get_title,
270 fset=set_title)
272 def set_custom_title( self, custom_title):
273 custom_title = custom_title.strip()
275 if custom_title != self.__title:
276 self.override_title = custom_title
277 else:
278 self.override_title = ''
280 def get_downloaded_episodes(self):
281 return db.load_episodes(self, factory=lambda c: podcastItem.create_from_dict(c, self), state=db.STATE_DOWNLOADED)
283 def save_settings(self):
284 db.save_channel(self)
286 def get_new_episodes( self):
287 return [episode for episode in db.load_episodes(self, factory=lambda x: podcastItem.create_from_dict(x, self)) if episode.state == db.STATE_NORMAL and not episode.is_played]
289 def update_m3u_playlist(self):
290 if gl.config.create_m3u_playlists:
291 downloaded_episodes = self.get_downloaded_episodes()
292 fn = util.sanitize_filename(self.title)
293 if len(fn) == 0:
294 fn = os.path.basename(self.save_dir)
295 m3u_filename = os.path.join(gl.downloaddir, fn+'.m3u')
296 log('Writing playlist to %s', m3u_filename, sender=self)
297 f = open(m3u_filename, 'w')
298 f.write('#EXTM3U\n')
300 for episode in downloaded_episodes:
301 filename = episode.local_filename()
302 if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
303 filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
304 f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.cute_pubdate()+')\n')
305 f.write(filename+'\n')
306 f.close()
308 def addDownloadedItem(self, item):
309 log('addDownloadedItem(%s)', item.url)
311 if not item.was_downloaded():
312 item.mark_downloaded(save=True)
314 # Update metadata on file (if possible and wanted)
315 if gl.config.update_tags and libtagupdate.tagging_supported():
316 filename = item.local_filename()
317 try:
318 libtagupdate.update_metadata_on_file(filename, title=item.title, artist=self.title, genre='Podcast')
319 except Exception, e:
320 log('Error while calling update_metadata_on_file(): %s', e)
322 self.update_m3u_playlist()
324 if item.file_type() == 'torrent':
325 torrent_filename = item.local_filename()
326 destination_filename = util.torrent_filename( torrent_filename)
327 gl.invoke_torrent(item.url, torrent_filename, destination_filename)
329 def get_all_episodes(self):
330 return db.load_episodes(self, factory = lambda d: podcastItem.create_from_dict(d, self))
332 # not used anymore
333 def update_model( self):
334 self.update_save_dir_size()
335 model = self.tree_model
337 iter = model.get_iter_first()
338 while iter is not None:
339 self.iter_set_downloading_columns(model, iter)
340 iter = model.iter_next( iter)
342 @property
343 def tree_model( self):
344 log('Returning TreeModel for %s', self.url, sender = self)
345 return self.items_liststore()
347 def iter_set_downloading_columns( self, model, iter, episode=None):
348 global ICON_AUDIO_FILE, ICON_VIDEO_FILE, ICON_BITTORRENT
349 global ICON_DOWNLOADING, ICON_DELETED, ICON_NEW
351 if episode is None:
352 url = model.get_value( iter, 0)
353 episode = db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))
354 else:
355 url = episode.url
357 if gl.config.episode_list_descriptions:
358 icon_size = 32
359 else:
360 icon_size = 16
362 if services.download_status_manager.is_download_in_progress(url):
363 status_icon = util.get_tree_icon(ICON_DOWNLOADING, icon_cache=self.icon_cache, icon_size=icon_size)
364 else:
365 if episode.state == db.STATE_NORMAL:
366 if episode.is_played:
367 status_icon = None
368 else:
369 status_icon = util.get_tree_icon(ICON_NEW, icon_cache=self.icon_cache, icon_size=icon_size)
370 elif episode.was_downloaded():
371 missing = not episode.file_exists()
373 if missing:
374 log('Episode missing: %s (before drawing an icon)', episode.url, sender=self)
376 file_type = util.file_type_by_extension( model.get_value( iter, 9))
377 if file_type == 'audio':
378 status_icon = util.get_tree_icon(ICON_AUDIO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
379 elif file_type == 'video':
380 status_icon = util.get_tree_icon(ICON_VIDEO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
381 elif file_type == 'torrent':
382 status_icon = util.get_tree_icon(ICON_BITTORRENT, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
383 else:
384 status_icon = util.get_tree_icon('unknown', not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
385 elif episode.state == db.STATE_DELETED or episode.state == db.STATE_DOWNLOADED:
386 status_icon = util.get_tree_icon(ICON_DELETED, icon_cache=self.icon_cache, icon_size=icon_size)
387 else:
388 log('Warning: Cannot determine status icon.', sender=self)
389 status_icon = None
391 model.set( iter, 4, status_icon)
393 def items_liststore( self):
395 Return a gtk.ListStore containing episodes for this channel
397 new_model = gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING,
398 gobject.TYPE_BOOLEAN, gtk.gdk.Pixbuf, gobject.TYPE_STRING, gobject.TYPE_STRING,
399 gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING )
401 for item in self.get_all_episodes():
402 description = item.title_and_description
404 if item.length:
405 filelength = gl.format_filesize(item.length, 1)
406 else:
407 filelength = None
409 new_iter = new_model.append((item.url, item.title, filelength,
410 True, None, item.cute_pubdate(), description, util.remove_html_tags(item.description),
411 item.local_filename(), item.extension()))
412 self.iter_set_downloading_columns( new_model, new_iter, episode=item)
414 self.update_save_dir_size()
415 return new_model
417 def find_episode( self, url):
418 return db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))
420 def get_save_dir(self):
421 save_dir = os.path.join(gl.downloaddir, self.filename, '')
423 # Create save_dir if it does not yet exist
424 if not util.make_directory( save_dir):
425 log( 'Could not create save_dir: %s', save_dir, sender = self)
427 return save_dir
429 save_dir = property(fget=get_save_dir)
431 def remove_downloaded( self):
432 shutil.rmtree( self.save_dir, True)
434 def get_index_file(self):
435 # gets index xml filename for downloaded channels list
436 return os.path.join( self.save_dir, 'index.xml')
438 index_file = property(fget=get_index_file)
440 def get_cover_file( self):
441 # gets cover filename for cover download cache
442 return os.path.join( self.save_dir, 'cover')
444 cover_file = property(fget=get_cover_file)
446 def delete_episode_by_url(self, url):
447 episode = db.load_episode(url, lambda c: podcastItem.create_from_dict(c, self))
449 if episode is not None:
450 util.delete_file(episode.local_filename())
451 episode.set_state(db.STATE_DELETED)
453 self.update_m3u_playlist()
class podcastItem(object):
    """holds data for one object in a channel"""

    @staticmethod
    def load(url, channel):
        """Load one episode by URL from the database."""
        e = podcastItem(channel)
        d = db.load_episode(url)
        if d is not None:
            for k, v in d.iteritems():
                if hasattr(e, k):
                    setattr(e, k, v)
        return e

    @staticmethod
    def from_feedparser_entry(entry, channel):
        """Build an episode from a feedparser entry.

        Returns None when the entry has neither a downloadable
        enclosure nor a link that looks like a media file.
        """
        episode = podcastItem(channel)

        episode.title = entry.get('title', util.get_first_line(util.remove_html_tags(entry.get('summary', ''))))
        episode.link = entry.get('link', '')
        episode.description = entry.get('summary', entry.get('link', entry.get('title', '')))
        episode.guid = entry.get('id', '')
        if entry.get('updated_parsed', None):
            episode.pubDate = rfc822.mktime_tz(entry.updated_parsed+(0,))

        if episode.title == '':
            log('Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender=episode)

        enclosure = None
        if hasattr(entry, 'enclosures') and len(entry.enclosures) > 0:
            enclosure = entry.enclosures[0]
            if len(entry.enclosures) > 1:
                # Prefer the first audio/video enclosure with a usable URL
                for e in entry.enclosures:
                    if hasattr(e, 'href') and hasattr(e, 'length') and hasattr(e, 'type') and (e.type.startswith('audio/') or e.type.startswith('video/')):
                        if util.normalize_feed_url(e.href) is not None:
                            log('Selected enclosure: %s', e.href, sender=episode)
                            enclosure = e
                            break
            episode.url = util.normalize_feed_url(enclosure.get('href', ''))
        elif hasattr(entry, 'link'):
            (filename, extension) = util.filename_from_url(entry.link)
            if extension == '' and hasattr(entry, 'type'):
                # FIX: the original referenced the stale loop variable
                # "e" (undefined in this branch); the mime type belongs
                # to the entry itself.
                extension = util.extension_from_mimetype(entry.type)
            file_type = util.file_type_by_extension(extension)
            if file_type is not None:
                log('Adding episode with link to file type "%s".', file_type, sender=episode)
                episode.url = entry.link

        if not episode.url:
            # This item in the feed has no downloadable enclosure
            return None

        if not episode.pubDate:
            # Fall back to server-provided metadata for the date
            metainfo = util.get_episode_info_from_url(episode.url)
            if 'pubdate' in metainfo:
                try:
                    episode.pubDate = int(float(metainfo['pubdate']))
                except Exception:
                    log('Cannot convert pubDate "%s" in from_feedparser_entry.', str(metainfo['pubdate']), traceback=True)

        if hasattr(enclosure, 'length'):
            try:
                episode.length = int(enclosure.length)
            except Exception:
                # Feeds sometimes carry junk in the length attribute
                episode.length = -1

        if hasattr(enclosure, 'type'):
            episode.mimetype = enclosure.type

        if episode.title == '':
            # Last resort: derive a title from the download URL
            (filename, extension) = os.path.splitext(os.path.basename(episode.url))
            episode.title = filename

        return episode

    def __init__(self, channel):
        # Used by Storage for faster saving
        self.id = None
        self.url = ''
        self.title = ''
        self.length = 0
        self.mimetype = 'application/octet-stream'
        self.guid = ''
        self.description = ''
        self.link = ''
        self.channel = channel
        self.pubDate = None

        self.state = db.STATE_NORMAL
        self.is_played = False
        self.is_locked = False

    def save(self, bulk=False):
        """Persist the episode; promote to DOWNLOADED if the file exists."""
        if self.state != db.STATE_DOWNLOADED and self.file_exists():
            self.state = db.STATE_DOWNLOADED
        db.save_episode(self, bulk=bulk)

    def set_state(self, state):
        """Set the episode state and write all flags to the database."""
        self.state = state
        db.mark_episode(self.url, state=self.state, is_played=self.is_played, is_locked=self.is_locked)

    def mark(self, state=None, is_played=None, is_locked=None):
        """Update only the given flags, both here and in the database."""
        if state is not None:
            self.state = state
        if is_played is not None:
            self.is_played = is_played
        if is_locked is not None:
            self.is_locked = is_locked
        db.mark_episode(self.url, state=state, is_played=is_played, is_locked=is_locked)

    def mark_downloaded(self, save=False):
        """Mark as downloaded and unplayed; optionally persist at once."""
        self.state = db.STATE_DOWNLOADED
        self.is_played = False
        if save:
            self.save()

    @staticmethod
    def create_from_dict(d, channel):
        """Build a podcastItem from a database row dictionary."""
        e = podcastItem(channel)
        for key in d:
            if hasattr(e, key):
                setattr(e, key, d[key])
        return e

    @property
    def title_and_description(self):
        """
        Returns Pango markup for displaying in a TreeView, and
        disables the description when the config variable
        "episode_list_descriptions" is not set.
        """
        if gl.config.episode_list_descriptions:
            return '%s\n<small>%s</small>' % (saxutils.escape(self.title), saxutils.escape(self.one_line_description()))
        else:
            return saxutils.escape(self.title)

    def age_in_days(self):
        """Age of the downloaded file in days."""
        return util.file_age_in_days(self.local_filename())

    def is_old(self):
        """True if the file is older than the configured old-age limit."""
        return self.age_in_days() > gl.config.episode_old_age

    def get_age_string(self):
        return util.file_age_to_string(self.age_in_days())

    age_prop = property(fget=get_age_string)

    def one_line_description(self):
        """Return the description collapsed to a single plain-text line."""
        lines = util.remove_html_tags(self.description).strip().splitlines()
        if not lines or lines[0] == '':
            return _('No description available')
        else:
            return ' '.join((l.strip() for l in lines if l.strip() != ''))

    def delete_from_disk(self):
        """Best-effort removal of the downloaded file via the channel."""
        try:
            self.channel.delete_episode_by_url(self.url)
        except Exception:
            log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)

    def local_filename(self):
        """Return the local download path for this episode."""
        ext = self.extension()

        # For compatibility with already-downloaded episodes,
        # we accept md5 filenames if they are downloaded now.
        md5_filename = os.path.join(self.channel.save_dir, md5.new(self.url).hexdigest()+ext)
        if os.path.exists(md5_filename) or not gl.config.experimental_file_naming:
            return md5_filename

        # If the md5 filename does not exist,
        (episode, e) = util.filename_from_url(self.url)
        episode = util.sanitize_filename(episode) + ext

        # If the episode filename looks suspicious,
        # we still return the md5 filename to be on
        # the safe side of the fence ;)
        if len(episode) == 0 or episode.startswith('redirect.'):
            return md5_filename
        filename = os.path.join(self.channel.save_dir, episode)
        return filename

    def extension(self):
        """Return the file extension, falling back to the mime type."""
        (filename, ext) = util.filename_from_url(self.url)
        # if we can't detect the extension from the url fallback on the mimetype
        if ext == '' or util.file_type_by_extension(ext) is None:
            ext = util.extension_from_mimetype(self.mimetype)
        return ext

    def mark_new(self):
        """Reset the episode to the new (normal, unplayed) state."""
        self.state = db.STATE_NORMAL
        self.is_played = False
        db.mark_episode(self.url, state=self.state, is_played=self.is_played)

    def mark_old(self):
        """Mark the episode as played."""
        self.is_played = True
        db.mark_episode(self.url, is_played=True)

    def file_exists(self):
        return os.path.exists(self.local_filename())

    def was_downloaded(self, and_exists=False):
        """True if state is DOWNLOADED (and, optionally, file exists)."""
        if self.state != db.STATE_DOWNLOADED:
            return False
        if and_exists and not self.file_exists():
            return False
        return True

    def sync_filename(self):
        """Display name used when syncing to a device."""
        if gl.config.custom_sync_name_enabled:
            return util.object_string_formatter(gl.config.custom_sync_name, episode=self, channel=self.channel)
        else:
            return self.title

    def file_type(self):
        return util.file_type_by_extension(self.extension())

    @property
    def basename(self):
        return os.path.splitext(os.path.basename(self.url))[0]

    @property
    def published(self):
        """Publication date formatted as YYYYMMDD ('00000000' on error)."""
        try:
            return datetime.datetime.fromtimestamp(self.pubDate).strftime('%Y%m%d')
        except Exception:
            log('Cannot format pubDate for "%s".', self.title, sender=self)
            return '00000000'

    def cute_pubdate(self):
        """Human-readable publication date for the episode list."""
        result = util.format_date(self.pubDate)
        if result is None:
            return '(%s)' % _('unknown')
        else:
            return result

    pubdate_prop = property(fget=cute_pubdate)

    def calculate_filesize(self):
        """Update self.length from the on-disk file size (best-effort)."""
        try:
            self.length = os.path.getsize(self.local_filename())
        except Exception:
            log('Could not get filesize for %s.', self.url)

    def get_filesize_string(self):
        return gl.format_filesize(self.length)

    filesize_prop = property(fget=get_filesize_string)

    def get_channel_title(self):
        return self.channel.title

    channel_prop = property(fget=get_channel_title)

    def get_played_string(self):
        if not self.is_played:
            return _('Unplayed')

        return ''

    played_prop = property(fget=get_played_string)
def update_channel_model_by_iter(model, iter, channel, color_dict,
    cover_cache=None, max_width=0, max_height=0):
    # Refresh one row of the channel list model in place.
    # Columns written here: 0=url, 1=title, 2=markup description,
    # 3=count "pill" pixbuf, 5=cover pixbuf, 6=parse error text,
    # 7=pill visibility flag, 8=row color.

    count_downloaded = channel.stat(state=db.STATE_DOWNLOADED)
    count_new = channel.stat(state=db.STATE_NORMAL, is_played=False)
    count_unplayed = channel.stat(state=db.STATE_DOWNLOADED, is_played=False)

    channel.iter = iter
    model.set(iter, 0, channel.url)
    model.set(iter, 1, channel.title)

    title_markup = saxutils.escape(channel.title)
    description_markup = saxutils.escape(util.get_first_line(channel.description) or _('No description available'))
    d = []
    # Bold title when the channel has new episodes
    if count_new:
        d.append('<span weight="bold">')
    d.append(title_markup)
    if count_new:
        d.append('</span>')

    description = ''.join(d+['\n', '<small>', description_markup, '</small>'])
    model.set(iter, 2, description)

    if channel.parse_error is not None:
        model.set(iter, 6, channel.parse_error)
        color = color_dict['parse_error']
    else:
        color = color_dict['default']

    # "updating" color wins over parse-error/default
    if channel.update_flag:
        color = color_dict['updating']

    model.set(iter, 8, color)

    # Show the unplayed/downloaded count pill only when non-empty
    if count_unplayed > 0 or count_downloaded > 0:
        model.set(iter, 3, draw.draw_pill_pixbuf(str(count_unplayed), str(count_downloaded)))
        model.set(iter, 7, True)
    else:
        model.set(iter, 7, False)

    # Load the cover if we have it, but don't download
    # it if it's not available (to avoid blocking here)
    pixbuf = services.cover_downloader.get_cover(channel, avoid_downloading=True)
    new_pixbuf = None
    if pixbuf is not None:
        new_pixbuf = util.resize_pixbuf_keep_ratio(pixbuf, max_width, max_height, channel.url, cover_cache)
    model.set(iter, 5, new_pixbuf or pixbuf)
def channels_to_model(channels, color_dict, cover_cache=None, max_width=0, max_height=0):
    # Build the ListStore backing the channel list.  The column types
    # (str, str, str, Pixbuf, int, Pixbuf, str, bool, str) must stay in
    # sync with the column indices used by update_channel_model_by_iter().
    new_model = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf, int,
        gtk.gdk.Pixbuf, str, bool, str)

    for channel in channels:
        update_channel_model_by_iter(new_model, new_model.append(), channel,
            color_dict, cover_cache, max_width, max_height)

    return new_model
def load_channels():
    """Return the list of all subscribed channels from the database."""
    # create_from_dict is a staticmethod, so it can serve directly as
    # the row factory without a lambda wrapper.
    return db.load_channels(podcastChannel.create_from_dict)
def update_channels(callback_proc=None, callback_error=None, is_cancelled_cb=None):
    """Refresh every subscribed channel, reporting progress via callbacks.

    Returns the channel list; stops early (returning what was loaded)
    when is_cancelled_cb signals cancellation.
    """
    log('Updating channels....')

    channels = load_channels()
    total = len(channels)

    for position, channel in enumerate(channels):
        if is_cancelled_cb is not None and is_cancelled_cb():
            return channels
        if callback_proc is not None:
            callback_proc(position, total)
        channel.update()

    return channels
def save_channels(channels):
    """Export the channel list to the gPodder OPML file.

    Returns the exporter's success flag.
    """
    return opml.Exporter(gl.channel_opml_file).write(channels)
def can_restore_from_opml():
    """Return the OPML file path if it lists at least one channel.

    Returns None when the file is missing, unparsable or empty.
    """
    try:
        if len(opml.Importer(gl.channel_opml_file).items):
            return gl.channel_opml_file
    except Exception:
        # FIX: narrowed from a bare "except:" clause, which would also
        # swallow SystemExit and KeyboardInterrupt.
        return None
class LocalDBReader(object):
    """
    DEPRECATED - Only used for migration to SQLite
    """
    def __init__(self, url):
        # Feed URL of the channel whose LocalDB file is being read
        self.url = url

    def get_text(self, nodelist):
        # Concatenate the contents of all text nodes in a DOM node list
        return ''.join([node.data for node in nodelist if node.nodeType == node.TEXT_NODE])

    def get_text_by_first_node(self, element, name):
        # Text content of the first child element with the given tag name;
        # raises IndexError when no such element exists (callers rely on
        # this for the optional <mimetype> tag below)
        return self.get_text(element.getElementsByTagName(name)[0].childNodes)

    def get_episode_from_element(self, channel, element):
        # Build a podcastItem from one <item> DOM element
        episode = podcastItem(channel)
        episode.title = self.get_text_by_first_node(element, 'title')
        episode.description = self.get_text_by_first_node(element, 'description')
        episode.url = self.get_text_by_first_node(element, 'url')
        episode.link = self.get_text_by_first_node(element, 'link')
        episode.guid = self.get_text_by_first_node(element, 'guid')

        # Old LocalDB files may lack a guid; substitute url or link
        if not episode.guid:
            for k in ('url', 'link'):
                if getattr(episode, k) is not None:
                    episode.guid = getattr(episode, k)
                    log('Notice: episode has no guid, using %s', episode.guid)
                    break
        try:
            episode.pubDate = float(self.get_text_by_first_node(element, 'pubDate'))
        except:
            # Legacy files stored pubDate as an RFC 2822 string instead
            # of a float timestamp -- convert it via feedparser
            log('Looks like you have an old pubDate in your LocalDB -> converting it')
            episode.pubDate = self.get_text_by_first_node(element, 'pubDate')
            log('FYI: pubDate value is: "%s"', episode.pubDate, sender=self)
            pubdate = feedparser._parse_date(episode.pubDate)
            if pubdate is None:
                log('Error converting the old pubDate - sorry!', sender=self)
                episode.pubDate = 0
            else:
                log('PubDate converted successfully - yay!', sender=self)
                episode.pubDate = time.mktime(pubdate)
        try:
            episode.mimetype = self.get_text_by_first_node(element, 'mimetype')
        except:
            # <mimetype> is optional in old files; keep the default
            log('No mimetype info for %s', episode.url, sender=self)
        episode.calculate_filesize()
        return episode

    def load_and_clean(self, filename):
        """
        Clean-up a LocalDB XML file that could potentially contain
        "unbound prefix" XML elements (generated by the old print-based
        LocalDB code). The code removes those lines to make the new
        DOM parser happy.

        This should be removed in a future version.
        """
        lines = []
        for line in open(filename).read().split('\n'):
            if not line.startswith('<gpodder:info'):
                lines.append(line)

        return '\n'.join(lines)

    def read(self, filename):
        # Parse a LocalDB XML file and return its list of episodes
        doc = xml.dom.minidom.parseString(self.load_and_clean(filename))
        rss = doc.getElementsByTagName('rss')[0]

        channel_element = rss.getElementsByTagName('channel')[0]

        channel = podcastChannel(url=self.url)
        channel.title = self.get_text_by_first_node(channel_element, 'title')
        channel.description = self.get_text_by_first_node(channel_element, 'description')
        channel.link = self.get_text_by_first_node(channel_element, 'link')

        episodes = []
        for episode_element in rss.getElementsByTagName('item'):
            episode = self.get_episode_from_element(channel, episode_element)
            episodes.append(episode)

        return episodes