# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

#
# libpodcasts.py -- data classes for gpodder
# thomas perl <thp@perli.net> 20051029
#
# Contains code based on:
# liblocdbwriter.py (2006-01-09)
# liblocdbreader.py (2006-01-10)
#
import gtk
import gobject
import pango

import gpodder
from gpodder import util
from gpodder import opml
from gpodder import cache
from gpodder import services
from gpodder import draw
from gpodder import libtagupdate
from gpodder import dumbshelve

from gpodder.liblogger import log
from gpodder.libgpodder import gl
from gpodder.dbsqlite import db

import os.path
import os
import glob
import shutil
import sys
import urllib
import urlparse
import time
import datetime
import md5
import xml.dom.minidom
import feedparser

from xml.sax import saxutils

if gpodder.interface == gpodder.MAEMO:
    ICON_AUDIO_FILE = 'gnome-mime-audio-mp3'
    ICON_VIDEO_FILE = 'gnome-mime-video-mp4'
    ICON_BITTORRENT = 'qgn_toolb_browser_web'
    ICON_DOWNLOADING = 'qgn_toolb_messagin_moveto'
    ICON_DELETED = 'qgn_toolb_gene_deletebutton'
    ICON_NEW = 'qgn_list_gene_favor'
else:
    ICON_AUDIO_FILE = 'audio-x-generic'
    ICON_VIDEO_FILE = 'video-x-generic'
    ICON_BITTORRENT = 'applications-internet'
    ICON_DOWNLOADING = gtk.STOCK_GO_DOWN
    ICON_DELETED = gtk.STOCK_DELETE
    ICON_NEW = gtk.STOCK_ABOUT

class podcastChannel(object):
    """holds data for a complete channel"""
    SETTINGS = ('sync_to_devices', 'device_playlist_name', 'override_title', 'username', 'password')
    icon_cache = {}

    fc = cache.Cache()

    @classmethod
    def load(cls, url, create=True):
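        """Load the channel with the given URL from the database.

        If no channel is stored for that URL and create is True,
        fetch the feed, save the new channel and return it.
        """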
        if isinstance(url, unicode):
            url = url.encode('utf-8')

        tmp = db.load_channels(factory=lambda d: cls.create_from_dict(d), url=url)
        if len(tmp):
            return tmp[0]
        elif create:
            tmp = podcastChannel(url)
            tmp.update()
            tmp.save()
            db.force_last_new(tmp)
            return tmp

    @staticmethod
    def create_from_dict(d):
        c = podcastChannel()
        for key in d:
            if hasattr(c, key):
                setattr(c, key, d[key])
        return c

    def update(self):
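        """Fetch the feed and update this channel in the database.

        If the feed cache reports the feed as unchanged, nothing is
        done. Otherwise the channel metadata is refreshed and new
        episodes are parsed and saved.
        """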
        (updated, c) = self.fc.fetch(self.url, self)

        # If we have an old instance of this channel, and
        # feedcache says the feed hasn't changed, return old
        if not updated:
            log('Channel %s is up to date', self.url)
            return

        # Save etag and last-modified for later reuse
        if c.headers.get('etag'):
            self.etag = c.headers.get('etag')
        if c.headers.get('last-modified'):
            self.last_modified = c.headers.get('last-modified')

        self.parse_error = c.get('bozo_exception', None)

        if hasattr(c.feed, 'title'):
            self.title = c.feed.title
        else:
            self.title = self.url
        if hasattr(c.feed, 'link'):
            self.link = c.feed.link
        if hasattr(c.feed, 'subtitle'):
            self.description = util.remove_html_tags(c.feed.subtitle)

        if hasattr(c.feed, 'updated_parsed') and c.feed.updated_parsed is not None:
            self.pubDate = time.mktime(c.feed.updated_parsed)
        else:
            self.pubDate = time.time()
        if hasattr(c.feed, 'image'):
            if c.feed.image.href:
                self.image = c.feed.image.href

        # Marked as bulk because we commit after importing episodes.
        db.save_channel(self, bulk=True)

        # We can limit the maximum number of entries that gPodder will parse
        # via the "max_episodes_per_feed" configuration option.
        if len(c.entries) > gl.config.max_episodes_per_feed:
            log('Limiting number of episodes for %s to %d', self.title, gl.config.max_episodes_per_feed)

        for entry in c.entries[:min(gl.config.max_episodes_per_feed, len(c.entries))]:
            episode = None

            try:
                episode = podcastItem.from_feedparser_entry(entry, self)
            except Exception, e:
                log('Cannot instantiate episode "%s": %s. Skipping.', entry.get('id', '(no id available)'), e, sender=self, traceback=True)

            if episode:
                episode.save(bulk=True)

        # Now we can flush the updates.
        db.commit()

    def delete(self):
        db.delete_channel(self)

    def save(self):
        db.save_channel(self)

    def stat(self, state=None, is_played=None, is_locked=None):
        return db.get_channel_stat(self.url, state=state, is_played=is_played, is_locked=is_locked)

    def __init__(self, url='', title='', link='', description=''):
        self.id = None
        self.url = url
        self.title = title
        self.link = link
        self.description = util.remove_html_tags(description)
        self.image = None
        self.pubDate = 0
        self.parse_error = None
        self.newest_pubdate_cached = None

        # should this channel be synced to devices? (e.g. iPod)
        self.sync_to_devices = True
        # name of the device playlist to which episodes should be synced
        self.device_playlist_name = 'gPodder'
        # if set, this overrides the channel-provided title
        self.override_title = ''
        self.username = ''
        self.password = ''

        self.last_modified = None
        self.etag = None

        self.save_dir_size = 0
        self.__save_dir_size_set = False

        self.__tree_model = None

    def request_save_dir_size(self):
        if not self.__save_dir_size_set:
            self.update_save_dir_size()
        self.__save_dir_size_set = True

    def update_save_dir_size(self):
        self.save_dir_size = util.calculate_size(self.save_dir)

    def get_filename(self):
        """Return the MD5 sum of the channel URL"""
        return md5.new(self.url).hexdigest()
    filename = property(fget=get_filename)

    def get_title(self):
        if self.override_title:
            return self.override_title
        elif not self.__title.strip():
            return self.url
        else:
            return self.__title

    def set_title(self, value):
        self.__title = value.strip()

    title = property(fget=get_title,
                     fset=set_title)

    def set_custom_title(self, custom_title):
        custom_title = custom_title.strip()

        if custom_title != self.__title:
            self.override_title = custom_title
        else:
            self.override_title = ''

    def get_downloaded_episodes(self):
        return db.load_episodes(self, factory=lambda c: podcastItem.create_from_dict(c, self), state=db.STATE_DOWNLOADED)

    def save_settings(self):
        db.save_channel(self)

    def get_new_episodes(self):
        return [episode for episode in db.load_episodes(self, factory=lambda x: podcastItem.create_from_dict(x, self)) if episode.state == db.STATE_NORMAL and not episode.is_played]

    def update_m3u_playlist(self):
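        """Write an M3U playlist of this channel's downloaded episodes.

        Only active when the create_m3u_playlists option is enabled;
        the playlist is written to the download directory and named
        after the sanitized channel title.
        """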
        if gl.config.create_m3u_playlists:
            downloaded_episodes = self.get_downloaded_episodes()
            fn = util.sanitize_filename(self.title)
            if len(fn) == 0:
                fn = os.path.basename(self.save_dir)

            m3u_filename = os.path.join(gl.downloaddir, fn+'.m3u')
            log('Writing playlist to %s', m3u_filename, sender=self)
            f = open(m3u_filename, 'w')
            f.write('#EXTM3U\n')

            for episode in downloaded_episodes:
                filename = episode.local_filename()
                if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
                    filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
                f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.cute_pubdate()+')\n')
                f.write(filename+'\n')

            f.close()

    def addDownloadedItem(self, item):
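        """Mark a freshly downloaded episode as downloaded.

        Also updates the file's tags (if enabled), refreshes the M3U
        playlist and hands .torrent files over to gl.invoke_torrent().
        """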
        log('addDownloadedItem(%s)', item.url)

        if not item.was_downloaded():
            item.mark(is_played=False, state=db.STATE_DOWNLOADED)

            # Update metadata on file (if possible and wanted)
            if gl.config.update_tags and libtagupdate.tagging_supported():
                filename = item.local_filename()
                try:
                    libtagupdate.update_metadata_on_file(filename, title=item.title, artist=self.title)
                except Exception, e:
                    log('Error while calling update_metadata_on_file(): %s', e)

            self.update_m3u_playlist()

            if item.file_type() == 'torrent':
                torrent_filename = item.local_filename()
                destination_filename = util.torrent_filename(torrent_filename)
                gl.invoke_torrent(item.url, torrent_filename, destination_filename)

    def get_all_episodes(self):
        return db.load_episodes(self, factory=lambda d: podcastItem.create_from_dict(d, self), limit=gl.config.max_episodes_per_feed)

    def force_update_tree_model(self):
        self.__tree_model = None

    def update_model(self):
        self.update_save_dir_size()

        iter = self.tree_model.get_iter_first()
        while iter is not None:
            self.iter_set_downloading_columns(self.tree_model, iter)
            iter = self.tree_model.iter_next(iter)

    @property
    def tree_model(self):
        if not self.__tree_model:
            log('Generating TreeModel for %s', self.url, sender=self)
            self.__tree_model = self.items_liststore()

        return self.__tree_model

    def iter_set_downloading_columns(self, model, iter):
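        """Refresh the status icon (column 4) of one episode row.

        The icon reflects download progress, played/locked state and
        whether the downloaded file still exists; the episode state in
        the database is corrected if the file has appeared or vanished.
        """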
        global ICON_AUDIO_FILE, ICON_VIDEO_FILE, ICON_BITTORRENT
        global ICON_DOWNLOADING, ICON_DELETED, ICON_NEW

        url = model.get_value(iter, 0)
        episode = db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))

        if gl.config.episode_list_descriptions:
            icon_size = 32
        else:
            icon_size = 16

        if services.download_status_manager.is_download_in_progress(url):
            status_icon = util.get_tree_icon(ICON_DOWNLOADING, icon_cache=self.icon_cache, icon_size=icon_size)
        else:
            if episode.state != db.STATE_DOWNLOADED and episode.file_exists():
                episode.mark(state=db.STATE_DOWNLOADED)
                log('Resurrected episode %s', episode.guid)
            elif episode.state == db.STATE_DOWNLOADED and not episode.file_exists():
                episode.mark(state=db.STATE_DELETED)
                log('Buried episode %s', episode.guid)

            if episode.state == db.STATE_NORMAL:
                if episode.is_played:
                    status_icon = None
                else:
                    status_icon = util.get_tree_icon(ICON_NEW, icon_cache=self.icon_cache, icon_size=icon_size)
            elif episode.was_downloaded(and_exists=True):
                missing = not episode.file_exists()

                if missing:
                    log('Episode missing: %s (before drawing an icon)', episode.url, sender=self)

                file_type = util.file_type_by_extension(model.get_value(iter, 9))
                if file_type == 'audio':
                    status_icon = util.get_tree_icon(ICON_AUDIO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
                elif file_type == 'video':
                    status_icon = util.get_tree_icon(ICON_VIDEO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
                elif file_type == 'torrent':
                    status_icon = util.get_tree_icon(ICON_BITTORRENT, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
                else:
                    status_icon = util.get_tree_icon('unknown', not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
            elif episode.state == db.STATE_DELETED or episode.state == db.STATE_DOWNLOADED:
                status_icon = util.get_tree_icon(ICON_DELETED, icon_cache=self.icon_cache, icon_size=icon_size)
            else:
                log('Warning: Cannot determine status icon.', sender=self)
                status_icon = None

        model.set(iter, 4, status_icon)

    def items_liststore(self):
        """
        Return a gtk.ListStore containing episodes for this channel
        """
        new_model = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING,
            gobject.TYPE_BOOLEAN, gtk.gdk.Pixbuf, gobject.TYPE_STRING, gobject.TYPE_STRING,
            gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING)

        for item in self.get_all_episodes():
            if gl.config.episode_list_descriptions:
                description = '%s\n<small>%s</small>' % (saxutils.escape(item.title), saxutils.escape(item.one_line_description()))
            else:
                description = saxutils.escape(item.title)

            if item.length:
                filelength = gl.format_filesize(item.length, 1)
            else:
                filelength = None

            new_iter = new_model.append((item.url, item.title, filelength,
                True, None, item.cute_pubdate(), description, item.description,
                item.local_filename(), item.extension()))
            self.iter_set_downloading_columns(new_model, new_iter)

        self.update_save_dir_size()
        return new_model

    def find_episode(self, url):
        return db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))

    def get_save_dir(self):
        save_dir = os.path.join(gl.downloaddir, self.filename, '')

        # Create save_dir if it does not yet exist
        if not util.make_directory(save_dir):
            log('Could not create save_dir: %s', save_dir, sender=self)

        return save_dir

    save_dir = property(fget=get_save_dir)

    def remove_downloaded(self):
        shutil.rmtree(self.save_dir, True)

    def get_index_file(self):
        # gets index xml filename for downloaded channels list
        return os.path.join(self.save_dir, 'index.xml')

    index_file = property(fget=get_index_file)

    def get_cover_file(self):
        # gets cover filename for cover download cache
        return os.path.join(self.save_dir, 'cover')

    cover_file = property(fget=get_cover_file)

    def delete_episode_by_url(self, url):
        episode = db.load_episode(url, lambda c: podcastItem.create_from_dict(c, self))

        if episode is not None:
            util.delete_file(episode.local_filename())
            episode.set_state(db.STATE_DELETED)

        self.update_m3u_playlist()

class podcastItem(object):
    """holds data for one episode in a channel"""

    @staticmethod
    def load(url, channel):
        e = podcastItem(channel)
        d = db.load_episode(url)
        if d is not None:
            for k, v in d.iteritems():
                if hasattr(e, k):
                    setattr(e, k, v)
        return e

    @staticmethod
    def from_feedparser_entry(entry, channel):
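        """Create a podcastItem from a feedparser entry.

        Uses the first enclosure, preferring an audio/video one when
        several are present, and falls back to the entry link if it
        points to a known file type. Returns None when no downloadable
        URL can be found.
        """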
        episode = podcastItem(channel)

        episode.title = entry.get('title', util.get_first_line(util.remove_html_tags(entry.get('summary', ''))))
        episode.link = entry.get('link', '')
        episode.description = util.remove_html_tags(entry.get('summary', entry.get('link', entry.get('title', ''))))
        episode.guid = entry.get('id', '')
        if entry.get('updated_parsed', None):
            episode.pubDate = time.mktime(entry.updated_parsed)

        if episode.title == '':
            log('Warning: Episode has no title, adding anyway (the feed is probably buggy).', sender=episode)

        enclosure = None
        if hasattr(entry, 'enclosures') and len(entry.enclosures) > 0:
            enclosure = entry.enclosures[0]
            if len(entry.enclosures) > 1:
                for e in entry.enclosures:
                    if hasattr(e, 'href') and hasattr(e, 'length') and hasattr(e, 'type') and (e.type.startswith('audio/') or e.type.startswith('video/')):
                        if util.normalize_feed_url(e.href) is not None:
                            log('Selected enclosure: %s', e.href, sender=episode)
                            enclosure = e
                            break
            episode.url = util.normalize_feed_url(enclosure.get('href', ''))
        elif hasattr(entry, 'link'):
            (filename, extension) = util.filename_from_url(entry.link)
            if extension == '' and hasattr(entry, 'type'):
                extension = util.extension_from_mimetype(entry.type)
            file_type = util.file_type_by_extension(extension)
            if file_type is not None:
                log('Adding episode with link to file type "%s".', file_type, sender=episode)
                episode.url = entry.link

        if not episode.url:
            # This item in the feed has no downloadable enclosure
            return None

        if not episode.pubDate:
            metainfo = util.get_episode_info_from_url(episode.url)
            if 'pubdate' in metainfo:
                try:
                    episode.pubDate = int(float(metainfo['pubdate']))
                except:
                    log('Cannot convert pubDate "%s" in from_feedparser_entry.', str(metainfo['pubdate']), traceback=True)

        if hasattr(enclosure, 'length'):
            try:
                episode.length = int(enclosure.length)
            except:
                episode.length = -1

        if hasattr(enclosure, 'type'):
            episode.mimetype = enclosure.type

        if episode.title == '':
            (filename, extension) = os.path.splitext(os.path.basename(episode.url))
            episode.title = filename

        return episode

    def __init__(self, channel):
        # Used by Storage for faster saving
        self.id = None
        self.url = ''
        self.title = ''
        self.length = 0
        self.mimetype = 'application/octet-stream'
        self.guid = ''
        self.description = ''
        self.link = ''
        self.channel = channel
        self.pubDate = None

        self.state = db.STATE_NORMAL
        self.is_played = False
        self.is_locked = False

    def save(self, bulk=False):
        if self.state != db.STATE_DOWNLOADED and self.file_exists():
            self.state = db.STATE_DOWNLOADED
        db.save_episode(self, bulk=bulk)

    def set_state(self, state):
        self.state = state
        db.mark_episode(self.url, state=self.state, is_played=self.is_played, is_locked=self.is_locked)

    def mark(self, state=None, is_played=None, is_locked=None):
        if state is not None:
            self.state = state
        if is_played is not None:
            self.is_played = is_played
        if is_locked is not None:
            self.is_locked = is_locked
        db.mark_episode(self.url, state=state, is_played=is_played, is_locked=is_locked)

    @staticmethod
    def create_from_dict(d, channel):
        e = podcastItem(channel)
        for key in d:
            if hasattr(e, key):
                setattr(e, key, d[key])
        return e

    def age_in_days(self):
        return util.file_age_in_days(self.local_filename())

    def is_old(self):
        return self.age_in_days() > gl.config.episode_old_age

    def get_age_string(self):
        return util.file_age_to_string(self.age_in_days())

    age_prop = property(fget=get_age_string)

    def one_line_description(self):
        lines = self.description.strip().splitlines()
        if not lines or lines[0] == '':
            return _('No description available')
        else:
            return ' '.join((l.strip() for l in lines if l.strip() != ''))

    def delete_from_disk(self):
        try:
            self.channel.delete_episode_by_url(self.url)
        except:
            log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)

    def local_filename(self):
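        """Return the local download path for this episode.

        By default this is the MD5 hash of the episode URL plus the
        file extension; with experimental_file_naming enabled, a
        sanitized filename derived from the URL is used instead,
        unless the MD5-named file already exists.
        """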
        ext = self.extension()

        # For compatibility with already-downloaded episodes, keep the
        # MD5-based filename if such a file already exists (or if
        # experimental file naming is disabled).
        md5_filename = os.path.join(self.channel.save_dir, md5.new(self.url).hexdigest()+ext)
        if os.path.exists(md5_filename) or not gl.config.experimental_file_naming:
            return md5_filename

        # The MD5-named file does not exist, so derive a nicer
        # filename from the episode URL instead.
        (episode, e) = util.filename_from_url(self.url)
        episode = util.sanitize_filename(episode) + ext

        # If the episode filename looks suspicious,
        # we still return the md5 filename to be on
        # the safe side of the fence ;)
        if len(episode) == 0 or episode.startswith('redirect.'):
            return md5_filename

        filename = os.path.join(self.channel.save_dir, episode)
        return filename

    def extension(self):
        (filename, ext) = util.filename_from_url(self.url)
        # If we can't detect the extension from the URL, fall back to the mimetype
        if ext == '' or util.file_type_by_extension(ext) is None:
            ext = util.extension_from_mimetype(self.mimetype)
            #log('Getting extension from mimetype for: %s (mimetype: %s)' % (self.title, ext), sender=self)
        return ext

    def mark_new(self):
        self.state = db.STATE_NORMAL
        self.is_played = False
        db.mark_episode(self.url, state=self.state, is_played=self.is_played)

    def mark_old(self):
        self.is_played = True
        db.mark_episode(self.url, is_played=True)

    def file_exists(self):
        return os.path.exists(self.local_filename())

    def was_downloaded(self, and_exists=False):
        if self.state != db.STATE_DOWNLOADED:
            return False
        if and_exists and not self.file_exists():
            return False
        return True

    def sync_filename(self):
        if gl.config.custom_sync_name_enabled:
            return util.object_string_formatter(gl.config.custom_sync_name, episode=self, channel=self.channel)
        else:
            return self.title

    def file_type(self):
        return util.file_type_by_extension(self.extension())

    @property
    def basename(self):
        return os.path.splitext(os.path.basename(self.url))[0]

    @property
    def published(self):
        try:
            return datetime.datetime.fromtimestamp(self.pubDate).strftime('%Y%m%d')
        except:
            log('Cannot format pubDate for "%s".', self.title, sender=self)
            return '00000000'

    def cute_pubdate(self):
        result = util.format_date(self.pubDate)
        if result is None:
            return '(%s)' % _('unknown')
        else:
            return result

    pubdate_prop = property(fget=cute_pubdate)

    def calculate_filesize(self):
        try:
            self.length = os.path.getsize(self.local_filename())
        except:
            log('Could not get filesize for %s.', self.url)

    def get_filesize_string(self):
        return gl.format_filesize(self.length)

    filesize_prop = property(fget=get_filesize_string)

    def get_channel_title(self):
        return self.channel.title

    channel_prop = property(fget=get_channel_title)

    def get_played_string(self):
        if not self.is_played:
            return _('Unplayed')

        return ''

    played_prop = property(fget=get_played_string)

def channels_to_model(channels, cover_cache=None, max_width=0, max_height=0):
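    """Build a gtk.ListStore for the channel list view.

    Each row holds the channel URL, title, a markup description, a
    "pill" pixbuf with the unplayed/downloaded counts, the parse error
    (if any) and a boolean that is set when the pill pixbuf is present.
    """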
    new_model = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf, int, gtk.gdk.Pixbuf, str, bool)

    for channel in channels:
        count_downloaded = channel.stat(state=db.STATE_DOWNLOADED)
        count_new = channel.stat(state=db.STATE_NORMAL, is_played=False)
        count_unplayed = channel.stat(state=db.STATE_DOWNLOADED, is_played=False)

        new_iter = new_model.append()
        new_model.set(new_iter, 0, channel.url)
        new_model.set(new_iter, 1, channel.title)

        title_markup = saxutils.escape(channel.title)
        description_markup = saxutils.escape(util.get_first_line(channel.description) or _('No description available'))
        d = []
        if count_new:
            d.append('<span weight="bold">')
        d.append(title_markup)
        if count_new:
            d.append('</span>')

        description = ''.join(d+['\n', '<small>', description_markup, '</small>'])
        if channel.parse_error is not None:
            description = ''.join(['<span foreground="#ff0000">', description, '</span>'])
            new_model.set(new_iter, 6, channel.parse_error)

        new_model.set(new_iter, 2, description)

        if count_unplayed > 0 or count_downloaded > 0:
            new_model.set(new_iter, 3, draw.draw_pill_pixbuf(str(count_unplayed), str(count_downloaded)))
            new_model.set(new_iter, 7, True)
        else:
            new_model.set(new_iter, 7, False)

        # Load the cover if we have it, but don't download
        # it if it's not available (to avoid blocking here)
        #pixbuf = services.cover_downloader.get_cover(channel, avoid_downloading=True)
        #new_pixbuf = None
        #if pixbuf is not None:
        #    new_pixbuf = util.resize_pixbuf_keep_ratio(pixbuf, max_width, max_height, channel.url, cover_cache)
        #new_model.set(new_iter, 5, new_pixbuf or pixbuf)

    return new_model

def load_channels():
    return db.load_channels(lambda d: podcastChannel.create_from_dict(d))

def update_channels(callback_proc=None, callback_error=None, is_cancelled_cb=None):
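    """Update all channels, one after another.

    callback_proc is called with (index, total) before each channel is
    updated, and is_cancelled_cb can abort the loop early. Returns the
    list of channels.
    """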
    log('Updating channels...')

    channels = load_channels()
    count = 0

    for channel in channels:
        if is_cancelled_cb is not None and is_cancelled_cb():
            return channels
        callback_proc and callback_proc(count, len(channels))
        channel.update()
        count += 1

    return channels

def save_channels(channels):
    exporter = opml.Exporter(gl.channel_opml_file)
    return exporter.write(channels)

def can_restore_from_opml():
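    """Return the OPML file path if it contains any channels.

    Returns None if the file is missing, unreadable or empty.
    """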
    try:
        if len(opml.Importer(gl.channel_opml_file).items):
            return gl.channel_opml_file
    except:
        return None

class LocalDBReader(object):
    """
    DEPRECATED - Only used for migration to SQLite
    """
    def __init__(self, url):
        self.url = url

    def get_text(self, nodelist):
        return ''.join([node.data for node in nodelist if node.nodeType == node.TEXT_NODE])

    def get_text_by_first_node(self, element, name):
        return self.get_text(element.getElementsByTagName(name)[0].childNodes)

    def get_episode_from_element(self, channel, element):
        episode = podcastItem(channel)
        episode.title = self.get_text_by_first_node(element, 'title')
        episode.description = self.get_text_by_first_node(element, 'description')
        episode.url = self.get_text_by_first_node(element, 'url')
        episode.link = self.get_text_by_first_node(element, 'link')
        episode.guid = self.get_text_by_first_node(element, 'guid')

        if not episode.guid:
            for k in ('url', 'link'):
                if getattr(episode, k) is not None:
                    episode.guid = getattr(episode, k)
                    log('Notice: episode has no guid, using %s', episode.guid)
                    break

        try:
            episode.pubDate = float(self.get_text_by_first_node(element, 'pubDate'))
        except:
            log('Looks like you have an old pubDate in your LocalDB -> converting it')
            episode.pubDate = self.get_text_by_first_node(element, 'pubDate')
            log('FYI: pubDate value is: "%s"', episode.pubDate, sender=self)

            pubdate = feedparser._parse_date(episode.pubDate)
            if pubdate is None:
                log('Error converting the old pubDate - sorry!', sender=self)
                episode.pubDate = 0
            else:
                log('PubDate converted successfully - yay!', sender=self)
                episode.pubDate = time.mktime(pubdate)

        try:
            episode.mimetype = self.get_text_by_first_node(element, 'mimetype')
        except:
            log('No mimetype info for %s', episode.url, sender=self)

        episode.calculate_filesize()
        return episode

    def load_and_clean(self, filename):
        """
        Clean up a LocalDB XML file that could potentially contain
        "unbound prefix" XML elements (generated by the old print-based
        LocalDB code). The code removes those lines to make the new
        DOM parser happy.

        This should be removed in a future version.
        """
        lines = []
        for line in open(filename).read().split('\n'):
            if not line.startswith('<gpodder:info'):
                lines.append(line)

        return '\n'.join(lines)

    def read(self, filename):
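        """Parse a LocalDB XML file and return its list of episodes.

        Channel metadata is read from the <channel> element and every
        <item> element is converted into a podcastItem.
        """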
        doc = xml.dom.minidom.parseString(self.load_and_clean(filename))
        rss = doc.getElementsByTagName('rss')[0]

        channel_element = rss.getElementsByTagName('channel')[0]

        channel = podcastChannel(url=self.url)
        channel.title = self.get_text_by_first_node(channel_element, 'title')
        channel.description = self.get_text_by_first_node(channel_element, 'description')
        channel.link = self.get_text_by_first_node(channel_element, 'link')

        episodes = []
        for episode_element in rss.getElementsByTagName('item'):
            episode = self.get_episode_from_element(channel, episode_element)
            episodes.append(episode)

        return episodes