Fixed the YouTube support.
[gpodder.git] / src / gpodder / libpodcasts.py
blob14e8eb8fc4aef5610008d64d8d3d25b0200151f4
1 # -*- coding: utf-8 -*-
3 # gPodder - A media aggregator and podcast client
4 # Copyright (c) 2005-2009 Thomas Perl and the gPodder Team
6 # gPodder is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 3 of the License, or
9 # (at your option) any later version.
11 # gPodder is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
22 # libpodcasts.py -- data classes for gpodder
23 # thomas perl <thp@perli.net> 20051029
25 # Contains code based on:
26 # liblocdbwriter.py (2006-01-09)
27 # liblocdbreader.py (2006-01-10)
30 import gtk
31 import gobject
32 import pango
34 import gpodder
35 from gpodder import util
36 from gpodder import opml
37 from gpodder import cache
38 from gpodder import services
39 from gpodder import draw
40 from gpodder import libtagupdate
41 from gpodder import dumbshelve
42 from gpodder import resolver
44 from gpodder.liblogger import log
45 from gpodder.libgpodder import gl
46 from gpodder.dbsqlite import db
48 import os.path
49 import os
50 import glob
51 import shutil
52 import sys
53 import urllib
54 import urlparse
55 import time
56 import datetime
57 import rfc822
58 import hashlib
59 import xml.dom.minidom
60 import feedparser
62 from xml.sax import saxutils
# Status-icon names for the episode list.  The Maemo platform uses the
# Hildon icon theme, the desktop uses GNOME icon names and GTK+ stock items.
if gpodder.interface == gpodder.MAEMO:
    ICON_AUDIO_FILE = 'gnome-mime-audio-mp3'
    ICON_VIDEO_FILE = 'gnome-mime-video-mp4'
    ICON_DOWNLOADING = 'qgn_toolb_messagin_moveto'
    ICON_DELETED = 'qgn_toolb_gene_deletebutton'
    ICON_NEW = 'qgn_list_gene_favor'
else:
    ICON_AUDIO_FILE = 'audio-x-generic'
    ICON_VIDEO_FILE = 'video-x-generic'
    ICON_DOWNLOADING = gtk.STOCK_GO_DOWN
    ICON_DELETED = gtk.STOCK_DELETE
    ICON_NEW = gtk.STOCK_ABOUT
79 class HTTPAuthError(Exception): pass
class podcastChannel(object):
    """holds data for a complete channel"""
    # Attribute names that are user-configurable per-channel settings
    SETTINGS = ('sync_to_devices', 'device_playlist_name','override_title','username','password')
    # Class-wide cache of rendered status icons (shared by all channels)
    icon_cache = {}

    # Shared feed cache object used by update() to fetch and parse feeds
    fc = cache.Cache()
88 @classmethod
89 def load(cls, url, create=True, authentication_tokens=None):
90 if isinstance(url, unicode):
91 url = url.encode('utf-8')
93 tmp = db.load_channels(factory=lambda d: cls.create_from_dict(d), url=url)
94 if len(tmp):
95 return tmp[0]
96 elif create:
97 tmp = podcastChannel(url)
98 if authentication_tokens is not None:
99 tmp.username = authentication_tokens[0]
100 tmp.password = authentication_tokens[1]
101 success, error_code = tmp.update()
102 if not success:
103 if error_code == 401:
104 raise HTTPAuthError
105 else:
106 return None
107 tmp.save()
108 db.force_last_new(tmp)
109 return tmp
111 @staticmethod
112 def create_from_dict(d):
113 c = podcastChannel()
114 for key in d:
115 if hasattr(c, key):
116 setattr(c, key, d[key])
117 return c
119 def update(self):
120 (updated, c) = self.fc.fetch(self.url, self)
122 if c is None:
123 return ( False, None )
125 if c.status == 401:
126 return ( False, 401 )
128 if self.url != c.url:
129 log('Updating channel URL from %s to %s', self.url, c.url, sender=self)
130 self.url = c.url
132 # update the cover if it's not there
133 self.update_cover()
135 # If we have an old instance of this channel, and
136 # feedcache says the feed hasn't changed, return old
137 if not updated:
138 log('Channel %s is up to date', self.url)
139 return ( True, None )
141 # Save etag and last-modified for later reuse
142 if c.headers.get('etag'):
143 self.etag = c.headers.get('etag')
144 if c.headers.get('last-modified'):
145 self.last_modified = c.headers.get('last-modified')
147 self.parse_error = c.get('bozo_exception', None)
149 if hasattr(c.feed, 'title'):
150 self.title = c.feed.title
151 # Start YouTube-specific title FIX
152 YOUTUBE_PREFIX = 'Videos uploaded by '
153 if self.title.startswith(YOUTUBE_PREFIX):
154 self.title = self.title[len(YOUTUBE_PREFIX):] + ' on YouTube'
155 # End YouTube-specific title FIX
156 else:
157 self.title = self.url
158 if hasattr( c.feed, 'link'):
159 self.link = c.feed.link
160 if hasattr( c.feed, 'subtitle'):
161 self.description = c.feed.subtitle
163 if hasattr(c.feed, 'updated_parsed') and c.feed.updated_parsed is not None:
164 self.pubDate = rfc822.mktime_tz(c.feed.updated_parsed+(0,))
165 else:
166 self.pubDate = time.time()
167 if hasattr( c.feed, 'image'):
168 if hasattr(c.feed.image, 'href') and c.feed.image.href:
169 old = self.image
170 self.image = c.feed.image.href
171 if old != self.image:
172 self.update_cover(force=True)
174 # Marked as bulk because we commit after importing episodes.
175 db.save_channel(self, bulk=True)
177 # Remove old episodes before adding the new ones. This helps
178 # deal with hyperactive channels, such as TV news, when there
179 # can be more new episodes than the user wants in the list.
180 # By cleaning up old episodes before receiving the new ones we
181 # ensure that the user doesn't miss any.
182 db.purge(gl.config.max_episodes_per_feed, self.id)
184 # Load all episodes to update them properly.
185 existing = self.get_all_episodes()
187 # We can limit the maximum number of entries that gPodder will parse
188 # via the "max_episodes_per_feed" configuration option.
189 if len(c.entries) > gl.config.max_episodes_per_feed:
190 log('Limiting number of episodes for %s to %d', self.title, gl.config.max_episodes_per_feed)
191 for entry in c.entries[:min(gl.config.max_episodes_per_feed, len(c.entries))]:
192 episode = None
194 try:
195 episode = podcastItem.from_feedparser_entry(entry, self)
196 except Exception, e:
197 log('Cannot instantiate episode "%s": %s. Skipping.', entry.get('id', '(no id available)'), e, sender=self, traceback=True)
199 if episode:
200 self.count_new += 1
202 for ex in existing:
203 if ex.guid == episode.guid:
204 for k in ('title', 'title', 'description', 'link', 'pubDate'):
205 setattr(ex, k, getattr(episode, k))
206 self.count_new -= 1
207 episode = ex
209 if not episode.length:
210 episode.length = resolver.get_real_episode_length(episode)
212 episode.save(bulk=True)
214 return ( True, None )
216 def update_cover(self, force=False):
217 if self.cover_file is None or not os.path.exists(self.cover_file) or force:
218 if self.image is not None:
219 services.cover_downloader.request_cover(self)
221 def delete(self):
222 db.delete_channel(self)
224 def save(self):
225 db.save_channel(self)
227 def stat(self, state=None, is_played=None, is_locked=None):
228 return db.get_channel_stat(self.url, state=state, is_played=is_played, is_locked=is_locked)
    def __init__( self, url = "", title = "", link = "", description = ""):
        # Database primary key (None until the channel is saved)
        self.id = None
        self.url = url
        self.title = title
        self.link = link
        self.description = description
        # URL of the channel's cover image (from the feed), if any
        self.image = None
        # Publication timestamp of the feed (seconds since epoch)
        self.pubDate = 0
        # Last feedparser "bozo" exception, if parsing had problems
        self.parse_error = None
        self.newest_pubdate_cached = None
        self.update_flag = False # channel is updating or to be updated
        # gtk.TreeIter of this channel's row in the channel list model
        self.iter = None

        # should this channel be synced to devices? (ex: iPod)
        self.sync_to_devices = True
        # to which playlist should be synced
        self.device_playlist_name = 'gPodder'
        # if set, this overrides the channel-provided title
        self.override_title = ''
        self.username = ''
        self.password = ''

        # HTTP caching headers remembered from the last fetch
        self.last_modified = None
        self.etag = None

        # Cached size of the download folder (see request_save_dir_size)
        self.save_dir_size = 0
        self.__save_dir_size_set = False

        self.count_downloaded = 0
        self.count_new = 0
        self.count_unplayed = 0

        # Default "locked" state inherited by new episodes of this channel
        self.channel_is_locked = False
264 def request_save_dir_size(self):
265 if not self.__save_dir_size_set:
266 self.update_save_dir_size()
267 self.__save_dir_size_set = True
269 def update_save_dir_size(self):
270 self.save_dir_size = util.calculate_size(self.save_dir)
272 def get_filename( self):
273 """Return the MD5 sum of the channel URL"""
274 return hashlib.md5( self.url).hexdigest()
276 filename = property(fget=get_filename)
278 def get_title( self):
279 if self.override_title:
280 return self.override_title
281 elif not self.__title.strip():
282 return self.url
283 else:
284 return self.__title
286 def set_title( self, value):
287 self.__title = value.strip()
289 title = property(fget=get_title,
290 fset=set_title)
292 def set_custom_title( self, custom_title):
293 custom_title = custom_title.strip()
295 if custom_title != self.__title:
296 self.override_title = custom_title
297 else:
298 self.override_title = ''
300 def get_downloaded_episodes(self):
301 return db.load_episodes(self, factory=lambda c: podcastItem.create_from_dict(c, self), state=db.STATE_DOWNLOADED)
303 def save_settings(self):
304 db.save_channel(self)
306 def get_new_episodes( self):
307 return [episode for episode in db.load_episodes(self, factory=lambda x: podcastItem.create_from_dict(x, self)) if episode.state == db.STATE_NORMAL and not episode.is_played and not services.download_status_manager.is_download_in_progress(episode.url)]
309 def update_m3u_playlist(self):
310 if gl.config.create_m3u_playlists:
311 downloaded_episodes = self.get_downloaded_episodes()
312 fn = util.sanitize_filename(self.title)
313 if len(fn) == 0:
314 fn = os.path.basename(self.save_dir)
315 m3u_filename = os.path.join(gl.downloaddir, fn+'.m3u')
316 log('Writing playlist to %s', m3u_filename, sender=self)
317 f = open(m3u_filename, 'w')
318 f.write('#EXTM3U\n')
320 for episode in downloaded_episodes:
321 filename = episode.local_filename()
322 if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
323 filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
324 f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.cute_pubdate()+')\n')
325 f.write(filename+'\n')
326 f.close()
328 def addDownloadedItem(self, item):
329 log('addDownloadedItem(%s)', item.url)
331 if not item.was_downloaded():
332 item.mark_downloaded(save=True)
334 # Update metadata on file (if possible and wanted)
335 if gl.config.update_tags and libtagupdate.tagging_supported():
336 filename = item.local_filename()
337 try:
338 libtagupdate.update_metadata_on_file(filename, title=item.title, artist=self.title, genre='Podcast')
339 except Exception, e:
340 log('Error while calling update_metadata_on_file(): %s', e)
342 self.update_m3u_playlist()
344 def get_all_episodes(self):
345 return db.load_episodes(self, factory = lambda d: podcastItem.create_from_dict(d, self))
    def iter_set_downloading_columns( self, model, iter, episode=None):
        """Update the status icon (column 4) of one episode-list row.

        If "episode" is not given, it is loaded from the database using
        the URL stored in column 0 of the row.
        """
        global ICON_AUDIO_FILE, ICON_VIDEO_FILE
        global ICON_DOWNLOADING, ICON_DELETED, ICON_NEW

        if episode is None:
            url = model.get_value( iter, 0)
            episode = db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))
        else:
            url = episode.url

        # Bigger icons when descriptions are shown (and always on Maemo)
        if gl.config.episode_list_descriptions or gpodder.interface == gpodder.MAEMO:
            icon_size = 32
        else:
            icon_size = 16

        if services.download_status_manager.is_download_in_progress(url):
            status_icon = util.get_tree_icon(ICON_DOWNLOADING, icon_cache=self.icon_cache, icon_size=icon_size)
        else:
            if episode.state == db.STATE_NORMAL:
                # New (unplayed) episodes get a marker; played ones get none
                if episode.is_played:
                    status_icon = None
                else:
                    status_icon = util.get_tree_icon(ICON_NEW, icon_cache=self.icon_cache, icon_size=icon_size)
            elif episode.was_downloaded():
                missing = not episode.file_exists()

                if missing:
                    log('Episode missing: %s (before drawing an icon)', episode.url, sender=self)

                # Column 9 holds the file extension; pick an icon by type
                file_type = util.file_type_by_extension( model.get_value( iter, 9))
                if file_type == 'audio':
                    status_icon = util.get_tree_icon(ICON_AUDIO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
                elif file_type == 'video':
                    status_icon = util.get_tree_icon(ICON_VIDEO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
                else:
                    status_icon = util.get_tree_icon('unknown', not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
            elif episode.state == db.STATE_DELETED or episode.state == db.STATE_DOWNLOADED:
                # NOTE(review): STATE_DOWNLOADED appears unreachable here,
                # since was_downloaded() above already matches it — confirm
                status_icon = util.get_tree_icon(ICON_DELETED, not episode.is_played, icon_cache=self.icon_cache, icon_size=icon_size)
            else:
                log('Warning: Cannot determine status icon.', sender=self)
                status_icon = None

        model.set( iter, 4, status_icon)
    def get_tree_model(self):
        """
        Return a gtk.ListStore containing episodes for this channel
        """
        # Columns: 0=url, 1=title, 2=size, 3=bool, 4=status icon,
        # 5=pubdate, 6=title+description markup, 7=plain description,
        # 8=local filename, 9=extension
        new_model = gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING,
            gobject.TYPE_BOOLEAN, gtk.gdk.Pixbuf, gobject.TYPE_STRING, gobject.TYPE_STRING,
            gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING )

        log('Returning TreeModel for %s', self.url, sender = self)
        urls = []
        for item in self.get_all_episodes():
            description = item.title_and_description

            # Only show a size for episodes with a known length
            if item.length > 0:
                filelength = gl.format_filesize(item.length, 1)
            else:
                filelength = None

            new_iter = new_model.append((item.url, item.title, filelength,
                True, None, item.cute_pubdate(), description, util.remove_html_tags(item.description),
                item.local_filename(), item.extension()))
            # Fill in the status icon (column 4) for the new row
            self.iter_set_downloading_columns( new_model, new_iter, episode=item)
            urls.append(item.url)

        self.update_save_dir_size()
        return (new_model, urls)
418 def find_episode( self, url):
419 return db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))
421 def get_save_dir(self):
422 save_dir = os.path.join(gl.downloaddir, self.filename, '')
424 # Create save_dir if it does not yet exist
425 if not util.make_directory( save_dir):
426 log( 'Could not create save_dir: %s', save_dir, sender = self)
428 return save_dir
430 save_dir = property(fget=get_save_dir)
432 def remove_downloaded( self):
433 shutil.rmtree( self.save_dir, True)
435 def get_index_file(self):
436 # gets index xml filename for downloaded channels list
437 return os.path.join( self.save_dir, 'index.xml')
439 index_file = property(fget=get_index_file)
441 def get_cover_file( self):
442 # gets cover filename for cover download cache
443 return os.path.join( self.save_dir, 'cover')
445 cover_file = property(fget=get_cover_file)
447 def delete_episode_by_url(self, url):
448 episode = db.load_episode(url, lambda c: podcastItem.create_from_dict(c, self))
450 if episode is not None:
451 util.delete_file(episode.local_filename())
452 episode.set_state(db.STATE_DELETED)
454 self.update_m3u_playlist()
class podcastItem(object):
    """holds data for one object in a channel"""
460 @staticmethod
461 def load(url, channel):
462 e = podcastItem(channel)
463 d = db.load_episode(url)
464 if d is not None:
465 for k, v in d.iteritems():
466 if hasattr(e, k):
467 setattr(e, k, v)
468 return e
470 @staticmethod
471 def from_feedparser_entry( entry, channel):
472 episode = podcastItem( channel)
474 episode.title = entry.get( 'title', util.get_first_line( util.remove_html_tags( entry.get( 'summary', ''))))
475 episode.link = entry.get( 'link', '')
476 episode.description = entry.get( 'summary', entry.get( 'link', entry.get( 'title', '')))
477 episode.guid = entry.get( 'id', '')
478 if entry.get( 'updated_parsed', None):
479 episode.pubDate = rfc822.mktime_tz(entry.updated_parsed+(0,))
481 if episode.title == '':
482 log( 'Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender = episode)
484 enclosure = None
485 if hasattr(entry, 'enclosures') and len(entry.enclosures) > 0:
486 enclosure = entry.enclosures[0]
487 if len(entry.enclosures) > 1:
488 for e in entry.enclosures:
489 if hasattr( e, 'href') and hasattr( e, 'length') and hasattr( e, 'type') and (e.type.startswith('audio/') or e.type.startswith('video/')):
490 if util.normalize_feed_url(e.href) is not None:
491 log( 'Selected enclosure: %s', e.href, sender = episode)
492 enclosure = e
493 break
494 episode.url = util.normalize_feed_url( enclosure.get( 'href', ''))
495 elif hasattr(entry, 'link'):
496 (filename, extension) = util.filename_from_url(entry.link)
497 if extension == '' and hasattr( entry, 'type'):
498 extension = util.extension_from_mimetype(e.type)
499 file_type = util.file_type_by_extension(extension)
500 if file_type is not None:
501 log('Adding episode with link to file type "%s".', file_type, sender=episode)
502 episode.url = entry.link
504 # YouTube specific
505 if not episode.url and hasattr(entry, 'links') and len(entry.links) and hasattr(entry.links[0], 'href'):
506 episode.url = entry.links[0].href
508 if not episode.url:
509 log('Episode has no URL')
510 log('Episode: %s', episode)
511 log('Entry: %s', entry)
512 # This item in the feed has no downloadable enclosure
513 return None
515 if not episode.pubDate:
516 metainfo = util.get_episode_info_from_url(episode.url)
517 if 'pubdate' in metainfo:
518 try:
519 episode.pubDate = int(float(metainfo['pubdate']))
520 except:
521 log('Cannot convert pubDate "%s" in from_feedparser_entry.', str(metainfo['pubdate']), traceback=True)
523 if hasattr(enclosure, 'length'):
524 try:
525 episode.length = int(enclosure.length)
526 except:
527 episode.length = -1
529 if hasattr( enclosure, 'type'):
530 episode.mimetype = enclosure.type
532 if episode.title == '':
533 ( filename, extension ) = os.path.splitext( os.path.basename( episode.url))
534 episode.title = filename
536 return episode
    def __init__( self, channel):
        # Used by Storage for faster saving
        self.id = None
        self.url = ''
        self.title = ''
        # Size in bytes; 0 means "unknown"
        self.length = 0
        self.mimetype = 'application/octet-stream'
        self.guid = ''
        self.description = ''
        self.link = ''
        # The podcastChannel this episode belongs to
        self.channel = channel
        # Publication timestamp (seconds since epoch; 0 = unknown)
        self.pubDate = 0

        self.state = db.STATE_NORMAL
        self.is_played = False
        # New episodes inherit the locked flag from their channel
        self.is_locked = channel.channel_is_locked
556 def save(self, bulk=False):
557 if self.state != db.STATE_DOWNLOADED and self.file_exists():
558 self.state = db.STATE_DOWNLOADED
559 db.save_episode(self, bulk=bulk)
561 def set_state(self, state):
562 self.state = state
563 db.mark_episode(self.url, state=self.state, is_played=self.is_played, is_locked=self.is_locked)
565 def mark(self, state=None, is_played=None, is_locked=None):
566 if state is not None:
567 self.state = state
568 if is_played is not None:
569 self.is_played = is_played
570 if is_locked is not None:
571 self.is_locked = is_locked
572 db.mark_episode(self.url, state=state, is_played=is_played, is_locked=is_locked)
574 def mark_downloaded(self, save=False):
575 self.state = db.STATE_DOWNLOADED
576 self.is_played = False
577 if save:
578 self.save()
580 @staticmethod
581 def create_from_dict(d, channel):
582 e = podcastItem(channel)
583 for key in d:
584 if hasattr(e, key):
585 setattr(e, key, d[key])
586 return e
588 @property
589 def title_and_description(self):
591 Returns Pango markup for displaying in a TreeView, and
592 disables the description when the config variable
593 "episode_list_descriptions" is not set.
595 if gl.config.episode_list_descriptions and gpodder.interface != gpodder.MAEMO:
596 return '%s\n<small>%s</small>' % (saxutils.escape(self.title), saxutils.escape(self.one_line_description()))
597 else:
598 return saxutils.escape(self.title)
600 def age_in_days(self):
601 return util.file_age_in_days(self.local_filename())
603 def is_old(self):
604 return self.age_in_days() > gl.config.episode_old_age
606 def get_age_string(self):
607 return util.file_age_to_string(self.age_in_days())
609 age_prop = property(fget=get_age_string)
611 def one_line_description( self):
612 lines = util.remove_html_tags(self.description).strip().splitlines()
613 if not lines or lines[0] == '':
614 return _('No description available')
615 else:
616 return ' '.join(lines)
618 def delete_from_disk(self):
619 try:
620 self.channel.delete_episode_by_url(self.url)
621 except:
622 log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)
    def local_filename( self):
        """Return the local download path for this episode.

        The MD5-of-URL filename is used for compatibility and whenever
        "experimental_file_naming" is off; otherwise a sanitized name
        derived from the URL is used, falling back to the MD5 name when
        that looks unusable.
        """
        ext = self.extension()

        # For compatibility with already-downloaded episodes,
        # we accept md5 filenames if they are downloaded now.
        md5_filename = os.path.join(self.channel.save_dir, hashlib.md5(self.url).hexdigest()+ext)
        if os.path.exists(md5_filename) or not gl.config.experimental_file_naming:
            return md5_filename

        # If the md5 filename does not exist,
        ( episode, e ) = util.filename_from_url(self.url)
        episode = util.sanitize_filename(episode) + ext

        # If the episode filename looks suspicious,
        # we still return the md5 filename to be on
        # the safe side of the fence ;)
        if len(episode) == 0 or episode.startswith('redirect.'):
            return md5_filename
        filename = os.path.join(self.channel.save_dir, episode)
        return filename
645 def extension( self):
646 ( filename, ext ) = util.filename_from_url(self.url)
647 # if we can't detect the extension from the url fallback on the mimetype
648 if ext == '' or util.file_type_by_extension(ext) is None:
649 ext = util.extension_from_mimetype(self.mimetype)
650 #log('Getting extension from mimetype for: %s (mimetype: %s)' % (self.title, ext), sender=self)
651 return ext
653 def mark_new(self):
654 self.state = db.STATE_NORMAL
655 self.is_played = False
656 db.mark_episode(self.url, state=self.state, is_played=self.is_played)
658 def mark_old(self):
659 self.is_played = True
660 db.mark_episode(self.url, is_played=True)
662 def file_exists(self):
663 return os.path.exists(self.local_filename())
665 def was_downloaded(self, and_exists=False):
666 if self.state != db.STATE_DOWNLOADED:
667 return False
668 if and_exists and not self.file_exists():
669 return False
670 return True
672 def sync_filename( self):
673 if gl.config.custom_sync_name_enabled:
674 if '{channel' in gl.config.custom_sync_name:
675 log('Fixing OLD syntax {channel.*} => {podcast.*} in custom_sync_name.', sender=self)
676 gl.config.custom_sync_name = gl.config.custom_sync_name.replace('{channel.', '{podcast.')
677 return util.object_string_formatter(gl.config.custom_sync_name, episode=self, podcast=self.channel)
678 else:
679 return self.title
681 def file_type( self):
682 return util.file_type_by_extension( self.extension() )
684 @property
685 def basename( self):
686 return os.path.splitext( os.path.basename( self.url))[0]
688 @property
689 def published( self):
691 Returns published date as YYYYMMDD (or 00000000 if not available)
693 try:
694 return datetime.datetime.fromtimestamp(self.pubDate).strftime('%Y%m%d')
695 except:
696 log( 'Cannot format pubDate for "%s".', self.title, sender = self)
697 return '00000000'
699 @property
700 def pubtime(self):
702 Returns published time as HHMM (or 0000 if not available)
704 try:
705 return datetime.datetime.fromtimestamp(self.pubDate).strftime('%H%M')
706 except:
707 log('Cannot format pubDate (time) for "%s".', self.title, sender=self)
708 return '0000'
710 def cute_pubdate(self):
711 result = util.format_date(self.pubDate)
712 if result is None:
713 return '(%s)' % _('unknown')
714 else:
715 return result
717 pubdate_prop = property(fget=cute_pubdate)
719 def calculate_filesize( self):
720 try:
721 self.length = os.path.getsize(self.local_filename())
722 except:
723 log( 'Could not get filesize for %s.', self.url)
725 def get_filesize_string( self):
726 return gl.format_filesize( self.length)
728 filesize_prop = property(fget=get_filesize_string)
730 def get_channel_title( self):
731 return self.channel.title
733 channel_prop = property(fget=get_channel_title)
735 def get_played_string( self):
736 if not self.is_played:
737 return _('Unplayed')
739 return ''
741 played_prop = property(fget=get_played_string)
def update_channel_model_by_iter( model, iter, channel, color_dict,
    cover_cache=None, max_width=0, max_height=0, initialize_all=False):
    """Refresh one row of the channel-list model for "channel".

    Columns used: 0=url, 1=title, 2=markup description, 3=pill pixbuf,
    5=cover pixbuf, 6=parse error text, 7=pill visible, 8=row color.
    With initialize_all=True, the URL and cover are (re)loaded too.
    """
    count_downloaded = channel.stat(state=db.STATE_DOWNLOADED)
    count_new = channel.stat(state=db.STATE_NORMAL, is_played=False)
    count_unplayed = channel.stat(state=db.STATE_DOWNLOADED, is_played=False)

    channel.iter = iter
    if initialize_all:
        model.set(iter, 0, channel.url)

    model.set(iter, 1, channel.title)
    title_markup = saxutils.escape(channel.title)
    description_markup = saxutils.escape(util.get_first_line(channel.description) or _('No description available'))
    d = []
    # Channels with new episodes get a bold title
    if count_new:
        d.append('<span weight="bold">')
    d.append(title_markup)
    if count_new:
        d.append('</span>')

    description = ''.join(d+['\n', '<small>', description_markup, '</small>'])
    model.set(iter, 2, description)

    if channel.parse_error is not None:
        model.set(iter, 6, channel.parse_error)
        color = color_dict['parse_error']
    else:
        color = color_dict['default']

    # An in-progress update overrides the error/default color
    if channel.update_flag:
        color = color_dict['updating']

    model.set(iter, 8, color)

    # Show the unplayed/downloaded "pill" only when there is something to count
    if count_unplayed > 0 or count_downloaded > 0:
        model.set(iter, 3, draw.draw_pill_pixbuf(str(count_unplayed), str(count_downloaded)))
        model.set(iter, 7, True)
    else:
        model.set(iter, 7, False)

    if initialize_all:
        # Load the cover if we have it, but don't download
        # it if it's not available (to avoid blocking here)
        pixbuf = services.cover_downloader.get_cover(channel, avoid_downloading=True)
        new_pixbuf = None
        if pixbuf is not None:
            new_pixbuf = util.resize_pixbuf_keep_ratio(pixbuf, max_width, max_height, channel.url, cover_cache)
        model.set(iter, 5, new_pixbuf or pixbuf)
def channels_to_model(channels, color_dict, cover_cache=None, max_width=0, max_height=0):
    """Build a fresh channel-list ListStore; returns (model, urls)."""
    model = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf, int,
            gtk.gdk.Pixbuf, str, bool, str)

    urls = []
    for channel in channels:
        row_iter = model.append()
        update_channel_model_by_iter(model, row_iter, channel,
                color_dict, cover_cache, max_width, max_height, True)
        urls.append(channel.url)

    return (model, urls)
def load_channels():
    """Load all subscribed channels from the database."""
    return db.load_channels(lambda d: podcastChannel.create_from_dict(d))

def update_channels(callback_proc=None, callback_error=None, is_cancelled_cb=None):
    """Update all channels, reporting progress through callback_proc."""
    log('Updating channels....')

    channels = load_channels()

    for count, channel in enumerate(channels):
        # Stop early when the user cancelled the update
        if is_cancelled_cb is not None and is_cancelled_cb():
            return channels
        if callback_proc is not None:
            callback_proc(count, len(channels))
        channel.update()

    return channels
def save_channels( channels):
    """Write the channel list to the OPML subscription file."""
    exporter = opml.Exporter(gl.channel_opml_file)
    return exporter.write(channels)

def can_restore_from_opml():
    """Return the OPML filename if it lists channels, else None."""
    try:
        if len(opml.Importer(gl.channel_opml_file).items):
            return gl.channel_opml_file
    except:
        # Best effort: a missing or unreadable OPML file means "no"
        return None
class LocalDBReader( object):
    """
    DEPRECATED - Only used for migration to SQLite
    """
    def __init__( self, url):
        # Feed URL of the channel whose LocalDB file is being read
        self.url = url

    def get_text( self, nodelist):
        # Concatenate the data of all text nodes in the node list
        return ''.join( [ node.data for node in nodelist if node.nodeType == node.TEXT_NODE ])

    def get_text_by_first_node( self, element, name):
        # Text content of the first child element with the given tag name
        return self.get_text( element.getElementsByTagName( name)[0].childNodes)

    def get_episode_from_element( self, channel, element):
        """Build a podcastItem from an <item> DOM element."""
        episode = podcastItem( channel)
        episode.title = self.get_text_by_first_node( element, 'title')
        episode.description = self.get_text_by_first_node( element, 'description')
        episode.url = self.get_text_by_first_node( element, 'url')
        episode.link = self.get_text_by_first_node( element, 'link')
        episode.guid = self.get_text_by_first_node( element, 'guid')

        if not episode.guid:
            # Fall back to the URL or link as a surrogate guid
            for k in ('url', 'link'):
                if getattr(episode, k) is not None:
                    episode.guid = getattr(episode, k)
                    log('Notice: episode has no guid, using %s', episode.guid)
                    break
        try:
            episode.pubDate = float(self.get_text_by_first_node(element, 'pubDate'))
        except:
            # Old LocalDB files store pubDate as a date string, not a float
            log('Looks like you have an old pubDate in your LocalDB -> converting it')
            episode.pubDate = self.get_text_by_first_node(element, 'pubDate')
            log('FYI: pubDate value is: "%s"', episode.pubDate, sender=self)
            pubdate = feedparser._parse_date(episode.pubDate)
            if pubdate is None:
                log('Error converting the old pubDate - sorry!', sender=self)
                episode.pubDate = 0
            else:
                log('PubDate converted successfully - yay!', sender=self)
                episode.pubDate = time.mktime(pubdate)
        try:
            episode.mimetype = self.get_text_by_first_node( element, 'mimetype')
        except:
            log('No mimetype info for %s', episode.url, sender=self)
        episode.calculate_filesize()
        return episode

    def load_and_clean( self, filename):
        """
        Clean-up a LocalDB XML file that could potentially contain
        "unbound prefix" XML elements (generated by the old print-based
        LocalDB code). The code removes those lines to make the new
        DOM parser happy.

        This should be removed in a future version.
        """
        lines = []
        for line in open(filename).read().split('\n'):
            if not line.startswith('<gpodder:info'):
                lines.append( line)

        return '\n'.join( lines)

    def read( self, filename):
        """Parse a LocalDB file and return the list of episodes in it."""
        doc = xml.dom.minidom.parseString( self.load_and_clean( filename))
        rss = doc.getElementsByTagName('rss')[0]

        channel_element = rss.getElementsByTagName('channel')[0]

        # A transient channel object; only used to hold episode metadata
        channel = podcastChannel( url = self.url)
        channel.title = self.get_text_by_first_node( channel_element, 'title')
        channel.description = self.get_text_by_first_node( channel_element, 'description')
        channel.link = self.get_text_by_first_node( channel_element, 'link')

        episodes = []
        for episode_element in rss.getElementsByTagName('item'):
            episode = self.get_episode_from_element( channel, episode_element)
            episodes.append(episode)

        return episodes