1 # -*- coding: utf-8 -*-
3 # gPodder - A media aggregator and podcast client
4 # Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
6 # gPodder is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 3 of the License, or
9 # (at your option) any later version.
11 # gPodder is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
22 # libpodcasts.py -- data classes for gpodder
23 # thomas perl <thp@perli.net> 20051029
25 # Contains code based on:
26 # liblocdbwriter.py (2006-01-09)
27 # liblocdbreader.py (2006-01-10)
35 from gpodder
import util
36 from gpodder
import opml
37 from gpodder
import cache
38 from gpodder
import services
39 from gpodder
import draw
40 from gpodder
import libtagupdate
41 from gpodder
import dumbshelve
43 from gpodder
.liblogger
import log
44 from gpodder
.libgpodder
import gl
45 from gpodder
.dbsqlite
import db
58 import xml
.dom
.minidom
61 from xml
.sax
import saxutils
64 if gpodder
.interface
== gpodder
.MAEMO
:
65 ICON_AUDIO_FILE
= 'gnome-mime-audio-mp3'
66 ICON_VIDEO_FILE
= 'gnome-mime-video-mp4'
67 ICON_BITTORRENT
= 'qgn_toolb_browser_web'
68 ICON_DOWNLOADING
= 'qgn_toolb_messagin_moveto'
69 ICON_DELETED
= 'qgn_toolb_gene_deletebutton'
70 ICON_NEW
= 'qgn_list_gene_favor'
72 ICON_AUDIO_FILE
= 'audio-x-generic'
73 ICON_VIDEO_FILE
= 'video-x-generic'
74 ICON_BITTORRENT
= 'applications-internet'
75 ICON_DOWNLOADING
= gtk
.STOCK_GO_DOWN
76 ICON_DELETED
= gtk
.STOCK_DELETE
77 ICON_NEW
= gtk
.STOCK_ABOUT
81 class podcastChannel(object):
82 """holds data for a complete channel"""
83 SETTINGS
= ('sync_to_devices', 'device_playlist_name','override_title','username','password')
89 def load(cls
, url
, create
=True):
90 if isinstance(url
, unicode):
91 url
= url
.encode('utf-8')
93 tmp
= db
.load_channels(factory
=lambda d
: cls
.create_from_dict(d
), url
=url
)
97 tmp
= podcastChannel(url
)
101 db
.force_last_new(tmp
)
105 def create_from_dict(d
):
109 setattr(c
, key
, d
[key
])
113 (updated
, c
) = self
.fc
.fetch(self
.url
, self
)
118 if self
.url
!= c
.url
:
119 log('Updating channel URL from %s to %s', self
.url
, c
.url
, sender
=self
)
122 # update the cover if it's not there
125 # If we have an old instance of this channel, and
126 # feedcache says the feed hasn't changed, return old
128 log('Channel %s is up to date', self
.url
)
131 # Save etag and last-modified for later reuse
132 if c
.headers
.get('etag'):
133 self
.etag
= c
.headers
.get('etag')
134 if c
.headers
.get('last-modified'):
135 self
.last_modified
= c
.headers
.get('last-modified')
137 self
.parse_error
= c
.get('bozo_exception', None)
139 if hasattr(c
.feed
, 'title'):
140 self
.title
= c
.feed
.title
142 self
.title
= self
.url
143 if hasattr( c
.feed
, 'link'):
144 self
.link
= c
.feed
.link
145 if hasattr( c
.feed
, 'subtitle'):
146 self
.description
= util
.remove_html_tags(c
.feed
.subtitle
)
148 if hasattr(c
.feed
, 'updated_parsed') and c
.feed
.updated_parsed
is not None:
149 self
.pubDate
= rfc822
.mktime_tz(c
.feed
.updated_parsed
+(0,))
151 self
.pubDate
= time
.time()
152 if hasattr( c
.feed
, 'image'):
153 if hasattr(c
.feed
.image
, 'href') and c
.feed
.image
.href
:
155 self
.image
= c
.feed
.image
.href
156 if old
!= self
.image
:
157 self
.update_cover(force
=True)
159 # Marked as bulk because we commit after importing episodes.
160 db
.save_channel(self
, bulk
=True)
162 # We can limit the maximum number of entries that gPodder will parse
163 # via the "max_episodes_per_feed" configuration option.
164 if len(c
.entries
) > gl
.config
.max_episodes_per_feed
:
165 log('Limiting number of episodes for %s to %d', self
.title
, gl
.config
.max_episodes_per_feed
)
166 for entry
in c
.entries
[:min(gl
.config
.max_episodes_per_feed
, len(c
.entries
))]:
170 episode
= podcastItem
.from_feedparser_entry(entry
, self
)
172 log('Cannot instantiate episode "%s": %s. Skipping.', entry
.get('id', '(no id available)'), e
, sender
=self
, traceback
=True)
175 episode
.save(bulk
=True)
def update_cover(self, force=False):
    # Ask the cover downloader service for this channel's cover image,
    # unless a cached cover file already exists (force=True re-requests).
    cover_cached = self.cover_file is not None and os.path.exists(self.cover_file)
    if force or not cover_cached:
        if self.image is not None:
            services.cover_downloader.request_cover(self)
185 db
.delete_channel(self
)
188 db
.save_channel(self
)
def stat(self, state=None, is_played=None, is_locked=None):
    """Look up episode statistics for this channel in the database."""
    filters = {'state': state, 'is_played': is_played, 'is_locked': is_locked}
    return db.get_channel_stat(self.url, **filters)
193 def __init__( self
, url
= "", title
= "", link
= "", description
= ""):
198 self
.description
= util
.remove_html_tags( description
)
201 self
.parse_error
= None
202 self
.newest_pubdate_cached
= None
203 self
.update_flag
= False # channel is updating or to be updated
206 # should this channel be synced to devices? (ex: iPod)
207 self
.sync_to_devices
= True
208 # to which playlist should be synced
209 self
.device_playlist_name
= 'gPodder'
210 # if set, this overrides the channel-provided title
211 self
.override_title
= ''
215 self
.last_modified
= None
218 self
.save_dir_size
= 0
219 self
.__save
_dir
_size
_set
= False
def request_save_dir_size(self):
    # Compute the save-directory size only the first time it is requested;
    # later calls are no-ops (use update_save_dir_size() to force a refresh).
    if self.__save_dir_size_set:
        return
    self.update_save_dir_size()
    self.__save_dir_size_set = True
def update_save_dir_size(self):
    """Recalculate and cache the on-disk size of this channel's save directory."""
    size = util.calculate_size(self.save_dir)
    self.save_dir_size = size
def get_filename(self):
    """Return the MD5 hex digest of the channel URL (used as the channel's directory name)."""
    digest = md5.new(self.url)
    return digest.hexdigest()

filename = property(fget=get_filename)
235 def get_title( self
):
236 if self
.override_title
:
237 return self
.override_title
238 elif not self
.__title
.strip():
def set_title(self, value):
    """Setter for the channel title; surrounding whitespace is stripped."""
    cleaned = value.strip()
    self.__title = cleaned
246 title
= property(fget
=get_title
,
249 def set_custom_title( self
, custom_title
):
250 custom_title
= custom_title
.strip()
252 if custom_title
!= self
.__title
:
253 self
.override_title
= custom_title
255 self
.override_title
= ''
def get_downloaded_episodes(self):
    """Return all episodes of this channel that are in the DOWNLOADED state."""
    make_episode = lambda c: podcastItem.create_from_dict(c, self)
    return db.load_episodes(self, factory=make_episode, state=db.STATE_DOWNLOADED)
def save_settings(self):
    """Persist this channel (including its user-editable settings) to the database."""
    db.save_channel(self)
def get_new_episodes(self):
    """Return episodes that are in the NORMAL state and have not been played."""
    all_episodes = db.load_episodes(self, factory=lambda x: podcastItem.create_from_dict(x, self))
    result = []
    for episode in all_episodes:
        if episode.state == db.STATE_NORMAL and not episode.is_played:
            result.append(episode)
    return result
266 def update_m3u_playlist(self
):
267 if gl
.config
.create_m3u_playlists
:
268 downloaded_episodes
= self
.get_downloaded_episodes()
269 fn
= util
.sanitize_filename(self
.title
)
271 fn
= os
.path
.basename(self
.save_dir
)
272 m3u_filename
= os
.path
.join(gl
.downloaddir
, fn
+'.m3u')
273 log('Writing playlist to %s', m3u_filename
, sender
=self
)
274 f
= open(m3u_filename
, 'w')
277 for episode
in downloaded_episodes
:
278 filename
= episode
.local_filename()
279 if os
.path
.dirname(filename
).startswith(os
.path
.dirname(m3u_filename
)):
280 filename
= filename
[len(os
.path
.dirname(m3u_filename
)+os
.sep
):]
281 f
.write('#EXTINF:0,'+self
.title
+' - '+episode
.title
+' ('+episode
.cute_pubdate()+')\n')
282 f
.write(filename
+'\n')
285 def addDownloadedItem(self
, item
):
286 log('addDownloadedItem(%s)', item
.url
)
288 if not item
.was_downloaded():
289 item
.mark(is_played
=False, state
=db
.STATE_DOWNLOADED
)
291 # Update metadata on file (if possible and wanted)
292 if gl
.config
.update_tags
and libtagupdate
.tagging_supported():
293 filename
= item
.local_filename()
295 libtagupdate
.update_metadata_on_file(filename
, title
=item
.title
, artist
=self
.title
, genre
='Podcast')
297 log('Error while calling update_metadata_on_file(): %s', e
)
299 self
.update_m3u_playlist()
301 if item
.file_type() == 'torrent':
302 torrent_filename
= item
.local_filename()
303 destination_filename
= util
.torrent_filename( torrent_filename
)
304 gl
.invoke_torrent(item
.url
, torrent_filename
, destination_filename
)
def get_all_episodes(self):
    """Return this channel's episodes, limited to the configured per-feed maximum."""
    factory = lambda d: podcastItem.create_from_dict(d, self)
    return db.load_episodes(self, factory=factory, limit=gl.config.max_episodes_per_feed)
def update_model(self):
    # Refresh the cached directory size, then update the status/download
    # columns for every row in this channel's episode model.
    self.update_save_dir_size()
    model = self.tree_model

    row = model.get_iter_first()
    while row is not None:
        self.iter_set_downloading_columns(model, row)
        row = model.iter_next(row)
def tree_model(self):
    """Build and return a fresh episode ListStore for this channel."""
    log('Returning TreeModel for %s', self.url, sender=self)
    store = self.items_liststore()
    return store
324 def iter_set_downloading_columns( self
, model
, iter, episode
=None):
325 global ICON_AUDIO_FILE
, ICON_VIDEO_FILE
, ICON_BITTORRENT
326 global ICON_DOWNLOADING
, ICON_DELETED
, ICON_NEW
329 url
= model
.get_value( iter, 0)
330 episode
= db
.load_episode(url
, factory
=lambda x
: podcastItem
.create_from_dict(x
, self
))
334 if gl
.config
.episode_list_descriptions
:
339 if services
.download_status_manager
.is_download_in_progress(url
):
340 status_icon
= util
.get_tree_icon(ICON_DOWNLOADING
, icon_cache
=self
.icon_cache
, icon_size
=icon_size
)
342 if episode
.state
== db
.STATE_NORMAL
:
343 if episode
.is_played
:
346 status_icon
= util
.get_tree_icon(ICON_NEW
, icon_cache
=self
.icon_cache
, icon_size
=icon_size
)
347 elif episode
.was_downloaded(and_exists
=True):
348 missing
= not episode
.file_exists()
351 log('Episode missing: %s (before drawing an icon)', episode
.url
, sender
=self
)
353 file_type
= util
.file_type_by_extension( model
.get_value( iter, 9))
354 if file_type
== 'audio':
355 status_icon
= util
.get_tree_icon(ICON_AUDIO_FILE
, not episode
.is_played
, episode
.is_locked
, not episode
.file_exists(), self
.icon_cache
, icon_size
)
356 elif file_type
== 'video':
357 status_icon
= util
.get_tree_icon(ICON_VIDEO_FILE
, not episode
.is_played
, episode
.is_locked
, not episode
.file_exists(), self
.icon_cache
, icon_size
)
358 elif file_type
== 'torrent':
359 status_icon
= util
.get_tree_icon(ICON_BITTORRENT
, not episode
.is_played
, episode
.is_locked
, not episode
.file_exists(), self
.icon_cache
, icon_size
)
361 status_icon
= util
.get_tree_icon('unknown', not episode
.is_played
, episode
.is_locked
, not episode
.file_exists(), self
.icon_cache
, icon_size
)
362 elif episode
.state
== db
.STATE_DELETED
or episode
.state
== db
.STATE_DOWNLOADED
:
363 status_icon
= util
.get_tree_icon(ICON_DELETED
, icon_cache
=self
.icon_cache
, icon_size
=icon_size
)
365 log('Warning: Cannot determine status icon.', sender
=self
)
368 model
.set( iter, 4, status_icon
)
370 def items_liststore( self
):
372 Return a gtk.ListStore containing episodes for this channel
374 new_model
= gtk
.ListStore( gobject
.TYPE_STRING
, gobject
.TYPE_STRING
, gobject
.TYPE_STRING
,
375 gobject
.TYPE_BOOLEAN
, gtk
.gdk
.Pixbuf
, gobject
.TYPE_STRING
, gobject
.TYPE_STRING
,
376 gobject
.TYPE_STRING
, gobject
.TYPE_STRING
, gobject
.TYPE_STRING
)
378 for item
in self
.get_all_episodes():
379 description
= item
.title_and_description
382 filelength
= gl
.format_filesize(item
.length
, 1)
386 new_iter
= new_model
.append((item
.url
, item
.title
, filelength
,
387 True, None, item
.cute_pubdate(), description
, item
.description
,
388 item
.local_filename(), item
.extension()))
389 self
.iter_set_downloading_columns( new_model
, new_iter
, episode
=item
)
391 self
.update_save_dir_size()
def find_episode(self, url):
    """Load a single episode of this channel by its URL from the database."""
    factory = lambda x: podcastItem.create_from_dict(x, self)
    return db.load_episode(url, factory=factory)
397 def get_save_dir(self
):
398 save_dir
= os
.path
.join(gl
.downloaddir
, self
.filename
, '')
400 # Create save_dir if it does not yet exist
401 if not util
.make_directory( save_dir
):
402 log( 'Could not create save_dir: %s', save_dir
, sender
= self
)
# Download directory for this channel (see get_save_dir; created on access)
save_dir = property(fget=get_save_dir)
def remove_downloaded(self):
    """Delete this channel's download directory, ignoring filesystem errors."""
    shutil.rmtree(self.save_dir, ignore_errors=True)
def get_index_file(self):
    # Index XML filename for the downloaded-channels list,
    # located inside this channel's save directory.
    return os.path.join(self.save_dir, 'index.xml')

index_file = property(fget=get_index_file)
def get_cover_file(self):
    # Cover filename for the cover download cache,
    # located inside this channel's save directory.
    return os.path.join(self.save_dir, 'cover')

cover_file = property(fget=get_cover_file)
def delete_episode_by_url(self, url):
    # Look up the episode in the database; the factory rebuilds a
    # podcastItem bound to this channel.
    episode = db.load_episode(url, lambda c: podcastItem.create_from_dict(c, self))

    if episode is not None:
        # Remove the downloaded file and mark the episode as deleted.
        util.delete_file(episode.local_filename())
        episode.set_state(db.STATE_DELETED)

    # Keep the generated M3U playlist in sync with the files on disk.
    # NOTE(review): reconstructed indentation — this appears to run
    # unconditionally; confirm against upstream source.
    self.update_m3u_playlist()
433 class podcastItem(object):
434 """holds data for one object in a channel"""
437 def load(url
, channel
):
438 e
= podcastItem(channel
)
439 d
= db
.load_episode(url
)
441 for k
, v
in d
.iteritems():
447 def from_feedparser_entry( entry
, channel
):
448 episode
= podcastItem( channel
)
450 episode
.title
= entry
.get( 'title', util
.get_first_line( util
.remove_html_tags( entry
.get( 'summary', ''))))
451 episode
.link
= entry
.get( 'link', '')
452 episode
.description
= util
.remove_html_tags( entry
.get( 'summary', entry
.get( 'link', entry
.get( 'title', ''))))
453 episode
.guid
= entry
.get( 'id', '')
454 if entry
.get( 'updated_parsed', None):
455 episode
.pubDate
= rfc822
.mktime_tz(entry
.updated_parsed
+(0,))
457 if episode
.title
== '':
458 log( 'Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender
= episode
)
461 if hasattr(entry
, 'enclosures') and len(entry
.enclosures
) > 0:
462 enclosure
= entry
.enclosures
[0]
463 if len(entry
.enclosures
) > 1:
464 for e
in entry
.enclosures
:
465 if hasattr( e
, 'href') and hasattr( e
, 'length') and hasattr( e
, 'type') and (e
.type.startswith('audio/') or e
.type.startswith('video/')):
466 if util
.normalize_feed_url(e
.href
) is not None:
467 log( 'Selected enclosure: %s', e
.href
, sender
= episode
)
470 episode
.url
= util
.normalize_feed_url( enclosure
.get( 'href', ''))
471 elif hasattr(entry
, 'link'):
472 (filename
, extension
) = util
.filename_from_url(entry
.link
)
473 if extension
== '' and hasattr( entry
, 'type'):
474 extension
= util
.extension_from_mimetype(e
.type)
475 file_type
= util
.file_type_by_extension(extension
)
476 if file_type
is not None:
477 log('Adding episode with link to file type "%s".', file_type
, sender
=episode
)
478 episode
.url
= entry
.link
481 # This item in the feed has no downloadable enclosure
484 if not episode
.pubDate
:
485 metainfo
= util
.get_episode_info_from_url(episode
.url
)
486 if 'pubdate' in metainfo
:
488 episode
.pubDate
= int(float(metainfo
['pubdate']))
490 log('Cannot convert pubDate "%s" in from_feedparser_entry.', str(metainfo
['pubdate']), traceback
=True)
492 if hasattr( enclosure
, 'length'):
494 episode
.length
= int(enclosure
.length
)
498 if hasattr( enclosure
, 'type'):
499 episode
.mimetype
= enclosure
.type
501 if episode
.title
== '':
502 ( filename
, extension
) = os
.path
.splitext( os
.path
.basename( episode
.url
))
503 episode
.title
= filename
508 def __init__( self
, channel
):
509 # Used by Storage for faster saving
514 self
.mimetype
= 'application/octet-stream'
516 self
.description
= ''
518 self
.channel
= channel
521 self
.state
= db
.STATE_NORMAL
522 self
.is_played
= False
523 self
.is_locked
= False
def save(self, bulk=False):
    # If the file is already present on disk but the state was not updated
    # yet, promote the episode to DOWNLOADED before persisting.
    if self.state != db.STATE_DOWNLOADED and self.file_exists():
        self.state = db.STATE_DOWNLOADED
    # NOTE(review): reconstructed indentation — the episode appears to be
    # persisted unconditionally; confirm against upstream source.
    db.save_episode(self, bulk=bulk)
530 def set_state(self
, state
):
532 db
.mark_episode(self
.url
, state
=self
.state
, is_played
=self
.is_played
, is_locked
=self
.is_locked
)
534 def mark(self
, state
=None, is_played
=None, is_locked
=None):
535 if state
is not None:
537 if is_played
is not None:
538 self
.is_played
= is_played
539 if is_locked
is not None:
540 self
.is_locked
= is_locked
541 db
.mark_episode(self
.url
, state
=state
, is_played
=is_played
, is_locked
=is_locked
)
544 def create_from_dict(d
, channel
):
545 e
= podcastItem(channel
)
548 setattr(e
, key
, d
[key
])
def title_and_description(self):
    """
    Returns Pango markup for displaying in a TreeView, and
    disables the description when the config variable
    "episode_list_descriptions" is not set.
    """
    escaped_title = saxutils.escape(self.title)
    if not gl.config.episode_list_descriptions:
        return escaped_title
    return '%s\n<small>%s</small>' % (escaped_title, saxutils.escape(self.one_line_description()))
def age_in_days(self):
    """Age of the episode's downloaded file, in days (delegates to util)."""
    return util.file_age_in_days(self.local_filename())
567 return self
.age_in_days() > gl
.config
.episode_old_age
def get_age_string(self):
    """Human-readable string for the downloaded file's age."""
    days = self.age_in_days()
    return util.file_age_to_string(days)

age_prop = property(fget=get_age_string)
def one_line_description(self):
    """Collapse the description to a single line (placeholder text if empty)."""
    lines = self.description.strip().splitlines()
    if not lines or lines[0] == '':
        return _('No description available')
    stripped = [l.strip() for l in lines if l.strip() != '']
    return ' '.join(stripped)
581 def delete_from_disk(self
):
583 self
.channel
.delete_episode_by_url(self
.url
)
585 log('Cannot delete episode from disk: %s', self
.title
, traceback
=True, sender
=self
)
587 def local_filename( self
):
588 ext
= self
.extension()
590 # For compatibility with already-downloaded episodes,
591 # we accept md5 filenames if they are downloaded now.
592 md5_filename
= os
.path
.join(self
.channel
.save_dir
, md5
.new(self
.url
).hexdigest()+ext
)
593 if os
.path
.exists(md5_filename
) or not gl
.config
.experimental_file_naming
:
596 # If the md5 filename does not exist,
597 ( episode
, e
) = util
.filename_from_url(self
.url
)
598 episode
= util
.sanitize_filename(episode
) + ext
600 # If the episode filename looks suspicious,
601 # we still return the md5 filename to be on
602 # the safe side of the fence ;)
603 if len(episode
) == 0 or episode
.startswith('redirect.'):
605 filename
= os
.path
.join(self
.channel
.save_dir
, episode
)
608 def extension( self
):
609 ( filename
, ext
) = util
.filename_from_url(self
.url
)
610 # if we can't detect the extension from the url fallback on the mimetype
611 if ext
== '' or util
.file_type_by_extension(ext
) is None:
612 ext
= util
.extension_from_mimetype(self
.mimetype
)
613 #log('Getting extension from mimetype for: %s (mimetype: %s)' % (self.title, ext), sender=self)
617 self
.state
= db
.STATE_NORMAL
618 self
.is_played
= False
619 db
.mark_episode(self
.url
, state
=self
.state
, is_played
=self
.is_played
)
622 self
.is_played
= True
623 db
.mark_episode(self
.url
, is_played
=True)
def file_exists(self):
    """True if the episode's local file is present on disk."""
    path = self.local_filename()
    return os.path.exists(path)
628 def was_downloaded(self
, and_exists
=False):
629 if self
.state
!= db
.STATE_DOWNLOADED
:
631 if and_exists
and not self
.file_exists():
635 def sync_filename( self
):
636 if gl
.config
.custom_sync_name_enabled
:
637 return util
.object_string_formatter(gl
.config
.custom_sync_name
, episode
=self
, channel
=self
.channel
)
def file_type(self):
    """Classify the episode (e.g. 'audio', 'video', 'torrent') by its file extension."""
    return util.file_type_by_extension( self.extension() )
646 return os
.path
.splitext( os
.path
.basename( self
.url
))[0]
649 def published( self
):
651 return datetime
.datetime
.fromtimestamp(self
.pubDate
).strftime('%Y%m%d')
653 log( 'Cannot format pubDate for "%s".', self
.title
, sender
= self
)
656 def cute_pubdate(self
):
657 result
= util
.format_date(self
.pubDate
)
659 return '(%s)' % _('unknown')
# Human-readable publication date (see cute_pubdate)
pubdate_prop = property(fget=cute_pubdate)
665 def calculate_filesize( self
):
667 self
.length
= os
.path
.getsize(self
.local_filename())
669 log( 'Could not get filesize for %s.', self
.url
)
def get_filesize_string(self):
    """Human-readable string for the episode's file size."""
    size = self.length
    return gl.format_filesize(size)

filesize_prop = property(fget=get_filesize_string)
def get_channel_title(self):
    """Title of the channel this episode belongs to."""
    return self.channel.title

# Channel title exposed as a read-only property
channel_prop = property(fget=get_channel_title)
681 def get_played_string( self
):
682 if not self
.is_played
:
# Textual played/unplayed status (see get_played_string)
played_prop = property(fget=get_played_string)
691 def update_channel_model_by_iter( model
, iter, channel
, color_dict
,
692 cover_cache
=None, max_width
=0, max_height
=0 ):
694 count_downloaded
= channel
.stat(state
=db
.STATE_DOWNLOADED
)
695 count_new
= channel
.stat(state
=db
.STATE_NORMAL
, is_played
=False)
696 count_unplayed
= channel
.stat(state
=db
.STATE_DOWNLOADED
, is_played
=False)
699 model
.set(iter, 0, channel
.url
)
700 model
.set(iter, 1, channel
.title
)
702 title_markup
= saxutils
.escape(channel
.title
)
703 description_markup
= saxutils
.escape(util
.get_first_line(channel
.description
) or _('No description available'))
706 d
.append('<span weight="bold">')
707 d
.append(title_markup
)
711 description
= ''.join(d
+['\n', '<small>', description_markup
, '</small>'])
712 model
.set(iter, 2, description
)
714 if channel
.parse_error
is not None:
715 model
.set(iter, 6, channel
.parse_error
)
716 color
= color_dict
['parse_error']
718 color
= color_dict
['default']
720 if channel
.update_flag
:
721 color
= color_dict
['updating']
723 model
.set(iter, 8, color
)
725 if count_unplayed
> 0 or count_downloaded
> 0:
726 model
.set(iter, 3, draw
.draw_pill_pixbuf(str(count_unplayed
), str(count_downloaded
)))
727 model
.set(iter, 7, True)
729 model
.set(iter, 7, False)
731 # Load the cover if we have it, but don't download
732 # it if it's not available (to avoid blocking here)
733 pixbuf
= services
.cover_downloader
.get_cover(channel
, avoid_downloading
=True)
735 if pixbuf
is not None:
736 new_pixbuf
= util
.resize_pixbuf_keep_ratio(pixbuf
, max_width
, max_height
, channel
.url
, cover_cache
)
737 model
.set(iter, 5, new_pixbuf
or pixbuf
)
739 def channels_to_model(channels
, color_dict
, cover_cache
=None, max_width
=0, max_height
=0):
740 new_model
= gtk
.ListStore( str, str, str, gtk
.gdk
.Pixbuf
, int,
741 gtk
.gdk
.Pixbuf
, str, bool, str )
743 for channel
in channels
:
744 update_channel_model_by_iter( new_model
, new_model
.append(), channel
,
745 color_dict
, cover_cache
, max_width
, max_height
)
751 return db
.load_channels(lambda d
: podcastChannel
.create_from_dict(d
))
753 def update_channels(callback_proc
=None, callback_error
=None, is_cancelled_cb
=None):
754 log('Updating channels....')
756 channels
= load_channels()
759 for channel
in channels
:
760 if is_cancelled_cb
is not None and is_cancelled_cb():
762 callback_proc
and callback_proc(count
, len(channels
))
def save_channels(channels):
    """Export the channel list to the gPodder OPML file; returns the writer's result."""
    return opml.Exporter(gl.channel_opml_file).write(channels)
772 def can_restore_from_opml():
774 if len(opml
.Importer(gl
.channel_opml_file
).items
):
775 return gl
.channel_opml_file
781 class LocalDBReader( object):
783 DEPRECATED - Only used for migration to SQLite
785 def __init__( self
, url
):
def get_text(self, nodelist):
    """Concatenate the character data of all text nodes in *nodelist*."""
    chunks = [node.data for node in nodelist if node.nodeType == node.TEXT_NODE]
    return ''.join(chunks)
def get_text_by_first_node(self, element, name):
    """Text content of the first child element of *element* named *name*."""
    first = element.getElementsByTagName(name)[0]
    return self.get_text(first.childNodes)
794 def get_episode_from_element( self
, channel
, element
):
795 episode
= podcastItem( channel
)
796 episode
.title
= self
.get_text_by_first_node( element
, 'title')
797 episode
.description
= self
.get_text_by_first_node( element
, 'description')
798 episode
.url
= self
.get_text_by_first_node( element
, 'url')
799 episode
.link
= self
.get_text_by_first_node( element
, 'link')
800 episode
.guid
= self
.get_text_by_first_node( element
, 'guid')
803 for k
in ('url', 'link'):
804 if getattr(episode
, k
) is not None:
805 episode
.guid
= getattr(episode
, k
)
806 log('Notice: episode has no guid, using %s', episode
.guid
)
809 episode
.pubDate
= float(self
.get_text_by_first_node(element
, 'pubDate'))
811 log('Looks like you have an old pubDate in your LocalDB -> converting it')
812 episode
.pubDate
= self
.get_text_by_first_node(element
, 'pubDate')
813 log('FYI: pubDate value is: "%s"', episode
.pubDate
, sender
=self
)
814 pubdate
= feedparser
._parse
_date
(episode
.pubDate
)
816 log('Error converting the old pubDate - sorry!', sender
=self
)
819 log('PubDate converted successfully - yay!', sender
=self
)
820 episode
.pubDate
= time
.mktime(pubdate
)
822 episode
.mimetype
= self
.get_text_by_first_node( element
, 'mimetype')
824 log('No mimetype info for %s', episode
.url
, sender
=self
)
825 episode
.calculate_filesize()
828 def load_and_clean( self
, filename
):
830 Clean-up a LocalDB XML file that could potentially contain
831 "unbound prefix" XML elements (generated by the old print-based
832 LocalDB code). The code removes those lines to make the new
835 This should be removed in a future version.
838 for line
in open(filename
).read().split('\n'):
839 if not line
.startswith('<gpodder:info'):
842 return '\n'.join( lines
)
844 def read( self
, filename
):
845 doc
= xml
.dom
.minidom
.parseString( self
.load_and_clean( filename
))
846 rss
= doc
.getElementsByTagName('rss')[0]
848 channel_element
= rss
.getElementsByTagName('channel')[0]
850 channel
= podcastChannel( url
= self
.url
)
851 channel
.title
= self
.get_text_by_first_node( channel_element
, 'title')
852 channel
.description
= self
.get_text_by_first_node( channel_element
, 'description')
853 channel
.link
= self
.get_text_by_first_node( channel_element
, 'link')
856 for episode_element
in rss
.getElementsByTagName('item'):
857 episode
= self
.get_episode_from_element( channel
, episode_element
)
858 episodes
.append(episode
)