1 # GNU MediaGoblin -- federated, autonomous media hosting
2 # Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Affero General Public License for more details.
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division

import logging
import multiprocessing
import os
import sys

from mediagoblin.media_types.tools import discover
#os.environ['GST_DEBUG'] = '4,python:4'

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, GstPbutils
# GStreamer must be initialized before any element can be created.
Gst.init(None)

# PIL is used to write the captured thumbnail frame to disk.
try:
    from PIL import Image
except ImportError:
    import Image

_log = logging.getLogger(__name__)

# Fallback used when the platform cannot report its core count.
CPU_COUNT = 2
try:
    CPU_COUNT = multiprocessing.cpu_count()
except NotImplementedError:
    _log.warning('multiprocessing.cpu_count not implemented')

# Make GStreamer dump pipeline graphs to /tmp for debugging.
os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
def capture_thumb(video_path, dest_path, width=None, height=None, percent=0.5):
    '''
    Capture a single frame of a video and save it as a thumbnail image.

    Seeks to ``percent`` (0.0-1.0) of the stream duration, pulls one
    decoded RGB frame through an appsink and writes it to ``dest_path``
    with PIL.  ``width``/``height``, when given, constrain the scaled
    output.  All failures are logged and cause an early return; no
    exception is raised.
    '''
    def pad_added(element, pad, connect_to):
        '''This is a callback to dynamically add element to pipeline'''
        caps = pad.query_caps(None)
        name = caps.to_string()
        _log.debug('on_pad_added: {0}'.format(name))
        # Only link the (first) video stream; ignore audio/subtitle pads.
        if name.startswith('video') and not connect_to.is_linked():
            pad.link(connect_to)

    # construct pipeline: uridecodebin ! videoconvert ! videoscale ! \
    # CAPS ! appsink
    pipeline = Gst.Pipeline()
    uridecodebin = Gst.ElementFactory.make('uridecodebin', None)
    uridecodebin.set_property('uri', 'file://{0}'.format(video_path))
    videoconvert = Gst.ElementFactory.make('videoconvert', None)
    # decodebin pads appear dynamically; link them in the callback above
    uridecodebin.connect('pad-added', pad_added,
                         videoconvert.get_static_pad('sink'))
    videoscale = Gst.ElementFactory.make('videoscale', None)

    # create caps for video scaling
    caps_struct = Gst.Structure.new_empty('video/x-raw')
    caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
    caps_struct.set_value('format', 'RGB')
    if height:
        caps_struct.set_value('height', height)
    if width:
        caps_struct.set_value('width', width)
    caps = Gst.Caps.new_empty()
    caps.append_structure(caps_struct)

    # sink everything to memory
    appsink = Gst.ElementFactory.make('appsink', None)
    appsink.set_property('caps', caps)

    # add everything to pipeline
    elements = [uridecodebin, videoconvert, videoscale, appsink]
    for element in elements:
        pipeline.add(element)
    videoconvert.link(videoscale)
    videoscale.link(appsink)

    # pipeline constructed, starting playing, but first some preparations
    # seek to 50% of the file is required
    pipeline.set_state(Gst.State.PAUSED)
    # timeout of 3 seconds below was set experimentally
    state = pipeline.get_state(Gst.SECOND * 3)
    if state[0] != Gst.StateChangeReturn.SUCCESS:
        _log.warning('state change failed, {0}'.format(state))
        return

    (success, duration) = pipeline.query_duration(Gst.Format.TIME)
    if not success:
        _log.warning('query_duration failed')
        return

    seek_to = int(duration * int(percent * 100) / 100)
    _log.debug('Seeking to {0} of {1}'.format(
        float(seek_to) / Gst.SECOND, float(duration) / Gst.SECOND))
    seek = pipeline.seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH, seek_to)
    if not seek:
        _log.warning('seek failed')
        return

    # get sample, retrieve it's format and save
    sample = appsink.emit("pull-preroll")
    if not sample:
        _log.warning('could not get sample')
        return
    caps = sample.get_caps()
    if not caps:
        _log.warning('could not get snapshot format')
        return
    structure = caps.get_structure(0)
    (success, width) = structure.get_int('width')
    (success, height) = structure.get_int('height')
    buffer = sample.get_buffer()

    # get the image from the buffer and save it to disk
    im = Image.frombytes('RGB', (width, height),
                         buffer.extract_dup(0, buffer.get_size()))
    im.save(dest_path)
    _log.info('thumbnail saved to {0}'.format(dest_path))

    # cleanup: tear the pipeline down so resources are released
    pipeline.set_state(Gst.State.NULL)
class VideoTranscoder(object):
    '''
    Video transcoder

    Transcodes the SRC video file to a VP8 WebM video file at DST

     - Produces a WebM vp8 and vorbis video file.
    '''
    def __init__(self):
        _log.info('Initializing VideoTranscoder...')
        # Last reported progress percentage; used to de-duplicate updates.
        self.progress_percentage = None
        self.loop = GObject.MainLoop()

    def transcode(self, src, dst, **kwargs):
        '''
        Transcode a video file into a 'medium'-sized version.

        Blocks until the pipeline posts EOS or an error on the bus.

        :param src: path of the source video file
        :param dst: path the WebM output is written to
        :keyword dimensions: (width, height) tuple, default (640, 640)
        :keyword vp8_quality: vp8enc quality, default 8
        :keyword vp8_threads: encoder threads, default CPU_COUNT - 1
        :keyword vorbis_quality: vorbisenc quality, default 0.3
        :keyword progress_callback: callable invoked with an int percentage
        '''
        self.source_path = src
        self.destination_path = dst

        # vp8enc options
        self.destination_dimensions = kwargs.get('dimensions', (640, 640))
        self.vp8_quality = kwargs.get('vp8_quality', 8)
        # Number of threads used by vp8enc:
        # number of real cores - 1 as per recommendation on
        # <http://www.webmproject.org/tools/encoder-parameters/#6-multi-threaded-encode-and-decode>
        self.vp8_threads = kwargs.get('vp8_threads', CPU_COUNT - 1)

        # 0 means auto-detect, but dict.get() only falls back to CPU_COUNT
        # if value is None, this will correct our incompatibility with
        # dict.get().
        # This will also correct cases where there's only 1 CPU core, see
        # original self.vp8_threads assignment above.
        if self.vp8_threads == 0:
            self.vp8_threads = CPU_COUNT

        # vorbisenc options
        self.vorbis_quality = kwargs.get('vorbis_quality', 0.3)

        self._progress_callback = kwargs.get('progress_callback') or None

        if not type(self.destination_dimensions) == tuple:
            raise Exception('dimensions must be tuple: (width, height)')

        self._setup_pipeline()
        self.data = discover(self.source_path)
        self._link_elements()
        self.__setup_videoscale_capsfilter()
        self.pipeline.set_state(Gst.State.PLAYING)
        _log.info('Transcoding...')
        _log.debug('Initializing MainLoop()')
        # Block here; _on_message stops the loop on EOS or error.
        self.loop.run()

    def _setup_pipeline(self):
        '''Create all GStreamer elements and add them to the pipeline.'''
        _log.debug('Setting up transcoding pipeline')
        # Create the pipeline bin.
        self.pipeline = Gst.Pipeline.new('VideoTranscoderPipeline')

        # Create all GStreamer elements, starting with
        # filesrc & decoder
        self.filesrc = Gst.ElementFactory.make('filesrc', 'filesrc')
        self.filesrc.set_property('location', self.source_path)
        self.pipeline.add(self.filesrc)

        self.decoder = Gst.ElementFactory.make('decodebin', 'decoder')
        self.decoder.connect('pad-added', self._on_dynamic_pad)
        self.pipeline.add(self.decoder)

        # Video branch
        self.videoqueue = Gst.ElementFactory.make('queue', 'videoqueue')
        self.pipeline.add(self.videoqueue)

        self.videorate = Gst.ElementFactory.make('videorate', 'videorate')
        self.pipeline.add(self.videorate)

        self.videoconvert = Gst.ElementFactory.make('videoconvert',
                                                    'videoconvert')
        self.pipeline.add(self.videoconvert)

        self.videoscale = Gst.ElementFactory.make('videoscale', 'videoscale')
        self.pipeline.add(self.videoscale)

        self.capsfilter = Gst.ElementFactory.make('capsfilter', 'capsfilter')
        self.pipeline.add(self.capsfilter)

        self.vp8enc = Gst.ElementFactory.make('vp8enc', 'vp8enc')
        self.vp8enc.set_property('threads', self.vp8_threads)
        self.pipeline.add(self.vp8enc)

        # Audio branch
        self.audioqueue = Gst.ElementFactory.make('queue', 'audioqueue')
        self.pipeline.add(self.audioqueue)

        self.audiorate = Gst.ElementFactory.make('audiorate', 'audiorate')
        self.audiorate.set_property('tolerance', 80000000)
        self.pipeline.add(self.audiorate)

        self.audioconvert = Gst.ElementFactory.make('audioconvert', 'audioconvert')
        self.pipeline.add(self.audioconvert)
        self.audiocapsfilter = Gst.ElementFactory.make('capsfilter',
                                                       'audiocapsfilter')
        audiocaps = Gst.Caps.new_empty()
        audiocaps_struct = Gst.Structure.new_empty('audio/x-raw')
        audiocaps.append_structure(audiocaps_struct)
        self.audiocapsfilter.set_property('caps', audiocaps)
        self.pipeline.add(self.audiocapsfilter)

        self.vorbisenc = Gst.ElementFactory.make('vorbisenc', 'vorbisenc')
        self.vorbisenc.set_property('quality', self.vorbis_quality)
        self.pipeline.add(self.vorbisenc)

        # Muxer & filesink
        self.webmmux = Gst.ElementFactory.make('webmmux', 'webmmux')
        self.pipeline.add(self.webmmux)

        self.filesink = Gst.ElementFactory.make('filesink', 'filesink')
        self.filesink.set_property('location', self.destination_path)
        self.pipeline.add(self.filesink)

        # Progress reporting element
        self.progressreport = Gst.ElementFactory.make(
            'progressreport', 'progressreport')
        # Update every second
        self.progressreport.set_property('update-freq', 1)
        self.progressreport.set_property('silent', True)
        self.pipeline.add(self.progressreport)

    def _link_elements(self):
        '''
        Link all the elements

        This code depends on data from the discoverer and is called
        after discovery has run.
        '''
        _log.debug('linking elements')
        # Link the filesrc element to the decoder. The decoder then emits
        # 'new-decoded-pad' which links decoded src pads to either a video
        # or audio sink
        self.filesrc.link(self.decoder)
        # Link the static video branch
        self.videoqueue.link(self.videorate)
        self.videorate.link(self.videoconvert)
        self.videoconvert.link(self.videoscale)
        self.videoscale.link(self.capsfilter)
        self.capsfilter.link(self.vp8enc)
        self.vp8enc.link(self.webmmux)

        # Only wire the audio branch when the source actually has audio
        if self.data.get_audio_streams():
            self.audioqueue.link(self.audiorate)
            self.audiorate.link(self.audioconvert)
            self.audioconvert.link(self.audiocapsfilter)
            self.audiocapsfilter.link(self.vorbisenc)
            self.vorbisenc.link(self.webmmux)
        self.webmmux.link(self.progressreport)
        self.progressreport.link(self.filesink)

        # Setup the message bus and connect _on_message to the pipeline
        self._setup_bus()

    def _on_dynamic_pad(self, dbin, pad):
        '''
        Callback called when ``decodebin`` has a pad that we can connect to
        '''
        # Intersect the capabilities of the video sink and the pad src
        # Then check if they have no common capabilities.
        if (self.videorate.get_static_pad('sink').get_pad_template()
                .get_caps().intersect(pad.query_caps()).is_empty()):
            # It is NOT a video src pad.
            _log.debug('linking audio to the pad dynamically')
            pad.link(self.audioqueue.get_static_pad('sink'))
        else:
            # It IS a video src pad.
            _log.debug('linking video to the pad dynamically')
            pad.link(self.videoqueue.get_static_pad('sink'))

    def _setup_bus(self):
        '''Watch the pipeline bus so _on_message receives its messages.'''
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self._on_message)

    def __setup_videoscale_capsfilter(self):
        '''
        Sets up the output format (width, height) for the video
        '''
        caps_struct = Gst.Structure.new_empty('video/x-raw')
        caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
        caps_struct.set_value('framerate', Gst.Fraction(30, 1))
        video_info = self.data.get_video_streams()[0]
        # Fix only one dimension; videoscale keeps the aspect ratio.
        if video_info.get_height() > video_info.get_width():
            # portrait
            caps_struct.set_value('height', self.destination_dimensions[1])
        else:
            # landscape
            caps_struct.set_value('width', self.destination_dimensions[0])
        caps = Gst.Caps.new_empty()
        caps.append_structure(caps_struct)
        self.capsfilter.set_property('caps', caps)

    def _on_message(self, bus, message):
        '''Bus callback: handle EOS, progress and error messages.'''
        _log.debug((bus, message, message.type))
        if message.type == Gst.MessageType.EOS:
            self.dst_data = discover(self.destination_path)
            self.__stop()
            _log.info('Done')
        elif message.type == Gst.MessageType.ELEMENT:
            if message.has_name('progress'):
                structure = message.get_structure()
                # Update progress state if it has changed
                (success, percent) = structure.get_int('percent')
                if self.progress_percentage != percent and success:
                    self.progress_percentage = percent
                    if self._progress_callback:
                        self._progress_callback(percent)
                    _log.info('{percent}% done...'.format(percent=percent))
        elif message.type == Gst.MessageType.ERROR:
            _log.error('Got error: {0}'.format(message.parse_error()))
            self.__stop()

    def __stop(self):
        '''Tear down the pipeline and schedule the main loop to quit.'''
        _log.debug(self.loop)

        if hasattr(self, 'pipeline'):
            # Stop executing the pipeline
            self.pipeline.set_state(Gst.State.NULL)

        # This kills the loop, mercifully
        GObject.idle_add(self.__stop_mainloop)

    def __stop_mainloop(self):
        '''
        Wrapper for GObject.MainLoop.quit()

        This wrapper makes us able to see if self.loop.quit has been called
        '''
        _log.info('Terminating MainLoop')
        self.loop.quit()
if __name__ == '__main__':
    # Simple command-line driver for manual testing:
    #   transcoders.py -a video SRC DEST
    #   transcoders.py -a thumbnail SRC DEST
    #   transcoders.py -a discover SRC
    from optparse import OptionParser

    parser = OptionParser(
        usage='%prog [-v] -a [ video | thumbnail | discover ] SRC [ DEST ]')

    parser.add_option('-a', '--action',
                      dest='action',
                      help='One of "video", "discover" or "thumbnail"')

    parser.add_option('-v',
                      dest='verbose',
                      action='store_true',
                      help='Output debug information')

    parser.add_option('-q',
                      dest='quiet',
                      action='store_true',
                      help='Dear program, please be quiet unless *error*')

    parser.add_option('-w', '--width',
                      dest='width',
                      type='int',
                      default=180,
                      help='Thumbnail width (pixels)')

    (options, args) = parser.parse_args()

    if options.verbose:
        _log.setLevel(logging.DEBUG)
    else:
        _log.setLevel(logging.INFO)

    # -q wins over -v: errors only
    if options.quiet:
        _log.setLevel(logging.ERROR)

    _log.debug(args)

    # Every action except 'discover' needs both SRC and DEST
    if not len(args) == 2 and not options.action == 'discover':
        parser.print_help()
        sys.exit(1)

    transcoder = VideoTranscoder()

    if options.action == 'thumbnail':
        # capture_thumb(video_path, dest_path, width=...)
        args.append(options.width)
        capture_thumb(*args)
    elif options.action == 'video':
        def cb(data):
            print('I\'m a callback!')
        transcoder.transcode(*args, progress_callback=cb)
    elif options.action == 'discover':
        # discover() is the module-level helper imported above
        print(discover(*args))