virt.virt_test_utils: run_autotest - 'tar' needs relative paths to strip the leading '/'
[autotest-zwu.git] / client / common_lib / base_utils.py
blobad19e80aac62601523dfec49a1ef856012975590
2 # Copyright 2008 Google Inc. Released under the GPL v2
4 import os, pickle, random, re, resource, select, shutil, signal, StringIO
5 import socket, struct, subprocess, sys, time, textwrap, urlparse
6 import warnings, smtplib, logging, urllib2
7 from threading import Thread, Event
8 try:
9 import hashlib
10 except ImportError:
11 import md5, sha
12 from autotest_lib.client.common_lib import error, logging_manager
def deprecated(func):
    """Decorator that marks *func* as deprecated.

    Each call to the wrapped function emits a DeprecationWarning and then
    delegates to the original implementation unchanged.
    """
    def wrapper(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    # Preserve the wrapped function's identity for introspection.
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
27 class _NullStream(object):
28 def write(self, data):
29 pass
32 def flush(self):
33 pass
# Sentinel: pass as a stream argument to request teeing output into the
# logging system instead of a real file object.
TEE_TO_LOGS = object()
# Shared do-nothing stream handed out when a tee stream is disabled (None).
_the_null_stream = _NullStream()

# Default logging levels used when teeing command stdout/stderr to the logs.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '
def get_stream_tee_file(stream, level, prefix=''):
    """Map a caller-supplied tee argument to a writable stream object.

    None maps to the shared null stream, the TEE_TO_LOGS sentinel maps to a
    LoggingFile writing at *level* with *prefix*, and any other value is
    assumed to already be a stream and is returned unchanged.
    """
    if stream is None:
        return _the_null_stream
    elif stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    else:
        return stream
class BgJob(object):
    """A single shell command running in the background via subprocess,
    with stdout/stderr optionally teed to other streams or the logs."""

    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
        """Launch *command* through /bin/bash.

        @param command: shell command line to run (shell=True).
        @param stdout_tee: optional stream (or TEE_TO_LOGS) receiving stdout
                as it is produced.
        @param stderr_tee: likewise for stderr.
        @param verbose: if True, log the command line before launching.
        @param stdin: a string (fed through a pipe by the wait loop) or a
                file object / descriptor passed straight to Popen.
        @param stderr_level: logging level used when stderr is teed to logs.
        """
        self.command = command
        self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
                                              prefix=STDOUT_PREFIX)
        self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
                                              prefix=STDERR_PREFIX)
        # CmdResult is filled in incrementally by the wait loop.
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        if verbose:
            logging.debug("Running '%s'" % command)
        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,
                                   executable="/bin/bash",
                                   stdin=stdin)

    def output_prepare(self, stdout_file=None, stderr_file=None):
        # Buffers that accumulate the complete stdout/stderr for the result.
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file

    def process_output(self, stdout=True, final_read=False):
        """output_prepare must be called prior to calling this"""
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)

    def cleanup(self):
        # Flush tees and close our ends of the pipes, then capture the
        # accumulated buffers into the result object.
        self.stdout_tee.flush()
        self.stderr_tee.flush()
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()

    def _reset_sigpipe(self):
        # Restore default SIGPIPE handling in the child; Python ignores it.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    # '!L' unpacks the packed address as an unsigned long in network order.
    packed = socket.inet_aton(ip)
    return struct.unpack('!L', packed)[0]
def long_to_ip(number):
    """Inverse of ip_to_long: 32-bit integer -> dotted-quad IPv4 string."""
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
def create_subnet_mask(bits):
    """Return the integer IPv4 netmask with the top *bits* bits set
    (e.g. 24 -> 0xffffff00)."""
    all_ones = (1 << 32) - 1
    host_bits = (1 << (32 - bits)) - 1
    return all_ones - host_bits
def format_ip_with_mask(ip, mask_bits):
    """Return 'a.b.c.d/bits' with the host portion of *ip* zeroed out."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
def normalize_hostname(alias):
    """Resolve *alias* to an address, then return the canonical hostname
    registered for that address (performs DNS lookups)."""
    address = socket.gethostbyname(alias)
    hostname, _aliases, _addresses = socket.gethostbyaddr(address)
    return hostname
def get_ip_local_port_range():
    """Return the kernel's ephemeral port range as an (lower, upper) tuple,
    read from /proc/sys/net/ipv4/ip_local_port_range."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    lower, upper = match.groups()
    return (int(lower), int(upper))
def set_ip_local_port_range(lower, upper):
    """Set the kernel's ephemeral port range by writing
    /proc/sys/net/ipv4/ip_local_port_range (requires privileges)."""
    line = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', line)
def send_email(mail_from, mail_to, subject, body):
    """
    Sends an email via smtp

    mail_from: string with email address of sender
    mail_to: string or list with email address(es) of recipients
    subject: string with subject of email
    body: (multi-line) string with body of email
    """
    if isinstance(mail_to, str):
        mail_to = [mail_to]
    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
                                                   subject, body)
    try:
        # Delivery goes through the local MTA; quit() runs even on failure.
        mailer = smtplib.SMTP('localhost')
        try:
            mailer.sendmail(mail_from, mail_to, msg)
        finally:
            mailer.quit()
    except Exception, e:
        # Emails are non-critical, not errors, but don't raise them
        print "Sending email failed. Reason: %s" % repr(e)
def read_one_line(filename):
    """Return the first line of *filename* with its trailing newline removed.

    @param filename: path of the file to read.
    @return: the first line, with any trailing '\n' stripped (other trailing
            whitespace is preserved).
    """
    # Close the file explicitly: the old one-liner leaked the handle until
    # garbage collection. Mirrors the try/finally style of read_file().
    f = open(filename, 'r')
    try:
        return f.readline().rstrip('\n')
    finally:
        f.close()
def read_file(filename):
    """Return the entire contents of *filename* as a single string."""
    fobj = open(filename)
    try:
        contents = fobj.read()
    finally:
        fobj.close()
    return contents
def get_field(data, param, linestart="", sep=" "):
    """
    Parse data from string.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1   2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String to which start line with parameters.
    @param sep: Separator between parameters regular expression.
    """
    # Lookbehind anchors the match to a line beginning with `linestart`.
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    if find != None:
        return re.split("%s" % sep, find.group(1))[param]
    else:
        print "There is no line which starts with %s in data." % linestart
        return None
def write_one_line(filename, line):
    """Overwrite *filename* with *line*, normalized to exactly one trailing
    newline."""
    normalized = line.rstrip('\n') + '\n'
    open_write_close(filename, normalized)
def open_write_close(filename, data):
    """Write *data* to *filename* (truncating any existing content),
    guaranteeing the file is closed afterwards."""
    fobj = open(filename, 'w')
    try:
        fobj.write(data)
    finally:
        fobj.close()
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    """
    if type(header) is list:
        header = tuple(header)
    # Widest cell seen so far in each column.
    lengths = []
    if header:
        for column in header:
            lengths.append(len(column))
    for row in matrix:
        for i, column in enumerate(row):
            column = unicode(column).encode("utf-8")
            cl = len(column)
            try:
                ml = lengths[i]
                if cl > ml:
                    lengths[i] = cl
            except IndexError:
                # First row wider than any seen before: grow the list.
                lengths.append(cl)

    lengths = tuple(lengths)
    # One left-aligned '%-<width>s' slot per column.
    format_string = ""
    for length in lengths:
        format_string += "%-" + str(length) + "s "
    format_string += "\n"

    matrix_str = ""
    if header:
        matrix_str += format_string % header
    for row in matrix:
        matrix_str += format_string % tuple(row)

    return matrix_str
def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    if os.path.exists(path):
        for line in open(path):
            # Strip trailing comments and whitespace before validating.
            line = re.sub('#.*', '', line).rstrip()
            if not re.search(r'^[-\.\w]+=', line):
                raise ValueError('Invalid format line: %s' % line)
            key, value = line.split('=', 1)
            # Coerce purely numeric values to int, then float; else keep str.
            if re.search('^\d+$', value):
                value = int(value)
            elif re.search('^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval
def write_keyval(path, dictionary, type_tag=None, tap_report=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type-tag is not
    null then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above
    @param tap_report: optional TAP report object; if it has TAP reporting
            enabled the keyvals are also recorded there.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
    try:
        # Sorted order keeps the output deterministic across runs.
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()

    # same for tap
    if tap_report is not None and tap_report.do_tap_report:
        tap_report.record_keyval(path, dictionary, type_tag=type_tag)
class FileFieldMonitor(object):
    """
    Monitors the information from the file and reports it's values.

    It gather the information at start and stop of the measurement or
    continuously during the measurement.
    """
    class Monitor(Thread):
        """
        Internal monitor class to ensure continuous monitor of monitored file.
        """
        def __init__(self, master):
            """
            @param master: Master class which control Monitor
            """
            Thread.__init__(self)
            self.master = master

        def run(self):
            """
            Start monitor in thread mode
            """
            while not self.master.end_event.isSet():
                self.master._get_value(self.master.logging)
                time.sleep(self.master.time_step)


    def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
                 contlogging=False, separator=" +", time_step=0.1):
        """
        Initialize variables.
        @param status_file: File contain status.
        @param data_to_read: List of tuples with data position.
            format: [(start_of_line, position in params)]
            example:
              data:
                 cpu   324 345 34  5 345
                 cpu0  34  11  34 34  33
                 ^^^^
                 start of line
                 params 0   1   2  3   4
        @param mode_diff: True to subtract old value from new value,
            False make average of the values.
        @param continuously: Start the monitoring thread using the time_step
            as the measurement period.
        @param contlogging: Log data in continuous run.
        @param separator: Regular expression of separator.
        @param time_step: Time period of the monitoring value.
        """
        self.end_event = Event()
        self.start_time = 0
        self.end_time = 0
        self.test_time = 0

        self.status_file = status_file
        self.separator = separator
        self.data_to_read = data_to_read
        self.num_of_params = len(self.data_to_read)
        self.mode_diff = mode_diff
        self.continuously = continuously
        self.time_step = time_step

        self.value = [0 for i in range(self.num_of_params)]
        self.old_value = [0 for i in range(self.num_of_params)]
        self.log = []
        self.logging = contlogging

        self.started = False
        self.num_of_get_value = 0
        self.monitor = None

    def _get_value(self, logging=True):
        """
        Return current values.
        @param logging: If true log value in memory. There can be problem
          with long run.
        """
        data = read_file(self.status_file)
        value = []
        for i in range(self.num_of_params):
            value.append(int(get_field(data,
                                       self.data_to_read[i][1],
                                       self.data_to_read[i][0],
                                       self.separator)))

        if logging:
            self.log.append(value)
        if not self.mode_diff:
            # Averaging mode: accumulate a running sum; start()/stop()
            # divide by the number of samples.
            value = map(lambda x, y: x + y, value, self.old_value)

        self.old_value = value
        self.num_of_get_value += 1
        return value

    def start(self):
        """
        Start value monitor.
        """
        if self.started:
            self.stop()
        self.old_value = [0 for i in range(self.num_of_params)]
        self.num_of_get_value = 0
        self.log = []
        self.end_event.clear()
        self.start_time = time.time()
        self._get_value()
        self.started = True
        if (self.continuously):
            self.monitor = FileFieldMonitor.Monitor(self)
            self.monitor.start()

    def stop(self):
        """
        Stop value monitor.
        """
        if self.started:
            self.started = False
            self.end_time = time.time()
            self.test_time = self.end_time - self.start_time
            self.value = self._get_value()
            if (self.continuously):
                # Signal the monitor thread and wait for it to finish.
                self.end_event.set()
                self.monitor.join()
            if (self.mode_diff):
                self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
            else:
                self.value = map(lambda x: x / self.num_of_get_value,
                                 self.value)

    def get_status(self):
        """
        @return: Status of monitored process average value,
            time of test and array of monitored values and time step of
            continuous run.
        """
        if self.started:
            self.stop()
        if self.mode_diff:
            # Convert the raw sample log into per-interval deltas.
            for i in range(len(self.log) - 1):
                self.log[i] = (map(lambda x, y: x - y,
                                   self.log[i + 1], self.log[i]))
            self.log.pop()
        return (self.value, self.test_time, self.log, self.time_step)
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
def urlopen(url, data=None, timeout=5):
    """Open *url* like urllib2.urlopen, applying *timeout* via the global
    socket default timeout and restoring the previous value afterwards."""
    saved_timeout = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(timeout)
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(saved_timeout)
def urlretrieve(url, filename, data=None, timeout=300):
    """Download *url* into the local file *filename*, closing both ends
    even on failure."""
    logging.debug('Fetching %s -> %s', url, filename)

    src = urlopen(url, data=data, timeout=timeout)
    try:
        dest = open(filename, 'wb')
        try:
            shutil.copyfileobj(src, dest)
        finally:
            dest.close()
    finally:
        src.close()
def hash(type, input=None):
    """
    Returns an hash object of type md5 or sha1. This function is implemented in
    order to encapsulate hash objects in a way that is compatible with python
    2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function to behave exactly the same among both python
    implementations.

    @param input: Optional input string that will be used to update the hash.
    """
    if type not in ('md5', 'sha1'):
        raise ValueError("Unsupported hash type: %s" % type)

    try:
        digest = hashlib.new(type)
    except NameError:
        # Python 2.4 has no hashlib; fall back to the legacy modules.
        if type == 'md5':
            digest = md5.new()
        else:
            digest = sha.new()

    if input:
        digest.update(input)

    return digest
def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL"""
    if src == dest:
        # Copying a path onto itself is a no-op (returns None, as before).
        return
    fetch = urlretrieve if is_url(src) else shutil.copyfile
    fetch(src, dest)
    if permissions:
        os.chmod(dest, permissions)
    return dest
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if is_url(src):
        # Name the local copy after the last path component of the URL.
        url_parts = urlparse.urlparse(src)
        filename = os.path.basename(url_parts[2])
        dest = os.path.join(destdir, filename)
        return get_file(src, dest)
    else:
        return os.path.join(srcdir, src)
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir
    """
    # The installed version is pickled into srcdir/.version by this function.
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        old_version = pickle.load(open(versionfile))
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        # install() may legitimately not create srcdir; only then skip the
        # version stamp.
        if os.path.exists(srcdir):
            pickle.dump(new_version, open(versionfile, 'w'))
def get_stderr_level(stderr_is_expected):
    """Choose the logging level for a command's stderr: demote it to the
    stdout level when stderr output is expected, else use the stderr level."""
    return DEFAULT_STDOUT_LEVEL if stderr_is_expected else DEFAULT_STDERR_LEVEL
def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None, args=()):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a CmdResult object

    @raise CmdError: the exit code of the command execution was not 0
    """
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # Append each extra argument shell-escaped and double-quoted.
    for arg in args:
        command += ' "%s"' % sh_escape(arg)

    if stderr_is_expected is None:
        stderr_is_expected = ignore_status

    bg_job = join_bg_jobs(
        (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
               stderr_level=get_stderr_level(stderr_is_expected)),),
        timeout)[0]
    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @param commands: list of command line strings to run concurrently.
    @param timeout: overall time limit in seconds for the whole batch.
    @return: a list of CmdResult objects

    @raise CmdError: if any command exits non-zero and ignore_status is False.
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status)))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Bug fix: report the command of the job that actually failed.
            # The old code raised with the stale loop variable `command`,
            # which was always the *last* command in the list.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    # Kept only for backwards compatibility; returns the raw Popen object
    # and the (not yet populated) CmdResult.
    bg_job = BgJob(command)
    return bg_job.sp, bg_job.result
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.
    """
    ret, timeout_error = 0, False
    # Give every job in-memory buffers to accumulate its full output.
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no mater what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True,final_read=True)
            bg_job.process_output(stdout=False,final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatable, so it will
        # do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Multiplex over all jobs' pipes until they exit or *timeout* expires.

    Feeds queued string stdin in PIPE_BUF-safe chunks, drains stdout/stderr
    as it becomes readable, and records exit status and duration on each
    job's result.

    @return: True if it had to stop due to a timeout, otherwise False.
    """
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    read_list = []
    write_list = []
    # Maps each pipe back to its job (and, for output pipes, which stream).
    reverse_dict = {}

    for bg_job in bg_jobs:
        read_list.append(bg_job.sp.stdout)
        read_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin or when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated).
        read_ready, write_ready, _ = select.select(read_list, write_list, [],
                                                   SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                read_list.remove(bg_job.sp.stdout)
                read_list.remove(bg_job.sp.stderr)
                del reverse_dict[bg_job.sp.stdout]
                del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warn('run process timeout (%s) fired on: %s', timeout,
                     bg_job.command)
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    path = '/proc/%s/stat' % pid

    try:
        stat = read_one_line(path)
    except IOError:
        if not os.path.exists(path):
            # file went away
            return False
        raise

    # Third field of /proc/<pid>/stat is the state letter; 'Z' is zombie.
    return stat.split()[2] != 'Z'
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Poll for up to ~5 seconds for the process to disappear.
    for i in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive
    return False
def nuke_subprocess(subproc):
    """Terminate a subprocess.Popen object with escalating signals.

    @return: the process exit status, or None if it survived SIGKILL.
    """
    # check if the subprocess is still alive, first
    if subproc.poll() is not None:
        return subproc.poll()

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        signal_pid(subproc.pid, sig)
        if subproc.poll() is not None:
            return subproc.poll()
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill *pid* with each signal in *signal_queue* until one succeeds.

    @raise AutoservRunError: if the process survives every signal.
    """
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d' % pid, None)
def system(command, timeout=None, ignore_status=False):
    """
    Run a command

    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_stauts=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    # Output is teed to the logging system rather than captured by caller.
    return run(command, timeout=timeout, ignore_status=ignore_status,
               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status
def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    # Output of every command is teed to the logging system.
    return [bg_jobs.exit_status for bg_jobs in
            run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                         stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    if retain_output:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
                  args=args).stdout
    else:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  args=args).stdout
    # Strip a single trailing newline, if present.
    if out[-1:] == '\n':
        out = out[:-1]
    return out
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """
    Run *commands* in parallel and return a list of their stdout strings.

    @param commands: list of command line strings.
    @param timeout: overall time limit in seconds for the whole batch.
    @param ignore_status: if False, raise CmdError on any non-zero exit.
    @param retain_output: also tee the commands' output to the logging
            system.

    @return: list of stdout strings, one per command, each with a single
            trailing newline (if any) removed — matching system_output().
    """
    if retain_output:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    else:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)
    out = [bg_job.stdout for bg_job in results]
    # Bug fix: the old loop compared the list slice out[-1:] (a list) to
    # '\n', which never matched, so trailing newlines were never stripped.
    # Strip one trailing newline per command's output instead.
    out = [o[:-1] if o[-1:] == '\n' else o for o in out]
    return out
def strip_unicode(input):
    """Recursively convert unicode strings inside *input* (lists, dicts,
    scalars) into byte strings; other values pass through unchanged."""
    if type(input) == list:
        return [strip_unicode(i) for i in input]
    elif type(input) == dict:
        output = {}
        for key in input.keys():
            # Keys are also coerced to byte strings.
            output[str(key)] = strip_unicode(input[key])
        return output
    elif type(input) == unicode:
        return str(input)
    else:
        return input
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage
    # First two rusage fields are ru_utime and ru_stime.
    # NOTE(review): zip(...)[:2] relies on Python 2 returning a list.
    s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
    c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
991 class SystemLoad(object):
993 Get system and/or process values and return average value of load.
    def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
                 use_log=False):
        """
        @param pids: List of pids to be monitored. If pid = 0 whole system will
          be monitored. pid == 0 means whole system.
        @param advanced: monitor add value for system irq count and softirq
          for process minor and maior page fault
        @param time_step: Time step for continuous monitoring.
        @param cpu_cont: If True monitor CPU load continuously.
        @param use_log: If true every monitoring is logged for dump.
        """
        self.pids = []
        self.stats = {}
        for pid in pids:
            if pid == 0:
                # pid 0 selects whole-system counters from /proc/stat
                # and /proc/meminfo.
                cpu = FileFieldMonitor("/proc/stat",
                                       [("cpu", 0), # User Time
                                        ("cpu", 2), # System Time
                                        ("intr", 0), # IRQ Count
                                        ("softirq", 0)], # Soft IRQ Count
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/meminfo",
                                       [("MemTotal:", 0), # Mem Total
                                        ("MemFree:", 0), # Mem Free
                                        ("Buffers:", 0), # Buffers
                                        ("Cached:", 0)], # Cached
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[pid] = ["TOTAL", cpu, mem]
                self.pids.append(pid)
            else:
                name = ""
                if (type(pid) is int):
                    self.pids.append(pid)
                    name = get_process_name(pid)
                else:
                    # Caller supplied a (pid, name) pair.
                    self.pids.append(pid[0])
                    name = pid[1]

                cpu = FileFieldMonitor("/proc/%d/stat" %
                                       self.pids[-1],
                                       [("", 13), # User Time
                                        ("", 14), # System Time
                                        ("", 9), # Minority Page Fault
                                        ("", 11)], # Majority Page Fault
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/%d/status" %
                                       self.pids[-1],
                                       [("VmSize:", 0), # Virtual Memory Size
                                        ("VmRSS:", 0), # Resident Set Size
                                        ("VmPeak:", 0), # Peak VM Size
                                        ("VmSwap:", 0)], # VM in Swap
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[self.pids[-1]] = [name, cpu, mem]

        self.advanced = advanced
    def __str__(self):
        """
        Define format how to print
        """
        # One line per monitor (cpu, mem) per pid.
        out = ""
        for pid in self.pids:
            for stat in self.stats[pid][1:]:
                out += str(stat.get_status()) + "\n"
        return out
    def start(self, pids=[]):
        """
        Start monitoring of the process system usage.
        @param pids: List of PIDs you intend to control. Use pids=[] to control
            all defined PIDs.
        """
        # NOTE(review): mutable default argument; harmless here since pids
        # is only read, never mutated.
        if pids == []:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.start()
    def stop(self, pids=[]):
        """
        Stop monitoring of the process system usage.
        @param pids: List of PIDs you intend to control. Use pids=[] to control
            all defined PIDs.
        """
        if pids == []:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.stop()
1107 def dump(self, pids=[]):
1109 Get the status of monitoring.
1110 @param pids: List of PIDs you intend to control. Use pids=[] to control
1111 all defined PIDs.
1112 @return:
1113 tuple([cpu load], [memory load]):
1114 ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
1115 [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])
1117 PID1_cpu_meas:
1118 average_values[], test_time, cont_meas_values[[]], time_step
1119 PID1_mem_meas:
1120 average_values[], test_time, cont_meas_values[[]], time_step
1121 where average_values[] are the measured values (mem_free,swap,...)
1122 which are described in SystemLoad.__init__()-FileFieldMonitor.
1123 cont_meas_values[[]] is a list of average_values in the sampling
1124 times.
1126 if pids == []:
1127 pids = self.pids
1129 cpus = []
1130 memory = []
1131 for pid in pids:
1132 stat = (pid, self.stats[pid][1].get_status())
1133 cpus.append(stat)
1134 for pid in pids:
1135 stat = (pid, self.stats[pid][2].get_status())
1136 memory.append(stat)
1138 return (cpus, memory)
1141 def get_cpu_status_string(self, pids=[]):
1143 Convert status to string array.
1144 @param pids: List of PIDs you intend to control. Use pids=[] to control
1145 all defined PIDs.
1146 @return: String format to table.
1148 if pids == []:
1149 pids = self.pids
1151 headers = ["NAME",
1152 ("%7s") % "PID",
1153 ("%5s") % "USER",
1154 ("%5s") % "SYS",
1155 ("%5s") % "SUM"]
1156 if self.advanced:
1157 headers.extend(["MINFLT/IRQC",
1158 "MAJFLT/SOFTIRQ"])
1159 headers.append(("%11s") % "TIME")
1160 textstatus = []
1161 for pid in pids:
1162 stat = self.stats[pid][1].get_status()
1163 time = stat[1]
1164 stat = stat[0]
1165 textstatus.append(["%s" % self.stats[pid][0],
1166 "%7s" % pid,
1167 "%4.0f%%" % (stat[0] / time),
1168 "%4.0f%%" % (stat[1] / time),
1169 "%4.0f%%" % ((stat[0] + stat[1]) / time),
1170 "%10.3fs" % time])
1171 if self.advanced:
1172 textstatus[-1].insert(-1, "%11d" % stat[2])
1173 textstatus[-1].insert(-1, "%14d" % stat[3])
1175 return matrix_to_string(textstatus, tuple(headers))
1178 def get_mem_status_string(self, pids=[]):
1180 Convert status to string array.
1181 @param pids: List of PIDs you intend to control. Use pids=[] to control
1182 all defined PIDs.
1183 @return: String format to table.
1185 if pids == []:
1186 pids = self.pids
1188 headers = ["NAME",
1189 ("%7s") % "PID",
1190 ("%8s") % "TOTAL/VMSIZE",
1191 ("%8s") % "FREE/VMRSS",
1192 ("%8s") % "BUFFERS/VMPEAK",
1193 ("%8s") % "CACHED/VMSWAP",
1194 ("%11s") % "TIME"]
1195 textstatus = []
1196 for pid in pids:
1197 stat = self.stats[pid][2].get_status()
1198 time = stat[1]
1199 stat = stat[0]
1200 textstatus.append(["%s" % self.stats[pid][0],
1201 "%7s" % pid,
1202 "%10dMB" % (stat[0] / 1024),
1203 "%8dMB" % (stat[1] / 1024),
1204 "%12dMB" % (stat[2] / 1024),
1205 "%11dMB" % (stat[3] / 1024),
1206 "%10.3fs" % time])
1208 return matrix_to_string(textstatus, tuple(headers))
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.

    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    result = run_function('/bin/uname -m')
    arch = result.stdout.rstrip()
    # Collapse i386/i486/i586/i686 into the generic 'i386'.
    if re.match(r'i\d86$', arch):
        return 'i386'
    return arch
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.

    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    counts = [int(value) for value in
              re.findall(r'^siblings\s*:\s*(\d+)\s*$', siblings, re.M)]
    if not counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # Every socket is expected to report the same sibling count.
    if min(counts) != max(counts):
        raise error.TestError('Number of siblings differ %r' %
                              counts)
    return counts[0]
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        # Present only in dest; nothing to merge.
        return

    if not os.path.exists(dest):
        # Present only in src; plain copy preserving metadata/symlinks.
        if os.path.isfile(src):
            shutil.copy2(src, dest)
        else:
            shutil.copytree(src, dest, symlinks=True)
    elif os.path.isfile(src) and os.path.isfile(dest):
        # Files in both trees: append src's content to dest.
        srcfile = open(src)
        try:
            contents = srcfile.read()
        finally:
            srcfile.close()
        destfile = open(dest, "a")
        try:
            destfile.write(contents)
        finally:
            destfile.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # Directories in both trees: merge entry by entry.
        for entry in os.listdir(src):
            merge_trees(os.path.join(src, entry), os.path.join(dest, entry))
    else:
        # Both exist but are of incompatible kinds; skip.
        return
class CmdResult(object):
    """
    Command execution result.

    command: String containing the command line itself
    exit_status: Integer exit code of the process
    stdout: String containing stdout of the process
    stderr: String containing stderr of the process
    duration: Elapsed wall clock time running the process
    """

    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        """Multi-line human-readable summary; empty streams are omitted."""
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        sections = []
        for label, text in (("stdout", self.stdout),
                            ("stderr", self.stderr)):
            text = text.rstrip()
            sections.append("\n%s:\n%s" % (label, text) if text else "")

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(self.command), self.exit_status,
                   self.duration, sections[0], sections[1]))
class run_randomly:
    """Queue test invocations and run them in random (or FIFO) order."""

    def __init__(self, run_sequentially=False):
        # Sequential mode exists for debugging control files.
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        """Queue one call's positional and keyword arguments."""
        self.test_list.append((args, dargs))


    def run(self, fn):
        """Drain the queue, invoking fn(*args, **dargs) on each entry."""
        while self.test_list:
            if self.run_sequentially:
                index = 0
            else:
                index = random.randint(0, len(self.test_list) - 1)
            args, dargs = self.test_list.pop(index)
            fn(*args, **dargs)
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module[module.rfind(".") + 1:]
    if not modulefile:
        modulefile = short_module + ".py"

    site_path = os.path.join(os.path.dirname(path), modulefile)
    if not os.path.exists(site_path):
        return dummy
    return __import__(module, {}, {}, [short_module])
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    site_module = import_site_module(path, module, modulefile=modulefile)
    if not site_module:
        return dummy

    # Unique sentinel distinguishes "attribute missing" from any real value.
    missing = object()
    symbol = getattr(site_module, name, missing)
    if symbol is missing:
        logging.debug("unable to import site symbol '%s', using non-site "
                      "implementation", name)
        return dummy
    return symbol
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # Not derived from baseclass: mix baseclass in and return the result.
    return type(classname, (site_class, baseclass), {})
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # Functions are just symbols; delegate entirely.
    return import_site_symbol(path, module, funcname, dummy,
                              modulefile=modulefile)
1450 def _get_pid_path(program_name):
1451 my_path = os.path.dirname(__file__)
1452 return os.path.abspath(os.path.join(my_path, "..", "..",
1453 "%s.pid" % program_name))
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
        program_name: prefix for file name
    """
    pid_path = _get_pid_path(program_name)
    pidfile = open(pid_path, "w")
    try:
        # Record our own pid, newline-terminated.
        pidfile.write("%s\n" % os.getpid())
    finally:
        pidfile.close()
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pid_path = _get_pid_path(program_name)
    try:
        os.remove(pid_path)
    except OSError:
        # A file that is already gone is fine; anything else is a real error.
        if os.path.exists(pid_path):
            raise
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    # Reuse the path computed above instead of recomputing it.
    pidfile = open(pidfile_path, 'r')

    try:
        try:
            pid = int(pidfile.readline())
        except IOError:
            # The file may have vanished between the exists() check and the
            # read; treat that exactly like "no pid file".
            if not os.path.exists(pidfile_path):
                return None
            raise
    finally:
        pidfile.close()

    return pid
def get_process_name(pid):
    """
    Get process name from PID.

    @param pid: PID of process.
    """
    # Field 1 of /proc/<pid>/stat is "(comm)"; strip the parentheses.
    stat_contents = read_file("/proc/%d/stat" % pid)
    return get_field(stat_contents, 1)[1:-1]
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    # No pid file means not running; otherwise defer to the pid check.
    return pid is not None and pid_is_alive(pid)
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    target_pid = get_pid_from_file(program_name)
    # Truthiness check deliberately skips both None and pid 0.
    if target_pid:
        signal_pid(target_pid, sig)
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    # normalize the paths (remove double slashes, etc)
    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # split into components, dropping the empty piece before the leading "/"
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # count the longest run of leading components the two paths share
    common = 0
    for path_part, ref_part in zip(path_list, ref_list):
        if path_part != ref_part:
            break
        common += 1

    # climb out of the unshared reference directories, then descend into
    # the unshared part of 'path'
    rel_list = ['..'] * (len(ref_list) - common) + path_list[common:]
    return os.path.join(*rel_list)
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
        command: the command string to escape.

    Returns:
        The escaped command string. The required englobing double
        quotes are NOT added and so should be added at some point by
        the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash goes first so it does not re-escape the escapes added below.
    command = command.replace("\\", "\\\\")
    for char in ('$', '"', '`'):
        command = command.replace(char, '\\' + char)
    return command
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    # Map Gentoo-style environment variables onto configure options.
    for env_var, option in (('CHOST', '--host='),
                            ('CBUILD', '--build='),
                            ('CTARGET', '--target=')):
        if env_var in os.environ:
            args.append(option + os.environ[env_var])
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    """
    makeopts = os.environ.get('MAKEOPTS', '')
    cmd = '%s %s %s' % (make, makeopts, extra)
    return system(cmd, timeout=timeout, ignore_status=ignore_status)
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int: 1 if ver1 > ver2
             0 if ver1 == ver2
            -1 if ver1 < ver2
    """
    def _cmp(x, y):
        # Portable replacement for the Python 2-only cmp() builtin.
        return (x > y) - (x < y)

    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while len(ax) > 0 and len(ay) > 0:
        cx = ax.pop(0)
        cy = ay.pop(0)
        # Zero-fill to equal width so numeric components compare correctly
        # as strings ("2" < "10" after filling to "02" < "10").
        maxlen = max(len(cx), len(cy))
        c = _cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # All shared components equal: the longer version string is greater.
    return _cmp(len(ax), len(ay))
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary. Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    arg_re = re.compile(r'(\w+)[:=](.*)$')
    # Named args_dict so the 'dict' builtin isn't shadowed.
    args_dict = {}
    for arg in args:
        match = arg_re.match(arg)
        if match:
            args_dict[match.group(1).lower()] = match.group(2)
        else:
            # Lazy %-style args: the message is only formatted if emitted.
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", arg, arg_re.pattern)
    return args_dict
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def try_bind(port, socket_type, socket_proto):
        # Returns the bound port number on success, None if it is busy.
        sock = socket.socket(socket.AF_INET, socket_type, socket_proto)
        try:
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                sock.bind(('', port))
                return sock.getsockname()[1]
            except socket.error:
                return None
        finally:
            sock.close()

    # On the 2.6 kernel, calling try_bind() on UDP socket returns the
    # same port over and over. So always try TCP first.
    port = None
    while not port:
        # Ask the OS for an unused TCP port, then verify it on UDP too.
        candidate = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        if candidate and try_bind(candidate, socket.SOCK_DGRAM,
                                  socket.IPPROTO_UDP):
            port = candidate
    return port
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        # Log the question with an implied "y" answer and skip the prompt.
        logging.info("%s (y/n) y" % question)
        return "y"
    timestamp = time.strftime("%H:%M:%S", time.localtime())
    return raw_input("%s INFO | %s (y/n) " % (timestamp, question))