2 # Copyright 2008 Google Inc. Released under the GPL v2
4 import os
, pickle
, random
, re
, resource
, select
, shutil
, signal
, StringIO
5 import socket
, struct
, subprocess
, sys
, time
, textwrap
, urlparse
6 import warnings
, smtplib
, logging
, urllib2
7 from threading
import Thread
, Event
12 from autotest_lib
.client
.common_lib
import error
, logging_manager
15 """This is a decorator which can be used to mark functions as deprecated.
16 It will result in a warning being emmitted when the function is used."""
17 def new_func(*args
, **dargs
):
18 warnings
.warn("Call to deprecated function %s." % func
.__name
__,
19 category
=DeprecationWarning)
20 return func(*args
, **dargs
)
21 new_func
.__name
__ = func
.__name
__
22 new_func
.__doc
__ = func
.__doc
__
23 new_func
.__dict
__.update(func
.__dict
__)
27 class _NullStream(object):
28 def write(self
, data
):
# Sentinel value: pass as stdout_tee/stderr_tee to send command output to the
# logging system instead of a caller-supplied stream.
TEE_TO_LOGS = object()
# Shared module-wide _NullStream instance handed out by get_stream_tee_file()
# when no tee stream is requested.
_the_null_stream = _NullStream()

# Default logging levels used when teeing command stdout/stderr to the logs.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '
47 def get_stream_tee_file(stream
, level
, prefix
=''):
49 return _the_null_stream
50 if stream
is TEE_TO_LOGS
:
51 return logging_manager
.LoggingFile(level
=level
, prefix
=prefix
)
56 def __init__(self
, command
, stdout_tee
=None, stderr_tee
=None, verbose
=True,
57 stdin
=None, stderr_level
=DEFAULT_STDERR_LEVEL
):
58 self
.command
= command
59 self
.stdout_tee
= get_stream_tee_file(stdout_tee
, DEFAULT_STDOUT_LEVEL
,
61 self
.stderr_tee
= get_stream_tee_file(stderr_tee
, stderr_level
,
63 self
.result
= CmdResult(command
)
65 # allow for easy stdin input by string, we'll let subprocess create
66 # a pipe for stdin input and we'll write to it in the wait loop
67 if isinstance(stdin
, basestring
):
68 self
.string_stdin
= stdin
69 stdin
= subprocess
.PIPE
71 self
.string_stdin
= None
74 logging
.debug("Running '%s'" % command
)
75 self
.sp
= subprocess
.Popen(command
, stdout
=subprocess
.PIPE
,
76 stderr
=subprocess
.PIPE
,
77 preexec_fn
=self
._reset
_sigpipe
, shell
=True,
78 executable
="/bin/bash",
def output_prepare(self, stdout_file=None, stderr_file=None):
    """Record the file-like sinks that process_output() will write into.

    Must be called before process_output().
    """
    self.stdout_file, self.stderr_file = stdout_file, stderr_file
87 def process_output(self
, stdout
=True, final_read
=False):
88 """output_prepare must be called prior to calling this"""
90 pipe
, buf
, tee
= self
.sp
.stdout
, self
.stdout_file
, self
.stdout_tee
92 pipe
, buf
, tee
= self
.sp
.stderr
, self
.stderr_file
, self
.stderr_tee
95 # read in all the data we can from pipe and then stop
97 while select
.select([pipe
], [], [], 0)[0]:
98 data
.append(os
.read(pipe
.fileno(), 1024))
99 if len(data
[-1]) == 0:
103 # perform a single read
104 data
= os
.read(pipe
.fileno(), 1024)
110 self
.stdout_tee
.flush()
111 self
.stderr_tee
.flush()
112 self
.sp
.stdout
.close()
113 self
.sp
.stderr
.close()
114 self
.result
.stdout
= self
.stdout_file
.getvalue()
115 self
.result
.stderr
= self
.stderr_file
.getvalue()
def _reset_sigpipe(self):
    # Child-process hook (installed as Popen's preexec_fn above): restore the
    # default SIGPIPE disposition so the spawned shell command terminates
    # normally on a broken pipe instead of inheriting Python's handler.
    default_action = signal.SIG_DFL
    signal.signal(signal.SIGPIPE, default_action)
123 # !L is a long in network byte order
124 return struct
.unpack('!L', socket
.inet_aton(ip
))[0]
def long_to_ip(number):
    """Convert a 32-bit unsigned integer into its dotted-quad IPv4 string."""
    packed = struct.pack('!L', number)  # !L = unsigned long, network byte order
    return socket.inet_ntoa(packed)
def create_subnet_mask(bits):
    """Return the 32-bit integer netmask for a network prefix of *bits* bits."""
    host_bits = 32 - bits
    # All 32 bits set, minus the low host_bits host portion.
    return ((1 << 32) - 1) & ~((1 << host_bits) - 1)
def format_ip_with_mask(ip, mask_bits):
    """Return *ip* masked down to its network address, in "a.b.c.d/bits" form."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
def normalize_hostname(alias):
    """Resolve *alias* to its canonical hostname via forward then reverse DNS."""
    address = socket.gethostbyname(alias)
    canonical_name, _aliases, _addresses = socket.gethostbyaddr(address)
    return canonical_name
def get_ip_local_port_range():
    """Return the kernel's ephemeral port range as an (int low, int high) tuple."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    low, high = match.groups()
    return (int(low), int(high))
def set_ip_local_port_range(lower, upper):
    """Set the kernel's ephemeral port range to [lower, upper]."""
    formatted = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', formatted)
158 def send_email(mail_from
, mail_to
, subject
, body
):
160 Sends an email via smtp
162 mail_from: string with email address of sender
163 mail_to: string or list with email address(es) of recipients
164 subject: string with subject of email
165 body: (multi-line) string with body of email
167 if isinstance(mail_to
, str):
169 msg
= "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from
, ','.join(mail_to
),
172 mailer
= smtplib
.SMTP('localhost')
174 mailer
.sendmail(mail_from
, mail_to
, msg
)
178 # Emails are non-critical, not errors, but don't raise them
179 print "Sending email failed. Reason: %s" % repr(e
)
def read_one_line(filename):
    """Return the first line of *filename*, without its trailing newline.

    @param filename: path of the file to read.
    @return: first line as a string, '\n'-stripped.

    Fix: the original opened the file without closing it, leaking the file
    handle until garbage collection; a context manager closes it promptly.
    """
    with open(filename, 'r') as f:
        return f.readline().rstrip('\n')
186 def read_file(filename
):
194 def get_field(data
, param
, linestart
="", sep
=" "):
196 Parse data from string.
197 @param data: Data to parse.
205 @param param: Position of parameter after linestart marker.
206 @param linestart: String to which start line with parameters.
207 @param sep: Separator between parameters regular expression.
209 search
= re
.compile(r
"(?<=^%s)\s*(.*)" % linestart
, re
.MULTILINE
)
210 find
= search
.search(data
)
212 return re
.split("%s" % sep
, find
.group(1))[param
]
214 print "There is no line which starts with %s in data." % linestart
def write_one_line(filename, line):
    """Overwrite *filename* with *line*, normalized to one trailing newline."""
    normalized = line.rstrip('\n') + '\n'
    open_write_close(filename, normalized)
222 def open_write_close(filename
, data
):
223 f
= open(filename
, 'w')
230 def matrix_to_string(matrix
, header
=None):
232 Return a pretty, aligned string representation of a nxm matrix.
234 This representation can be used to print any tabular data, such as
235 database results. It works by scanning the lengths of each element
236 in each column, and determining the format string dynamically.
238 @param matrix: Matrix representation (list with n rows of m elements).
239 @param header: Optional tuple or list with header elements to be displayed.
241 if type(header
) is list:
242 header
= tuple(header
)
245 for column
in header
:
246 lengths
.append(len(column
))
248 for i
, column
in enumerate(row
):
249 column
= unicode(column
).encode("utf-8")
258 lengths
= tuple(lengths
)
260 for length
in lengths
:
261 format_string
+= "%-" + str(length
) + "s "
262 format_string
+= "\n"
266 matrix_str
+= format_string
% header
268 matrix_str
+= format_string
% tuple(row
)
273 def read_keyval(path
):
275 Read a key-value pair format file into a dictionary, and return it.
276 Takes either a filename or directory name as input. If it's a
277 directory name, we assume you want the file to be called keyval.
279 if os
.path
.isdir(path
):
280 path
= os
.path
.join(path
, 'keyval')
282 if os
.path
.exists(path
):
283 for line
in open(path
):
284 line
= re
.sub('#.*', '', line
).rstrip()
285 if not re
.search(r
'^[-\.\w]+=', line
):
286 raise ValueError('Invalid format line: %s' % line
)
287 key
, value
= line
.split('=', 1)
288 if re
.search('^\d+$', value
):
290 elif re
.search('^(\d+\.)?\d+$', value
):
296 def write_keyval(path
, dictionary
, type_tag
=None, tap_report
=None):
298 Write a key-value pair format file out to a file. This uses append
299 mode to open the file, so existing text will not be overwritten or
302 If type_tag is None, then the key must be composed of alphanumeric
303 characters (or dashes+underscores). However, if type-tag is not
304 null then the keys must also have "{type_tag}" as a suffix. At
305 the moment the only valid values of type_tag are "attr" and "perf".
307 @param path: full path of the file to be written
308 @param dictionary: the items to write
309 @param type_tag: see text above
311 if os
.path
.isdir(path
):
312 path
= os
.path
.join(path
, 'keyval')
313 keyval
= open(path
, 'a')
316 key_regex
= re
.compile(r
'^[-\.\w]+$')
318 if type_tag
not in ('attr', 'perf'):
319 raise ValueError('Invalid type tag: %s' % type_tag
)
320 escaped_tag
= re
.escape(type_tag
)
321 key_regex
= re
.compile(r
'^[-\.\w]+\{%s\}$' % escaped_tag
)
323 for key
in sorted(dictionary
.keys()):
324 if not key_regex
.search(key
):
325 raise ValueError('Invalid key: %s' % key
)
326 keyval
.write('%s=%s\n' % (key
, dictionary
[key
]))
331 if tap_report
is not None and tap_report
.do_tap_report
:
332 tap_report
.record_keyval(path
, dictionary
, type_tag
=type_tag
)
334 class FileFieldMonitor(object):
336 Monitors the information from the file and reports it's values.
338 It gather the information at start and stop of the measurement or
339 continuously during the measurement.
341 class Monitor(Thread
):
343 Internal monitor class to ensure continuous monitor of monitored file.
345 def __init__(self
, master
):
347 @param master: Master class which control Monitor
349 Thread
.__init
__(self
)
354 Start monitor in thread mode
356 while not self
.master
.end_event
.isSet():
357 self
.master
._get
_value
(self
.master
.logging
)
358 time
.sleep(self
.master
.time_step
)
361 def __init__(self
, status_file
, data_to_read
, mode_diff
, continuously
=False,
362 contlogging
=False, separator
=" +", time_step
=0.1):
364 Initialize variables.
365 @param status_file: File contain status.
366 @param mode_diff: If True make a difference of value, else average.
367 @param data_to_read: List of tuples with data position.
368 format: [(start_of_line,position in params)]
376 @param mode_diff: True to subtract old value from new value,
377 False make average of the values.
378 @parma continuously: Start the monitoring thread using the time_step
379 as the measurement period.
380 @param contlogging: Log data in continuous run.
381 @param separator: Regular expression of separator.
382 @param time_step: Time period of the monitoring value.
384 self
.end_event
= Event()
389 self
.status_file
= status_file
390 self
.separator
= separator
391 self
.data_to_read
= data_to_read
392 self
.num_of_params
= len(self
.data_to_read
)
393 self
.mode_diff
= mode_diff
394 self
.continuously
= continuously
395 self
.time_step
= time_step
397 self
.value
= [0 for i
in range(self
.num_of_params
)]
398 self
.old_value
= [0 for i
in range(self
.num_of_params
)]
400 self
.logging
= contlogging
403 self
.num_of_get_value
= 0
407 def _get_value(self
, logging
=True):
409 Return current values.
410 @param logging: If true log value in memory. There can be problem
413 data
= read_file(self
.status_file
)
415 for i
in range(self
.num_of_params
):
416 value
.append(int(get_field(data
,
417 self
.data_to_read
[i
][1],
418 self
.data_to_read
[i
][0],
422 self
.log
.append(value
)
423 if not self
.mode_diff
:
424 value
= map(lambda x
, y
: x
+ y
, value
, self
.old_value
)
426 self
.old_value
= value
427 self
.num_of_get_value
+= 1
437 self
.old_value
= [0 for i
in range(self
.num_of_params
)]
438 self
.num_of_get_value
= 0
440 self
.end_event
.clear()
441 self
.start_time
= time
.time()
444 if (self
.continuously
):
445 self
.monitor
= FileFieldMonitor
.Monitor(self
)
455 self
.end_time
= time
.time()
456 self
.test_time
= self
.end_time
- self
.start_time
457 self
.value
= self
._get
_value
()
458 if (self
.continuously
):
462 self
.value
= map(lambda x
, y
: x
- y
, self
.log
[-1], self
.log
[0])
464 self
.value
= map(lambda x
: x
/ self
.num_of_get_value
,
468 def get_status(self
):
470 @return: Status of monitored process average value,
471 time of test and array of monitored values and time step of
477 for i
in range(len(self
.log
) - 1):
478 self
.log
[i
] = (map(lambda x
, y
: x
- y
,
479 self
.log
[i
+ 1], self
.log
[i
]))
481 return (self
.value
, self
.test_time
, self
.log
, self
.time_step
)
485 """Return true if path looks like a URL"""
486 # for now, just handle http and ftp
487 url_parts
= urlparse
.urlparse(path
)
488 return (url_parts
[0] in ('http', 'ftp'))
491 def urlopen(url
, data
=None, timeout
=5):
492 """Wrapper to urllib2.urlopen with timeout addition."""
495 old_timeout
= socket
.getdefaulttimeout()
496 socket
.setdefaulttimeout(timeout
)
498 return urllib2
.urlopen(url
, data
=data
)
500 socket
.setdefaulttimeout(old_timeout
)
503 def urlretrieve(url
, filename
, data
=None, timeout
=300):
504 """Retrieve a file from given url."""
505 logging
.debug('Fetching %s -> %s', url
, filename
)
507 src_file
= urlopen(url
, data
=data
, timeout
=timeout
)
509 dest_file
= open(filename
, 'wb')
511 shutil
.copyfileobj(src_file
, dest_file
)
518 def hash(type, input=None):
520 Returns an hash object of type md5 or sha1. This function is implemented in
521 order to encapsulate hash objects in a way that is compatible with python
522 2.4 and python 2.6 without warnings.
524 Note that even though python 2.6 hashlib supports hash types other than
525 md5 and sha1, we are artificially limiting the input values in order to
526 make the function to behave exactly the same among both python
529 @param input: Optional input string that will be used to update the hash.
531 if type not in ['md5', 'sha1']:
532 raise ValueError("Unsupported hash type: %s" % type)
535 hash = hashlib
.new(type)
548 def get_file(src
, dest
, permissions
=None):
549 """Get a file from src, which can be local or a remote URL"""
554 urlretrieve(src
, dest
)
556 shutil
.copyfile(src
, dest
)
559 os
.chmod(dest
, permissions
)
563 def unmap_url(srcdir
, src
, destdir
='.'):
565 Receives either a path to a local file or a URL.
566 returns either the path to the local file, or the fetched URL
568 unmap_url('/usr/src', 'foo.tar', '/tmp')
570 unmap_url('/usr/src', 'http://site/file', '/tmp')
572 (after retrieving it)
575 url_parts
= urlparse
.urlparse(src
)
576 filename
= os
.path
.basename(url_parts
[2])
577 dest
= os
.path
.join(destdir
, filename
)
578 return get_file(src
, dest
)
580 return os
.path
.join(srcdir
, src
)
583 def update_version(srcdir
, preserve_srcdir
, new_version
, install
,
586 Make sure srcdir is version new_version
588 If not, delete it and install() the new version.
590 In the preserve_srcdir case, we just check it's up to date,
591 and if not, we rerun install, without removing srcdir
593 versionfile
= os
.path
.join(srcdir
, '.version')
594 install_needed
= True
596 if os
.path
.exists(versionfile
):
597 old_version
= pickle
.load(open(versionfile
))
598 if old_version
== new_version
:
599 install_needed
= False
602 if not preserve_srcdir
and os
.path
.exists(srcdir
):
603 shutil
.rmtree(srcdir
)
604 install(*args
, **dargs
)
605 if os
.path
.exists(srcdir
):
606 pickle
.dump(new_version
, open(versionfile
, 'w'))
def get_stderr_level(stderr_is_expected):
    """Pick the log level for a command's stderr.

    Expected stderr is logged at the (quieter) stdout level; unexpected
    stderr keeps the louder default stderr level.
    """
    return DEFAULT_STDOUT_LEVEL if stderr_is_expected else DEFAULT_STDERR_LEVEL
615 def run(command
, timeout
=None, ignore_status
=False,
616 stdout_tee
=None, stderr_tee
=None, verbose
=True, stdin
=None,
617 stderr_is_expected
=None, args
=()):
619 Run a command on the host.
621 @param command: the command line string.
622 @param timeout: time limit in seconds before attempting to kill the
623 running process. The run() function will take a few seconds
624 longer than 'timeout' to complete if it has to kill the process.
625 @param ignore_status: do not raise an exception, no matter what the exit
626 code of the command is.
627 @param stdout_tee: optional file-like object to which stdout data
628 will be written as it is generated (data will still be stored
630 @param stderr_tee: likewise for stderr.
631 @param verbose: if True, log the command being run.
632 @param stdin: stdin to pass to the executed process (can be a file
633 descriptor, a file object of a real file or a string).
634 @param args: sequence of strings of arguments to be given to the command
635 inside " quotes after they have been escaped for that; each
636 element in the sequence will be given as a separate command
639 @return a CmdResult object
641 @raise CmdError: the exit code of the command execution was not 0
643 if isinstance(args
, basestring
):
644 raise TypeError('Got a string for the "args" keyword argument, '
648 command
+= ' "%s"' % sh_escape(arg
)
649 if stderr_is_expected
is None:
650 stderr_is_expected
= ignore_status
652 bg_job
= join_bg_jobs(
653 (BgJob(command
, stdout_tee
, stderr_tee
, verbose
, stdin
=stdin
,
654 stderr_level
=get_stderr_level(stderr_is_expected
)),),
656 if not ignore_status
and bg_job
.result
.exit_status
:
657 raise error
.CmdError(command
, bg_job
.result
,
658 "Command returned non-zero exit status")
663 def run_parallel(commands
, timeout
=None, ignore_status
=False,
664 stdout_tee
=None, stderr_tee
=None):
666 Behaves the same as run() with the following exceptions:
668 - commands is a list of commands to run in parallel.
669 - ignore_status toggles whether or not an exception should be raised
672 @return: a list of CmdResult objects
675 for command
in commands
:
676 bg_jobs
.append(BgJob(command
, stdout_tee
, stderr_tee
,
677 stderr_level
=get_stderr_level(ignore_status
)))
679 # Updates objects in bg_jobs list with their process information
680 join_bg_jobs(bg_jobs
, timeout
)
682 for bg_job
in bg_jobs
:
683 if not ignore_status
and bg_job
.result
.exit_status
:
684 raise error
.CmdError(command
, bg_job
.result
,
685 "Command returned non-zero exit status")
687 return [bg_job
.result
for bg_job
in bg_jobs
]
692 """Function deprecated. Please use BgJob class instead."""
693 bg_job
= BgJob(command
)
694 return bg_job
.sp
, bg_job
.result
697 def join_bg_jobs(bg_jobs
, timeout
=None):
698 """Joins the bg_jobs with the current thread.
700 Returns the same list of bg_jobs objects that was passed in.
702 ret
, timeout_error
= 0, False
703 for bg_job
in bg_jobs
:
704 bg_job
.output_prepare(StringIO
.StringIO(), StringIO
.StringIO())
707 # We are holding ends to stdin, stdout pipes
708 # hence we need to be sure to close those fds no mater what
709 start_time
= time
.time()
710 timeout_error
= _wait_for_commands(bg_jobs
, start_time
, timeout
)
712 for bg_job
in bg_jobs
:
713 # Process stdout and stderr
714 bg_job
.process_output(stdout
=True,final_read
=True)
715 bg_job
.process_output(stdout
=False,final_read
=True)
717 # close our ends of the pipes to the sp no matter what
718 for bg_job
in bg_jobs
:
722 # TODO: This needs to be fixed to better represent what happens when
723 # running in parallel. However this is backwards compatable, so it will
724 # do for the time being.
725 raise error
.CmdError(bg_jobs
[0].command
, bg_jobs
[0].result
,
726 "Command(s) did not complete within %d seconds"
733 def _wait_for_commands(bg_jobs
, start_time
, timeout
):
734 # This returns True if it must return due to a timeout, otherwise False.
736 # To check for processes which terminate without producing any output
737 # a 1 second timeout is used in select.
744 for bg_job
in bg_jobs
:
745 read_list
.append(bg_job
.sp
.stdout
)
746 read_list
.append(bg_job
.sp
.stderr
)
747 reverse_dict
[bg_job
.sp
.stdout
] = (bg_job
, True)
748 reverse_dict
[bg_job
.sp
.stderr
] = (bg_job
, False)
749 if bg_job
.string_stdin
is not None:
750 write_list
.append(bg_job
.sp
.stdin
)
751 reverse_dict
[bg_job
.sp
.stdin
] = bg_job
754 stop_time
= start_time
+ timeout
755 time_left
= stop_time
- time
.time()
757 time_left
= None # so that select never times out
759 while not timeout
or time_left
> 0:
760 # select will return when we may write to stdin or when there is
761 # stdout/stderr output we can read (including when it is
762 # EOF, that is the process has terminated).
763 read_ready
, write_ready
, _
= select
.select(read_list
, write_list
, [],
766 # os.read() has to be used instead of
767 # subproc.stdout.read() which will otherwise block
768 for file_obj
in read_ready
:
769 bg_job
, is_stdout
= reverse_dict
[file_obj
]
770 bg_job
.process_output(is_stdout
)
772 for file_obj
in write_ready
:
773 # we can write PIPE_BUF bytes without blocking
774 # POSIX requires PIPE_BUF is >= 512
775 bg_job
= reverse_dict
[file_obj
]
776 file_obj
.write(bg_job
.string_stdin
[:512])
777 bg_job
.string_stdin
= bg_job
.string_stdin
[512:]
778 # no more input data, close stdin, remove it from the select set
779 if not bg_job
.string_stdin
:
781 write_list
.remove(file_obj
)
782 del reverse_dict
[file_obj
]
784 all_jobs_finished
= True
785 for bg_job
in bg_jobs
:
786 if bg_job
.result
.exit_status
is not None:
789 bg_job
.result
.exit_status
= bg_job
.sp
.poll()
790 if bg_job
.result
.exit_status
is not None:
791 # process exited, remove its stdout/stdin from the select set
792 bg_job
.result
.duration
= time
.time() - start_time
793 read_list
.remove(bg_job
.sp
.stdout
)
794 read_list
.remove(bg_job
.sp
.stderr
)
795 del reverse_dict
[bg_job
.sp
.stdout
]
796 del reverse_dict
[bg_job
.sp
.stderr
]
798 all_jobs_finished
= False
800 if all_jobs_finished
:
804 time_left
= stop_time
- time
.time()
806 # Kill all processes which did not complete prior to timeout
807 for bg_job
in bg_jobs
:
808 if bg_job
.result
.exit_status
is not None:
811 logging
.warn('run process timeout (%s) fired on: %s', timeout
,
813 nuke_subprocess(bg_job
.sp
)
814 bg_job
.result
.exit_status
= bg_job
.sp
.poll()
815 bg_job
.result
.duration
= time
.time() - start_time
820 def pid_is_alive(pid
):
822 True if process pid exists and is not yet stuck in Zombie state.
823 Zombies are impossible to move between cgroups, etc.
824 pid can be integer, or text of integer.
826 path
= '/proc/%s/stat' % pid
829 stat
= read_one_line(path
)
831 if not os
.path
.exists(path
):
836 return stat
.split()[2] != 'Z'
839 def signal_pid(pid
, sig
):
841 Sends a signal to a process id. Returns True if the process terminated
842 successfully, False otherwise.
847 # The process may have died before we could kill it.
851 if not pid_is_alive(pid
):
855 # The process is still alive
def nuke_subprocess(subproc):
    """Terminate the subprocess.Popen *subproc*, escalating SIGTERM -> SIGKILL.

    @return: the process exit status once it has terminated, or None if it
            survived every signal.
    """
    # check if the subprocess is still alive, first
    status = subproc.poll()
    if status is not None:
        return status

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
873 def nuke_pid(pid
, signal_queue
=(signal
.SIGTERM
, signal
.SIGKILL
)):
874 # the process has not terminated within timeout,
875 # kill it via an escalating series of signals.
876 for sig
in signal_queue
:
877 if signal_pid(pid
, sig
):
880 # no signal successfully terminated the process
881 raise error
.AutoservRunError('Could not kill %d' % pid
, None)
def system(command, timeout=None, ignore_status=False):
    """Run *command* via run(), teeing its stdout/stderr to the logs.

    @param timeout: timeout in seconds.
    @param ignore_status: if False, raise an exception when the command's
            exit code is non-zero; if True, return the exit code.

    @return: exit status of command (always zero unless ignore_status=True).
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
def system_parallel(commands, timeout=None, ignore_status=False):
    """Run *commands* in parallel and return their exit statuses, in order."""
    jobs = run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                        stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [job.exit_status for job in jobs]
908 def system_output(command
, timeout
=None, ignore_status
=False,
909 retain_output
=False, args
=()):
911 Run a command and return the stdout output.
913 @param command: command string to execute.
914 @param timeout: time limit in seconds before attempting to kill the
915 running process. The function will take a few seconds longer
916 than 'timeout' to complete if it has to kill the process.
917 @param ignore_status: do not raise an exception, no matter what the exit
918 code of the command is.
919 @param retain_output: set to True to make stdout/stderr of the command
920 output to be also sent to the logging system
921 @param args: sequence of strings of arguments to be given to the command
922 inside " quotes after they have been escaped for that; each
923 element in the sequence will be given as a separate command
926 @return a string with the stdout output of the command.
929 out
= run(command
, timeout
=timeout
, ignore_status
=ignore_status
,
930 stdout_tee
=TEE_TO_LOGS
, stderr_tee
=TEE_TO_LOGS
,
933 out
= run(command
, timeout
=timeout
, ignore_status
=ignore_status
,
940 def system_output_parallel(commands
, timeout
=None, ignore_status
=False,
941 retain_output
=False):
943 out
= [bg_job
.stdout
for bg_job
944 in run_parallel(commands
, timeout
=timeout
,
945 ignore_status
=ignore_status
,
946 stdout_tee
=TEE_TO_LOGS
, stderr_tee
=TEE_TO_LOGS
)]
948 out
= [bg_job
.stdout
for bg_job
in run_parallel(commands
,
949 timeout
=timeout
, ignore_status
=ignore_status
)]
951 if out
[-1:] == '\n': out
= out
[:-1]
955 def strip_unicode(input):
956 if type(input) == list:
957 return [strip_unicode(i
) for i
in input]
958 elif type(input) == dict:
960 for key
in input.keys():
961 output
[str(key
)] = strip_unicode(input[key
])
963 elif type(input) == unicode:
969 def get_cpu_percentage(function
, *args
, **dargs
):
970 """Returns a tuple containing the CPU% and return value from function call.
972 This function calculates the usage time by taking the difference of
973 the user and system times both before and after the function call.
975 child_pre
= resource
.getrusage(resource
.RUSAGE_CHILDREN
)
976 self_pre
= resource
.getrusage(resource
.RUSAGE_SELF
)
978 to_return
= function(*args
, **dargs
)
979 elapsed
= time
.time() - start
980 self_post
= resource
.getrusage(resource
.RUSAGE_SELF
)
981 child_post
= resource
.getrusage(resource
.RUSAGE_CHILDREN
)
983 # Calculate CPU Percentage
984 s_user
, s_system
= [a
- b
for a
, b
in zip(self_post
, self_pre
)[:2]]
985 c_user
, c_system
= [a
- b
for a
, b
in zip(child_post
, child_pre
)[:2]]
986 cpu_percent
= (s_user
+ c_user
+ s_system
+ c_system
) / elapsed
988 return cpu_percent
, to_return
991 class SystemLoad(object):
993 Get system and/or process values and return average value of load.
995 def __init__(self
, pids
, advanced
=False, time_step
=0.1, cpu_cont
=False,
998 @param pids: List of pids to be monitored. If pid = 0 whole system will
999 be monitored. pid == 0 means whole system.
1000 @param advanced: monitor add value for system irq count and softirq
1001 for process minor and maior page fault
1002 @param time_step: Time step for continuous monitoring.
1003 @param cpu_cont: If True monitor CPU load continuously.
1004 @param use_log: If true every monitoring is logged for dump.
1010 cpu
= FileFieldMonitor("/proc/stat",
1011 [("cpu", 0), # User Time
1012 ("cpu", 2), # System Time
1013 ("intr", 0), # IRQ Count
1014 ("softirq", 0)], # Soft IRQ Count
1020 mem
= FileFieldMonitor("/proc/meminfo",
1021 [("MemTotal:", 0), # Mem Total
1022 ("MemFree:", 0), # Mem Free
1023 ("Buffers:", 0), # Buffers
1024 ("Cached:", 0)], # Cached
1030 self
.stats
[pid
] = ["TOTAL", cpu
, mem
]
1031 self
.pids
.append(pid
)
1034 if (type(pid
) is int):
1035 self
.pids
.append(pid
)
1036 name
= get_process_name(pid
)
1038 self
.pids
.append(pid
[0])
1041 cpu
= FileFieldMonitor("/proc/%d/stat" %
1043 [("", 13), # User Time
1044 ("", 14), # System Time
1045 ("", 9), # Minority Page Fault
1046 ("", 11)], # Majority Page Fault
1052 mem
= FileFieldMonitor("/proc/%d/status" %
1054 [("VmSize:", 0), # Virtual Memory Size
1055 ("VmRSS:", 0), # Resident Set Size
1056 ("VmPeak:", 0), # Peak VM Size
1057 ("VmSwap:", 0)], # VM in Swap
1063 self
.stats
[self
.pids
[-1]] = [name
, cpu
, mem
]
1065 self
.advanced
= advanced
1070 Define format how to print
1073 for pid
in self
.pids
:
1074 for stat
in self
.stats
[pid
][1:]:
1075 out
+= str(stat
.get_status()) + "\n"
1079 def start(self
, pids
=[]):
1081 Start monitoring of the process system usage.
1082 @param pids: List of PIDs you intend to control. Use pids=[] to control
1089 for stat
in self
.stats
[pid
][1:]:
1093 def stop(self
, pids
=[]):
1095 Stop monitoring of the process system usage.
1096 @param pids: List of PIDs you intend to control. Use pids=[] to control
1103 for stat
in self
.stats
[pid
][1:]:
1107 def dump(self
, pids
=[]):
1109 Get the status of monitoring.
1110 @param pids: List of PIDs you intend to control. Use pids=[] to control
1113 tuple([cpu load], [memory load]):
1114 ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
1115 [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])
1118 average_values[], test_time, cont_meas_values[[]], time_step
1120 average_values[], test_time, cont_meas_values[[]], time_step
1121 where average_values[] are the measured values (mem_free,swap,...)
1122 which are described in SystemLoad.__init__()-FileFieldMonitor.
1123 cont_meas_values[[]] is a list of average_values in the sampling
1132 stat
= (pid
, self
.stats
[pid
][1].get_status())
1135 stat
= (pid
, self
.stats
[pid
][2].get_status())
1138 return (cpus
, memory
)
1141 def get_cpu_status_string(self
, pids
=[]):
1143 Convert status to string array.
1144 @param pids: List of PIDs you intend to control. Use pids=[] to control
1146 @return: String format to table.
1157 headers
.extend(["MINFLT/IRQC",
1159 headers
.append(("%11s") % "TIME")
1162 stat
= self
.stats
[pid
][1].get_status()
1165 textstatus
.append(["%s" % self
.stats
[pid
][0],
1167 "%4.0f%%" % (stat
[0] / time
),
1168 "%4.0f%%" % (stat
[1] / time
),
1169 "%4.0f%%" % ((stat
[0] + stat
[1]) / time
),
1172 textstatus
[-1].insert(-1, "%11d" % stat
[2])
1173 textstatus
[-1].insert(-1, "%14d" % stat
[3])
1175 return matrix_to_string(textstatus
, tuple(headers
))
1178 def get_mem_status_string(self
, pids
=[]):
1180 Convert status to string array.
1181 @param pids: List of PIDs you intend to control. Use pids=[] to control
1183 @return: String format to table.
1190 ("%8s") % "TOTAL/VMSIZE",
1191 ("%8s") % "FREE/VMRSS",
1192 ("%8s") % "BUFFERS/VMPEAK",
1193 ("%8s") % "CACHED/VMSWAP",
1197 stat
= self
.stats
[pid
][2].get_status()
1200 textstatus
.append(["%s" % self
.stats
[pid
][0],
1202 "%10dMB" % (stat
[0] / 1024),
1203 "%8dMB" % (stat
[1] / 1024),
1204 "%12dMB" % (stat
[2] / 1024),
1205 "%11dMB" % (stat
[3] / 1024),
1208 return matrix_to_string(textstatus
, tuple(headers
))
1211 def get_arch(run_function
=run
):
1213 Get the hardware architecture of the machine.
1214 run_function is used to execute the commands. It defaults to
1215 utils.run() but a custom method (if provided) should be of the
1216 same schema as utils.run. It should return a CmdResult object and
1217 throw a CmdError exception.
1219 arch
= run_function('/bin/uname -m').stdout
.rstrip()
1220 if re
.match(r
'i\d86$', arch
):
1225 def get_num_logical_cpus_per_socket(run_function
=run
):
1227 Get the number of cores (including hyperthreading) per cpu.
1228 run_function is used to execute the commands. It defaults to
1229 utils.run() but a custom method (if provided) should be of the
1230 same schema as utils.run. It should return a CmdResult object and
1231 throw a CmdError exception.
1233 siblings
= run_function('grep "^siblings" /proc/cpuinfo').stdout
.rstrip()
1234 num_siblings
= map(int,
1235 re
.findall(r
'^siblings\s*:\s*(\d+)\s*$',
1237 if len(num_siblings
) == 0:
1238 raise error
.TestError('Unable to find siblings info in /proc/cpuinfo')
1239 if min(num_siblings
) != max(num_siblings
):
1240 raise error
.TestError('Number of siblings differ %r' %
1242 return num_siblings
[0]
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).

    @param src: path of the source tree (file or directory).
    @param dest: path of the destination tree to merge into.
    """
    if not os.path.exists(src):
        return # exists only in dest
    elif not os.path.exists(dest):
        if os.path.isfile(src):
            shutil.copy2(src, dest) # file only in src
        else:
            shutil.copytree(src, dest, symlinks=True) # dir only in src
    elif os.path.isfile(src) and os.path.isfile(dest):
        # src & dest are files in both trees, append src to dest
        destfile = open(dest, "a")
        try:
            srcfile = open(src)
            try:
                destfile.write(srcfile.read())
            finally:
                # close handles even if the read/write fails
                srcfile.close()
        finally:
            destfile.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # src & dest are directories in both trees, so recursively merge
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    else:
        # src & dest both exist, but are incompatible
        # (e.g. file vs directory); skip per the documented contract
        pass
class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """

    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        # Wrap long command lines so the report stays within 78 columns.
        wrapper = textwrap.TextWrapper(width = 78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(self.command), self.exit_status,
                   self.duration, stdout, stderr))
def __init__(self, run_sequentially=False):
    # Run sequentially is for debugging control files
    # test_list holds (args, dargs) tuples queued via add() and
    # consumed by run(); it must exist before the first add() call.
    self.test_list = []
    self.run_sequentially = run_sequentially
def add(self, *args, **dargs):
    """Queue one test invocation.

    The positional and keyword arguments are stored verbatim and later
    replayed by run() as fn(*args, **dargs).
    """
    self.test_list.append((args, dargs))
def run(self, fn):
    """Invoke fn(*args, **dargs) once for every queued test.

    Tests are consumed in random order unless run_sequentially is set,
    in which case they run in the order they were added (index 0 each
    iteration). The queue is empty when this returns.

    @param fn: callable invoked with each queued (args, dargs) pair.
    """
    while self.test_list:
        test_index = random.randint(0, len(self.test_list)-1)
        if self.run_sequentially:
            test_index = 0
        (args, dargs) = self.test_list.pop(test_index)
        fn(*args, **dargs)
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module[module.rfind(".") + 1:]

    # Default the filename to <last component of the module name>.py
    if not modulefile:
        modulefile = short_module + ".py"

    if os.path.exists(os.path.join(os.path.dirname(path), modulefile)):
        return __import__(module, {}, {}, [short_module])
    return dummy
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    module = import_site_module(path, module, modulefile=modulefile)
    if not module:
        # no site module at all -> fall back to the dummy
        return dummy

    # special unique value to tell us if the symbol can't be imported
    cant_import = object()

    obj = getattr(module, name, cant_import)
    if obj is cant_import:
        logging.debug("unable to import site symbol '%s', using non-site "
                      "implementation", name)
        return dummy

    return obj
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    res = import_site_symbol(path, module, classname, None, modulefile)
    if res:
        if not issubclass(res, baseclass):
            # if not a subclass of baseclass then mix in baseclass with the
            # site specific class object and return the result
            res = type(classname, (res, baseclass), {})
    else:
        # no site specific class present -> use the base class unchanged
        res = baseclass

    return res
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # A function is just a module attribute, so the generic symbol lookup
    # does all the work.
    return import_site_symbol(path, module, funcname, dummy, modulefile)
def _get_pid_path(program_name):
    """Return the absolute path of <program_name>.pid located two directory
    levels above this module (the main autotest directory)."""
    pid_name = "%s.pid" % program_name
    base_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(base_dir, "..", "..", pid_name))
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    pidfile = open(_get_pid_path(program_name), "w")
    try:
        pidfile.write("%s\n" % os.getpid())
    finally:
        # always release the handle, even if the write fails
        pidfile.close()
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pidfile_path = _get_pid_path(program_name)

    try:
        os.remove(pidfile_path)
    except OSError:
        # Removal can race with another process; only swallow the error
        # when the file is in fact gone.
        if not os.path.exists(pidfile_path):
            return
        raise
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    # reuse the already-computed path instead of recomputing it
    pidfile = open(pidfile_path, 'r')
    try:
        try:
            pid = int(pidfile.readline())
        except ValueError:
            # A truncated/corrupt pid file that has since been removed is
            # treated like a missing one; otherwise propagate the error.
            if not os.path.exists(pidfile_path):
                return None
            raise
    finally:
        pidfile.close()

    return pid
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    """
    # Field 1 of /proc/<pid>/stat is the command name wrapped in
    # parentheses; slice off the surrounding '(' and ')'.
    stat_contents = read_file("/proc/%d/stat" % pid)
    comm_field = get_field(stat_contents, 1)
    return comm_field[1:-1]
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    if pid is None:
        # no pid file -> the program was never started or already cleaned up
        return False
    return pid_is_alive(pid)
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    # only signal when a pid was actually recorded
    if pid:
        signal_pid(pid, sig)
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # find the longest leading common path
    # (range instead of xrange: identical behavior here, Python 3 safe)
    for i in range(min(len(path_list), len(ref_list))):
        if path_list[i] != ref_list[i]:
            # decrement i so when exiting this loop either by no match or by
            # end of range we are one step behind
            i -= 1
            break
    i += 1

    # drop the common part of the paths, not interested in that anymore
    del path_list[:i]

    # for each uncommon component in the reference prepend a ".."
    path_list[:0] = ['..'] * (len(ref_list) - i)

    return os.path.join(*path_list)
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslashes must be escaped first so the backslashes introduced by
    # the later replacements are not themselves doubled.
    command = command.replace("\\", "\\\\")
    command = command.replace("$", r'\$')
    command = command.replace('"', r'\"')
    command = command.replace('`', r'\`')
    return command
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    if 'CHOST' in os.environ:
        args.append('--host=' + os.environ['CHOST'])
    if 'CBUILD' in os.environ:
        args.append('--build=' + os.environ['CBUILD'])
    if 'CTARGET' in os.environ:
        args.append('--target=' + os.environ['CTARGET'])
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    @param make: the make binary to invoke.
    @param timeout: seconds to allow the command to run, or None.
    @param ignore_status: if True, do not raise on non-zero exit status.
    """
    makeopts = os.environ.get('MAKEOPTS', '')
    cmd = '%s %s %s' % (make, makeopts, extra)
    return system(cmd, timeout=timeout, ignore_status=ignore_status)
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    def _cmp(x, y):
        # Replacement for the builtin cmp(), which Python 3 removed.
        return (x > y) - (x < y)

    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while len(ax) > 0 and len(ay) > 0:
        cx = ax.pop(0)
        cy = ay.pop(0)
        # zero-pad the shorter component so e.g. "2" sorts before "10"
        maxlen = max(len(cx), len(cy))
        c = _cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # all shared components equal: the longer version string is greater
    return _cmp(len(ax), len(ay))
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    arg_re = re.compile(r'(\w+)[:=](.*)$')
    # named arg_dict rather than 'dict' to avoid shadowing the builtin
    arg_dict = {}
    for arg in args:
        match = arg_re.match(arg)
        if match:
            arg_dict[match.group(1).lower()] = match.group(2)
        else:
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored." % (arg, arg_re.pattern))
    return arg_dict
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def try_bind(port, socket_type, socket_proto):
        # Returns the bound port number on success, None if the bind fails.
        s = socket.socket(socket.AF_INET, socket_type, socket_proto)
        try:
            try:
                s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                s.bind(('', port))
                return s.getsockname()[1]
            except socket.error:
                return None
        finally:
            # never leak the probe socket
            s.close()

    # On the 2.6 kernel, calling try_bind() on UDP socket returns the
    # same port over and over. So always try TCP first.
    while True:
        # Ask the OS for an unused port.
        port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        # Check if this port is unused on the other protocol.
        if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP):
            return port
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        # non-interactive mode: log the implied "yes" answer and move on
        logging.info("%s (y/n) y" % question)
        return "y"
    return raw_input("%s INFO | %s (y/n) " %
                     (time.strftime("%H:%M:%S", time.localtime()), question))