1 import os
, copy
, logging
, errno
, fcntl
, time
, re
, weakref
, traceback
3 import cPickle
as pickle
4 from autotest_lib
.client
.common_lib
import autotemp
, error
, log
class job_directory(object):
    """Represents a job.*dir directory."""


    class JobDirectoryException(error.AutotestError):
        """Generic job_directory exception superclass."""


    class MissingDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job does not exist."""
        def __init__(self, path):
            Exception.__init__(self, 'Directory %s does not exist' % path)


    class UncreatableDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job is missing and cannot
        be created."""
        def __init__(self, path, error):
            # include both the path and the underlying OS error so the
            # failure is diagnosable from the message alone
            msg = 'Creation of directory %s failed with exception %s'
            msg %= (path, error)
            Exception.__init__(self, msg)


    class UnwritableDirectoryException(JobDirectoryException):
        """Raised when a writable directory required by the job exists
        but is not writable."""
        def __init__(self, path):
            msg = 'Directory %s exists but is not writable' % path
            Exception.__init__(self, msg)


    def __init__(self, path, is_writable=False):
        """
        Instantiate a job directory.

        @param path: The path of the directory. If None a temporary directory
            will be created instead.
        @param is_writable: If True, expect the directory to be writable.

        @raise MissingDirectoryException: raised if is_writable=False and the
            directory does not exist.
        @raise UnwritableDirectoryException: raised if is_writable=True and
            the directory exists but is not writable.
        @raise UncreatableDirectoryException: raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        if path is None:
            if is_writable:
                # keep a reference to the tempdir object so the temporary
                # directory is not cleaned up while this object is alive
                self._tempdir = autotemp.tempdir(unique_id='autotest')
                self.path = self._tempdir.name
            else:
                raise self.MissingDirectoryException(path)
        else:
            self._tempdir = None
            self.path = path
        self._ensure_valid(is_writable)


    def _ensure_valid(self, is_writable):
        """
        Ensure that this is a valid directory.

        Will check if a directory exists, can optionally also enforce that
        it be writable. It can optionally create it if necessary. Creation
        will still fail if the path is rooted in a non-writable directory, or
        if a file already exists at the given location.

        @param is_writable A boolean indicating that the directory should
            not only exist, but also be writable.

        @raises MissingDirectoryException raised if is_writable=False and the
            directory does not exist.
        @raises UnwritableDirectoryException raised if is_writable=True and
            the directory is not writable.
        @raises UncreatableDirectoryException raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        # ensure the directory exists
        if is_writable:
            try:
                os.makedirs(self.path)
            except OSError as e:
                # EEXIST on an existing *directory* is fine (racing creators);
                # anything else (or a file squatting on the path) is fatal
                if e.errno != errno.EEXIST or not os.path.isdir(self.path):
                    raise self.UncreatableDirectoryException(self.path, e)
        elif not os.path.isdir(self.path):
            raise self.MissingDirectoryException(self.path)

        # if is_writable=True, also check that the directory is writable
        if is_writable and not os.access(self.path, os.W_OK):
            raise self.UnwritableDirectoryException(self.path)


    @staticmethod
    def property_factory(attribute):
        """
        Create a job.*dir -> job._*dir.path property accessor.

        @param attribute A string with the name of the attribute this is
            exposed as. '_'+attribute must then be attribute that holds
            either None or a job_directory-like object.

        @returns A read-only property object that exposes a job_directory path
        """
        @property
        def dir_property(self):
            underlying_attribute = getattr(self, '_' + attribute)
            if underlying_attribute is None:
                return None
            return underlying_attribute.path
        return dir_property
# decorator for use with job_state methods
def with_backing_lock(method):
    """A decorator to perform a lock-*-unlock cycle.

    When applied to a method, this decorator will automatically wrap
    calls to the method in a backing file lock and before the call
    followed by a backing file unlock.
    """
    def wrapped_method(self, *args, **dargs):
        # only acquire (and later release) the lock at the outermost
        # decorated call; nested decorated calls reuse the held lock
        already_have_lock = self._backing_file_lock is not None
        if not already_have_lock:
            self._lock_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            # guarantee the unlock even if the wrapped method raises
            if not already_have_lock:
                self._unlock_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method
# decorator for use with job_state methods
def with_backing_file(method):
    """A decorator to perform a lock-read-*-write-unlock cycle.

    When applied to a method, this decorator will automatically wrap
    calls to the method in a lock-and-read before the call followed by a
    write-and-unlock. Any operation that is reading or writing state
    should be decorated with this method to ensure that backing file
    state is consistently maintained.
    """
    @with_backing_lock
    def wrapped_method(self, *args, **dargs):
        self._read_from_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            # flush state back to disk even if the wrapped method raises
            self._write_to_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method
class job_state(object):
    """A class for managing explicit job and user state, optionally persistent.

    The class allows you to save state by name (like a dictionary). Any state
    stored in this class should be picklable and deep copyable. While this is
    not enforced it is recommended that only valid python identifiers be used
    as names. Additionally, the namespace 'stateful_property' is used for
    storing the valued associated with properties constructed using the
    property_factory method.
    """

    # sentinel used so that None remains a legal default value for get()
    NO_DEFAULT = object()
    PICKLE_PROTOCOL = 2  # highest protocol available in python 2.4


    def __init__(self):
        """Initialize the job state."""
        self._state = {}
        self._backing_file = None
        self._backing_file_initialized = False
        self._backing_file_lock = None


    def _lock_backing_file(self):
        """Acquire a lock on the backing file."""
        if self._backing_file:
            self._backing_file_lock = open(self._backing_file, 'a')
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_EX)


    def _unlock_backing_file(self):
        """Release a lock on the backing file."""
        if self._backing_file_lock:
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_UN)
            self._backing_file_lock.close()
            self._backing_file_lock = None


    def read_from_file(self, file_path, merge=True):
        """Read in any state from the file at file_path.

        When merge=True, any state specified only in-memory will be preserved.
        Any state specified on-disk will be set in-memory, even if an in-memory
        setting already exists.

        @param file_path: The path where the state should be read from. It must
            exist but it can be empty.
        @param merge: If true, merge the on-disk state with the in-memory
            state. If false, replace the in-memory state with the on-disk
            state.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """
        # we can assume that the file exists
        if os.path.getsize(file_path) == 0:
            on_disk_state = {}
        else:
            # binary mode: PICKLE_PROTOCOL 2 is a binary pickle format;
            # close the handle explicitly instead of leaking it
            infile = open(file_path, 'rb')
            try:
                on_disk_state = pickle.load(infile)
            finally:
                infile.close()

        if merge:
            # merge the on-disk state with the in-memory state
            for namespace, namespace_dict in on_disk_state.items():
                in_memory_namespace = self._state.setdefault(namespace, {})
                for name, value in namespace_dict.items():
                    if name in in_memory_namespace:
                        if in_memory_namespace[name] != value:
                            logging.info('Persistent value of %s.%s from %s '
                                         'overridding existing in-memory '
                                         'value', namespace, name, file_path)
                            in_memory_namespace[name] = value
                        else:
                            logging.debug('Value of %s.%s is unchanged, '
                                          'skipping import', namespace, name)
                    else:
                        logging.debug('Importing %s.%s from state file %s',
                                      namespace, name, file_path)
                        in_memory_namespace[name] = value
        else:
            # just replace the in-memory state with the on-disk state
            self._state = on_disk_state

        # lock the backing file before we refresh it
        with_backing_lock(self.__class__._write_to_backing_file)(self)


    def write_to_file(self, file_path):
        """Write out the current state to the given path.

        @param file_path: The path where the state should be written out to.
            Must be writable.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """
        # binary mode to match the binary pickle protocol; close explicitly
        outfile = open(file_path, 'wb')
        try:
            pickle.dump(self._state, outfile, self.PICKLE_PROTOCOL)
        finally:
            outfile.close()


    def _read_from_backing_file(self):
        """Refresh the current state from the backing file.

        If the backing file has never been read before (indicated by checking
        self._backing_file_initialized) it will merge the file with the
        in-memory state, rather than overwriting it.
        """
        if self._backing_file:
            merge_backing_file = not self._backing_file_initialized
            self.read_from_file(self._backing_file, merge=merge_backing_file)
            self._backing_file_initialized = True


    def _write_to_backing_file(self):
        """Flush the current state to the backing file."""
        if self._backing_file:
            self.write_to_file(self._backing_file)


    @with_backing_lock
    def _synchronize_backing_file(self):
        """Synchronizes the contents of the in-memory and on-disk state."""
        # state is implicitly synchronized in _with_backing_file methods
        self._read_from_backing_file()
        self._write_to_backing_file()


    def set_backing_file(self, file_path):
        """Change the path used as the backing file for the persistent state.

        When a new backing file is specified if a file already exists then
        its contents will be added into the current state, with conflicts
        between the file and memory being resolved in favor of the file
        contents. The file will then be kept in sync with the (combined)
        in-memory state. The syncing can be disabled by setting this to None.

        @param file_path: A path on the filesystem that can be read from and
            written to, or None to turn off the backing store.
        """
        self._synchronize_backing_file()
        self._backing_file = file_path
        self._backing_file_initialized = False
        self._synchronize_backing_file()


    @with_backing_file
    def get(self, namespace, name, default=NO_DEFAULT):
        """Returns the value associated with a particular name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value was saved with.
        @param default: A default value to return if no state is currently
            associated with var.

        @return: A deep copy of the value associated with name. Note that this
            explicitly returns a deep copy to avoid problems with mutable
            values; mutations are not persisted or shared.
        @raise KeyError: raised when no state is associated with var and a
            default value is not provided.
        """
        if self.has(namespace, name):
            return copy.deepcopy(self._state[namespace][name])
        elif default is self.NO_DEFAULT:
            raise KeyError('No key %s in namespace %s' % (name, namespace))
        else:
            return default


    @with_backing_file
    def set(self, namespace, name, value):
        """Saves the value given with the provided name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value should be saved with.
        @param value: The value to save.
        """
        namespace_dict = self._state.setdefault(namespace, {})
        # deep copy so later caller-side mutations do not leak into the state
        namespace_dict[name] = copy.deepcopy(value)
        logging.debug('Persistent state %s.%s now set to %r', namespace,
                      name, value)


    @with_backing_file
    def has(self, namespace, name):
        """Return a boolean indicating if namespace.name is defined.

        @param namespace: The namespace to check for a definition.
        @param name: The name to check for a definition.

        @return: True if the given name is defined in the given namespace and
            False otherwise.
        """
        return namespace in self._state and name in self._state[namespace]


    @with_backing_file
    def discard(self, namespace, name):
        """If namespace.name is a defined value, deletes it.

        @param namespace: The namespace that the property is stored in.
        @param name: The name the value is saved with.
        """
        if self.has(namespace, name):
            del self._state[namespace][name]
            # drop the namespace entirely once it becomes empty
            if len(self._state[namespace]) == 0:
                del self._state[namespace]
            logging.debug('Persistent state %s.%s deleted', namespace, name)
        else:
            logging.debug(
                'Persistent state %s.%s not defined so nothing is discarded',
                namespace, name)


    @with_backing_file
    def discard_namespace(self, namespace):
        """Delete all defined namespace.* names.

        @param namespace: The namespace to be cleared.
        """
        if namespace in self._state:
            del self._state[namespace]
        logging.debug('Persistent state %s.* deleted', namespace)


    @staticmethod
    def property_factory(state_attribute, property_attribute, default,
                         namespace='global_properties'):
        """
        Create a property object for an attribute using self.get and self.set.

        @param state_attribute: A string with the name of the attribute on
            job that contains the job_state instance.
        @param property_attribute: A string with the name of the attribute
            this property is exposed as.
        @param default: A default value that should be used for this property
            if it is not set.
        @param namespace: The namespace to store the attribute value in.

        @return: A read-write property object that performs self.get calls
            to read the value and self.set calls to set it.
        """
        def getter(job):
            state = getattr(job, state_attribute)
            return state.get(namespace, property_attribute, default)
        def setter(job, value):
            state = getattr(job, state_attribute)
            state.set(namespace, property_attribute, value)
        return property(getter, setter)
class status_log_entry(object):
    """Represents a single status log entry."""

    RENDERED_NONE_VALUE = '----'
    TIMESTAMP_FIELD = 'timestamp'
    LOCALTIME_FIELD = 'localtime'

    # non-space whitespace is forbidden in any fields
    BAD_CHAR_REGEX = re.compile(r'[\t\n\r\v\f]')


    def __init__(self, status_code, subdir, operation, message, fields,
                 timestamp=None):
        """Construct a status.log entry.

        @param status_code: A message status code. Must match the codes
            accepted by autotest_lib.common_lib.log.is_valid_status.
        @param subdir: A valid job subdirectory, or None.
        @param operation: Description of the operation, or None.
        @param message: A printable string describing event to be recorded.
        @param fields: A dictionary of arbitrary alphanumeric key=value pairs
            to be included in the log, or None.
        @param timestamp: An optional integer timestamp, in the same format
            as a time.time() timestamp. If unspecified, the current time is
            used.

        @raise ValueError: if any of the parameters are invalid
        """
        if not log.is_valid_status(status_code):
            raise ValueError('status code %r is not valid' % status_code)
        self.status_code = status_code

        if subdir and self.BAD_CHAR_REGEX.search(subdir):
            raise ValueError('Invalid character in subdir string')
        self.subdir = subdir

        if operation and self.BAD_CHAR_REGEX.search(operation):
            raise ValueError('Invalid character in operation string')
        self.operation = operation

        # break the message line into a single-line message that goes into the
        # database, and a block of additional lines that goes into the status
        # log but will never be parsed
        message_lines = message.split('\n')
        self.message = message_lines[0].replace('\t', ' ' * 8)
        self.extra_message_lines = message_lines[1:]
        if self.BAD_CHAR_REGEX.search(self.message):
            raise ValueError('Invalid character in message %r' % self.message)

        if not fields:
            self.fields = {}
        else:
            self.fields = fields.copy()
        for key, value in list(self.fields.items()):
            if isinstance(value, int):
                # normalize int values to strings; store the conversion back
                # so the fields dict is uniformly str-valued
                value = str(value)
                self.fields[key] = value
            if self.BAD_CHAR_REGEX.search(key + value):
                raise ValueError('Invalid character in %r=%r field'
                                 % (key, value))

        # build up the timestamp
        if timestamp is None:
            timestamp = int(time.time())
        self.fields[self.TIMESTAMP_FIELD] = str(timestamp)
        self.fields[self.LOCALTIME_FIELD] = time.strftime(
            '%b %d %H:%M:%S', time.localtime(timestamp))


    def is_start(self):
        """Indicates if this status log is the start of a new nested block.

        @return: A boolean indicating if this entry starts a new nested block.
        """
        return self.status_code == 'START'


    def is_end(self):
        """Indicates if this status log is the end of a nested block.

        @return: A boolean indicating if this entry ends a nested block.
        """
        return self.status_code.startswith('END ')


    def render(self):
        """Render the status log entry into a text string.

        @return: A text string suitable for writing into a status log file.
        """
        # combine all the log line data into a tab-delimited string
        subdir = self.subdir or self.RENDERED_NONE_VALUE
        operation = self.operation or self.RENDERED_NONE_VALUE
        extra_fields = ['%s=%s' % field for field in self.fields.items()]
        line_items = [self.status_code, subdir, operation]
        line_items += extra_fields + [self.message]
        first_line = '\t'.join(line_items)

        # append the extra unparsable lines, two-space indented
        all_lines = [first_line]
        all_lines += ['  ' + line for line in self.extra_message_lines]
        return '\n'.join(all_lines)


    @classmethod
    def parse(cls, line):
        """Parse a status log entry from a text string.

        This method is the inverse of render; it should always be true that
        parse(entry.render()) produces a new status_log_entry equivalent to
        entry.

        @return: A new status_log_entry instance with fields extracted from the
            given status line. If the line is an extra message line then None
            is returned.
        """
        # extra message lines are always prepended with two spaces
        if line.startswith('  '):
            return None

        line = line.lstrip('\t')  # ignore indentation
        entry_parts = line.split('\t')
        if len(entry_parts) < 4:
            raise ValueError('%r is not a valid status line' % line)
        status_code, subdir, operation = entry_parts[:3]
        if subdir == cls.RENDERED_NONE_VALUE:
            subdir = None
        if operation == cls.RENDERED_NONE_VALUE:
            operation = None
        message = entry_parts[-1]
        fields = dict(part.split('=', 1) for part in entry_parts[3:-1])
        if cls.TIMESTAMP_FIELD in fields:
            timestamp = int(fields[cls.TIMESTAMP_FIELD])
        else:
            timestamp = None
        return cls(status_code, subdir, operation, message, fields, timestamp)
class status_indenter(object):
    """Abstract interface that a status log indenter should use."""

    @property
    def indent(self):
        # current indentation level; concrete subclasses must provide it
        raise NotImplementedError

    def increment(self):
        """Increase indentation by one level."""
        raise NotImplementedError

    def decrement(self):
        """Decrease indentation by one level."""
        raise NotImplementedError
class status_logger(object):
    """Represents a status log file. Responsible for translating messages
    into on-disk status log lines.

    @property global_filename: The filename to write top-level logs to.
    @property subdir_filename: The filename to write subdir-level logs to.
    """
    def __init__(self, job, indenter, global_filename='status',
                 subdir_filename='status', record_hook=None,
                 tap_writer=None):
        """Construct a logger instance.

        @param job: A reference to the job object this is logging for. Only a
            weak reference to the job is held, to avoid a
            status_logger <-> job circular reference.
        @param indenter: A status_indenter instance, for tracking the
            indentation level.
        @param global_filename: An optional filename to initialize the
            self.global_filename attribute.
        @param subdir_filename: An optional filename to initialize the
            self.subdir_filename attribute.
        @param record_hook: An optional function to be called before an entry
            is logged. The function should expect a single parameter, a
            copy of the status_log_entry object.
        @param tap_writer: An instance of the class TAPReport for addionally
            writing TAP files.
        """
        # weak reference only; the job owns this logger, not vice versa
        self._jobref = weakref.ref(job)
        self._indenter = indenter
        self.global_filename = global_filename
        self.subdir_filename = subdir_filename
        self._record_hook = record_hook
        if tap_writer is None:
            # disabled TAP writer stub so record_entry can call it blindly
            self._tap_writer = TAPReport(None)
        else:
            self._tap_writer = tap_writer


    def render_entry(self, log_entry):
        """Render a status_log_entry as it would be written to a log file.

        @param log_entry: A status_log_entry instance to be rendered.

        @return: The status log entry, rendered as it would be written to the
            logs (including indentation).
        """
        # END entries are rendered one level out, closing the block they end
        if log_entry.is_end():
            indent = self._indenter.indent - 1
        else:
            indent = self._indenter.indent
        return '\t' * indent + log_entry.render().rstrip('\n')


    def record_entry(self, log_entry, log_in_subdir=True):
        """Record a status_log_entry into the appropriate status log files.

        @param log_entry: A status_log_entry instance to be recorded into the
            status logs.
        @param log_in_subdir: A boolean that indicates (when true) that subdir
            logs should be written into the subdirectory status log file.
        """
        # acquire a strong reference for the duration of the method
        job = self._jobref()
        if job is None:
            logging.warning('Something attempted to write a status log entry '
                            'after its job terminated, ignoring the attempt.')
            logging.warning(traceback.format_stack())
            return

        # call the record hook if one was given
        if self._record_hook:
            self._record_hook(log_entry)

        # figure out where we need to log to
        log_files = [os.path.join(job.resultdir, self.global_filename)]
        if log_in_subdir and log_entry.subdir:
            log_files.append(os.path.join(job.resultdir, log_entry.subdir,
                                          self.subdir_filename))

        # write out to entry to the log files
        log_text = self.render_entry(log_entry)
        for log_file in log_files:
            fileobj = open(log_file, 'a')
            try:
                # portable replacement for the py2-only "print >> fileobj"
                fileobj.write(log_text + '\n')
            finally:
                fileobj.close()

        # write to TAPRecord instance
        if log_entry.is_end() and self._tap_writer.do_tap_report:
            self._tap_writer.record(log_entry, self._indenter.indent,
                                    log_files)

        # adjust the indentation if this was a START or END entry
        if log_entry.is_start():
            self._indenter.increment()
        elif log_entry.is_end():
            self._indenter.decrement()
class TAPReport(object):
    """
    Deal with TAP reporting for the Autotest client.
    """

    # maps autotest status codes onto TAP ok/not-ok outcomes
    # NOTE(review): table reconstructed from the status codes used elsewhere
    # in this module — confirm against the original source
    job_statuses = {
        "TEST_NA": False,
        "ABORT": False,
        "ERROR": False,
        "FAIL": False,
        "WARN": False,
        "GOOD": True,
        "START": True,
        "END GOOD": True,
        "ALERT": False,
        "RUNNING": False,
        "NOSTATUS": False,
    }


    def __init__(self, enable, resultdir=None, global_filename='status'):
        """
        @param enable: Set self.do_tap_report to trigger TAP reporting.
        @param resultdir: Path where the TAP report files will be written.
        @param global_filename: File name of the status files .tap extensions
            will be appended to.
        """
        self.do_tap_report = enable
        if resultdir is not None:
            self.resultdir = os.path.abspath(resultdir)
        self._reports_container = {}
        self._keyval_container = {}  # {'path1': [entries],}
        self.global_filename = global_filename


    @classmethod
    def tap_ok(self, success, counter, message):
        """
        return a TAP message string.

        @param success: True for positive message string.
        @param counter: number of TAP line in plan.
        @param message: additional message to report in TAP line.
        """
        if success:
            message = "ok %s - %s" % (counter, message)
        else:
            message = "not ok %s - %s" % (counter, message)
        return message


    def record(self, log_entry, indent, log_files):
        """
        Append a job-level status event to self._reports_container. All
        events will be written to TAP log files at the end of the test run.
        Otherwise, it's impossible to determine the TAP plan.

        @param log_entry: A string status code describing the type of status
            entry being recorded. It must pass log.is_valid_status to be
            considered valid.
        @param indent: Level of the log_entry to determine the operation if
            log_entry.operation is not given.
        @param log_files: List of full path of files the TAP report will be
            written to at the end of the test.
        """
        for log_file in log_files:
            log_file_path = os.path.dirname(log_file)
            key = log_file_path.split(self.resultdir, 1)[1].strip(os.sep)
            # entries for the top-level resultdir get a synthetic key
            # NOTE(review): 'root' bucket reconstructed — confirm it matches
            # the handling in _write_reports
            if not key:
                key = 'root'

            if key not in self._reports_container:
                self._reports_container[key] = []

            if log_entry.operation:
                operation = log_entry.operation
            elif indent == 1:
                # a level-1 entry with no operation is the job itself
                operation = "job"
            else:
                operation = "unknown"
            entry = self.tap_ok(
                self.job_statuses.get(log_entry.status_code, False),
                len(self._reports_container[key]) + 1, operation + "\n"
            )
            self._reports_container[key].append(entry)


    def record_keyval(self, path, dictionary, type_tag=None):
        """
        Append a key-value pairs of dictionary to self._keyval_container in
        TAP format. Once finished write out the keyval.tap file to the file
        system.

        If type_tag is None, then the key must be composed of alphanumeric
        characters (or dashes + underscores). However, if type-tag is not
        null then the keys must also have "{type_tag}" as a suffix. At
        the moment the only valid values of type_tag are "attr" and "perf".

        @param path: The full path of the keyval.tap file to be created
        @param dictionary: The keys and values.
        @param type_tag: The type of the values
        """
        self._keyval_container.setdefault(path, [0, []])
        self._keyval_container[path][0] += 1

        if type_tag is None:
            key_regex = re.compile(r'^[-\.\w]+$')
        else:
            if type_tag not in ('attr', 'perf'):
                raise ValueError('Invalid type tag: %s' % type_tag)
            escaped_tag = re.escape(type_tag)
            key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
        # one TAP "ok" line per keyval record, followed by a YAML block
        self._keyval_container[path][1].extend([
            self.tap_ok(True, self._keyval_container[path][0], "results"),
            "\n  ---\n",
        ])
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            self._keyval_container[path][1].append(
                '  %s: %s\n' % (key.replace('{', '_').rstrip('}'),
                                dictionary[key]))
        self._keyval_container[path][1].append("  ...\n")


    def _write_reports(self):
        """
        Write TAP reports to file.
        """
        for key in self._reports_container.keys():
            if key == 'root':
                sub_dir = ''
            else:
                sub_dir = key
            tap_fh = open(os.sep.join(
                [self.resultdir, sub_dir, self.global_filename]
            ) + ".tap", 'w')
            # TAP plan line first, then the recorded entries
            tap_fh.write('1..' + str(len(self._reports_container[key])) + '\n')
            tap_fh.writelines(self._reports_container[key])
            tap_fh.close()


    def _write_keyval(self):
        """
        Write the self._keyval_container key values to a file.
        """
        for path in self._keyval_container.keys():
            tap_fh = open(path + ".tap", 'w')
            tap_fh.write('1..' + str(self._keyval_container[path][0]) + '\n')
            tap_fh.writelines(self._keyval_container[path][1])
            tap_fh.close()


    def write(self):
        """
        Write the TAP reports to files.
        """
        self._write_reports()
        self._write_keyval()


    def _write_tap_archive(self):
        """
        Write a tar archive containing all the TAP files and
        a meta.yml containing the file names.
        """
        os.chdir(self.resultdir)
        tap_files = []
        for rel_path, d, files in os.walk('.'):
            tap_files.extend(["/".join(
                [rel_path, f]) for f in files if f.endswith('.tap')])
        meta_yaml = open('meta.yml', 'w')
        meta_yaml.write('file_order:\n')
        tap_tar = tarfile.open(self.resultdir + '/tap.tar.gz', 'w:gz')
        for f in tap_files:
            meta_yaml.write(" - " + f.lstrip('./') + "\n")
            tap_tar.add(f)
        meta_yaml.close()
        tap_tar.add('meta.yml')
        tap_tar.close()
856 class base_job(object):
857 """An abstract base class for the various autotest job classes.
859 @property autodir: The top level autotest directory.
860 @property clientdir: The autotest client directory.
861 @property serverdir: The autotest server directory. [OPTIONAL]
862 @property resultdir: The directory where results should be written out.
865 @property pkgdir: The job packages directory. [WRITABLE]
866 @property tmpdir: The job temporary directory. [WRITABLE]
867 @property testdir: The job test directory. [WRITABLE]
868 @property site_testdir: The job site test directory. [WRITABLE]
870 @property bindir: The client bin/ directory.
871 @property configdir: The client config/ directory.
872 @property profdir: The client profilers/ directory.
873 @property toolsdir: The client tools/ directory.
875 @property conmuxdir: The conmux directory. [OPTIONAL]
877 @property control: A path to the control file to be executed. [OPTIONAL]
878 @property hosts: A set of all live Host objects currently in use by the
879 job. Code running in the context of a local client can safely assume
880 that this set contains only a single entry.
881 @property machines: A list of the machine names associated with the job.
882 @property user: The user executing the job.
883 @property tag: A tag identifying the job. Often used by the scheduler to
884 give a name of the form NUMBER-USERNAME/HOSTNAME.
885 @property args: A list of addtional miscellaneous command-line arguments
886 provided when starting the job.
888 @property last_boot_tag: The label of the kernel from the last reboot.
889 [OPTIONAL,PERSISTENT]
890 @property automatic_test_tag: A string which, if set, will be automatically
891 added to the test name when running tests.
893 @property default_profile_only: A boolean indicating the default value of
894 profile_only used by test.execute. [PERSISTENT]
895 @property drop_caches: A boolean indicating if caches should be dropped
896 before each test is executed.
897 @property drop_caches_between_iterations: A boolean indicating if caches
898 should be dropped before each test iteration is executed.
899 @property run_test_cleanup: A boolean indicating if test.cleanup should be
900 run by default after a test completes, if the run_cleanup argument is
901 not specified. [PERSISTENT]
903 @property num_tests_run: The number of tests run during the job. [OPTIONAL]
904 @property num_tests_failed: The number of tests failed during the job.
907 @property bootloader: An instance of the boottool class. May not be
908 available on job instances where access to the bootloader is not
909 available (e.g. on the server running a server job). [OPTIONAL]
910 @property harness: An instance of the client test harness. Only available
911 in contexts where client test execution happens. [OPTIONAL]
912 @property logging: An instance of the logging manager associated with the
914 @property profilers: An instance of the profiler manager associated with
916 @property sysinfo: An instance of the sysinfo object. Only available in
917 contexts where it's possible to collect sysinfo.
918 @property warning_manager: A class for managing which types of WARN
919 messages should be logged and which should be supressed. [OPTIONAL]
920 @property warning_loggers: A set of readable streams that will be monitored
921 for WARN messages to be logged. [OPTIONAL]
924 _find_base_directories [CLASSMETHOD]
925 Returns the location of autodir, clientdir and serverdir
928 Returns the location of resultdir. Gets a copy of any parameters
929 passed into base_job.__init__. Can return None to indicate that
930 no resultdir is to be used.
933 Returns a status_logger instance for recording job status logs.
936 # capture the dependency on several helper classes with factories
937 _job_directory
= job_directory
938 _job_state
= job_state
941 # all the job directory attributes
942 autodir
= _job_directory
.property_factory('autodir')
943 clientdir
= _job_directory
.property_factory('clientdir')
944 serverdir
= _job_directory
.property_factory('serverdir')
945 resultdir
= _job_directory
.property_factory('resultdir')
946 pkgdir
= _job_directory
.property_factory('pkgdir')
947 tmpdir
= _job_directory
.property_factory('tmpdir')
948 testdir
= _job_directory
.property_factory('testdir')
949 site_testdir
= _job_directory
.property_factory('site_testdir')
950 bindir
= _job_directory
.property_factory('bindir')
951 configdir
= _job_directory
.property_factory('configdir')
952 profdir
= _job_directory
.property_factory('profdir')
953 toolsdir
= _job_directory
.property_factory('toolsdir')
954 conmuxdir
= _job_directory
.property_factory('conmuxdir')
957 # all the generic persistent properties
958 tag
= _job_state
.property_factory('_state', 'tag', '')
959 default_profile_only
= _job_state
.property_factory(
960 '_state', 'default_profile_only', False)
961 run_test_cleanup
= _job_state
.property_factory(
962 '_state', 'run_test_cleanup', True)
963 last_boot_tag
= _job_state
.property_factory(
964 '_state', 'last_boot_tag', None)
965 automatic_test_tag
= _job_state
.property_factory(
966 '_state', 'automatic_test_tag', None)
968 # the use_sequence_number property
969 _sequence_number
= _job_state
.property_factory(
970 '_state', '_sequence_number', None)
def _get_use_sequence_number(self):
    """Whether run_test invocations receive an automatic sequence tag."""
    # A live counter (non-None, non-zero) means sequencing is enabled.
    return bool(self._sequence_number)


def _set_use_sequence_number(self, value):
    """Enable sequencing (resetting the counter to 1) or disable it."""
    self._sequence_number = 1 if value else None


use_sequence_number = property(_get_use_sequence_number,
                               _set_use_sequence_number)
def __init__(self, *args, **dargs):
    """
    Initialize the base job state.

    Sets up the base job directories (autodir, clientdir, serverdir),
    all the derived directories, the resultdir, the execution-context
    stack, the persistent job state and TAP reporting.

    @param args: Forwarded to self._find_resultdir.
    @param dargs: Forwarded to self._find_resultdir. If an 'options'
        entry is present, its tap_report attribute controls TAP
        reporting; otherwise TAP reporting is initialized disabled.
    """
    # initialize the base directories, all others are relative to these
    autodir, clientdir, serverdir = self._find_base_directories()
    self._autodir = self._job_directory(autodir)
    self._clientdir = self._job_directory(clientdir)
    # serverdir is optional -- client-side jobs run without one
    if serverdir:
        self._serverdir = self._job_directory(serverdir)
    else:
        self._serverdir = None

    # initialize all the other directories relative to the base ones
    self._initialize_dir_properties()
    self._resultdir = self._job_directory(
        self._find_resultdir(*args, **dargs), True)
    self._execution_contexts = []

    # initialize all the job state
    self._state = self._job_state()

    # initialize tap reporting
    # NOTE: 'in' replaces the deprecated dict.has_key() (removed in Py3)
    if 'options' in dargs:
        self._tap = self._tap_init(dargs['options'].tap_report)
    else:
        self._tap = self._tap_init(False)
@classmethod
def _find_base_directories(cls):
    """
    Determine the location of autodir, clientdir and serverdir.

    Abstract method -- concrete job classes must return the three base
    directory paths (per the class docs, as autodir, clientdir and
    serverdir).

    @raise NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError()
def _initialize_dir_properties(self):
    """
    Initializes all the secondary self.*dir properties. Requires autodir,
    clientdir and serverdir to already be initialized.
    """
    # create some stubs for use as shortcuts
    def readonly_dir(*args):
        # join the path components into a (non-writable) job_directory
        return self._job_directory(os.path.join(*args))
    def readwrite_dir(*args):
        # as above, but the directory must also be writable
        return self._job_directory(os.path.join(*args), True)

    # various client-specific directories
    self._bindir = readonly_dir(self.clientdir, 'bin')
    self._configdir = readonly_dir(self.clientdir, 'config')
    self._profdir = readonly_dir(self.clientdir, 'profilers')
    self._pkgdir = readwrite_dir(self.clientdir, 'packages')
    self._toolsdir = readonly_dir(self.clientdir, 'tools')

    # directories which are in serverdir on a server, clientdir on a client
    if self.serverdir:
        root = self.serverdir
    else:
        root = self.clientdir
    self._tmpdir = readwrite_dir(root, 'tmp')
    self._testdir = readwrite_dir(root, 'tests')
    self._site_testdir = readwrite_dir(root, 'site_tests')

    # various server-specific directories
    if self.serverdir:
        self._conmuxdir = readonly_dir(self.autodir, 'conmux')
    else:
        self._conmuxdir = None
def _find_resultdir(self, *args, **dargs):
    """
    Determine the location of resultdir.

    Abstract method -- concrete job classes must return the path to use
    for resultdir, or None to indicate that no resultdir is to be used.
    Receives a copy of any parameters passed into __init__.

    @raise NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError()
def push_execution_context(self, resultdir):
    """
    Save off the current context of the job and change to the given one.

    In practice method just changes the resultdir, but it may become more
    extensive in the future. The expected use case is for when a child
    job needs to be executed in some sort of nested context (for example
    the way parallel_simple does). The original context can be restored
    with a pop_execution_context call.

    @param resultdir: The new resultdir, relative to the current one.
    """
    # Build the nested (writable) result directory before touching any
    # state, so a failure here leaves the context stack unchanged.
    nested_path = os.path.join(self.resultdir, resultdir)
    replacement = self._job_directory(nested_path, True)
    self._execution_contexts.append(self._resultdir)
    self._resultdir = replacement
def pop_execution_context(self):
    """
    Reverse the effects of the previous push_execution_context call.

    @raise IndexError: raised when the stack of contexts is empty.
    """
    stack = self._execution_contexts
    if stack:
        self._resultdir = stack.pop()
    else:
        raise IndexError('No old execution context to restore')
def get_state(self, name, default=_job_state.NO_DEFAULT):
    """Returns the value associated with a particular name.

    @param name: The name the value was saved with.
    @param default: A default value to return if no state is currently
        associated with var.

    @return: A deep copy of the value associated with name. Note that this
        explicitly returns a deep copy to avoid problems with mutable
        values; mutations are not persisted or shared.
    @raise KeyError: raised when no state is associated with var and a
        default value is not provided.
    """
    try:
        # job-level values live in the 'public' namespace of self._state
        return self._state.get('public', name, default=default)
    except KeyError:
        # re-raise with just the name, hiding the internal namespace
        raise KeyError(name)
def set_state(self, name, value):
    """Saves the value given with the provided name.

    @param name: The name the value should be saved with.
    @param value: The value to save.
    """
    # Job-level values are stored under the 'public' namespace of the
    # persistent state object.
    state = self._state
    state.set('public', name, value)
def _build_tagged_test_name(self, testname, dargs):
    """Builds the fully tagged testname and subdirectory for job.run_test.

    @param testname: The base name of the test
    @param dargs: The ** arguments passed to run_test. And arguments
        consumed by this method will be removed from the dictionary.

    @return: A 3-tuple of the full name of the test, the subdirectory it
        should be stored in, and the full tag of the subdir.
    """
    tag_parts = []

    # build up the parts of the tag used for the test name
    master_testpath = dargs.get('master_testpath', "")
    base_tag = dargs.pop('tag', None)
    if base_tag:
        tag_parts.append(str(base_tag))
    if self.use_sequence_number:
        # consume and advance the automatic per-job sequence counter
        tag_parts.append('_%02d_' % self._sequence_number)
        self._sequence_number += 1
    if self.automatic_test_tag:
        tag_parts.append(self.automatic_test_tag)
    full_testname = '.'.join([testname] + tag_parts)

    # build up the subdir and tag as well
    # subdir_tag applies only to the subdirectory/tag, not full_testname
    subdir_tag = dargs.pop('subdir_tag', None)
    if subdir_tag:
        tag_parts.append(subdir_tag)
    subdir = '.'.join([testname] + tag_parts)
    subdir = os.path.join(master_testpath, subdir)
    tag = '.'.join(tag_parts)

    return full_testname, subdir, tag
def _make_test_outputdir(self, subdir):
    """Creates an output directory for a test to run it.

    @param subdir: The subdirectory of the test. Generally computed by
        _build_tagged_test_name.

    @return: A job_directory instance corresponding to the outputdir of
        the test.
    @raise TestError: If the output directory is invalid.
    """
    # explicitly check that this subdirectory is new
    path = os.path.join(self.resultdir, subdir)
    if os.path.exists(path):
        msg = ('%s already exists; multiple tests cannot run with the '
               'same subdirectory' % subdir)
        raise error.TestError(msg)

    # create the outputdir and raise a TestError if it isn't valid
    try:
        outputdir = self._job_directory(path, True)
        return outputdir
    except self._job_directory.JobDirectoryException, e:
        # log the full traceback, but surface only a simple TestError
        logging.exception('%s directory creation failed with %s',
                          subdir, e)
        raise error.TestError('%s directory creation failed' % subdir)
def _tap_init(self, enable):
    """Initialize TAP reporting

    @param enable: Whether TAP reporting should be enabled.
    @return: A TAPReport instance writing into self.resultdir.
    """
    return TAPReport(enable, resultdir=self.resultdir)
def record(self, status_code, subdir, operation, status='',
           optional_fields=None):
    """Record a job-level status event.

    Logs an event noteworthy to the Autotest job as a whole. Messages will
    be written into a global status log file, as well as a subdir-local
    status log file (if subdir is specified).

    @param status_code: A string status code describing the type of status
        entry being recorded. It must pass log.is_valid_status to be
        considered valid.
    @param subdir: A specific results subdirectory this also applies to, or
        None. If not None the subdirectory must exist.
    @param operation: A string describing the operation that was run.
    @param status: An optional human-readable message describing the status
        entry, for example an error message or "completed successfully".
    @param optional_fields: An optional dictionary of additional named fields
        to be included with the status message. Every time timestamp and
        localtime entries are generated with the current time and added
        to this dictionary.
    """
    entry = status_log_entry(status_code, subdir, operation, status,
                             optional_fields)
    self.record_entry(entry)
def record_entry(self, entry, log_in_subdir=True):
    """Record a job-level status event, using a status_log_entry.

    This is the same as self.record but using an existing status log
    entry object rather than constructing one for you.

    @param entry: A status_log_entry object
    @param log_in_subdir: A boolean that indicates (when true) that subdir
        logs should be written into the subdirectory status log file.
    """
    # Delegate to whatever status logger the concrete job provides.
    logger = self._get_status_logger()
    logger.record_entry(entry, log_in_subdir)