# Copyright Martin J. Bligh, Google Inc 2008
# Released under the GPL v2

"""
This class allows you to communicate with the frontend to submit jobs etc
It is designed for writing more sophisticated server-side control files that
can recursively add and manage other jobs.

We turn the JSON dictionaries into real objects that are more idiomatic

For docs, see:
    http://autotest/afe/server/rpc_doc/
    http://autotest/new_tko/server/rpc_doc/
    http://docs.djangoproject.com/en/dev/ref/models/querysets/#queryset-api
"""

import getpass, os, time, traceback, re
import common
from autotest_lib.frontend.afe import rpc_client_lib
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib import utils
try:
    from autotest_lib.server.site_common import site_utils as server_utils
except ImportError:
    from autotest_lib.server import utils as server_utils
form_ntuples_from_machines = server_utils.form_ntuples_from_machines

GLOBAL_CONFIG = global_config.global_config
DEFAULT_SERVER = 'autotest'

def dump_object(header, obj):
    """
    Standard way to print out the frontend objects (eg job, host, acl, label)
    in a human-readable fashion for debugging
    """
    result = header + '\n'
    for key in obj.hash:
        if key == 'afe' or key == 'hash':
            continue
        result += '%20s: %s\n' % (key, obj.hash[key])
    return result


class RpcClient(object):
    """
    Abstract RPC class for communicating with the autotest frontend
    Inherited for both TKO and AFE uses.

    All the constructors go in the afe / tko class.
    Manipulating methods go in the object classes themselves
    """
    def __init__(self, path, user, server, print_log, debug, reply_debug):
        """
        Create a cached instance of a connection to the frontend

            user: username to connect as
            server: frontend server to connect to
            print_log: print a logging message to stdout on every operation
            debug: print out all RPC traffic
        """
        if not user:
            user = getpass.getuser()
        if not server:
            if 'AUTOTEST_WEB' in os.environ:
                server = os.environ['AUTOTEST_WEB']
            else:
                server = GLOBAL_CONFIG.get_config_value('SERVER', 'hostname',
                                                        default=DEFAULT_SERVER)
        self.server = server
        self.user = user
        self.print_log = print_log
        self.debug = debug
        self.reply_debug = reply_debug
        http_server = 'http://' + server
        headers = rpc_client_lib.authorization_headers(user, http_server)
        rpc_server = http_server + path
        if debug:
            print 'SERVER: %s' % rpc_server
            print 'HEADERS: %s' % headers
        self.proxy = rpc_client_lib.get_proxy(rpc_server, headers=headers)


    def run(self, call, **dargs):
        """
        Make an RPC call to the AFE server
        """
        rpc_call = getattr(self.proxy, call)
        if self.debug:
            print 'DEBUG: %s %s' % (call, dargs)
        try:
            result = utils.strip_unicode(rpc_call(**dargs))
            if self.reply_debug:
                print result
            return result
        except Exception:
            print 'FAILED RPC CALL: %s %s' % (call, dargs)
            raise
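
    # Example of a raw call through run() (the hostname below is a
    # hypothetical placeholder; 'get_hosts' is an RPC this module already
    # calls elsewhere):
    #     hosts_data = afe.run('get_hosts', hostname__in=['myhost'])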


    def log(self, message):
        if self.print_log:
            print message


class Planner(RpcClient):
    def __init__(self, user=None, server=None, print_log=True, debug=False,
                 reply_debug=False):
        super(Planner, self).__init__(path='/planner/server/rpc/',
                                      user=user,
                                      server=server,
                                      print_log=print_log,
                                      debug=debug,
                                      reply_debug=reply_debug)


class TKO(RpcClient):
    def __init__(self, user=None, server=None, print_log=True, debug=False,
                 reply_debug=False):
        super(TKO, self).__init__(path='/new_tko/server/rpc/',
                                  user=user,
                                  server=server,
                                  print_log=print_log,
                                  debug=debug,
                                  reply_debug=reply_debug)


    def get_status_counts(self, job, **data):
        entries = self.run('get_status_counts',
                           group_by=['hostname', 'test_name', 'reason'],
                           job_tag__startswith='%s-' % job, **data)
        return [TestStatus(self, e) for e in entries['groups']]


class AFE(RpcClient):
    def __init__(self, user=None, server=None, print_log=True, debug=False,
                 reply_debug=False, job=None):
        self.job = job
        super(AFE, self).__init__(path='/afe/server/rpc/',
                                  user=user,
                                  server=server,
                                  print_log=print_log,
                                  debug=debug,
                                  reply_debug=reply_debug)


    def host_statuses(self, live=None):
        dead_statuses = ['Repair Failed', 'Repairing']
        statuses = self.run('get_static_data')['host_statuses']
        if live == True:
            return list(set(statuses) - set(dead_statuses))
        if live == False:
            return dead_statuses
        else:
            return statuses


    @staticmethod
    def _dict_for_host_query(hostnames=(), status=None, label=None):
        query_args = {}
        if hostnames:
            query_args['hostname__in'] = hostnames
        if status:
            query_args['status'] = status
        if label:
            query_args['labels__name'] = label
        return query_args


    def get_hosts(self, hostnames=(), status=None, label=None, **dargs):
        query_args = dict(dargs)
        query_args.update(self._dict_for_host_query(hostnames=hostnames,
                                                    status=status,
                                                    label=label))
        hosts = self.run('get_hosts', **query_args)
        return [Host(self, h) for h in hosts]


    def get_hostnames(self, status=None, label=None, **dargs):
        """Like get_hosts() but returns hostnames instead of Host objects."""
        # This implementation can be replaced with a more efficient one
        # that does not query for entire host objects in the future.
        return [host_obj.hostname for host_obj in
                self.get_hosts(status=status, label=label, **dargs)]


    def reverify_hosts(self, hostnames=(), status=None, label=None):
        query_args = dict(locked=False,
                          aclgroup__users__login=self.user)
        query_args.update(self._dict_for_host_query(hostnames=hostnames,
                                                    status=status,
                                                    label=label))
        return self.run('reverify_hosts', **query_args)


    def create_host(self, hostname, **dargs):
        id = self.run('add_host', hostname=hostname, **dargs)
        return self.get_hosts(id=id)[0]


    def get_labels(self, **dargs):
        labels = self.run('get_labels', **dargs)
        return [Label(self, l) for l in labels]


    def create_label(self, name, **dargs):
        id = self.run('add_label', name=name, **dargs)
        return self.get_labels(id=id)[0]


    def get_acls(self, **dargs):
        acls = self.run('get_acl_groups', **dargs)
        return [Acl(self, a) for a in acls]


    def create_acl(self, name, **dargs):
        id = self.run('add_acl_group', name=name, **dargs)
        return self.get_acls(id=id)[0]


    def get_users(self, **dargs):
        users = self.run('get_users', **dargs)
        return [User(self, u) for u in users]


    def generate_control_file(self, tests, **dargs):
        ret = self.run('generate_control_file', tests=tests, **dargs)
        return ControlFile(self, ret)


    def get_jobs(self, summary=False, **dargs):
        if summary:
            jobs_data = self.run('get_jobs_summary', **dargs)
        else:
            jobs_data = self.run('get_jobs', **dargs)
        jobs = []
        for j in jobs_data:
            job = Job(self, j)
            # Set up some extra information defaults
            job.testname = re.sub(r'\s.*', '', job.name)  # arbitrary default
            job.platform_results = {}
            job.platform_reasons = {}
            jobs.append(job)
        return jobs


    def get_host_queue_entries(self, **data):
        entries = self.run('get_host_queue_entries', **data)
        job_statuses = [JobStatus(self, e) for e in entries]

        # Sadly, get_host_queue_entries doesn't return platforms; we have
        # to get those back from an explicit get_hosts query, then patch
        # the new host objects back into the host list.
        hostnames = [s.host.hostname for s in job_statuses if s.host]
        host_hash = {}
        for host in self.get_hosts(hostname__in=hostnames):
            host_hash[host.hostname] = host
        for status in job_statuses:
            if status.host:
                status.host = host_hash[status.host.hostname]
        # Filter for job statuses that have either a host or a meta_host.
        return [status for status in job_statuses if (status.host or
                                                      status.meta_host)]


    def create_job_by_test(self, tests, kernel=None, use_container=False,
                           kernel_cmdline=None, **dargs):
        """
        Given a test name, fetch the appropriate control file from the server
        and submit it.

        @param kernel: A comma separated list of kernel versions to boot.
        @param kernel_cmdline: The command line used to boot all kernels listed
                in the kernel parameter.

        Returns a job object, or None if the request cannot be satisfied.
        """
        assert ('hosts' in dargs or
                'atomic_group_name' in dargs and 'synch_count' in dargs)
        if kernel:
            kernel_list = re.split(r'[\s,]+', kernel.strip())
            kernel_info = []
            for version in kernel_list:
                kernel_dict = {'version': version}
                if kernel_cmdline is not None:
                    kernel_dict['cmdline'] = kernel_cmdline
                kernel_info.append(kernel_dict)
        else:
            kernel_info = None
        control_file = self.generate_control_file(
                tests=tests, kernel=kernel_info, use_container=use_container)
        if control_file.is_server:
            dargs['control_type'] = 'Server'
        else:
            dargs['control_type'] = 'Client'
        dargs['dependencies'] = dargs.get('dependencies', []) + \
                                control_file.dependencies
        dargs['control_file'] = control_file.control_file
        if not dargs.get('synch_count', None):
            dargs['synch_count'] = control_file.synch_count
        if 'hosts' in dargs and len(dargs['hosts']) < dargs['synch_count']:
            # Will not be able to satisfy this request.
            return None
        return self.create_job(**dargs)
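
    # A minimal usage sketch (the test and host names below are hypothetical
    # placeholders):
    #     job = afe.create_job_by_test(tests=['sleeptest'],
    #                                  hosts=['myhost'],
    #                                  name='sleeptest on myhost')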


    def create_job(self, control_file, name=' ', priority='Medium',
                   control_type='Client', **dargs):
        id = self.run('create_job', name=name, priority=priority,
                      control_file=control_file, control_type=control_type,
                      **dargs)
        return self.get_jobs(id=id)[0]


    def run_test_suites(self, pairings, kernel, kernel_label=None,
                        priority='Medium', wait=True, poll_interval=10,
                        email_from=None, email_to=None, timeout=168,
                        max_runtime_hrs=168, kernel_cmdline=None):
        """
        Run a list of test suites on a particular kernel.

        Poll for them to complete, and return whether they worked or not.

        @param pairings: List of MachineTestPairing objects to invoke.
        @param kernel: Name of the kernel to run.
        @param kernel_label: Label (string) of the kernel to run such as
                '<kernel-version> : <config> : <date>'
                If any pairing object has its job_label attribute set it
                will override this value for that particular job.
        @param kernel_cmdline: The command line to boot the kernel(s) with.
        @param wait: boolean - Wait for the results to come back?
        @param poll_interval: Interval between polling for job results
                (in minutes).
        @param email_from: Send notification email upon completion from here.
        @param email_to: Send notification email upon completion to here.
        """
        jobs = []
        for pairing in pairings:
            try:
                new_job = self.invoke_test(pairing, kernel, kernel_label,
                                           priority, timeout=timeout,
                                           kernel_cmdline=kernel_cmdline,
                                           max_runtime_hrs=max_runtime_hrs)
                if not new_job:
                    continue
                jobs.append(new_job)
            except Exception:
                traceback.print_exc()
        if not wait or not jobs:
            return
        tko = TKO()
        while True:
            time.sleep(60 * poll_interval)
            result = self.poll_all_jobs(tko, jobs, email_from, email_to)
            if result is not None:
                return result


    def result_notify(self, job, email_from, email_to):
        """
        Notify about the result of a job. Will always print; if email data
        is provided, will send email for it as well.

            job: job object to notify about
            email_from: send notification email upon completion from here
            email_to: send notification email upon completion to here
        """
        if job.result == True:
            subject = 'Testing PASSED: '
        else:
            subject = 'Testing FAILED: '
        subject += '%s : %s\n' % (job.name, job.id)
        text = []
        for platform in job.results_platform_map:
            for status in job.results_platform_map[platform]:
                if status == 'Total':
                    continue
                for host in job.results_platform_map[platform][status]:
                    text.append('%20s %10s %10s' % (platform, status, host))
                    if status == 'Failed':
                        for test_status in job.test_status[host].fail:
                            text.append('(%s, %s) : %s' %
                                        (host, test_status.test_name,
                                         test_status.reason))
                        text.append('')

        base_url = 'http://' + self.server

        params = ('columns=test',
                  'rows=machine_group',
                  "condition=tag~'%s-%%25'" % job.id,
                  'title=Report')
        query_string = '&'.join(params)
        url = '%s/tko/compose_query.cgi?%s' % (base_url, query_string)
        text.append(url + '\n')
        url = '%s/afe/#tab_id=view_job&object_id=%s' % (base_url, job.id)
        text.append(url + '\n')

        body = '\n'.join(text)
        print '---------------------------------------------------'
        print 'Subject: ', subject
        print body
        print '---------------------------------------------------'
        if email_from and email_to:
            print 'Sending email ...'
            utils.send_email(email_from, email_to, subject, body)
        print


    def print_job_result(self, job):
        """
        Print the result of a single job.

            job: a job object
        """
        if job.result is None:
            print 'PENDING',
        elif job.result == True:
            print 'PASSED',
        elif job.result == False:
            print 'FAILED',
        elif job.result == "Abort":
            print 'ABORT',
        print ' %s : %s' % (job.id, job.name)


    def poll_all_jobs(self, tko, jobs, email_from=None, email_to=None):
        """
        Poll all jobs in a list.

            jobs: list of job objects to poll
            email_from: send notification email upon completion from here
            email_to: send notification email upon completion to here

        Returns:
            a) All complete successfully (return True)
            b) One or more has failed (return False)
            c) Cannot tell yet (return None)
        """
        results = []
        for job in jobs:
            if getattr(job, 'result', None) is None:
                job.result = self.poll_job_results(tko, job)
                if job.result is not None:
                    self.result_notify(job, email_from, email_to)

            results.append(job.result)
            self.print_job_result(job)

        if None in results:
            return None
        elif False in results or "Abort" in results:
            return False
        else:
            return True


    def _included_platform(self, host, platforms):
        """
        See if host's platform matches any of the patterns in the included
        platforms list.
        """
        if not platforms:
            return True  # No filtering of platforms
        for platform in platforms:
            if re.search(platform, host.platform):
                return True
        return False


    def invoke_test(self, pairing, kernel, kernel_label, priority='Medium',
                    kernel_cmdline=None, **dargs):
        """
        Given a pairing of a control file to a machine label, find all machines
        with that label, and submit that control file to them.

        @param kernel_label: Label (string) of the kernel to run such as
                '<kernel-version> : <config> : <date>'
                If any pairing object has its job_label attribute set it
                will override this value for that particular job.

        @returns A job object, or None if no job could be created.
        """
        # The pairing can override the job label.
        if pairing.job_label:
            kernel_label = pairing.job_label
        job_name = '%s : %s' % (pairing.machine_label, kernel_label)
        hosts = self.get_hosts(multiple_labels=[pairing.machine_label])
        platforms = pairing.platforms
        hosts = [h for h in hosts if self._included_platform(h, platforms)]
        dead_statuses = self.host_statuses(live=False)
        host_list = [h.hostname for h in hosts if h.status not in dead_statuses]
        print 'HOSTS: %s' % host_list
        if pairing.atomic_group_sched:
            dargs['synch_count'] = pairing.synch_count
            dargs['atomic_group_name'] = pairing.machine_label
        else:
            dargs['hosts'] = host_list
        new_job = self.create_job_by_test(name=job_name,
                                          dependencies=[pairing.machine_label],
                                          tests=[pairing.control_file],
                                          priority=priority,
                                          kernel=kernel,
                                          kernel_cmdline=kernel_cmdline,
                                          use_container=pairing.container,
                                          **dargs)
        if new_job:
            if pairing.testname:
                new_job.testname = pairing.testname
            print 'Invoked test %s : %s' % (new_job.id, job_name)
        return new_job


    def _job_test_results(self, tko, job, debug, tests=()):
        """
        Retrieve test results for a job
        """
        job.test_status = {}
        try:
            test_statuses = tko.get_status_counts(job=job.id)
        except Exception:
            print "Ignoring exception on poll job; RPC interface is flaky"
            traceback.print_exc()
            return

        for test_status in test_statuses:
            # SERVER_JOB is buggy, and often gives false failures. Ignore it.
            if test_status.test_name == 'SERVER_JOB':
                continue
            # If tests is not empty, restrict the list of test_statuses to it.
            if tests and test_status.test_name not in tests:
                continue
            if debug:
                print test_status
            hostname = test_status.hostname
            if hostname not in job.test_status:
                job.test_status[hostname] = TestResults()
            job.test_status[hostname].add(test_status)


    def _job_results_platform_map(self, job, debug):
        # Figure out which hosts passed / failed / aborted in a job.
        # Creates a two-dimensional hash, stored as job.results_platform_map:
        #     1st index - platform type (string)
        #     2nd index - Status (string)
        #         'Completed' / 'Failed' / 'Aborted'
        # Data indexed by this hash is a list of hostnames (text strings).
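        # For illustration, the resulting map might look like this
        # (the platform and host names below are hypothetical):
        #     {'x86-64': {'Total': ['host1', 'host2'],
        #                 'Completed': ['host1'],
        #                 'Failed': ['host2']}}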
        job.results_platform_map = {}
        try:
            job_statuses = self.get_host_queue_entries(job=job.id)
        except Exception:
            print "Ignoring exception on poll job; RPC interface is flaky"
            traceback.print_exc()
            return None

        platform_map = {}
        job.job_status = {}
        job.metahost_index = {}
        for job_status in job_statuses:
            # This is basically "for each host / metahost in the job".
            if job_status.host:
                hostname = job_status.host.hostname
            else:  # This is a metahost
                metahost = job_status.meta_host
                index = job.metahost_index.get(metahost, 1)
                job.metahost_index[metahost] = index + 1
                hostname = '%s.%s' % (metahost, index)
            job.job_status[hostname] = job_status.status
            status = job_status.status
            # Skip hosts that failed verify or repair:
            # that's a machine failure, not a job failure.
            if hostname in job.test_status:
                verify_failed = False
                for failure in job.test_status[hostname].fail:
                    if (failure.test_name == 'verify' or
                        failure.test_name == 'repair'):
                        verify_failed = True
                        break
                if verify_failed:
                    continue
            if hostname in job.test_status and job.test_status[hostname].fail:
                # If any tests failed in the job, we want to mark the
                # job result as failed, overriding the default job status.
                if status != "Aborted":  # except if it's an aborted job
                    status = 'Failed'
            if job_status.host:
                platform = job_status.host.platform
            else:  # This is a metahost
                platform = job_status.meta_host
            if platform not in platform_map:
                platform_map[platform] = {'Total': [hostname]}
            else:
                platform_map[platform]['Total'].append(hostname)
            new_host_list = platform_map[platform].get(status, []) + [hostname]
            platform_map[platform][status] = new_host_list
        job.results_platform_map = platform_map


    def set_platform_results(self, test_job, platform, result):
        """
        Result must be None, 'FAIL', 'WARN' or 'GOOD'
        """
        if test_job.platform_results[platform] is not None:
            # We're already done, and results recorded. This can't change
            # later.
            return
        test_job.platform_results[platform] = result
        # Note that self.job refers to the metajob we're IN, not the job
        # that we're executing from here.
        testname = '%s.%s' % (test_job.testname, platform)
        if self.job:
            self.job.record(result, None, testname, status='')


    def poll_job_results(self, tko, job, debug=False):
        """
        Analyse all job results by platform, return:

            False: if any platform has more than one failure
            None: if any platform has more than one machine not yet Good.
            True: if all platforms have at least all-but-one machines Good.
        """
        self._job_test_results(tko, job, debug)
        if job.test_status == {}:
            return None
        self._job_results_platform_map(job, debug)

        good_platforms = []
        failed_platforms = []
        aborted_platforms = []
        unknown_platforms = []
        platform_map = job.results_platform_map
        for platform in platform_map:
            if platform not in job.platform_results:
                # Record test start, but there's no way to do this right now.
                job.platform_results[platform] = None
            total = len(platform_map[platform]['Total'])
            completed = len(platform_map[platform].get('Completed', []))
            failed = len(platform_map[platform].get('Failed', []))
            aborted = len(platform_map[platform].get('Aborted', []))

            # We set up what we want to record here, but don't actually do
            # it yet, until we have a decisive answer for this platform.
            if aborted or failed:
                bad = aborted + failed
                if (bad > 1) or (bad * 2 >= total):
                    platform_test_result = 'FAIL'
                else:
                    platform_test_result = 'WARN'

            if aborted > 1:
                aborted_platforms.append(platform)
                self.set_platform_results(job, platform, platform_test_result)
            elif (failed * 2 >= total) or (failed > 1):
                failed_platforms.append(platform)
                self.set_platform_results(job, platform, platform_test_result)
            elif (completed >= 1) and (completed + 1 >= total):
                # If all or all but one are good, call the job good.
                good_platforms.append(platform)
                self.set_platform_results(job, platform, 'GOOD')
            else:
                unknown_platforms.append(platform)
            detail = []
            for status in platform_map[platform]:
                if status == 'Total':
                    continue
                detail.append('%s=%s' % (status,
                                         platform_map[platform][status]))
            if debug:
                print '%20s %d/%d %s' % (platform, completed, total,
                                         ' '.join(detail))
                print

        if len(aborted_platforms) > 0:
            if debug:
                print 'Result aborted - platforms: ',
                print ' '.join(aborted_platforms)
            return "Abort"
        if len(failed_platforms) > 0:
            if debug:
                print 'Result bad - platforms: ' + ' '.join(failed_platforms)
            return False
        if len(unknown_platforms) > 0:
            if debug:
                platform_list = ' '.join(unknown_platforms)
                print 'Result unknown - platforms: ', platform_list
            return None
        if debug:
            platform_list = ' '.join(good_platforms)
            print 'Result good - all platforms passed: ', platform_list
        return True


class TestResults(object):
    """
    Container class used to hold the results of the tests for a job
    """
    def __init__(self):
        self.good = []
        self.fail = []
        self.pending = []


    def add(self, result):
        if result.complete_count > result.pass_count:
            self.fail.append(result)
        elif result.incomplete_count > 0:
            self.pending.append(result)
        else:
            self.good.append(result)


class RpcObject(object):
    """
    Generic object used to construct python objects from rpc calls
    """
    def __init__(self, afe, hash):
        self.afe = afe
        self.hash = hash
        self.__dict__.update(hash)


    def __str__(self):
        return dump_object(self.__repr__(), self)


class ControlFile(RpcObject):
    """
    AFE control file object

    Fields: synch_count, dependencies, control_file, is_server
    """
    def __repr__(self):
        return 'CONTROL FILE: %s' % self.control_file


class Label(RpcObject):
    """
    AFE label object

    Fields:
        name, invalid, platform, kernel_config, id, only_if_needed
    """
    def __repr__(self):
        return 'LABEL: %s' % self.name


    def add_hosts(self, hosts):
        return self.afe.run('label_add_hosts', self.id, hosts)


    def remove_hosts(self, hosts):
        return self.afe.run('label_remove_hosts', self.id, hosts)


class Acl(RpcObject):
    """
    AFE acl object

    Fields:
        users, hosts, description, name, id
    """
    def __repr__(self):
        return 'ACL: %s' % self.name


    def add_hosts(self, hosts):
        self.afe.log('Adding hosts %s to ACL %s' % (hosts, self.name))
        return self.afe.run('acl_group_add_hosts', self.id, hosts)


    def remove_hosts(self, hosts):
        self.afe.log('Removing hosts %s from ACL %s' % (hosts, self.name))
        return self.afe.run('acl_group_remove_hosts', self.id, hosts)


    def add_users(self, users):
        self.afe.log('Adding users %s to ACL %s' % (users, self.name))
        return self.afe.run('acl_group_add_users', id=self.name, users=users)


class Job(RpcObject):
    """
    AFE job object

    Fields:
        name, control_file, control_type, synch_count, reboot_before,
        run_verify, priority, email_list, created_on, dependencies,
        timeout, owner, reboot_after, id
    """
    def __repr__(self):
        return 'JOB: %s' % self.id


class JobStatus(RpcObject):
    """
    AFE job_status object

    Fields:
        status, complete, deleted, meta_host, host, active, execution_subdir,
        id
    """
    def __init__(self, afe, hash):
        # This should call super
        self.afe = afe
        self.hash = hash
        self.__dict__.update(hash)
        self.job = Job(afe, self.job)
        if self.host:
            self.host = Host(afe, self.host)


    def __repr__(self):
        if self.host and self.host.hostname:
            hostname = self.host.hostname
        else:
            hostname = 'None'
        return 'JOB STATUS: %s-%s' % (self.job.id, hostname)


class Host(RpcObject):
    """
    AFE host object

    Fields:
        status, lock_time, locked_by, locked, hostname, invalid,
        synch_id, labels, platform, protection, dirty, id
    """
    def __repr__(self):
        return 'HOST OBJECT: %s' % self.hostname


    def show(self):
        labels = list(set(self.labels) - set([self.platform]))
        print '%-6s %-7s %-7s %-16s %s' % (self.hostname, self.status,
                                           self.locked, self.platform,
                                           ', '.join(labels))


    def delete(self):
        return self.afe.run('delete_host', id=self.id)


    def modify(self, **dargs):
        return self.afe.run('modify_host', id=self.id, **dargs)


    def get_acls(self):
        return self.afe.get_acls(hosts__hostname=self.hostname)


    def add_acl(self, acl_name):
        self.afe.log('Adding ACL %s to host %s' % (acl_name, self.hostname))
        return self.afe.run('acl_group_add_hosts', id=acl_name,
                            hosts=[self.hostname])


    def remove_acl(self, acl_name):
        self.afe.log('Removing ACL %s from host %s' % (acl_name,
                                                       self.hostname))
        return self.afe.run('acl_group_remove_hosts', id=acl_name,
                            hosts=[self.hostname])


    def get_labels(self):
        return self.afe.get_labels(host__hostname__in=[self.hostname])


    def add_labels(self, labels):
        self.afe.log('Adding labels %s to host %s' % (labels, self.hostname))
        return self.afe.run('host_add_labels', id=self.id, labels=labels)


    def remove_labels(self, labels):
        self.afe.log('Removing labels %s from host %s' % (labels,
                                                          self.hostname))
        return self.afe.run('host_remove_labels', id=self.id, labels=labels)


class User(RpcObject):
    def __repr__(self):
        return 'USER: %s' % self.login


class TestStatus(RpcObject):
    """
    TKO test status object

    Fields:
        test_idx, hostname, test_name, id,
        complete_count, incomplete_count, group_count, pass_count
    """
    def __repr__(self):
        return 'TEST STATUS: %s' % self.id


class MachineTestPairing(object):
    """
    Object representing the pairing of a machine label with a control file

        machine_label: use machines from this label
        control_file: use this control file (by name in the frontend)
        platforms: list of regexps to filter platforms by. [] => no filtering
        job_label: The label (name) to give to the autotest job launched
                to run this pairing. '<kernel-version> : <config> : <date>'
    """
    def __init__(self, machine_label, control_file, platforms=[],
                 container=False, atomic_group_sched=False, synch_count=0,
                 testname=None, job_label=None):
        self.machine_label = machine_label
        self.control_file = control_file
        self.platforms = platforms
        self.container = container
        self.atomic_group_sched = atomic_group_sched
        self.synch_count = synch_count
        self.testname = testname
        self.job_label = job_label


    def __repr__(self):
        return '%s %s %s %s' % (self.machine_label, self.control_file,
                                self.platforms, self.container)