# Copyright Martin J. Bligh, Google Inc 2008
# Released under the GPL v2

"""
This module allows you to communicate with the frontend to submit jobs, etc.
It is designed for writing more sophisticated server-side control files that
can recursively add and manage other jobs.

We turn the JSON dictionaries into real objects that are more idiomatic.

For docs, see:
    http://autotest/afe/server/noauth/rpc/
    http://autotest/new_tko/server/noauth/rpc/
    http://docs.djangoproject.com/en/dev/ref/models/querysets/#queryset-api
"""

import getpass, os, time, traceback, re
import common
from autotest_lib.frontend.afe import rpc_client_lib
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib import utils
try:
    from autotest_lib.server.site_common import site_utils as server_utils
except ImportError:
    from autotest_lib.server import utils as server_utils
form_ntuples_from_machines = server_utils.form_ntuples_from_machines

GLOBAL_CONFIG = global_config.global_config
DEFAULT_SERVER = 'autotest'

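# Typical usage (an illustrative sketch only; the server name and status value
# below are placeholders, not values defined by this module):
#
#     from autotest_lib.server import frontend
#     afe = frontend.AFE(server='autotest.example.com')
#     for host in afe.get_hosts(status='Ready'):
#         host.show()
#     tko = frontend.TKO(server='autotest.example.com')
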
def dump_object(header, obj):
    """
    Standard way to print out the frontend objects (e.g. job, host, acl, label)
    in a human-readable fashion for debugging
    """
    result = header + '\n'
    for key in obj.hash:
        if key == 'afe' or key == 'hash':
            continue
        result += '%20s: %s\n' % (key, obj.hash[key])
    return result


class RpcClient(object):
    """
    Abstract RPC class for communicating with the autotest frontend.
    Inherited for both TKO and AFE uses.

    All the constructors go in the afe / tko class.
    Manipulating methods go in the object classes themselves.
    """
    def __init__(self, path, user, server, print_log, debug, reply_debug):
        """
        Create a cached instance of a connection to the frontend

            user: username to connect as
            server: frontend server to connect to
            print_log: print a logging message to stdout on every operation
            debug: print out all RPC traffic
        """
        if not user:
            user = getpass.getuser()
        if not server:
            if 'AUTOTEST_WEB' in os.environ:
                server = os.environ['AUTOTEST_WEB']
            else:
                server = GLOBAL_CONFIG.get_config_value('SERVER', 'hostname',
                                                        default=DEFAULT_SERVER)
        self.server = server
        self.user = user
        self.print_log = print_log
        self.debug = debug
        self.reply_debug = reply_debug
        headers = {'AUTHORIZATION' : self.user}
        rpc_server = 'http://' + server + path
        if debug:
            print 'SERVER: %s' % rpc_server
            print 'HEADERS: %s' % headers
        self.proxy = rpc_client_lib.get_proxy(rpc_server, headers=headers)


    def run(self, call, **dargs):
        """
        Make an RPC call to the AFE server
        """
        rpc_call = getattr(self.proxy, call)
        if self.debug:
            print 'DEBUG: %s %s' % (call, dargs)
        try:
            result = utils.strip_unicode(rpc_call(**dargs))
            if self.reply_debug:
                print result
            return result
        except Exception:
            print 'FAILED RPC CALL: %s %s' % (call, dargs)
            raise


    def log(self, message):
        if self.print_log:
            print message


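# RpcClient.run() looks the RPC up by name on the proxy, so any method exposed
# by the frontend's RPC interface can be invoked directly by name. A minimal
# sketch (the hostnames are placeholders):
#
#     afe = AFE()
#     raw_hosts = afe.run('get_hosts', hostname__in=['host1', 'host2'])
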
class TKO(RpcClient):
    def __init__(self, user=None, server=None, print_log=True, debug=False,
                 reply_debug=False):
        super(TKO, self).__init__(path='/new_tko/server/noauth/rpc/',
                                  user=user,
                                  server=server,
                                  print_log=print_log,
                                  debug=debug,
                                  reply_debug=reply_debug)


    def get_status_counts(self, job, **data):
        entries = self.run('get_status_counts',
                           group_by=['hostname', 'test_name', 'reason'],
                           job_tag__startswith='%s-' % job, **data)
        return [TestStatus(self, e) for e in entries['groups']]


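# The TKO client returns per-test status counts for a job; poll_job_results()
# below builds its pass/fail view from these. A hedged sketch (the job id is a
# placeholder):
#
#     tko = TKO()
#     for status in tko.get_status_counts(job=1234):
#         print status.test_name, status.complete_count, status.pass_count
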
mbligh5280e3b2008-12-22 14:39:28 +0000122class AFE(RpcClient):
mbligh17c75e62009-06-08 16:18:21 +0000123 def __init__(self, user=None, server=None, print_log=True, debug=False,
mbligh99b24f42009-06-08 16:45:55 +0000124 reply_debug=False, job=None):
mbligh17c75e62009-06-08 16:18:21 +0000125 self.job = job
mbligh99b24f42009-06-08 16:45:55 +0000126 super(AFE, self).__init__(path='/afe/server/noauth/rpc/',
127 user=user,
128 server=server,
129 print_log=print_log,
130 debug=debug,
131 reply_debug=reply_debug)
mblighc31e4022008-12-11 19:32:30 +0000132
mbligh1ef218d2009-08-03 16:57:56 +0000133
mbligh67647152008-11-19 00:18:14 +0000134 def host_statuses(self, live=None):
mblighc2847b72009-03-25 19:32:20 +0000135 dead_statuses = ['Dead', 'Repair Failed', 'Repairing']
mbligh67647152008-11-19 00:18:14 +0000136 statuses = self.run('get_static_data')['host_statuses']
137 if live == True:
mblighc2847b72009-03-25 19:32:20 +0000138 return list(set(statuses) - set(dead_statuses))
mbligh67647152008-11-19 00:18:14 +0000139 if live == False:
140 return dead_statuses
141 else:
142 return statuses
143
144
mbligh71094012009-12-19 05:35:21 +0000145 @staticmethod
146 def _dict_for_host_query(hostnames=(), status=None, label=None):
147 query_args = {}
mbligh4e545a52009-12-19 05:30:39 +0000148 if hostnames:
149 query_args['hostname__in'] = hostnames
150 if status:
151 query_args['status'] = status
152 if label:
153 query_args['labels__name'] = label
mbligh71094012009-12-19 05:35:21 +0000154 return query_args
155
156
157 def get_hosts(self, hostnames=(), status=None, label=None, **dargs):
158 query_args = dict(dargs)
159 query_args.update(self._dict_for_host_query(hostnames=hostnames,
160 status=status,
161 label=label))
162 hosts = self.run('get_hosts', **query_args)
163 return [Host(self, h) for h in hosts]
164
165
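    # get_hosts() forwards extra keyword arguments straight to the frontend
    # RPC, so the Django queryset-style filters documented at the top of this
    # file can be used. A small sketch, assuming an AFE client 'afe' as above
    # (label and hostnames are placeholders):
    #
    #     ready = afe.get_hosts(label='netbook', status='Ready')
    #     pair = afe.get_hosts(hostnames=['host1', 'host2'])
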
    def get_hostnames(self, status=None, label=None, **dargs):
        """Like get_hosts() but returns hostnames instead of Host objects."""
        # This implementation can be replaced with a more efficient one
        # that does not query for entire host objects in the future.
        return [host_obj.hostname for host_obj in
                self.get_hosts(status=status, label=label, **dargs)]


    def reverify_hosts(self, hostnames=(), status=None, label=None):
        query_args = dict(locked=False,
                          aclgroup__users__login=self.user)
        query_args.update(self._dict_for_host_query(hostnames=hostnames,
                                                    status=status,
                                                    label=label))
        return self.run('reverify_hosts', **query_args)


    def create_host(self, hostname, **dargs):
        id = self.run('add_host', hostname=hostname, **dargs)
        return self.get_hosts(id=id)[0]


    def get_labels(self, **dargs):
        labels = self.run('get_labels', **dargs)
        return [Label(self, l) for l in labels]


    def create_label(self, name, **dargs):
        id = self.run('add_label', name=name, **dargs)
        return self.get_labels(id=id)[0]


    def get_acls(self, **dargs):
        acls = self.run('get_acl_groups', **dargs)
        return [Acl(self, a) for a in acls]


    def create_acl(self, name, **dargs):
        id = self.run('add_acl_group', name=name, **dargs)
        return self.get_acls(id=id)[0]


    def get_users(self, **dargs):
        users = self.run('get_users', **dargs)
        return [User(self, u) for u in users]


    def generate_control_file(self, tests, **dargs):
        ret = self.run('generate_control_file', tests=tests, **dargs)
        return ControlFile(self, ret)


    def get_jobs(self, summary=False, **dargs):
        if summary:
            jobs_data = self.run('get_jobs_summary', **dargs)
        else:
            jobs_data = self.run('get_jobs', **dargs)
        jobs = []
        for j in jobs_data:
            job = Job(self, j)
            # Set up some extra information defaults
            job.testname = re.sub(r'\s.*', '', job.name)  # arbitrary default
            job.platform_results = {}
            job.platform_reasons = {}
            jobs.append(job)
        return jobs


    def get_host_queue_entries(self, **data):
        entries = self.run('get_host_queue_entries', **data)
        job_statuses = [JobStatus(self, e) for e in entries]

        # Sadly, get_host_queue_entries doesn't return platforms; we have
        # to get those back from an explicit get_hosts query, then patch
        # the new host objects back into the host list.
        hostnames = [s.host.hostname for s in job_statuses if s.host]
        host_hash = {}
        for host in self.get_hosts(hostname__in=hostnames):
            host_hash[host.hostname] = host
        for status in job_statuses:
            if status.host:
                status.host = host_hash[status.host.hostname]
        # Filter to job statuses that have either a host or a meta_host.
        return [status for status in job_statuses if (status.host or
                                                      status.meta_host)]


    def create_job_by_test(self, tests, kernel=None, use_container=False,
                           **dargs):
        """
        Given a test name, fetch the appropriate control file from the server
        and submit it.

        Returns the new job object, or None if the request cannot be
        satisfied (e.g. not enough hosts for the required synch_count).
        """
        assert ('hosts' in dargs or
                'atomic_group_name' in dargs and 'synch_count' in dargs)
        if kernel:
            kernel_list = re.split(r'[\s,]+', kernel.strip())
            kernel_info = [{'version': version} for version in kernel_list]
        else:
            kernel_info = None
        control_file = self.generate_control_file(
            tests=tests, kernel=kernel_info, use_container=use_container,
            do_push_packages=True)
        if control_file.is_server:
            dargs['control_type'] = 'Server'
        else:
            dargs['control_type'] = 'Client'
        dargs['dependencies'] = dargs.get('dependencies', []) + \
                                control_file.dependencies
        dargs['control_file'] = control_file.control_file
        if not dargs.get('synch_count', None):
            dargs['synch_count'] = control_file.synch_count
        if 'hosts' in dargs and len(dargs['hosts']) < dargs['synch_count']:
            # We will not be able to satisfy this request.
            return None
        return self.create_job(**dargs)


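    # create_job_by_test() asks the server to generate the control file and
    # then submits it via create_job(). A hedged sketch, assuming an AFE
    # client 'afe' (the test and host names are placeholders):
    #
    #     job = afe.create_job_by_test(name='sleeptest on host1',
    #                                  tests=['sleeptest'], hosts=['host1'])
    #     if job:
    #         print 'Created job', job.id
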
    def create_job(self, control_file, name=' ', priority='Medium',
                   control_type='Client', **dargs):
        id = self.run('create_job', name=name, priority=priority,
                      control_file=control_file, control_type=control_type,
                      **dargs)
        return self.get_jobs(id=id)[0]


    def run_test_suites(self, pairings, kernel, kernel_label=None,
                        priority='Medium', wait=True, poll_interval=10,
                        email_from=None, email_to=None, timeout=168):
        """
        Run a list of test suites on a particular kernel.

        Poll for them to complete, and return whether they worked or not.

        @param pairings: List of MachineTestPairing objects to invoke.
        @param kernel: Name of the kernel to run.
        @param kernel_label: Label (string) of the kernel to run such as
            '<kernel-version> : <config> : <date>'.
            If any pairing object has its job_label attribute set it
            will override this value for that particular job.
        @param wait: boolean - Wait for the results to come back?
        @param poll_interval: Interval between polling for job results (in mins)
        @param email_from: Send notification email upon completion from here.
        @param email_to: Send notification email upon completion to here.
        """
        jobs = []
        for pairing in pairings:
            try:
                new_job = self.invoke_test(pairing, kernel, kernel_label,
                                           priority, timeout=timeout)
                if not new_job:
                    continue
                jobs.append(new_job)
            except Exception, e:
                traceback.print_exc()
        if not wait or not jobs:
            return
        tko = TKO()
        while True:
            time.sleep(60 * poll_interval)
            result = self.poll_all_jobs(tko, jobs, email_from, email_to)
            if result is not None:
                return result


    def result_notify(self, job, email_from, email_to):
        """
        Notify about the result of a job. Will always print; if email data
        is provided, will send email for it as well.

            job: job object to notify about
            email_from: send notification email upon completion from here
            email_to: send notification email upon completion to here
        """
        if job.result == True:
            subject = 'Testing PASSED: '
        else:
            subject = 'Testing FAILED: '
        subject += '%s : %s\n' % (job.name, job.id)
        text = []
        for platform in job.results_platform_map:
            for status in job.results_platform_map[platform]:
                if status == 'Total':
                    continue
                for host in job.results_platform_map[platform][status]:
                    text.append('%20s %10s %10s' % (platform, status, host))
                    if status == 'Failed':
                        for test_status in job.test_status[host].fail:
                            text.append('(%s, %s) : %s' % \
                                        (host, test_status.test_name,
                                         test_status.reason))
                        text.append('')

        base_url = 'http://' + self.server

        params = ('columns=test',
                  'rows=machine_group',
                  "condition=tag~'%s-%%25'" % job.id,
                  'title=Report')
        query_string = '&'.join(params)
        url = '%s/tko/compose_query.cgi?%s' % (base_url, query_string)
        text.append(url + '\n')
        url = '%s/afe/#tab_id=view_job&object_id=%s' % (base_url, job.id)
        text.append(url + '\n')

        body = '\n'.join(text)
        print '---------------------------------------------------'
        print 'Subject: ', subject
        print body
        print '---------------------------------------------------'
        if email_from and email_to:
            print 'Sending email ...'
            utils.send_email(email_from, email_to, subject, body)
        print


    def print_job_result(self, job):
        """
        Print the result of a single job.

            job: a job object
        """
        if job.result is None:
            print 'PENDING',
        elif job.result == True:
            print 'PASSED',
        elif job.result == False:
            print 'FAILED',
        elif job.result == "Abort":
            print 'ABORT',
        print ' %s : %s' % (job.id, job.name)


mbligh451ede12009-02-12 21:54:03 +0000399 def poll_all_jobs(self, tko, jobs, email_from=None, email_to=None):
mbligh45ffc432008-12-09 23:35:17 +0000400 """
401 Poll all jobs in a list.
402 jobs: list of job objects to poll
403 email_from: send notification email upon completion from here
404 email_from: send notification email upon completion to here
405
406 Returns:
mbligh5b618382008-12-03 15:24:01 +0000407 a) All complete successfully (return True)
408 b) One or more has failed (return False)
409 c) Cannot tell yet (return None)
410 """
mbligh45ffc432008-12-09 23:35:17 +0000411 results = []
mbligh5b618382008-12-03 15:24:01 +0000412 for job in jobs:
mbligh676dcbe2009-06-15 21:57:27 +0000413 if getattr(job, 'result', None) is None:
414 job.result = self.poll_job_results(tko, job)
415 if job.result is not None:
416 self.result_notify(job, email_from, email_to)
mbligh45ffc432008-12-09 23:35:17 +0000417
mbligh676dcbe2009-06-15 21:57:27 +0000418 results.append(job.result)
mbligh1354c9d2008-12-22 14:56:13 +0000419 self.print_job_result(job)
mbligh45ffc432008-12-09 23:35:17 +0000420
421 if None in results:
422 return None
mbligh912c3f32009-03-25 19:31:30 +0000423 elif False in results or "Abort" in results:
mbligh45ffc432008-12-09 23:35:17 +0000424 return False
425 else:
426 return True
mbligh5b618382008-12-03 15:24:01 +0000427
428
mbligh1f23f362008-12-22 14:46:12 +0000429 def _included_platform(self, host, platforms):
430 """
431 See if host's platforms matches any of the patterns in the included
432 platforms list.
433 """
434 if not platforms:
435 return True # No filtering of platforms
436 for platform in platforms:
437 if re.search(platform, host.platform):
438 return True
439 return False
440
441
    def invoke_test(self, pairing, kernel, kernel_label, priority='Medium',
                    **dargs):
        """
        Given a pairing of a control file to a machine label, find all machines
        with that label, and submit that control file to them.

        @param kernel_label: Label (string) of the kernel to run such as
            '<kernel-version> : <config> : <date>'.
            If any pairing object has its job_label attribute set it
            will override this value for that particular job.

        @returns The new job object, or None if no job could be created.
        """
        # The pairing can override the job label.
        if pairing.job_label:
            kernel_label = pairing.job_label
        job_name = '%s : %s' % (pairing.machine_label, kernel_label)
        hosts = self.get_hosts(multiple_labels=[pairing.machine_label])
        platforms = pairing.platforms
        hosts = [h for h in hosts if self._included_platform(h, platforms)]
        dead_statuses = self.host_statuses(live=False)
        host_list = [h.hostname for h in hosts if h.status not in dead_statuses]
        print 'HOSTS: %s' % host_list
        if pairing.atomic_group_sched:
            dargs['synch_count'] = pairing.synch_count
            dargs['atomic_group_name'] = pairing.machine_label
        else:
            dargs['hosts'] = host_list
        new_job = self.create_job_by_test(name=job_name,
                                          dependencies=[pairing.machine_label],
                                          tests=[pairing.control_file],
                                          priority=priority,
                                          kernel=kernel,
                                          use_container=pairing.container,
                                          **dargs)
        if new_job:
            if pairing.testname:
                new_job.testname = pairing.testname
            print 'Invoked test %s : %s' % (new_job.id, job_name)
        return new_job


    def _job_test_results(self, tko, job, debug, tests=[]):
        """
        Retrieve test results for a job
        """
        job.test_status = {}
        try:
            test_statuses = tko.get_status_counts(job=job.id)
        except Exception:
            print "Ignoring exception on poll job; RPC interface is flaky"
            traceback.print_exc()
            return

        for test_status in test_statuses:
            # SERVER_JOB is buggy, and often gives false failures. Ignore it.
            if test_status.test_name == 'SERVER_JOB':
                continue
            # if tests is not empty, restrict list of test_statuses to tests
            if tests and test_status.test_name not in tests:
                continue
            if debug:
                print test_status
            hostname = test_status.hostname
            if hostname not in job.test_status:
                job.test_status[hostname] = TestResults()
            job.test_status[hostname].add(test_status)


    def _job_results_platform_map(self, job, debug):
        # Figure out which hosts passed / failed / aborted in a job.
        # Creates a 2-dimensional hash, stored as job.results_platform_map:
        #     1st index - platform type (string)
        #     2nd index - status (string)
        #         'Completed' / 'Failed' / 'Aborted'
        # Data indexed by this hash is a list of hostnames (text strings).
        job.results_platform_map = {}
        try:
            job_statuses = self.get_host_queue_entries(job=job.id)
        except Exception:
            print "Ignoring exception on poll job; RPC interface is flaky"
            traceback.print_exc()
            return None

        platform_map = {}
        job.job_status = {}
        job.metahost_index = {}
        for job_status in job_statuses:
            # This is basically "for each host / metahost in the job".
            if job_status.host:
                hostname = job_status.host.hostname
            else:  # This is a metahost
                metahost = job_status.meta_host
                index = job.metahost_index.get(metahost, 1)
                job.metahost_index[metahost] = index + 1
                hostname = '%s.%s' % (metahost, index)
            job.job_status[hostname] = job_status.status
            status = job_status.status
            # Skip hosts that failed verify or repair:
            # that's a machine failure, not a job failure.
            if hostname in job.test_status:
                verify_failed = False
                for failure in job.test_status[hostname].fail:
                    if (failure.test_name == 'verify' or
                        failure.test_name == 'repair'):
                        verify_failed = True
                        break
                if verify_failed:
                    continue
            if hostname in job.test_status and job.test_status[hostname].fail:
                # If any tests failed in the job, we want to mark the
                # job result as failed, overriding the default job status.
                if status != "Aborted":  # except if it's an aborted job
                    status = 'Failed'
            if job_status.host:
                platform = job_status.host.platform
            else:  # This is a metahost
                platform = job_status.meta_host
            if platform not in platform_map:
                platform_map[platform] = {'Total' : [hostname]}
            else:
                platform_map[platform]['Total'].append(hostname)
            new_host_list = platform_map[platform].get(status, []) + [hostname]
            platform_map[platform][status] = new_host_list
        job.results_platform_map = platform_map


    def set_platform_results(self, test_job, platform, result):
        """
        Result must be None, 'FAIL', 'WARN' or 'GOOD'
        """
        if test_job.platform_results[platform] is not None:
            # We're already done, and results recorded. This can't change later.
            return
        test_job.platform_results[platform] = result
        # Note that self.job refers to the metajob we're IN, not the job
        # that we're executing from here.
        testname = '%s.%s' % (test_job.testname, platform)
        if self.job:
            self.job.record(result, None, testname, status='')


    def poll_job_results(self, tko, job, debug=False):
        """
        Analyse all job results by platform, return:

            False: if any platform has more than one failure
            None: if any platform has more than one machine not yet Good.
            True: if all platforms have at least all-but-one machines Good.
            "Abort": if any platform has more than one machine aborted.
        """
        self._job_test_results(tko, job, debug)
        if job.test_status == {}:
            return None
        self._job_results_platform_map(job, debug)

        good_platforms = []
        failed_platforms = []
        aborted_platforms = []
        unknown_platforms = []
        platform_map = job.results_platform_map
        for platform in platform_map:
            if not job.platform_results.has_key(platform):
                # record test start, but there's no way to do this right now
                job.platform_results[platform] = None
            total = len(platform_map[platform]['Total'])
            completed = len(platform_map[platform].get('Completed', []))
            failed = len(platform_map[platform].get('Failed', []))
            aborted = len(platform_map[platform].get('Aborted', []))

            # We set up what we want to record here, but don't actually do
            # it yet, until we have a decisive answer for this platform.
            if aborted or failed:
                bad = aborted + failed
                if (bad > 1) or (bad * 2 >= total):
                    platform_test_result = 'FAIL'
                else:
                    platform_test_result = 'WARN'

            if aborted > 1:
                aborted_platforms.append(platform)
                self.set_platform_results(job, platform, platform_test_result)
            elif (failed * 2 >= total) or (failed > 1):
                failed_platforms.append(platform)
                self.set_platform_results(job, platform, platform_test_result)
            elif (completed >= 1) and (completed + 1 >= total):
                # if all or all but one are good, call the job good.
                good_platforms.append(platform)
                self.set_platform_results(job, platform, 'GOOD')
            else:
                unknown_platforms.append(platform)
            detail = []
            for status in platform_map[platform]:
                if status == 'Total':
                    continue
                detail.append('%s=%s' % (status, platform_map[platform][status]))
            if debug:
                print '%20s %d/%d %s' % (platform, completed, total,
                                         ' '.join(detail))
                print

        if len(aborted_platforms) > 0:
            if debug:
                print 'Result aborted - platforms: ',
                print ' '.join(aborted_platforms)
            return "Abort"
        if len(failed_platforms) > 0:
            if debug:
                print 'Result bad - platforms: ' + ' '.join(failed_platforms)
            return False
        if len(unknown_platforms) > 0:
            if debug:
                platform_list = ' '.join(unknown_platforms)
                print 'Result unknown - platforms: ', platform_list
            return None
        if debug:
            platform_list = ' '.join(good_platforms)
            print 'Result good - all platforms passed: ', platform_list
        return True


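# A worked example of the poll_job_results() thresholds above (illustrative
# numbers only): with Total=4 hosts on a platform, 3 Completed and 1 still
# running is good (completed + 1 >= total); 2 Failed marks the platform failed
# (failed * 2 >= total); and 2 Aborted makes the whole poll return "Abort".
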
mbligh5280e3b2008-12-22 14:39:28 +0000662class TestResults(object):
663 """
664 Container class used to hold the results of the tests for a job
665 """
666 def __init__(self):
667 self.good = []
668 self.fail = []
mbligh451ede12009-02-12 21:54:03 +0000669 self.pending = []
mbligh5280e3b2008-12-22 14:39:28 +0000670
671
672 def add(self, result):
mbligh451ede12009-02-12 21:54:03 +0000673 if result.complete_count > result.pass_count:
674 self.fail.append(result)
675 elif result.incomplete_count > 0:
676 self.pending.append(result)
mbligh5280e3b2008-12-22 14:39:28 +0000677 else:
mbligh451ede12009-02-12 21:54:03 +0000678 self.good.append(result)
mbligh5280e3b2008-12-22 14:39:28 +0000679
680
681class RpcObject(object):
mbligh67647152008-11-19 00:18:14 +0000682 """
683 Generic object used to construct python objects from rpc calls
684 """
685 def __init__(self, afe, hash):
686 self.afe = afe
687 self.hash = hash
688 self.__dict__.update(hash)
689
690
691 def __str__(self):
692 return dump_object(self.__repr__(), self)
693
694
mbligh1354c9d2008-12-22 14:56:13 +0000695class ControlFile(RpcObject):
696 """
697 AFE control file object
698
699 Fields: synch_count, dependencies, control_file, is_server
700 """
701 def __repr__(self):
702 return 'CONTROL FILE: %s' % self.control_file
703
704
mbligh5280e3b2008-12-22 14:39:28 +0000705class Label(RpcObject):
mbligh67647152008-11-19 00:18:14 +0000706 """
707 AFE label object
708
709 Fields:
710 name, invalid, platform, kernel_config, id, only_if_needed
711 """
712 def __repr__(self):
713 return 'LABEL: %s' % self.name
714
715
716 def add_hosts(self, hosts):
717 return self.afe.run('label_add_hosts', self.id, hosts)
718
719
720 def remove_hosts(self, hosts):
721 return self.afe.run('label_remove_hosts', self.id, hosts)
722
723
mbligh5280e3b2008-12-22 14:39:28 +0000724class Acl(RpcObject):
mbligh67647152008-11-19 00:18:14 +0000725 """
726 AFE acl object
727
728 Fields:
729 users, hosts, description, name, id
730 """
731 def __repr__(self):
732 return 'ACL: %s' % self.name
733
734
735 def add_hosts(self, hosts):
736 self.afe.log('Adding hosts %s to ACL %s' % (hosts, self.name))
737 return self.afe.run('acl_group_add_hosts', self.id, hosts)
738
739
740 def remove_hosts(self, hosts):
741 self.afe.log('Removing hosts %s from ACL %s' % (hosts, self.name))
742 return self.afe.run('acl_group_remove_hosts', self.id, hosts)
743
744
mbligh54459c72009-01-21 19:26:44 +0000745 def add_users(self, users):
746 self.afe.log('Adding users %s to ACL %s' % (users, self.name))
747 return self.afe.run('acl_group_add_users', id=self.name, users=users)
748
749
mbligh5280e3b2008-12-22 14:39:28 +0000750class Job(RpcObject):
mbligh67647152008-11-19 00:18:14 +0000751 """
752 AFE job object
753
754 Fields:
755 name, control_file, control_type, synch_count, reboot_before,
756 run_verify, priority, email_list, created_on, dependencies,
757 timeout, owner, reboot_after, id
758 """
759 def __repr__(self):
760 return 'JOB: %s' % self.id
761
762
mbligh5280e3b2008-12-22 14:39:28 +0000763class JobStatus(RpcObject):
mbligh67647152008-11-19 00:18:14 +0000764 """
765 AFE job_status object
766
767 Fields:
768 status, complete, deleted, meta_host, host, active, execution_subdir, id
769 """
770 def __init__(self, afe, hash):
771 # This should call super
772 self.afe = afe
773 self.hash = hash
774 self.__dict__.update(hash)
mbligh5280e3b2008-12-22 14:39:28 +0000775 self.job = Job(afe, self.job)
mbligh67647152008-11-19 00:18:14 +0000776 if self.host:
mbligh99b24f42009-06-08 16:45:55 +0000777 self.host = Host(afe, self.host)
mbligh67647152008-11-19 00:18:14 +0000778
779
780 def __repr__(self):
mbligh451ede12009-02-12 21:54:03 +0000781 if self.host and self.host.hostname:
782 hostname = self.host.hostname
783 else:
784 hostname = 'None'
785 return 'JOB STATUS: %s-%s' % (self.job.id, hostname)
mbligh67647152008-11-19 00:18:14 +0000786
787
mbligh5280e3b2008-12-22 14:39:28 +0000788class Host(RpcObject):
mbligh67647152008-11-19 00:18:14 +0000789 """
790 AFE host object
791
792 Fields:
793 status, lock_time, locked_by, locked, hostname, invalid,
794 synch_id, labels, platform, protection, dirty, id
795 """
796 def __repr__(self):
797 return 'HOST OBJECT: %s' % self.hostname
798
799
800 def show(self):
801 labels = list(set(self.labels) - set([self.platform]))
802 print '%-6s %-7s %-7s %-16s %s' % (self.hostname, self.status,
803 self.locked, self.platform,
804 ', '.join(labels))
805
806
mbligh54459c72009-01-21 19:26:44 +0000807 def delete(self):
808 return self.afe.run('delete_host', id=self.id)
809
810
mbligh6463c4b2009-01-30 00:33:37 +0000811 def modify(self, **dargs):
812 return self.afe.run('modify_host', id=self.id, **dargs)
813
814
mbligh67647152008-11-19 00:18:14 +0000815 def get_acls(self):
816 return self.afe.get_acls(hosts__hostname=self.hostname)
817
818
819 def add_acl(self, acl_name):
820 self.afe.log('Adding ACL %s to host %s' % (acl_name, self.hostname))
821 return self.afe.run('acl_group_add_hosts', id=acl_name,
822 hosts=[self.hostname])
823
824
825 def remove_acl(self, acl_name):
826 self.afe.log('Removing ACL %s from host %s' % (acl_name, self.hostname))
827 return self.afe.run('acl_group_remove_hosts', id=acl_name,
828 hosts=[self.hostname])
829
830
831 def get_labels(self):
832 return self.afe.get_labels(host__hostname__in=[self.hostname])
833
834
835 def add_labels(self, labels):
836 self.afe.log('Adding labels %s to host %s' % (labels, self.hostname))
837 return self.afe.run('host_add_labels', id=self.id, labels=labels)
838
839
840 def remove_labels(self, labels):
841 self.afe.log('Removing labels %s from host %s' % (labels,self.hostname))
842 return self.afe.run('host_remove_labels', id=self.id, labels=labels)
mbligh5b618382008-12-03 15:24:01 +0000843
844
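# Host objects are manipulated through the frontend RPCs; a small sketch,
# assuming an AFE client 'afe' (the host, label and ACL names are
# placeholders):
#
#     host = afe.get_hosts(hostnames=['host1'])[0]
#     host.add_labels(['regression'])
#     host.add_acl('my_acl')
#     host.show()
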
mbligh54459c72009-01-21 19:26:44 +0000845class User(RpcObject):
846 def __repr__(self):
847 return 'USER: %s' % self.login
848
849
mbligh5280e3b2008-12-22 14:39:28 +0000850class TestStatus(RpcObject):
mblighc31e4022008-12-11 19:32:30 +0000851 """
852 TKO test status object
853
854 Fields:
855 test_idx, hostname, testname, id
856 complete_count, incomplete_count, group_count, pass_count
857 """
858 def __repr__(self):
859 return 'TEST STATUS: %s' % self.id
860
861
mbligh5b618382008-12-03 15:24:01 +0000862class MachineTestPairing(object):
863 """
864 Object representing the pairing of a machine label with a control file
mbligh1f23f362008-12-22 14:46:12 +0000865
866 machine_label: use machines from this label
867 control_file: use this control file (by name in the frontend)
868 platforms: list of rexeps to filter platforms by. [] => no filtering
mbligh282ce892010-01-06 18:40:17 +0000869 job_label: The label (name) to give to the autotest job launched
870 to run this pairing. '<kernel-version> : <config> : <date>'
mbligh5b618382008-12-03 15:24:01 +0000871 """
mbligh1354c9d2008-12-22 14:56:13 +0000872 def __init__(self, machine_label, control_file, platforms=[],
mbligh17c75e62009-06-08 16:18:21 +0000873 container=False, atomic_group_sched=False, synch_count=0,
mbligh282ce892010-01-06 18:40:17 +0000874 testname=None, job_label=None):
mbligh5b618382008-12-03 15:24:01 +0000875 self.machine_label = machine_label
876 self.control_file = control_file
mbligh1f23f362008-12-22 14:46:12 +0000877 self.platforms = platforms
mbligh1354c9d2008-12-22 14:56:13 +0000878 self.container = container
mblighb9db5162009-04-17 22:21:41 +0000879 self.atomic_group_sched = atomic_group_sched
880 self.synch_count = synch_count
mbligh17c75e62009-06-08 16:18:21 +0000881 self.testname = testname
mbligh282ce892010-01-06 18:40:17 +0000882 self.job_label = job_label
mbligh1354c9d2008-12-22 14:56:13 +0000883
884
885 def __repr__(self):
886 return '%s %s %s %s' % (self.machine_label, self.control_file,
887 self.platforms, self.container)
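

# End-to-end sketch of driving a suite from a server-side control file,
# assuming this module is importable as autotest_lib.server.frontend. The
# kernel version, machine label, control file name and email addresses are
# placeholders, not values defined in this module:
#
#     from autotest_lib.server import frontend
#     afe = frontend.AFE()
#     pairings = [frontend.MachineTestPairing('netbook', 'suite_smoke',
#                                             platforms=['netbook_.*'])]
#     passed = afe.run_test_suites(pairings, kernel='2.6.30-rc1',
#                                  kernel_label='2.6.30-rc1 : smoke : today',
#                                  email_from='autotest@example.com',
#                                  email_to='sheriff@example.com')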