blob: 95dc4d942e781f6e447b9201831abba29ca24267 [file] [log] [blame]
"""
The main job wrapper for the server side.

This is the core infrastructure. Derived from the client side job.py

Copyright Martin J. Bligh, Andy Whitcroft 2007
"""
8
jadmanski025099d2008-09-23 14:13:48 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess, traceback
mbligh084bc172008-10-18 14:02:45 +000010import shutil, warnings
jadmanskic09fc152008-10-15 17:56:59 +000011from autotest_lib.client.bin import fd_stack, sysinfo
mbligh09108442008-10-15 16:27:38 +000012from autotest_lib.client.common_lib import error, log, utils, packages
jadmanski043e1132008-11-19 17:10:32 +000013from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000014from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000015
16
mbligh084bc172008-10-18 14:02:45 +000017def _control_segment_path(name):
18 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000019 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000020 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000021
22
# Standard filenames used when staging control files for execution.
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

# Pre-resolved paths to the server-side control segment scripts that
# implement the machine-management steps (install, verify, repair, ...).
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000035
36
# load up site-specific code for generating site-specific job data
try:
    import site_job
    get_site_job_data = site_job.get_site_job_data
    del site_job
except ImportError:
    # by default provide a stub that generates no site data
    def get_site_job_data(job):
        """Stub used when no site_job module is installed; returns no data."""
        return {}
46
47
class base_server_job(object):
    """The actual job against which we do everything.

    Properties:
            autodir
                    The top level autotest directory (/usr/local/autotest).
            serverdir
                    <autodir>/server/
            clientdir
                    <autodir>/client/
            conmuxdir
                    <autodir>/conmux/
            testdir
                    <autodir>/server/tests/
            site_testdir
                    <autodir>/server/site_tests/
            control
                    the control file for this job
    """

    # Version of the status log format this job writes; consumed by the
    # TKO status log parser (status_lib.parser).
    STATUS_VERSION = 1
70
    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass=''):
        """
        control
                The control file (pathname of)
        args
                args to pass to the control file
        resultdir
                where to throw the results
        label
                label for the job
        user
                Username for the job (email address)
        client
                True if a client-side control file
        parse_job
                if non-empty (and there is exactly one machine), enables
                continuous parsing of results into the TKO database under
                this job tag
        ssh_user/ssh_port/ssh_pass
                ssh connection parameters passed down to control segments
        """
        # derive all the standard autotest directory paths from the
        # location of this module (<autodir>/server/server_job.py)
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.site_testdir = os.path.join(self.serverdir, 'site_tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        self.toolsdir = os.path.join(self.autodir, 'client/tools')
        if control:
            # read the control file, normalizing away DOS line endings
            self.control = open(control, 'r').read()
            self.control = re.sub('\r', '', self.control)
        else:
            self.control = ''
        self.resultdir = resultdir
        if resultdir:
            # make sure the result and debug directories exist
            if not os.path.exists(resultdir):
                os.mkdir(resultdir)
            self.debugdir = os.path.join(resultdir, 'debug')
            if not os.path.exists(self.debugdir):
                os.mkdir(self.debugdir)
            self.status = os.path.join(resultdir, 'status')
        else:
            self.status = None
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''           # indent prefix for status records
        self.warning_loggers = set()      # pipes polled for console warnings
        self.ssh_user = ssh_user
        self.ssh_port = ssh_port
        self.ssh_pass = ssh_pass
        self.run_test_cleanup = True      # whether tests run test.cleanup
        self.last_boot_tag = None
        self.hosts = set()                # hosts created by this job

        # capture stdout/stderr so they can be redirected per-test
        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        if resultdir:
            self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        # make sure we have a writable tmpdir, falling back to a
        # per-user directory under the system temp dir if necessary
        if not os.access(self.tmpdir, os.W_OK):
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                pass

        if (not os.access(self.tmpdir, os.W_OK) or
            not os.path.isdir(self.tmpdir)):
            self.tmpdir = os.path.join(tempfile.gettempdir(),
                                       'autotest-' + getpass.getuser())
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                # If the problem was something other than the
                # directory already existing, this chmod should throw as well
                # exception.
                os.chmod(self.tmpdir, stat.S_IRWXU)

        # start with a fresh status file and record the basic job keyvals
        if self.status and os.path.exists(self.status):
            os.unlink(self.status)
        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self.STATUS_VERSION)}
        if self.resultdir:
            job_data.update(get_site_job_data(self))
            utils.write_keyval(self.resultdir, job_data)

        # continuous parsing only works for single-machine jobs
        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout':600})
        self.pkgdir = os.path.join(self.autodir, 'packages')

        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()
176
177
178 def _register_subcommand_hooks(self):
179 """ Register some hooks into the subcommand modules that allow us
180 to properly clean up self.hosts created in forked subprocesses. """
181 def on_fork(cmd):
182 self._existing_hosts_on_fork = set(self.hosts)
183 def on_join(cmd):
184 new_hosts = self.hosts - self._existing_hosts_on_fork
185 for host in new_hosts:
186 host.close()
187 subcommand.subcommand.register_fork_hook(on_fork)
188 subcommand.subcommand.register_join_hook(on_join)
189
jadmanski10646442008-08-13 14:05:21 +0000190
    def init_parser(self, resultdir):
        """Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary."""
        # redirect parser debugging to .parse.log (unbuffered)
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self.STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job,
                                       self.job_model)
        else:
            # job already present -- attach the model to the existing row
            machine_idx = self.results_db.lookup_machine(
                self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx
215
216
217 def cleanup_parser(self):
218 """This should be called after the server job is finished
219 to carry out any remaining cleanup (e.g. flushing any
220 remaining test results to the results db)"""
221 if not self.using_parser:
222 return
223 final_tests = self.parser.end()
224 for test in final_tests:
225 self.__insert_test(test)
226 self.using_parser = False
227
228
229 def verify(self):
230 if not self.machines:
mbligh084bc172008-10-18 14:02:45 +0000231 raise error.AutoservError('No machines specified to verify')
jadmanski10646442008-08-13 14:05:21 +0000232 try:
jadmanskicdd0c402008-09-19 21:21:31 +0000233 namespace = {'machines' : self.machines, 'job' : self,
234 'ssh_user' : self.ssh_user,
235 'ssh_port' : self.ssh_port,
236 'ssh_pass' : self.ssh_pass}
mbligh084bc172008-10-18 14:02:45 +0000237 self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000238 except Exception, e:
239 msg = ('Verify failed\n' + str(e) + '\n'
240 + traceback.format_exc())
241 self.record('ABORT', None, None, msg)
242 raise
243
244
245 def repair(self, host_protection):
246 if not self.machines:
247 raise error.AutoservError('No machines specified to repair')
248 namespace = {'machines': self.machines, 'job': self,
249 'ssh_user': self.ssh_user, 'ssh_port': self.ssh_port,
250 'ssh_pass': self.ssh_pass,
251 'protection_level': host_protection}
252 # no matter what happens during repair, go on to try to reverify
253 try:
mbligh084bc172008-10-18 14:02:45 +0000254 self._execute_code(REPAIR_CONTROL_FILE, namespace,
255 protect=False)
jadmanski10646442008-08-13 14:05:21 +0000256 except Exception, exc:
257 print 'Exception occured during repair'
258 traceback.print_exc()
259 self.verify()
260
261
262 def precheck(self):
263 """
264 perform any additional checks in derived classes.
265 """
266 pass
267
268
269 def enable_external_logging(self):
270 """Start or restart external logging mechanism.
271 """
272 pass
273
274
275 def disable_external_logging(self):
276 """ Pause or stop external logging mechanism.
277 """
278 pass
279
280
jadmanski23afbec2008-09-17 18:12:07 +0000281 def enable_test_cleanup(self):
282 """ By default tests run test.cleanup """
283 self.run_test_cleanup = True
284
285
286 def disable_test_cleanup(self):
287 """ By default tests do not run test.cleanup """
288 self.run_test_cleanup = False
289
290
jadmanski10646442008-08-13 14:05:21 +0000291 def use_external_logging(self):
292 """Return True if external logging should be used.
293 """
294 return False
295
296
    def parallel_simple(self, function, machines, log=True, timeout=None):
        """Run 'function' using parallel_simple, with an extra
        wrapper to handle the necessary setup for continuous parsing,
        if possible. If continuous parsing is already properly
        initialized then this should just work."""
        # we only fork when running against multiple machines, or against
        # a machine list different from the job's own
        is_forking = not (len(machines) == 1 and
                          self.machines == machines)
        if self.parse_job and is_forking and log:
            def wrapper(machine):
                # per-subprocess setup: give each machine its own parse
                # job tag, result directory and parser instance
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir,
                                              machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            def wrapper(machine):
                # no parsing -- just give each machine its own resultdir
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                result = function(machine)
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)
326
327
showard45ae8192008-11-05 19:32:53 +0000328 def run(self, cleanup = False, install_before = False,
jadmanski10646442008-08-13 14:05:21 +0000329 install_after = False, collect_crashdumps = True,
330 namespace = {}):
331 # use a copy so changes don't affect the original dictionary
332 namespace = namespace.copy()
333 machines = self.machines
334
335 self.aborted = False
336 namespace['machines'] = machines
337 namespace['args'] = self.args
338 namespace['job'] = self
339 namespace['ssh_user'] = self.ssh_user
340 namespace['ssh_port'] = self.ssh_port
341 namespace['ssh_pass'] = self.ssh_pass
342 test_start_time = int(time.time())
343
mbligh80e1eba2008-11-19 00:26:18 +0000344 if self.resultdir:
345 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000346
mbligh80e1eba2008-11-19 00:26:18 +0000347 self.enable_external_logging()
348 status_log = os.path.join(self.resultdir, 'status.log')
jadmanskicdd0c402008-09-19 21:21:31 +0000349 collect_crashinfo = True
jadmanski10646442008-08-13 14:05:21 +0000350 try:
351 if install_before and machines:
mbligh084bc172008-10-18 14:02:45 +0000352 self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000353 if self.client:
354 namespace['control'] = self.control
mbligh084bc172008-10-18 14:02:45 +0000355 utils.open_write_close(CLIENT_CONTROL_FILENAME, self.control)
356 shutil.copy(CLIENT_WRAPPER_CONTROL_FILE,
357 SERVER_CONTROL_FILENAME)
jadmanski10646442008-08-13 14:05:21 +0000358 else:
mbligh084bc172008-10-18 14:02:45 +0000359 utils.open_write_close(SERVER_CONTROL_FILENAME, self.control)
360 self._execute_code(SERVER_CONTROL_FILENAME, namespace)
jadmanski10646442008-08-13 14:05:21 +0000361
jadmanskicdd0c402008-09-19 21:21:31 +0000362 # disable crashinfo collection if we get this far without error
363 collect_crashinfo = False
jadmanski10646442008-08-13 14:05:21 +0000364 finally:
jadmanskicdd0c402008-09-19 21:21:31 +0000365 if machines and (collect_crashdumps or collect_crashinfo):
jadmanski10646442008-08-13 14:05:21 +0000366 namespace['test_start_time'] = test_start_time
jadmanskicdd0c402008-09-19 21:21:31 +0000367 if collect_crashinfo:
mbligh084bc172008-10-18 14:02:45 +0000368 # includes crashdumps
369 self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
jadmanskicdd0c402008-09-19 21:21:31 +0000370 else:
mbligh084bc172008-10-18 14:02:45 +0000371 self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000372 self.disable_external_logging()
showard45ae8192008-11-05 19:32:53 +0000373 if cleanup and machines:
374 self._execute_code(CLEANUP_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000375 if install_after and machines:
mbligh084bc172008-10-18 14:02:45 +0000376 self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000377
378
    def run_test(self, url, *args, **dargs):
        """Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run

        Returns True when the test completed, False when it raised a
        TestBaseException (the failure is recorded in the status log);
        any other exception is re-raised with its original traceback.
        """

        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        tag = dargs.pop('tag', None)
        if tag:
            testname += '.' + tag
        subdir = testname

        # each test run gets its own fresh output directory; refusing to
        # reuse one catches accidental duplicate runs with the same tag
        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>"
                   % (outputdir, testname, tag) )
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            # run the test, recording GOOD/FAIL/<status> as appropriate
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname,
                            'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            # test-level failures are reported through the return value
            return False
        elif exc_info:
            # non-test exceptions are re-raised with the saved traceback
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
jadmanski10646442008-08-13 14:05:21 +0000424
425
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Returns a (result, exc_info) pair: the function's return value
        and the sys.exc_info() of any TestBaseException it raised.  Any
        other exception records END ABORT and is converted to a JobError.
        """
        result, exc_info = None, None
        old_record_prefix = self.record_prefix
        try:
            self.record('START', subdir, name)
            self.record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                # always restore the indent, even when function() raises
                self.record_prefix = old_record_prefix
        except error.TestBaseException, e:
            self.record("END %s" % e.exit_status, subdir, name, str(e))
            exc_info = sys.exc_info()
        except Exception, e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000451
452
453 def run_group(self, function, *args, **dargs):
454 """\
455 function:
456 subroutine to run
457 *args:
458 arguments for the function
459 """
460
461 name = function.__name__
462
463 # Allow the tag for the group to be specified.
464 tag = dargs.pop('tag', None)
465 if tag:
466 name = tag
467
jadmanskide292df2008-08-26 20:51:14 +0000468 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000469
470
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            self.record_prefix += '\t'
            reboot_func()
        except Exception, e:
            # reboot failed -- restore the indent and log END FAIL
            self.record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
        else:
            # capture the post-reboot kernel version in the END record
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
497
498
jadmanskic09fc152008-10-15 17:56:59 +0000499 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
500 self._add_sysinfo_loggable(sysinfo.command(command, logfile),
501 on_every_test)
502
503
504 def add_sysinfo_logfile(self, file, on_every_test=False):
505 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
506
507
508 def _add_sysinfo_loggable(self, loggable, on_every_test):
509 if on_every_test:
510 self.sysinfo.test_loggables.add(loggable)
511 else:
512 self.sysinfo.boot_loggables.add(loggable)
513
514
jadmanski10646442008-08-13 14:05:21 +0000515 def record(self, status_code, subdir, operation, status='',
516 optional_fields=None):
517 """
518 Record job-level status
519
520 The intent is to make this file both machine parseable and
521 human readable. That involves a little more complexity, but
522 really isn't all that bad ;-)
523
524 Format is <status code>\t<subdir>\t<operation>\t<status>
525
mbligh1b3b3762008-09-25 02:46:34 +0000526 status code: see common_lib.log.is_valid_status()
jadmanski10646442008-08-13 14:05:21 +0000527 for valid status definition
528
529 subdir: MUST be a relevant subdirectory in the results,
530 or None, which will be represented as '----'
531
532 operation: description of what you ran (e.g. "dbench", or
533 "mkfs -t foobar /dev/sda9")
534
535 status: error message or "completed sucessfully"
536
537 ------------------------------------------------------------
538
539 Initial tabs indicate indent levels for grouping, and is
540 governed by self.record_prefix
541
542 multiline messages have secondary lines prefaced by a double
543 space (' ')
544
545 Executing this method will trigger the logging of all new
546 warnings to date from the various console loggers.
547 """
548 # poll all our warning loggers for new warnings
549 warnings = self._read_warnings()
550 for timestamp, msg in warnings:
551 self._record("WARN", None, None, msg, timestamp)
552
553 # write out the actual status log line
554 self._record(status_code, subdir, operation, status,
555 optional_fields=optional_fields)
556
557
558 def _read_warnings(self):
559 warnings = []
560 while True:
561 # pull in a line of output from every logger that has
562 # output ready to be read
563 loggers, _, _ = select.select(self.warning_loggers,
564 [], [], 0)
565 closed_loggers = set()
566 for logger in loggers:
567 line = logger.readline()
568 # record any broken pipes (aka line == empty)
569 if len(line) == 0:
570 closed_loggers.add(logger)
571 continue
572 timestamp, msg = line.split('\t', 1)
573 warnings.append((int(timestamp), msg.strip()))
574
575 # stop listening to loggers that are closed
576 self.warning_loggers -= closed_loggers
577
578 # stop if none of the loggers have any output left
579 if not loggers:
580 break
581
582 # sort into timestamp order
583 warnings.sort()
584 return warnings
585
586
587 def _render_record(self, status_code, subdir, operation, status='',
588 epoch_time=None, record_prefix=None,
589 optional_fields=None):
590 """
591 Internal Function to generate a record to be written into a
592 status log. For use by server_job.* classes only.
593 """
594 if subdir:
595 if re.match(r'[\n\t]', subdir):
596 raise ValueError(
597 'Invalid character in subdir string')
598 substr = subdir
599 else:
600 substr = '----'
601
mbligh1b3b3762008-09-25 02:46:34 +0000602 if not log.is_valid_status(status_code):
jadmanski10646442008-08-13 14:05:21 +0000603 raise ValueError('Invalid status code supplied: %s' %
604 status_code)
605 if not operation:
606 operation = '----'
607 if re.match(r'[\n\t]', operation):
608 raise ValueError(
609 'Invalid character in operation string')
610 operation = operation.rstrip()
611 status = status.rstrip()
612 status = re.sub(r"\t", " ", status)
613 # Ensure any continuation lines are marked so we can
614 # detect them in the status file to ensure it is parsable.
615 status = re.sub(r"\n", "\n" + self.record_prefix + " ", status)
616
617 if not optional_fields:
618 optional_fields = {}
619
620 # Generate timestamps for inclusion in the logs
621 if epoch_time is None:
622 epoch_time = int(time.time())
623 local_time = time.localtime(epoch_time)
624 optional_fields["timestamp"] = str(epoch_time)
625 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
626 local_time)
627
628 fields = [status_code, substr, operation]
629 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
630 fields.append(status)
631
632 if record_prefix is None:
633 record_prefix = self.record_prefix
634
635 msg = '\t'.join(str(x) for x in fields)
636
637 return record_prefix + msg + '\n'
638
639
640 def _record_prerendered(self, msg):
641 """
642 Record a pre-rendered msg into the status logs. The only
643 change this makes to the message is to add on the local
644 indentation. Should not be called outside of server_job.*
645 classes. Unlike _record, this does not write the message
646 to standard output.
647 """
648 lines = []
649 status_file = os.path.join(self.resultdir, 'status.log')
650 status_log = open(status_file, 'a')
651 for line in msg.splitlines():
652 line = self.record_prefix + line + '\n'
653 lines.append(line)
654 status_log.write(line)
655 status_log.close()
656 self.__parse_status(lines)
657
658
    def _fill_server_control_namespace(self, namespace, protect=True):
        """Prepare a namespace to be used when executing server control files.

        This sets up the control file API by importing modules and making them
        available under the appropriate names within namespace.

        For use by _execute_code().

        Args:
          namespace: The namespace dictionary to fill in.
          protect: Boolean.  If True (the default) any operation that would
              clobber an existing entry in namespace will cause an error.
        Raises:
          error.AutoservError: When a name would be clobbered by import.
        """
        def _import_names(module_name, names=()):
            """Import a module and assign named attributes into namespace.

            Args:
                module_name: The string module name.
                names: A limiting list of names to import from module_name.  If
                    empty (the default), all names are imported from the module
                    similar to a "from foo.bar import *" statement.
            Raises:
                error.AutoservError: When a name being imported would clobber
                    a name already in namespace.
            """
            module = __import__(module_name, {}, {}, names)

            # No names supplied?  Import * from the lowest level module.
            # (Ugh, why do I have to implement this part myself?)
            if not names:
                # __import__ returns the top-level package; walk down to
                # the leaf submodule before reading its names
                for submodule_name in module_name.split('.')[1:]:
                    module = getattr(module, submodule_name)
                if hasattr(module, '__all__'):
                    names = getattr(module, '__all__')
                else:
                    names = dir(module)

            # Install each name into namespace, checking to make sure it
            # doesn't override anything that already exists.
            for name in names:
                # Check for conflicts to help prevent future problems.
                if name in namespace and protect:
                    if namespace[name] is not getattr(module, name):
                        raise error.AutoservError('importing name '
                                '%s from %s %r would override %r' %
                                (name, module_name, getattr(module, name),
                                 namespace[name]))
                    else:
                        # Encourage cleanliness and the use of __all__ for a
                        # more concrete API with less surprises on '*' imports.
                        warnings.warn('%s (%r) being imported from %s for use '
                                      'in server control files is not the '
                                      'first occurrance of that import.' %
                                      (name, namespace[name], module_name))

                namespace[name] = getattr(module, name)


        # This is the equivalent of prepending a bunch of import statements to
        # the front of the control script.
        namespace.update(os=os, sys=sys)
        _import_names('autotest_lib.server',
                ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
                 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
        _import_names('autotest_lib.server.subcommand',
                      ('parallel', 'parallel_simple', 'subcommand'))
        _import_names('autotest_lib.server.utils',
                      ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
        _import_names('autotest_lib.client.common_lib.error')
        _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))

        # Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing be part of a public API. -gps)
        #
        # XXX Base & SiteAutotest do not appear to use .job.  Who does?
        namespace['autotest'].Autotest.job = self
        # server.hosts.base_classes.Host uses .job.
        namespace['hosts'].Host.job = self
739
740
    def _execute_code(self, code_file, namespace, protect=True):
        """Execute code using a copy of namespace as a server control script.

        Unless protect_namespace is explicitly set to False, the dict will not
        be modified.

        Args:
          code_file: The filename of the control file to execute.
          namespace: A dict containing names to make available during execution.
          protect: Boolean.  If True (the default) a copy of the namespace dict
              is used during execution to prevent the code from modifying its
              contents outside of this function.  If False the raw dict is
              passed in and modifications will be allowed.
        """
        if protect:
            namespace = namespace.copy()
        self._fill_server_control_namespace(namespace, protect=protect)
        # TODO: Simplify and get rid of the special cases for only 1 machine.
        if len(self.machines) > 1:
            machines_text = '\n'.join(self.machines) + '\n'
            # Only rewrite the file if it does not match our machine list.
            try:
                machines_f = open(MACHINES_FILENAME, 'r')
                existing_machines_text = machines_f.read()
                machines_f.close()
            except EnvironmentError:
                # no readable .machines file yet; force a (re)write below
                existing_machines_text = None
            if machines_text != existing_machines_text:
                utils.open_write_close(MACHINES_FILENAME, machines_text)
        execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +0000771
772
    def _record(self, status_code, subdir, operation, status='',
                epoch_time=None, optional_fields=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly, only by job.record as
        this would bypass the console monitor logging.
        """

        msg = self._render_record(status_code, subdir, operation,
                                  status, epoch_time,
                                  optional_fields=optional_fields)


        # append to the job-level status log and echo to stdout
        status_file = os.path.join(self.resultdir, 'status.log')
        sys.stdout.write(msg)
        open(status_file, "a").write(msg)
        if subdir:
            # also append to the per-test status log
            test_dir = os.path.join(self.resultdir, subdir)
            status_file = os.path.join(test_dir, 'status.log')
            open(status_file, "a").write(msg)
        self.__parse_status(msg.splitlines())
794
795
796 def __parse_status(self, new_lines):
797 if not self.using_parser:
798 return
799 new_tests = self.parser.process_lines(new_lines)
800 for test in new_tests:
801 self.__insert_test(test)
802
803
804 def __insert_test(self, test):
805 """ An internal method to insert a new test result into the
806 database. This method will not raise an exception, even if an
807 error occurs during the insert, to avoid failing a test
808 simply because of unexpected database issues."""
showard21baa452008-10-21 00:08:39 +0000809 self.num_tests_run += 1
810 if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
811 self.num_tests_failed += 1
jadmanski10646442008-08-13 14:05:21 +0000812 try:
813 self.results_db.insert_test(self.job_model, test)
814 except Exception:
815 msg = ("WARNING: An unexpected error occured while "
816 "inserting test results into the database. "
817 "Ignoring error.\n" + traceback.format_exc())
818 print >> sys.stderr, msg
819
mblighcaa62c22008-04-07 21:51:17 +0000820
# site_server_job.py may be non-existant or empty, make sure that an
# appropriate site_server_job class is created nevertheless
try:
    from autotest_lib.server.site_server_job import site_server_job
except ImportError:
    # no site customizations available -- provide an empty mixin instead
    class site_server_job(object):
        pass

class server_job(site_server_job, base_server_job):
    # the concrete job class: site-specific behavior layered over the base
    pass