blob: 53af0c1e67259c5520665638c698d23bfae62917 [file] [log] [blame]
mbligh57e78662008-06-17 19:53:49 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
jadmanski6bb32d72009-03-19 20:25:24 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess
showard75cdfee2009-06-10 17:40:41 +000010import traceback, shutil, warnings, fcntl, pickle, logging
mbligh78c0daa2009-06-15 21:54:50 +000011import itertools
showard75cdfee2009-06-10 17:40:41 +000012from autotest_lib.client.bin import sysinfo
mbligh09108442008-10-15 16:27:38 +000013from autotest_lib.client.common_lib import error, log, utils, packages
showard75cdfee2009-06-10 17:40:41 +000014from autotest_lib.client.common_lib import logging_manager
jadmanski043e1132008-11-19 17:10:32 +000015from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000016from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000017
18
mbligh084bc172008-10-18 14:02:45 +000019def _control_segment_path(name):
20 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000021 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000022 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000023
24
# Filenames used when writing control files into a results/control directory.
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

# Absolute paths to the stock control segments shipped with the server.
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000037
38
mbligh062ed152009-01-13 00:57:14 +000039# by default provide a stub that generates no site data
40def _get_site_job_data_dummy(job):
41 return {}
42
43
# load up site-specific code for generating site-specific job data
# (falls back to _get_site_job_data_dummy when no site module is present)
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
jadmanski10646442008-08-13 14:05:21 +000048
49
class base_server_job(object):
    """
    The actual job against which we do everything.

    Properties:
            autodir
                    The top level autotest directory (/usr/local/autotest).
            serverdir
                    <autodir>/server/
            clientdir
                    <autodir>/client/
            conmuxdir
                    <autodir>/conmux/
            testdir
                    <autodir>/server/tests/
            site_testdir
                    <autodir>/server/site_tests/
            control
                    the control file for this job
            drop_caches_between_iterations
                    drop the pagecache between each iteration
    """

    # version of the status log format this job produces
    STATUS_VERSION = 1
    # default tag appended to test names by run_test; set via set_test_tag()
    test_tag = None
jadmanski10646442008-08-13 14:05:21 +000075
    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass='',
                 group_name='', tag=''):
        """
        Create a server side job object.

        @param control: The pathname of the control file.
        @param args: Passed to the control file.
        @param resultdir: Where to throw the results.
        @param label: Description of the job.
        @param user: Username for the job (email address).
        @param machines: A list of machine hostnames the job runs against.
        @param client: True if this is a client-side control file.
        @param parse_job: string, if supplied it is the job execution tag that
                the results will be passed through to the TKO parser with.
        @param ssh_user: The SSH username.  [root]
        @param ssh_port: The SSH port number.  [22]
        @param ssh_pass: The SSH passphrase, if needed.
        @param group_name: If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        @param tag: The job execution tag from the scheduler.  [optional]
        """
        # derive the standard autotest directory layout from our own location
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.site_testdir = os.path.join(self.serverdir, 'site_tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        self.toolsdir = os.path.join(self.autodir, 'client/tools')
        if control:
            # read the control file contents now; CRs are stripped
            self.control = self._load_control_file(control)
        else:
            self.control = ''
        self.resultdir = resultdir
        self.uncollected_log_file = None
        if resultdir:
            # marker file used by run() to track logs not yet collected
            self.uncollected_log_file = os.path.join(resultdir,
                                                     'uncollected_logs')
            self.debugdir = os.path.join(resultdir, 'debug')

            if not os.path.exists(resultdir):
                os.mkdir(resultdir)
            if not os.path.exists(self.debugdir):
                os.mkdir(self.debugdir)
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self.ssh_user = ssh_user
        self.ssh_port = ssh_port
        self.ssh_pass = ssh_pass
        self.tag = tag
        self.run_test_cleanup = True
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches_between_iterations = False

        # take over stdout/stderr so output gets routed through the
        # logging manager, and share the manager with subcommand
        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        if resultdir:
            self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        # make sure we have a writable tmpdir, falling back to a
        # per-user directory under the system tempdir if necessary
        if not os.access(self.tmpdir, os.W_OK):
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                pass

        if not (os.access(self.tmpdir, os.W_OK) and os.path.isdir(self.tmpdir)):
            self.tmpdir = os.path.join(tempfile.gettempdir(),
                                       'autotest-' + getpass.getuser())
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                # If the problem was something other than the
                # directory already existing, this chmod should throw as well
                # exception.
                os.chmod(self.tmpdir, stat.S_IRWXU)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self.STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if group_name:
            job_data['host_group_name'] = group_name
        if self.resultdir:
            # only write these keyvals out on the first job in a resultdir
            if 'job_started' not in utils.read_keyval(self.resultdir):
                job_data.update(get_site_job_data(self))
                utils.write_keyval(self.resultdir, job_data)

        # continuous parsing is only supported for single-machine jobs
        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False
        self.pkgmgr = packages.PackageManager(self.autodir,
                                             run_function_dargs={'timeout':600})
        self.pkgdir = os.path.join(self.autodir, 'packages')

        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()
192
193
jadmanskie432dd22009-01-30 15:04:51 +0000194 @staticmethod
195 def _load_control_file(path):
196 f = open(path)
197 try:
198 control_file = f.read()
199 finally:
200 f.close()
201 return re.sub('\r', '', control_file)
202
203
jadmanski550fdc22008-11-20 16:32:08 +0000204 def _register_subcommand_hooks(self):
mbligh2b92b862008-11-22 13:25:32 +0000205 """
206 Register some hooks into the subcommand modules that allow us
207 to properly clean up self.hosts created in forked subprocesses.
208 """
jadmanski550fdc22008-11-20 16:32:08 +0000209 def on_fork(cmd):
210 self._existing_hosts_on_fork = set(self.hosts)
211 def on_join(cmd):
212 new_hosts = self.hosts - self._existing_hosts_on_fork
213 for host in new_hosts:
214 host.close()
215 subcommand.subcommand.register_fork_hook(on_fork)
216 subcommand.subcommand.register_join_hook(on_join)
217
jadmanski10646442008-08-13 14:05:21 +0000218
    def init_parser(self, resultdir):
        """
        Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.
        """
        # redirect parser debugging to .parse.log (opened unbuffered)
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self.STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job, self.job_model)
        else:
            # job exists: attach to the existing db row instead of inserting
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx
243
244
245 def cleanup_parser(self):
mbligh2b92b862008-11-22 13:25:32 +0000246 """
247 This should be called after the server job is finished
jadmanski10646442008-08-13 14:05:21 +0000248 to carry out any remaining cleanup (e.g. flushing any
mbligh2b92b862008-11-22 13:25:32 +0000249 remaining test results to the results db)
250 """
jadmanski10646442008-08-13 14:05:21 +0000251 if not self.using_parser:
252 return
253 final_tests = self.parser.end()
254 for test in final_tests:
255 self.__insert_test(test)
256 self.using_parser = False
257
258
    def verify(self):
        """
        Run the verify control segment against self.machines.

        Records an ABORT entry in the status log and re-raises if the
        segment fails. Raises AutoservError if no machines were given.
        """
        if not self.machines:
            raise error.AutoservError('No machines specified to verify')
        if self.resultdir:
            os.chdir(self.resultdir)
        try:
            # namespace exposed to the verify control segment
            namespace = {'machines' : self.machines, 'job' : self,
                         'ssh_user' : self.ssh_user,
                         'ssh_port' : self.ssh_port,
                         'ssh_pass' : self.ssh_pass}
            self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
        except Exception, e:
            msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise
274
275
    def repair(self, host_protection):
        """
        Run the repair control segment against self.machines.

        @param host_protection: Passed through to the control segment as
                'protection_level'. Raises AutoservError if no machines.
        """
        if not self.machines:
            raise error.AutoservError('No machines specified to repair')
        if self.resultdir:
            os.chdir(self.resultdir)
        # namespace exposed to the repair control segment
        namespace = {'machines': self.machines, 'job': self,
                     'ssh_user': self.ssh_user, 'ssh_port': self.ssh_port,
                     'ssh_pass': self.ssh_pass,
                     'protection_level': host_protection}

        self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000287
288
289 def precheck(self):
290 """
291 perform any additional checks in derived classes.
292 """
293 pass
294
295
296 def enable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000297 """
298 Start or restart external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000299 """
300 pass
301
302
303 def disable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000304 """
305 Pause or stop external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000306 """
307 pass
308
309
jadmanski23afbec2008-09-17 18:12:07 +0000310 def enable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000311 """
312 By default tests run test.cleanup
313 """
jadmanski23afbec2008-09-17 18:12:07 +0000314 self.run_test_cleanup = True
315
316
317 def disable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000318 """
319 By default tests do not run test.cleanup
320 """
jadmanski23afbec2008-09-17 18:12:07 +0000321 self.run_test_cleanup = False
322
323
jadmanski10646442008-08-13 14:05:21 +0000324 def use_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000325 """
326 Return True if external logging should be used.
jadmanski10646442008-08-13 14:05:21 +0000327 """
328 return False
329
330
    def _make_parallel_wrapper(self, function, machines, log):
        """Wrap function as appropriate for calling by parallel_simple.

        The wrappers mutate self (parse_job, machines, resultdir) —
        presumably safe because they run in a forked child (see
        _register_subcommand_hooks); NOTE(review): confirm.
        """
        # forking happens unless this is a single-machine call on our own
        # machine list
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self.parse_job and is_forking and log:
            # per-machine wrapper that also sets up continuous parsing
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            # per-machine wrapper without parsing: just redirect the
            # resultdir and record the machine's keyvals
            def wrapper(machine):
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                machine_data = {'hostname' : machine,
                                'status_version' : str(self.STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            # nothing to set up; call the function directly
            wrapper = function
        return wrapper
358
359
360 def parallel_simple(self, function, machines, log=True, timeout=None,
361 return_results=False):
362 """
363 Run 'function' using parallel_simple, with an extra wrapper to handle
364 the necessary setup for continuous parsing, if possible. If continuous
365 parsing is already properly initialized then this should just work.
366
367 @param function: A callable to run in parallel given each machine.
368 @param machines: A list of machine names to be passed one per subcommand
369 invocation of function.
370 @param log: If True, output will be written to output in a subdirectory
371 named after each machine.
372 @param timeout: Seconds after which the function call should timeout.
373 @param return_results: If True instead of an AutoServError being raised
374 on any error a list of the results|exceptions from the function
375 called on each arg is returned. [default: False]
376
377 @raises error.AutotestError: If any of the functions failed.
378 """
379 wrapper = self._make_parallel_wrapper(function, machines, log)
380 return subcommand.parallel_simple(wrapper, machines,
381 log=log, timeout=timeout,
382 return_results=return_results)
383
384
385 def parallel_on_machines(self, function, machines, timeout=None):
386 """
387 @param func: Called in parallel with one machine as its argument.
388 @param machines: A list of machines to call function(machine) on.
389 @param timeout: Seconds after which the function call should timeout.
390
391 @returns A list of machines on which function(machine) returned
392 without raising an exception.
393 """
394 results = self.parallel_simple(func, machines, timeout=timeout,
395 return_results=True)
396 success_machines = []
397 for result, machine in itertools.izip(results, machines):
398 if not isinstance(result, Exception):
399 success_machines.append(machine)
400 return success_machines
jadmanski10646442008-08-13 14:05:21 +0000401
402
    # sentinel value for control_file_dir requesting a fresh temp directory
    USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        """
        Execute the job's control file (or only collect crash info).

        @param cleanup: If True, run the cleanup control segment afterwards.
        @param install_before: If True, run the install control segment on
                the machines before the control file.
        @param install_after: If True, run the install control segment after.
        @param collect_crashdumps: If True, collect crashdumps afterwards.
        @param namespace: Extra names for the control file namespace. The
                mutable {} default is safe: it is copied before use.
        @param control: Control file text; defaults to self.control.
        @param control_file_dir: Where to write control file copies;
                defaults to self.resultdir, or USE_TEMP_DIR for a temp dir.
        @param only_collect_crashinfo: If True, skip executing the control
                file and only run the crashinfo collection segment.
        """
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self.uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self.uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            control = self.control
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        namespace['ssh_user'] = self.ssh_user
        namespace['ssh_port'] = self.ssh_port
        namespace['ssh_pass'] = self.ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        # assume we need crashinfo until the control file finishes cleanly
        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            if install_before and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)

            if not only_collect_crashinfo:
                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self.USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self.client:
                    # client-side control: run it through the client wrapper
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                logging.info("Processing control file")
                self._execute_code(server_control_file, namespace)
                logging.info("Finished processing control file")

                # no error occured, so we don't need to collect crashinfo
                collect_crashinfo = False
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    logging.warn('Could not remove temp directory %s: %s',
                                 temp_control_file_dir, e)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self.uncollected_log_file:
                os.remove(self.uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000500
501
mblighc86113b2009-04-28 18:32:51 +0000502 def set_test_tag(self, tag=''):
503 """Set tag to be added to test name of all following run_test steps."""
504 self.test_tag = tag
505
506
jadmanski10646442008-08-13 14:05:21 +0000507 def run_test(self, url, *args, **dargs):
mbligh2b92b862008-11-22 13:25:32 +0000508 """
509 Summon a test object and run it.
jadmanski10646442008-08-13 14:05:21 +0000510
511 tag
512 tag to add to testname
513 url
514 url of the test to run
515 """
516
517 (group, testname) = self.pkgmgr.get_package_name(url, 'test')
jadmanski10646442008-08-13 14:05:21 +0000518
519 tag = dargs.pop('tag', None)
mblighc86113b2009-04-28 18:32:51 +0000520 if tag is None:
521 tag = self.test_tag
jadmanski10646442008-08-13 14:05:21 +0000522 if tag:
mbligh8ad24202009-01-07 16:49:36 +0000523 testname += '.' + str(tag)
jadmanskide292df2008-08-26 20:51:14 +0000524 subdir = testname
jadmanski10646442008-08-13 14:05:21 +0000525
526 outputdir = os.path.join(self.resultdir, subdir)
527 if os.path.exists(outputdir):
528 msg = ("%s already exists, test <%s> may have"
mbligh2b92b862008-11-22 13:25:32 +0000529 " already run with tag <%s>" % (outputdir, testname, tag))
jadmanski10646442008-08-13 14:05:21 +0000530 raise error.TestError(msg)
531 os.mkdir(outputdir)
532
533 def group_func():
534 try:
535 test.runtest(self, url, tag, args, dargs)
536 except error.TestBaseException, e:
537 self.record(e.exit_status, subdir, testname, str(e))
538 raise
539 except Exception, e:
540 info = str(e) + "\n" + traceback.format_exc()
541 self.record('FAIL', subdir, testname, info)
542 raise
543 else:
mbligh2b92b862008-11-22 13:25:32 +0000544 self.record('GOOD', subdir, testname, 'completed successfully')
jadmanskide292df2008-08-26 20:51:14 +0000545
546 result, exc_info = self._run_group(testname, subdir, group_func)
547 if exc_info and isinstance(exc_info[1], error.TestBaseException):
548 return False
549 elif exc_info:
550 raise exc_info[0], exc_info[1], exc_info[2]
551 else:
552 return True
jadmanski10646442008-08-13 14:05:21 +0000553
554
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Emits START/END records around the call and returns a
        (result, exc_info) tuple; exc_info is None unless the function
        raised a TestBaseException. Any other exception is converted
        into a JobError and re-raised.
        """
        result, exc_info = None, None
        old_record_prefix = self.record_prefix
        try:
            self.record('START', subdir, name)
            # everything inside the group is indented one extra tab
            self.record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                self.record_prefix = old_record_prefix
        except error.TestBaseException, e:
            # test failures end the group but are returned, not raised
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000580
581
582 def run_group(self, function, *args, **dargs):
583 """\
584 function:
585 subroutine to run
586 *args:
587 arguments for the function
588 """
589
590 name = function.__name__
591
592 # Allow the tag for the group to be specified.
593 tag = dargs.pop('tag', None)
594 if tag:
595 name = tag
596
jadmanskide292df2008-08-26 20:51:14 +0000597 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000598
599
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            # indent everything logged during the reboot
            self.record_prefix += '\t'
            reboot_func()
        except Exception, e:
            # restore the prefix before logging the failure, then re-raise
            self.record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            # capture the post-reboot kernel version in the END record
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
627
628
jadmanskie432dd22009-01-30 15:04:51 +0000629 def run_control(self, path):
630 """Execute a control file found at path (relative to the autotest
631 path). Intended for executing a control file within a control file,
632 not for running the top-level job control file."""
633 path = os.path.join(self.autodir, path)
634 control_file = self._load_control_file(path)
635 self.run(control=control_file, control_file_dir=self.USE_TEMP_DIR)
636
637
jadmanskic09fc152008-10-15 17:56:59 +0000638 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
mbligh4395bbd2009-03-25 19:34:17 +0000639 self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
jadmanskic09fc152008-10-15 17:56:59 +0000640 on_every_test)
641
642
643 def add_sysinfo_logfile(self, file, on_every_test=False):
644 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
645
646
647 def _add_sysinfo_loggable(self, loggable, on_every_test):
648 if on_every_test:
649 self.sysinfo.test_loggables.add(loggable)
650 else:
651 self.sysinfo.boot_loggables.add(loggable)
652
653
jadmanski10646442008-08-13 14:05:21 +0000654 def record(self, status_code, subdir, operation, status='',
655 optional_fields=None):
656 """
657 Record job-level status
658
659 The intent is to make this file both machine parseable and
660 human readable. That involves a little more complexity, but
661 really isn't all that bad ;-)
662
663 Format is <status code>\t<subdir>\t<operation>\t<status>
664
mbligh1b3b3762008-09-25 02:46:34 +0000665 status code: see common_lib.log.is_valid_status()
jadmanski10646442008-08-13 14:05:21 +0000666 for valid status definition
667
668 subdir: MUST be a relevant subdirectory in the results,
669 or None, which will be represented as '----'
670
671 operation: description of what you ran (e.g. "dbench", or
672 "mkfs -t foobar /dev/sda9")
673
674 status: error message or "completed sucessfully"
675
676 ------------------------------------------------------------
677
678 Initial tabs indicate indent levels for grouping, and is
679 governed by self.record_prefix
680
681 multiline messages have secondary lines prefaced by a double
682 space (' ')
683
684 Executing this method will trigger the logging of all new
685 warnings to date from the various console loggers.
686 """
687 # poll all our warning loggers for new warnings
688 warnings = self._read_warnings()
jadmanski2de83112009-04-01 18:21:04 +0000689 old_record_prefix = self.record_prefix
690 try:
691 if status_code.startswith("END "):
692 self.record_prefix += "\t"
693 for timestamp, msg in warnings:
694 self._record("WARN", None, None, msg, timestamp)
695 finally:
696 self.record_prefix = old_record_prefix
jadmanski10646442008-08-13 14:05:21 +0000697
698 # write out the actual status log line
699 self._record(status_code, subdir, operation, status,
700 optional_fields=optional_fields)
701
702
    def _read_warnings(self):
        """Poll all the warning loggers and extract any new warnings that have
        been logged. If the warnings belong to a category that is currently
        disabled, this method will discard them and they will no longer be
        retrievable.

        Returns a list of (timestamp, message) tuples, where timestamp is an
        integer epoch timestamp."""
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                # parse out the warning: lines are "<timestamp>\t<type>\t<msg>"
                timestamp, msgtype, msg = line.split('\t', 2)
                timestamp = int(timestamp)
                # if the warning is valid, add it to the results
                if self.warning_manager.is_valid(timestamp, msgtype):
                    warnings.append((timestamp, msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings
740
741
jadmanski16a7ff72009-04-01 18:19:53 +0000742 def disable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000743 self.warning_manager.disable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000744 self.record("INFO", None, None,
745 "disabling %s warnings" % warning_type,
746 {"warnings.disable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000747
748
jadmanski16a7ff72009-04-01 18:19:53 +0000749 def enable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000750 self.warning_manager.enable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000751 self.record("INFO", None, None,
752 "enabling %s warnings" % warning_type,
753 {"warnings.enable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000754
755
jadmanski779bd292009-03-19 17:33:33 +0000756 def get_status_log_path(self, subdir=None):
757 """Return the path to the job status log.
758
759 @param subdir - Optional paramter indicating that you want the path
760 to a subdirectory status log.
761
762 @returns The path where the status log should be.
763 """
mbligh210bae62009-04-01 18:33:13 +0000764 if self.resultdir:
765 if subdir:
766 return os.path.join(self.resultdir, subdir, "status.log")
767 else:
768 return os.path.join(self.resultdir, "status.log")
jadmanski779bd292009-03-19 17:33:33 +0000769 else:
mbligh210bae62009-04-01 18:33:13 +0000770 return None
jadmanski779bd292009-03-19 17:33:33 +0000771
772
jadmanski6bb32d72009-03-19 20:25:24 +0000773 def _update_uncollected_logs_list(self, update_func):
774 """Updates the uncollected logs list in a multi-process safe manner.
775
776 @param update_func - a function that updates the list of uncollected
777 logs. Should take one parameter, the list to be updated.
778 """
mbligha788dc42009-03-26 21:10:16 +0000779 if self.uncollected_log_file:
780 log_file = open(self.uncollected_log_file, "r+")
781 fcntl.flock(log_file, fcntl.LOCK_EX)
jadmanski6bb32d72009-03-19 20:25:24 +0000782 try:
783 uncollected_logs = pickle.load(log_file)
784 update_func(uncollected_logs)
785 log_file.seek(0)
786 log_file.truncate()
787 pickle.dump(uncollected_logs, log_file)
jadmanski3bff9092009-04-22 18:09:47 +0000788 log_file.flush()
jadmanski6bb32d72009-03-19 20:25:24 +0000789 finally:
790 fcntl.flock(log_file, fcntl.LOCK_UN)
791 log_file.close()
792
793
794 def add_client_log(self, hostname, remote_path, local_path):
795 """Adds a new set of client logs to the list of uncollected logs,
796 to allow for future log recovery.
797
798 @param host - the hostname of the machine holding the logs
799 @param remote_path - the directory on the remote machine holding logs
800 @param local_path - the local directory to copy the logs into
801 """
802 def update_func(logs_list):
803 logs_list.append((hostname, remote_path, local_path))
804 self._update_uncollected_logs_list(update_func)
805
806
807 def remove_client_log(self, hostname, remote_path, local_path):
808 """Removes a set of client logs from the list of uncollected logs,
809 to allow for future log recovery.
810
811 @param host - the hostname of the machine holding the logs
812 @param remote_path - the directory on the remote machine holding logs
813 @param local_path - the local directory to copy the logs into
814 """
815 def update_func(logs_list):
816 logs_list.remove((hostname, remote_path, local_path))
817 self._update_uncollected_logs_list(update_func)
818
819
jadmanski10646442008-08-13 14:05:21 +0000820 def _render_record(self, status_code, subdir, operation, status='',
821 epoch_time=None, record_prefix=None,
822 optional_fields=None):
823 """
824 Internal Function to generate a record to be written into a
825 status log. For use by server_job.* classes only.
826 """
827 if subdir:
828 if re.match(r'[\n\t]', subdir):
mbligh2b92b862008-11-22 13:25:32 +0000829 raise ValueError('Invalid character in subdir string')
jadmanski10646442008-08-13 14:05:21 +0000830 substr = subdir
831 else:
832 substr = '----'
833
mbligh1b3b3762008-09-25 02:46:34 +0000834 if not log.is_valid_status(status_code):
mbligh2b92b862008-11-22 13:25:32 +0000835 raise ValueError('Invalid status code supplied: %s' % status_code)
jadmanski10646442008-08-13 14:05:21 +0000836 if not operation:
837 operation = '----'
838 if re.match(r'[\n\t]', operation):
mbligh2b92b862008-11-22 13:25:32 +0000839 raise ValueError('Invalid character in operation string')
jadmanski10646442008-08-13 14:05:21 +0000840 operation = operation.rstrip()
841 status = status.rstrip()
842 status = re.sub(r"\t", " ", status)
843 # Ensure any continuation lines are marked so we can
844 # detect them in the status file to ensure it is parsable.
845 status = re.sub(r"\n", "\n" + self.record_prefix + " ", status)
846
847 if not optional_fields:
848 optional_fields = {}
849
850 # Generate timestamps for inclusion in the logs
851 if epoch_time is None:
852 epoch_time = int(time.time())
853 local_time = time.localtime(epoch_time)
854 optional_fields["timestamp"] = str(epoch_time)
855 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
856 local_time)
857
858 fields = [status_code, substr, operation]
859 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
860 fields.append(status)
861
862 if record_prefix is None:
863 record_prefix = self.record_prefix
864
865 msg = '\t'.join(str(x) for x in fields)
jadmanski10646442008-08-13 14:05:21 +0000866 return record_prefix + msg + '\n'
867
868
869 def _record_prerendered(self, msg):
870 """
871 Record a pre-rendered msg into the status logs. The only
872 change this makes to the message is to add on the local
873 indentation. Should not be called outside of server_job.*
874 classes. Unlike _record, this does not write the message
875 to standard output.
876 """
877 lines = []
jadmanski779bd292009-03-19 17:33:33 +0000878 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000879 status_log = open(status_file, 'a')
880 for line in msg.splitlines():
881 line = self.record_prefix + line + '\n'
882 lines.append(line)
883 status_log.write(line)
884 status_log.close()
885 self.__parse_status(lines)
886
887
mbligh084bc172008-10-18 14:02:45 +0000888 def _fill_server_control_namespace(self, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000889 """
890 Prepare a namespace to be used when executing server control files.
mbligh084bc172008-10-18 14:02:45 +0000891
892 This sets up the control file API by importing modules and making them
893 available under the appropriate names within namespace.
894
895 For use by _execute_code().
896
897 Args:
898 namespace: The namespace dictionary to fill in.
899 protect: Boolean. If True (the default) any operation that would
900 clobber an existing entry in namespace will cause an error.
901 Raises:
902 error.AutoservError: When a name would be clobbered by import.
903 """
904 def _import_names(module_name, names=()):
mbligh2b92b862008-11-22 13:25:32 +0000905 """
906 Import a module and assign named attributes into namespace.
mbligh084bc172008-10-18 14:02:45 +0000907
908 Args:
909 module_name: The string module name.
910 names: A limiting list of names to import from module_name. If
911 empty (the default), all names are imported from the module
912 similar to a "from foo.bar import *" statement.
913 Raises:
914 error.AutoservError: When a name being imported would clobber
915 a name already in namespace.
916 """
917 module = __import__(module_name, {}, {}, names)
918
919 # No names supplied? Import * from the lowest level module.
920 # (Ugh, why do I have to implement this part myself?)
921 if not names:
922 for submodule_name in module_name.split('.')[1:]:
923 module = getattr(module, submodule_name)
924 if hasattr(module, '__all__'):
925 names = getattr(module, '__all__')
926 else:
927 names = dir(module)
928
929 # Install each name into namespace, checking to make sure it
930 # doesn't override anything that already exists.
931 for name in names:
932 # Check for conflicts to help prevent future problems.
933 if name in namespace and protect:
934 if namespace[name] is not getattr(module, name):
935 raise error.AutoservError('importing name '
936 '%s from %s %r would override %r' %
937 (name, module_name, getattr(module, name),
938 namespace[name]))
939 else:
940 # Encourage cleanliness and the use of __all__ for a
941 # more concrete API with less surprises on '*' imports.
942 warnings.warn('%s (%r) being imported from %s for use '
943 'in server control files is not the '
944 'first occurrance of that import.' %
945 (name, namespace[name], module_name))
946
947 namespace[name] = getattr(module, name)
948
949
950 # This is the equivalent of prepending a bunch of import statements to
951 # the front of the control script.
mbligha2b07dd2009-06-22 18:26:13 +0000952 namespace.update(os=os, sys=sys, logging=logging)
mbligh084bc172008-10-18 14:02:45 +0000953 _import_names('autotest_lib.server',
954 ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
955 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
956 _import_names('autotest_lib.server.subcommand',
957 ('parallel', 'parallel_simple', 'subcommand'))
958 _import_names('autotest_lib.server.utils',
959 ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
960 _import_names('autotest_lib.client.common_lib.error')
961 _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))
962
963 # Inject ourself as the job object into other classes within the API.
964 # (Yuck, this injection is a gross thing be part of a public API. -gps)
965 #
966 # XXX Base & SiteAutotest do not appear to use .job. Who does?
967 namespace['autotest'].Autotest.job = self
968 # server.hosts.base_classes.Host uses .job.
969 namespace['hosts'].Host.job = self
970
971
972 def _execute_code(self, code_file, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000973 """
974 Execute code using a copy of namespace as a server control script.
mbligh084bc172008-10-18 14:02:45 +0000975
976 Unless protect_namespace is explicitly set to False, the dict will not
977 be modified.
978
979 Args:
980 code_file: The filename of the control file to execute.
981 namespace: A dict containing names to make available during execution.
982 protect: Boolean. If True (the default) a copy of the namespace dict
983 is used during execution to prevent the code from modifying its
984 contents outside of this function. If False the raw dict is
985 passed in and modifications will be allowed.
986 """
987 if protect:
988 namespace = namespace.copy()
989 self._fill_server_control_namespace(namespace, protect=protect)
990 # TODO: Simplify and get rid of the special cases for only 1 machine.
showard3e66e8c2008-10-27 19:20:51 +0000991 if len(self.machines) > 1:
mbligh084bc172008-10-18 14:02:45 +0000992 machines_text = '\n'.join(self.machines) + '\n'
993 # Only rewrite the file if it does not match our machine list.
994 try:
995 machines_f = open(MACHINES_FILENAME, 'r')
996 existing_machines_text = machines_f.read()
997 machines_f.close()
998 except EnvironmentError:
999 existing_machines_text = None
1000 if machines_text != existing_machines_text:
1001 utils.open_write_close(MACHINES_FILENAME, machines_text)
1002 execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +00001003
1004
1005 def _record(self, status_code, subdir, operation, status='',
1006 epoch_time=None, optional_fields=None):
1007 """
1008 Actual function for recording a single line into the status
1009 logs. Should never be called directly, only by job.record as
1010 this would bypass the console monitor logging.
1011 """
1012
mbligh2b92b862008-11-22 13:25:32 +00001013 msg = self._render_record(status_code, subdir, operation, status,
1014 epoch_time, optional_fields=optional_fields)
jadmanski10646442008-08-13 14:05:21 +00001015
jadmanski779bd292009-03-19 17:33:33 +00001016 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +00001017 sys.stdout.write(msg)
mbligh210bae62009-04-01 18:33:13 +00001018 if status_file:
1019 open(status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001020 if subdir:
jadmanski779bd292009-03-19 17:33:33 +00001021 sub_status_file = self.get_status_log_path(subdir)
1022 open(sub_status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001023 self.__parse_status(msg.splitlines())
1024
1025
1026 def __parse_status(self, new_lines):
1027 if not self.using_parser:
1028 return
1029 new_tests = self.parser.process_lines(new_lines)
1030 for test in new_tests:
1031 self.__insert_test(test)
1032
1033
1034 def __insert_test(self, test):
mbligh2b92b862008-11-22 13:25:32 +00001035 """
1036 An internal method to insert a new test result into the
jadmanski10646442008-08-13 14:05:21 +00001037 database. This method will not raise an exception, even if an
1038 error occurs during the insert, to avoid failing a test
1039 simply because of unexpected database issues."""
showard21baa452008-10-21 00:08:39 +00001040 self.num_tests_run += 1
1041 if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
1042 self.num_tests_failed += 1
jadmanski10646442008-08-13 14:05:21 +00001043 try:
1044 self.results_db.insert_test(self.job_model, test)
1045 except Exception:
1046 msg = ("WARNING: An unexpected error occured while "
1047 "inserting test results into the database. "
1048 "Ignoring error.\n" + traceback.format_exc())
1049 print >> sys.stderr, msg
1050
mblighcaa62c22008-04-07 21:51:17 +00001051
mbligha7007722009-01-13 00:37:11 +00001052site_server_job = utils.import_site_class(
1053 __file__, "autotest_lib.server.site_server_job", "site_server_job",
1054 base_server_job)
jadmanski0afbb632008-06-06 21:10:57 +00001055
mbligh0a8c3322009-04-28 18:32:19 +00001056class server_job(site_server_job):
jadmanski0afbb632008-06-06 21:10:57 +00001057 pass
jadmanskif37df842009-02-11 00:03:26 +00001058
1059
class warning_manager(object):
    """Controls which warning types are currently accepted.

    For every warning type this tracks the time intervals during which
    the type was disabled; a warning whose timestamp falls inside such
    an interval is considered invalid.
    """
    def __init__(self):
        # maps warning type -> list of (start, end) disabled intervals;
        # an end of None means "still disabled"
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Return True unless warning_type was disabled at timestamp.

        A warning is considered "invalid" if this type of warning was
        marked as "disabled" at the time the warning occured."""
        for start, end in self.disabled_warnings.get(warning_type, []):
            if start <= timestamp and (end is None or timestamp < end):
                return False
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        currently_disabled = intervals and intervals[-1][1] is None
        if not currently_disabled:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals and intervals[-1][1] is None:
            start = intervals[-1][0]
            intervals[-1] = (start, int(current_time_func()))