blob: 8566e5e0fb9a42eeb3b758992618b768acd1605f [file] [log] [blame]
mbligh57e78662008-06-17 19:53:49 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
jadmanski6bb32d72009-03-19 20:25:24 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess
showard75cdfee2009-06-10 17:40:41 +000010import traceback, shutil, warnings, fcntl, pickle, logging
mbligh78c0daa2009-06-15 21:54:50 +000011import itertools
showard75cdfee2009-06-10 17:40:41 +000012from autotest_lib.client.bin import sysinfo
mbligh09108442008-10-15 16:27:38 +000013from autotest_lib.client.common_lib import error, log, utils, packages
showard75cdfee2009-06-10 17:40:41 +000014from autotest_lib.client.common_lib import logging_manager
jadmanski043e1132008-11-19 17:10:32 +000015from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000016from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000017
18
mbligh084bc172008-10-18 14:02:45 +000019def _control_segment_path(name):
20 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000021 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000022 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000023
24
# Filenames used for the control-file copies that run() writes into the
# control file directory, and for the machines list file.
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

# Pre-resolved paths to the standard control segments that live in the
# control_segments directory next to this module.
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000037
38
mbligh062ed152009-01-13 00:57:14 +000039# by default provide a stub that generates no site data
40def _get_site_job_data_dummy(job):
41 return {}
42
43
# Load up site-specific code for generating site-specific job data; the
# dummy above is used as the fallback when no site module provides one.
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
jadmanski10646442008-08-13 14:05:21 +000048
49
50class base_server_job(object):
mbligh2b92b862008-11-22 13:25:32 +000051 """
52 The actual job against which we do everything.
jadmanski10646442008-08-13 14:05:21 +000053
54 Properties:
55 autodir
56 The top level autotest directory (/usr/local/autotest).
57 serverdir
58 <autodir>/server/
59 clientdir
60 <autodir>/client/
61 conmuxdir
62 <autodir>/conmux/
63 testdir
64 <autodir>/server/tests/
65 site_testdir
66 <autodir>/server/site_tests/
67 control
68 the control file for this job
mblighb5dac432008-11-27 00:38:44 +000069 drop_caches_between_iterations
70 drop the pagecache between each iteration
jadmanski10646442008-08-13 14:05:21 +000071 """
72
73 STATUS_VERSION = 1
mblighc86113b2009-04-28 18:32:51 +000074 test_tag = None
jadmanski10646442008-08-13 14:05:21 +000075
76 def __init__(self, control, args, resultdir, label, user, machines,
77 client=False, parse_job='',
mbligh374f3412009-05-13 21:29:45 +000078 ssh_user='root', ssh_port=22, ssh_pass='',
79 group_name=''):
jadmanski10646442008-08-13 14:05:21 +000080 """
mbligh374f3412009-05-13 21:29:45 +000081 Create a server side job object.
mblighb5dac432008-11-27 00:38:44 +000082
mbligh374f3412009-05-13 21:29:45 +000083 @param control The pathname of the control file.
84 @param args Passed to the control file.
85 @param resultdir Where to throw the results.
86 @param label Description of the job.
87 @param user Username for the job (email address).
88 @param client True if this is a client-side control file.
89 @param parse_job bool, should the results be through the TKO parser.
90 @param ssh_user The SSH username. [root]
91 @param ssh_port The SSH port number. [22]
92 @param ssh_pass The SSH passphrase, if needed.
93 @param group_name If supplied, this will be written out as
94 host_group_name in the keyvals file for the parser.
jadmanski10646442008-08-13 14:05:21 +000095 """
96 path = os.path.dirname(__file__)
97 self.autodir = os.path.abspath(os.path.join(path, '..'))
98 self.serverdir = os.path.join(self.autodir, 'server')
99 self.testdir = os.path.join(self.serverdir, 'tests')
100 self.site_testdir = os.path.join(self.serverdir, 'site_tests')
101 self.tmpdir = os.path.join(self.serverdir, 'tmp')
102 self.conmuxdir = os.path.join(self.autodir, 'conmux')
103 self.clientdir = os.path.join(self.autodir, 'client')
104 self.toolsdir = os.path.join(self.autodir, 'client/tools')
105 if control:
jadmanskie432dd22009-01-30 15:04:51 +0000106 self.control = self._load_control_file(control)
jadmanski10646442008-08-13 14:05:21 +0000107 else:
showard45ae8192008-11-05 19:32:53 +0000108 self.control = ''
jadmanski10646442008-08-13 14:05:21 +0000109 self.resultdir = resultdir
mbligha788dc42009-03-26 21:10:16 +0000110 self.uncollected_log_file = None
mbligh80e1eba2008-11-19 00:26:18 +0000111 if resultdir:
mbligh374f3412009-05-13 21:29:45 +0000112 self.uncollected_log_file = os.path.join(resultdir,
113 'uncollected_logs')
mbligha788dc42009-03-26 21:10:16 +0000114 self.debugdir = os.path.join(resultdir, 'debug')
115
mbligh80e1eba2008-11-19 00:26:18 +0000116 if not os.path.exists(resultdir):
117 os.mkdir(resultdir)
mbligh80e1eba2008-11-19 00:26:18 +0000118 if not os.path.exists(self.debugdir):
119 os.mkdir(self.debugdir)
jadmanski10646442008-08-13 14:05:21 +0000120 self.label = label
121 self.user = user
122 self.args = args
123 self.machines = machines
124 self.client = client
125 self.record_prefix = ''
126 self.warning_loggers = set()
jadmanskif37df842009-02-11 00:03:26 +0000127 self.warning_manager = warning_manager()
jadmanski10646442008-08-13 14:05:21 +0000128 self.ssh_user = ssh_user
129 self.ssh_port = ssh_port
130 self.ssh_pass = ssh_pass
jadmanski23afbec2008-09-17 18:12:07 +0000131 self.run_test_cleanup = True
mbligh09108442008-10-15 16:27:38 +0000132 self.last_boot_tag = None
jadmanski53aaf382008-11-17 16:22:31 +0000133 self.hosts = set()
mblighb5dac432008-11-27 00:38:44 +0000134 self.drop_caches_between_iterations = False
jadmanski10646442008-08-13 14:05:21 +0000135
showard75cdfee2009-06-10 17:40:41 +0000136 self.logging = logging_manager.get_logging_manager(
137 manage_stdout_and_stderr=True, redirect_fds=True)
138 subcommand.logging_manager_object = self.logging
jadmanski10646442008-08-13 14:05:21 +0000139
mbligh80e1eba2008-11-19 00:26:18 +0000140 if resultdir:
141 self.sysinfo = sysinfo.sysinfo(self.resultdir)
jadmanski043e1132008-11-19 17:10:32 +0000142 self.profilers = profilers.profilers(self)
jadmanskic09fc152008-10-15 17:56:59 +0000143
jadmanski025099d2008-09-23 14:13:48 +0000144 if not os.access(self.tmpdir, os.W_OK):
145 try:
146 os.makedirs(self.tmpdir, 0700)
147 except os.error, e:
148 # Thrown if the directory already exists, which it may.
149 pass
150
mbligh2b92b862008-11-22 13:25:32 +0000151 if not (os.access(self.tmpdir, os.W_OK) and os.path.isdir(self.tmpdir)):
jadmanski025099d2008-09-23 14:13:48 +0000152 self.tmpdir = os.path.join(tempfile.gettempdir(),
153 'autotest-' + getpass.getuser())
154 try:
155 os.makedirs(self.tmpdir, 0700)
156 except os.error, e:
157 # Thrown if the directory already exists, which it may.
158 # If the problem was something other than the
159 # directory already existing, this chmod should throw as well
160 # exception.
161 os.chmod(self.tmpdir, stat.S_IRWXU)
162
jadmanski10646442008-08-13 14:05:21 +0000163 job_data = {'label' : label, 'user' : user,
164 'hostname' : ','.join(machines),
showard170873e2009-01-07 00:22:26 +0000165 'status_version' : str(self.STATUS_VERSION),
166 'job_started' : str(int(time.time()))}
mbligh374f3412009-05-13 21:29:45 +0000167 if group_name:
168 job_data['host_group_name'] = group_name
mbligh80e1eba2008-11-19 00:26:18 +0000169 if self.resultdir:
jadmanski58962982009-04-21 19:54:34 +0000170 # only write these keyvals out on the first job in a resultdir
171 if 'job_started' not in utils.read_keyval(self.resultdir):
172 job_data.update(get_site_job_data(self))
173 utils.write_keyval(self.resultdir, job_data)
jadmanski10646442008-08-13 14:05:21 +0000174
175 self.parse_job = parse_job
176 if self.parse_job and len(machines) == 1:
177 self.using_parser = True
178 self.init_parser(resultdir)
179 else:
180 self.using_parser = False
mbligh2b92b862008-11-22 13:25:32 +0000181 self.pkgmgr = packages.PackageManager(self.autodir,
182 run_function_dargs={'timeout':600})
jadmanski10646442008-08-13 14:05:21 +0000183 self.pkgdir = os.path.join(self.autodir, 'packages')
184
showard21baa452008-10-21 00:08:39 +0000185 self.num_tests_run = 0
186 self.num_tests_failed = 0
187
jadmanski550fdc22008-11-20 16:32:08 +0000188 self._register_subcommand_hooks()
189
190
jadmanskie432dd22009-01-30 15:04:51 +0000191 @staticmethod
192 def _load_control_file(path):
193 f = open(path)
194 try:
195 control_file = f.read()
196 finally:
197 f.close()
198 return re.sub('\r', '', control_file)
199
200
jadmanski550fdc22008-11-20 16:32:08 +0000201 def _register_subcommand_hooks(self):
mbligh2b92b862008-11-22 13:25:32 +0000202 """
203 Register some hooks into the subcommand modules that allow us
204 to properly clean up self.hosts created in forked subprocesses.
205 """
jadmanski550fdc22008-11-20 16:32:08 +0000206 def on_fork(cmd):
207 self._existing_hosts_on_fork = set(self.hosts)
208 def on_join(cmd):
209 new_hosts = self.hosts - self._existing_hosts_on_fork
210 for host in new_hosts:
211 host.close()
212 subcommand.subcommand.register_fork_hook(on_fork)
213 subcommand.subcommand.register_join_hook(on_join)
214
jadmanski10646442008-08-13 14:05:21 +0000215
216 def init_parser(self, resultdir):
mbligh2b92b862008-11-22 13:25:32 +0000217 """
218 Start the continuous parsing of resultdir. This sets up
jadmanski10646442008-08-13 14:05:21 +0000219 the database connection and inserts the basic job object into
mbligh2b92b862008-11-22 13:25:32 +0000220 the database if necessary.
221 """
jadmanski10646442008-08-13 14:05:21 +0000222 # redirect parser debugging to .parse.log
223 parse_log = os.path.join(resultdir, '.parse.log')
224 parse_log = open(parse_log, 'w', 0)
225 tko_utils.redirect_parser_debugging(parse_log)
226 # create a job model object and set up the db
227 self.results_db = tko_db.db(autocommit=True)
228 self.parser = status_lib.parser(self.STATUS_VERSION)
229 self.job_model = self.parser.make_job(resultdir)
230 self.parser.start(self.job_model)
231 # check if a job already exists in the db and insert it if
232 # it does not
233 job_idx = self.results_db.find_job(self.parse_job)
234 if job_idx is None:
mbligh2b92b862008-11-22 13:25:32 +0000235 self.results_db.insert_job(self.parse_job, self.job_model)
jadmanski10646442008-08-13 14:05:21 +0000236 else:
mbligh2b92b862008-11-22 13:25:32 +0000237 machine_idx = self.results_db.lookup_machine(self.job_model.machine)
jadmanski10646442008-08-13 14:05:21 +0000238 self.job_model.index = job_idx
239 self.job_model.machine_idx = machine_idx
240
241
242 def cleanup_parser(self):
mbligh2b92b862008-11-22 13:25:32 +0000243 """
244 This should be called after the server job is finished
jadmanski10646442008-08-13 14:05:21 +0000245 to carry out any remaining cleanup (e.g. flushing any
mbligh2b92b862008-11-22 13:25:32 +0000246 remaining test results to the results db)
247 """
jadmanski10646442008-08-13 14:05:21 +0000248 if not self.using_parser:
249 return
250 final_tests = self.parser.end()
251 for test in final_tests:
252 self.__insert_test(test)
253 self.using_parser = False
254
255
256 def verify(self):
257 if not self.machines:
mbligh084bc172008-10-18 14:02:45 +0000258 raise error.AutoservError('No machines specified to verify')
mbligh0fce4112008-11-27 00:37:17 +0000259 if self.resultdir:
260 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000261 try:
jadmanskicdd0c402008-09-19 21:21:31 +0000262 namespace = {'machines' : self.machines, 'job' : self,
263 'ssh_user' : self.ssh_user,
264 'ssh_port' : self.ssh_port,
265 'ssh_pass' : self.ssh_pass}
mbligh084bc172008-10-18 14:02:45 +0000266 self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000267 except Exception, e:
mbligh2b92b862008-11-22 13:25:32 +0000268 msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
jadmanski10646442008-08-13 14:05:21 +0000269 self.record('ABORT', None, None, msg)
270 raise
271
272
273 def repair(self, host_protection):
274 if not self.machines:
275 raise error.AutoservError('No machines specified to repair')
mbligh0fce4112008-11-27 00:37:17 +0000276 if self.resultdir:
277 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000278 namespace = {'machines': self.machines, 'job': self,
279 'ssh_user': self.ssh_user, 'ssh_port': self.ssh_port,
280 'ssh_pass': self.ssh_pass,
281 'protection_level': host_protection}
mbligh25c0b8c2009-01-24 01:44:17 +0000282
mbligh0931b0a2009-04-08 17:44:48 +0000283 self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000284
285
286 def precheck(self):
287 """
288 perform any additional checks in derived classes.
289 """
290 pass
291
292
293 def enable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000294 """
295 Start or restart external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000296 """
297 pass
298
299
300 def disable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000301 """
302 Pause or stop external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000303 """
304 pass
305
306
jadmanski23afbec2008-09-17 18:12:07 +0000307 def enable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000308 """
309 By default tests run test.cleanup
310 """
jadmanski23afbec2008-09-17 18:12:07 +0000311 self.run_test_cleanup = True
312
313
314 def disable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000315 """
316 By default tests do not run test.cleanup
317 """
jadmanski23afbec2008-09-17 18:12:07 +0000318 self.run_test_cleanup = False
319
320
jadmanski10646442008-08-13 14:05:21 +0000321 def use_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000322 """
323 Return True if external logging should be used.
jadmanski10646442008-08-13 14:05:21 +0000324 """
325 return False
326
327
    def _make_parallel_wrapper(self, function, machines, log):
        """Wrap function as appropriate for calling by parallel_simple."""
        # parallel_simple forks unless we are running against exactly the
        # one machine this job was created with
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self.parse_job and is_forking and log:
            # continuous parsing requested: give each machine its own
            # parse_job tag, results subdirectory and parser instance
            def wrapper(machine):
                # NOTE(review): these mutations of self (parse_job, machines,
                # resultdir) appear intended to run only inside the forked
                # child process -- confirm before calling without forking
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            # no parsing, but still give each machine its own results
            # subdirectory with a basic keyval file
            def wrapper(machine):
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                machine_data = {'hostname' : machine,
                                'status_version' : str(self.STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            # single machine (or logging disabled): call through unchanged
            wrapper = function
        return wrapper
355
356
357 def parallel_simple(self, function, machines, log=True, timeout=None,
358 return_results=False):
359 """
360 Run 'function' using parallel_simple, with an extra wrapper to handle
361 the necessary setup for continuous parsing, if possible. If continuous
362 parsing is already properly initialized then this should just work.
363
364 @param function: A callable to run in parallel given each machine.
365 @param machines: A list of machine names to be passed one per subcommand
366 invocation of function.
367 @param log: If True, output will be written to output in a subdirectory
368 named after each machine.
369 @param timeout: Seconds after which the function call should timeout.
370 @param return_results: If True instead of an AutoServError being raised
371 on any error a list of the results|exceptions from the function
372 called on each arg is returned. [default: False]
373
374 @raises error.AutotestError: If any of the functions failed.
375 """
376 wrapper = self._make_parallel_wrapper(function, machines, log)
377 return subcommand.parallel_simple(wrapper, machines,
378 log=log, timeout=timeout,
379 return_results=return_results)
380
381
382 def parallel_on_machines(self, function, machines, timeout=None):
383 """
384 @param func: Called in parallel with one machine as its argument.
385 @param machines: A list of machines to call function(machine) on.
386 @param timeout: Seconds after which the function call should timeout.
387
388 @returns A list of machines on which function(machine) returned
389 without raising an exception.
390 """
391 results = self.parallel_simple(func, machines, timeout=timeout,
392 return_results=True)
393 success_machines = []
394 for result, machine in itertools.izip(results, machines):
395 if not isinstance(result, Exception):
396 success_machines.append(machine)
397 return success_machines
jadmanski10646442008-08-13 14:05:21 +0000398
399
    # sentinel: pass as control_file_dir to have run() stage control file
    # copies in a throwaway temporary directory
    USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        """
        Run this job's control file and collect crash data afterwards.

        @param cleanup If True, run the cleanup control segment at the end.
        @param install_before If True, run the install control segment on the
                machines before executing the control file.
        @param install_after If True, run the install control segment on the
                machines after executing the control file.
        @param collect_crashdumps If True, collect crashdumps afterwards even
                when the control file succeeded.
        @param namespace Extra names exposed to the executed control file.
                (The mutable default is safe: the dict is copied immediately
                and never mutated in place.)
        @param control Control file contents; defaults to self.control.
        @param control_file_dir Directory to write control file copies into;
                defaults to the resultdir, or USE_TEMP_DIR for a temp dir.
        @param only_collect_crashinfo If True, skip executing the control
                file and only collect crash information.
        """
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self.uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self.uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            control = self.control
        if control_file_dir is None:
            control_file_dir = self.resultdir

        # populate the namespace the control file will execute in
        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        namespace['ssh_user'] = self.ssh_user
        namespace['ssh_port'] = self.ssh_port
        namespace['ssh_pass'] = self.ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        # assume crashinfo collection is needed until the control file
        # finishes without error
        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            if install_before and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)

            if not only_collect_crashinfo:
                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self.USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self.client:
                    # client-side control file: stage it for the client and
                    # run the generic client wrapper on the server side
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                logging.info("Processing control file")
                self._execute_code(server_control_file, namespace)
                logging.info("Finished processing control file")

                # no error occured, so we don't need to collect crashinfo
                collect_crashinfo = False
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    print 'Error %s removing dir %s' % (e,
                                                        temp_control_file_dir)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self.uncollected_log_file:
                os.remove(self.uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000497
498
mblighc86113b2009-04-28 18:32:51 +0000499 def set_test_tag(self, tag=''):
500 """Set tag to be added to test name of all following run_test steps."""
501 self.test_tag = tag
502
503
    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run

        Returns True if the test completed with a good status, False if it
        raised a test-level failure (TestBaseException); any other exception
        from the test is re-raised to the caller.
        """

        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        # an explicit tag argument overrides the job-wide test_tag
        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self.test_tag
        if tag:
            testname += '.' + str(tag)
        subdir = testname

        # refuse to clobber the results of a previous run of the same test
        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            # run the test and record its outcome in the status log
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        # test-level failures are reported via the return value; anything
        # else is propagated with its original traceback
        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            return False
        elif exc_info:
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
jadmanski10646442008-08-13 14:05:21 +0000550
551
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Records a START entry, runs 'function' with nested records indented
        one extra tab, then records a matching END entry.

        @returns (result, exc_info): exc_info is sys.exc_info() when the
                function raised a TestBaseException, None otherwise.
        @raises error.JobError when the function raised any other exception.
        """
        result, exc_info = None, None
        old_record_prefix = self.record_prefix
        try:
            self.record('START', subdir, name)
            self.record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                # always restore the indent before writing the END record
                self.record_prefix = old_record_prefix
        except error.TestBaseException, e:
            # test-level failure: record it and hand the exception back
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            # anything else aborts the group and becomes a JobError
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000577
578
579 def run_group(self, function, *args, **dargs):
580 """\
581 function:
582 subroutine to run
583 *args:
584 arguments for the function
585 """
586
587 name = function.__name__
588
589 # Allow the tag for the group to be specified.
590 tag = dargs.pop('tag', None)
591 if tag:
592 name = tag
593
jadmanskide292df2008-08-26 20:51:14 +0000594 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000595
596
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.

        Re-raises any exception raised by reboot_func after recording
        an END FAIL entry.
        """

        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            self.record_prefix += '\t'
            reboot_func()
        except Exception, e:
            # restore the indent before writing the END record, then re-raise
            self.record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            # capture the kernel version we rebooted into for the parser
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
624
625
jadmanskie432dd22009-01-30 15:04:51 +0000626 def run_control(self, path):
627 """Execute a control file found at path (relative to the autotest
628 path). Intended for executing a control file within a control file,
629 not for running the top-level job control file."""
630 path = os.path.join(self.autodir, path)
631 control_file = self._load_control_file(path)
632 self.run(control=control_file, control_file_dir=self.USE_TEMP_DIR)
633
634
jadmanskic09fc152008-10-15 17:56:59 +0000635 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
mbligh4395bbd2009-03-25 19:34:17 +0000636 self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
jadmanskic09fc152008-10-15 17:56:59 +0000637 on_every_test)
638
639
640 def add_sysinfo_logfile(self, file, on_every_test=False):
641 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
642
643
644 def _add_sysinfo_loggable(self, loggable, on_every_test):
645 if on_every_test:
646 self.sysinfo.test_loggables.add(loggable)
647 else:
648 self.sysinfo.boot_loggables.add(loggable)
649
650
    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                                        "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and is
        governed by self.record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        # (this local deliberately shadows the module-level 'warnings'
        # import within this function)
        warnings = self._read_warnings()
        old_record_prefix = self.record_prefix
        try:
            # warnings attached to an END line are indented one extra level
            # so they group under the block that is being ended
            if status_code.startswith("END "):
                self.record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            self.record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)
698
699
    def _read_warnings(self):
        """Poll all the warning loggers and extract any new warnings that have
        been logged. If the warnings belong to a category that is currently
        disabled, this method will discard them and they will no longer be
        retrievable.

        Returns a list of (timestamp, message) tuples, where timestamp is an
        integer epoch timestamp."""
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read (timeout 0: non-blocking poll)
            loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                # parse out the warning: lines are tab-separated
                # "<timestamp>\t<msgtype>\t<message>"
                timestamp, msgtype, msg = line.split('\t', 2)
                timestamp = int(timestamp)
                # if the warning is valid, add it to the results
                if self.warning_manager.is_valid(timestamp, msgtype):
                    warnings.append((timestamp, msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings
737
738
jadmanski16a7ff72009-04-01 18:19:53 +0000739 def disable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000740 self.warning_manager.disable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000741 self.record("INFO", None, None,
742 "disabling %s warnings" % warning_type,
743 {"warnings.disable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000744
745
jadmanski16a7ff72009-04-01 18:19:53 +0000746 def enable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000747 self.warning_manager.enable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000748 self.record("INFO", None, None,
749 "enabling %s warnings" % warning_type,
750 {"warnings.enable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000751
752
jadmanski779bd292009-03-19 17:33:33 +0000753 def get_status_log_path(self, subdir=None):
754 """Return the path to the job status log.
755
756 @param subdir - Optional paramter indicating that you want the path
757 to a subdirectory status log.
758
759 @returns The path where the status log should be.
760 """
mbligh210bae62009-04-01 18:33:13 +0000761 if self.resultdir:
762 if subdir:
763 return os.path.join(self.resultdir, subdir, "status.log")
764 else:
765 return os.path.join(self.resultdir, "status.log")
jadmanski779bd292009-03-19 17:33:33 +0000766 else:
mbligh210bae62009-04-01 18:33:13 +0000767 return None
jadmanski779bd292009-03-19 17:33:33 +0000768
769
    def _update_uncollected_logs_list(self, update_func):
        """Updates the uncollected logs list in a multi-process safe manner.

        @param update_func - a function that updates the list of uncollected
            logs. Should take one parameter, the list to be updated.
        """
        # no-op when this job is not tracking uncollected logs
        if self.uncollected_log_file:
            log_file = open(self.uncollected_log_file, "r+")
            # exclusive lock serializes the whole read-modify-write cycle
            # against other autoserv processes touching the same file
            fcntl.flock(log_file, fcntl.LOCK_EX)
            try:
                uncollected_logs = pickle.load(log_file)
                update_func(uncollected_logs)
                # rewrite the file in place with the updated list
                log_file.seek(0)
                log_file.truncate()
                pickle.dump(uncollected_logs, log_file)
                # flush before releasing the lock so no other process can
                # observe a partially-written file
                log_file.flush()
            finally:
                fcntl.flock(log_file, fcntl.LOCK_UN)
                log_file.close()
789
790
791 def add_client_log(self, hostname, remote_path, local_path):
792 """Adds a new set of client logs to the list of uncollected logs,
793 to allow for future log recovery.
794
795 @param host - the hostname of the machine holding the logs
796 @param remote_path - the directory on the remote machine holding logs
797 @param local_path - the local directory to copy the logs into
798 """
799 def update_func(logs_list):
800 logs_list.append((hostname, remote_path, local_path))
801 self._update_uncollected_logs_list(update_func)
802
803
804 def remove_client_log(self, hostname, remote_path, local_path):
805 """Removes a set of client logs from the list of uncollected logs,
806 to allow for future log recovery.
807
808 @param host - the hostname of the machine holding the logs
809 @param remote_path - the directory on the remote machine holding logs
810 @param local_path - the local directory to copy the logs into
811 """
812 def update_func(logs_list):
813 logs_list.remove((hostname, remote_path, local_path))
814 self._update_uncollected_logs_list(update_func)
815
816
jadmanski10646442008-08-13 14:05:21 +0000817 def _render_record(self, status_code, subdir, operation, status='',
818 epoch_time=None, record_prefix=None,
819 optional_fields=None):
820 """
821 Internal Function to generate a record to be written into a
822 status log. For use by server_job.* classes only.
823 """
824 if subdir:
825 if re.match(r'[\n\t]', subdir):
mbligh2b92b862008-11-22 13:25:32 +0000826 raise ValueError('Invalid character in subdir string')
jadmanski10646442008-08-13 14:05:21 +0000827 substr = subdir
828 else:
829 substr = '----'
830
mbligh1b3b3762008-09-25 02:46:34 +0000831 if not log.is_valid_status(status_code):
mbligh2b92b862008-11-22 13:25:32 +0000832 raise ValueError('Invalid status code supplied: %s' % status_code)
jadmanski10646442008-08-13 14:05:21 +0000833 if not operation:
834 operation = '----'
835 if re.match(r'[\n\t]', operation):
mbligh2b92b862008-11-22 13:25:32 +0000836 raise ValueError('Invalid character in operation string')
jadmanski10646442008-08-13 14:05:21 +0000837 operation = operation.rstrip()
838 status = status.rstrip()
839 status = re.sub(r"\t", " ", status)
840 # Ensure any continuation lines are marked so we can
841 # detect them in the status file to ensure it is parsable.
842 status = re.sub(r"\n", "\n" + self.record_prefix + " ", status)
843
844 if not optional_fields:
845 optional_fields = {}
846
847 # Generate timestamps for inclusion in the logs
848 if epoch_time is None:
849 epoch_time = int(time.time())
850 local_time = time.localtime(epoch_time)
851 optional_fields["timestamp"] = str(epoch_time)
852 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
853 local_time)
854
855 fields = [status_code, substr, operation]
856 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
857 fields.append(status)
858
859 if record_prefix is None:
860 record_prefix = self.record_prefix
861
862 msg = '\t'.join(str(x) for x in fields)
jadmanski10646442008-08-13 14:05:21 +0000863 return record_prefix + msg + '\n'
864
865
866 def _record_prerendered(self, msg):
867 """
868 Record a pre-rendered msg into the status logs. The only
869 change this makes to the message is to add on the local
870 indentation. Should not be called outside of server_job.*
871 classes. Unlike _record, this does not write the message
872 to standard output.
873 """
874 lines = []
jadmanski779bd292009-03-19 17:33:33 +0000875 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000876 status_log = open(status_file, 'a')
877 for line in msg.splitlines():
878 line = self.record_prefix + line + '\n'
879 lines.append(line)
880 status_log.write(line)
881 status_log.close()
882 self.__parse_status(lines)
883
884
    def _fill_server_control_namespace(self, namespace, protect=True):
        """
        Prepare a namespace to be used when executing server control files.

        This sets up the control file API by importing modules and making them
        available under the appropriate names within namespace.

        For use by _execute_code().

        Args:
          namespace: The namespace dictionary to fill in.
          protect: Boolean.  If True (the default) any operation that would
              clobber an existing entry in namespace will cause an error.
        Raises:
          error.AutoservError: When a name would be clobbered by import.
        """
        def _import_names(module_name, names=()):
            """
            Import a module and assign named attributes into namespace.

            Args:
                module_name: The string module name.
                names: A limiting list of names to import from module_name.  If
                    empty (the default), all names are imported from the module
                    similar to a "from foo.bar import *" statement.
            Raises:
                error.AutoservError: When a name being imported would clobber
                    a name already in namespace.
            """
            # a non-empty fromlist makes __import__ return the leaf module
            # rather than the top-level package
            module = __import__(module_name, {}, {}, names)

            # No names supplied?  Import * from the lowest level module.
            # (Ugh, why do I have to implement this part myself?)
            if not names:
                for submodule_name in module_name.split('.')[1:]:
                    module = getattr(module, submodule_name)
                if hasattr(module, '__all__'):
                    names = getattr(module, '__all__')
                else:
                    names = dir(module)

            # Install each name into namespace, checking to make sure it
            # doesn't override anything that already exists.
            for name in names:
                # Check for conflicts to help prevent future problems.
                if name in namespace and protect:
                    if namespace[name] is not getattr(module, name):
                        raise error.AutoservError('importing name '
                                '%s from %s %r would override %r' %
                                (name, module_name, getattr(module, name),
                                 namespace[name]))
                    else:
                        # Encourage cleanliness and the use of __all__ for a
                        # more concrete API with less surprises on '*' imports.
                        warnings.warn('%s (%r) being imported from %s for use '
                                      'in server control files is not the '
                                      'first occurrance of that import.' %
                                      (name, namespace[name], module_name))

                namespace[name] = getattr(module, name)


        # This is the equivalent of prepending a bunch of import statements to
        # the front of the control script.
        namespace.update(os=os, sys=sys, logging=logging)
        _import_names('autotest_lib.server',
                      ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
                       'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
        _import_names('autotest_lib.server.subcommand',
                      ('parallel', 'parallel_simple', 'subcommand'))
        _import_names('autotest_lib.server.utils',
                      ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
        _import_names('autotest_lib.client.common_lib.error')
        _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))

        # Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing be part of a public API. -gps)
        #
        # XXX Base & SiteAutotest do not appear to use .job.  Who does?
        namespace['autotest'].Autotest.job = self
        # server.hosts.base_classes.Host uses .job.
        namespace['hosts'].Host.job = self
967
968
    def _execute_code(self, code_file, namespace, protect=True):
        """
        Execute code using a copy of namespace as a server control script.

        Unless protect_namespace is explicitly set to False, the dict will not
        be modified.

        Args:
          code_file: The filename of the control file to execute.
          namespace: A dict containing names to make available during
              execution.
          protect: Boolean.  If True (the default) a copy of the namespace
              dict is used during execution to prevent the code from modifying
              its contents outside of this function.  If False the raw dict is
              passed in and modifications will be allowed.
        """
        if protect:
            namespace = namespace.copy()
        self._fill_server_control_namespace(namespace, protect=protect)
        # TODO: Simplify and get rid of the special cases for only 1 machine.
        if len(self.machines) > 1:
            machines_text = '\n'.join(self.machines) + '\n'
            # Only rewrite the file if it does not match our machine list.
            try:
                machines_f = open(MACHINES_FILENAME, 'r')
                existing_machines_text = machines_f.read()
                machines_f.close()
            except EnvironmentError:
                # file missing or unreadable; treat as out of date
                existing_machines_text = None
            if machines_text != existing_machines_text:
                utils.open_write_close(MACHINES_FILENAME, machines_text)
        # run the control file with the prepared namespace as both globals
        # and locals, mirroring how top-level module code executes
        execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +00001000
1001
1002 def _record(self, status_code, subdir, operation, status='',
1003 epoch_time=None, optional_fields=None):
1004 """
1005 Actual function for recording a single line into the status
1006 logs. Should never be called directly, only by job.record as
1007 this would bypass the console monitor logging.
1008 """
1009
mbligh2b92b862008-11-22 13:25:32 +00001010 msg = self._render_record(status_code, subdir, operation, status,
1011 epoch_time, optional_fields=optional_fields)
jadmanski10646442008-08-13 14:05:21 +00001012
jadmanski779bd292009-03-19 17:33:33 +00001013 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +00001014 sys.stdout.write(msg)
mbligh210bae62009-04-01 18:33:13 +00001015 if status_file:
1016 open(status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001017 if subdir:
jadmanski779bd292009-03-19 17:33:33 +00001018 sub_status_file = self.get_status_log_path(subdir)
1019 open(sub_status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001020 self.__parse_status(msg.splitlines())
1021
1022
1023 def __parse_status(self, new_lines):
1024 if not self.using_parser:
1025 return
1026 new_tests = self.parser.process_lines(new_lines)
1027 for test in new_tests:
1028 self.__insert_test(test)
1029
1030
1031 def __insert_test(self, test):
mbligh2b92b862008-11-22 13:25:32 +00001032 """
1033 An internal method to insert a new test result into the
jadmanski10646442008-08-13 14:05:21 +00001034 database. This method will not raise an exception, even if an
1035 error occurs during the insert, to avoid failing a test
1036 simply because of unexpected database issues."""
showard21baa452008-10-21 00:08:39 +00001037 self.num_tests_run += 1
1038 if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
1039 self.num_tests_failed += 1
jadmanski10646442008-08-13 14:05:21 +00001040 try:
1041 self.results_db.insert_test(self.job_model, test)
1042 except Exception:
1043 msg = ("WARNING: An unexpected error occured while "
1044 "inserting test results into the database. "
1045 "Ignoring error.\n" + traceback.format_exc())
1046 print >> sys.stderr, msg
1047
mblighcaa62c22008-04-07 21:51:17 +00001048
# Let site-specific code hook in by providing an
# autotest_lib.server.site_server_job.site_server_job class; base_server_job
# is passed as the base — presumably used as the fallback when no site
# implementation exists (see utils.import_site_class).
site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job",
    base_server_job)
jadmanski0afbb632008-06-06 21:10:57 +00001052
class server_job(site_server_job):
    """The concrete server job class used by callers; all behavior is
    inherited from site_server_job (and ultimately base_server_job)."""
    pass
jadmanskif37df842009-02-11 00:03:26 +00001055
1056
class warning_manager(object):
    """Class for controlling warning logs. Manages the enabling and disabling
    of warnings."""
    def __init__(self):
        # a map of warning types to a list of disabled time intervals;
        # an interval of (start, None) means "disabled from start onwards"
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Indicates if a warning (based on the time it occurred and its type)
        is a valid warning. A warning is considered "invalid" if this type of
        warning was marked as "disabled" at the time the warning occurred."""
        for start, end in self.disabled_warnings.get(warning_type, []):
            if start <= timestamp and (end is None or timestamp < end):
                return False
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        # only open a new interval if there is no interval still open
        has_open_interval = bool(intervals) and intervals[-1][1] is None
        if not has_open_interval:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        # only close an interval if one is currently open
        if intervals and intervals[-1][1] is None:
            start = intervals[-1][0]
            intervals[-1] = (start, int(current_time_func()))