"""
The main job wrapper for the server side.

This is the core infrastructure. Derived from the client side job.py

Copyright Martin J. Bligh, Andy Whitcroft 2007
"""

import getpass, os, sys, re, stat, tempfile, time, select, subprocess
import traceback, shutil, warnings, fcntl, pickle, logging, itertools
from autotest_lib.client.bin import sysinfo
from autotest_lib.client.common_lib import error, log, utils, packages
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.server import test, subcommand, profilers
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils


def _control_segment_path(name):
    """Get the pathname of the named control segment file."""
    server_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(server_dir, "control_segments", name)


CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')


# by default provide a stub that generates no site data
def _get_site_job_data_dummy(job):
    return {}


# load up site-specific code for generating site-specific job data
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
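
# Illustrative sketch only: a deployment that wants extra per-site keyvals
# could provide autotest_lib/server/site_server_job.py with a function of
# the same shape as the dummy above, e.g.
#
#     def get_site_job_data(job):
#         # merged into the job keyval file by the constructor below;
#         # 'site_build' is a hypothetical key, not part of this module
#         return {'site_build': 'example-build-id'}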


class base_server_job(object):
    """
    The actual job against which we do everything.

    Properties:
            autodir
                    The top level autotest directory (/usr/local/autotest).
            serverdir
                    <autodir>/server/
            clientdir
                    <autodir>/client/
            conmuxdir
                    <autodir>/conmux/
            testdir
                    <autodir>/server/tests/
            site_testdir
                    <autodir>/server/site_tests/
            control
                    the control file for this job
            drop_caches_between_iterations
                    drop the pagecache between each iteration
    """

    STATUS_VERSION = 1
    test_tag = None

    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass='',
                 group_name=''):
        """
        Create a server side job object.

        @param control The pathname of the control file.
        @param args Passed to the control file.
        @param resultdir Where to write the results.
        @param label Description of the job.
        @param user Username for the job (email address).
        @param machines A list of hostnames of the machines to run the job on.
        @param client True if this is a client-side control file.
        @param parse_job If set, the results will be run through the TKO
                parser.
        @param ssh_user The SSH username. [root]
        @param ssh_port The SSH port number. [22]
        @param ssh_pass The SSH passphrase, if needed.
        @param group_name If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        """
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.site_testdir = os.path.join(self.serverdir, 'site_tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        self.toolsdir = os.path.join(self.autodir, 'client/tools')
        if control:
            self.control = self._load_control_file(control)
        else:
            self.control = ''
        self.resultdir = resultdir
        self.uncollected_log_file = None
        if resultdir:
            self.uncollected_log_file = os.path.join(resultdir,
                                                     'uncollected_logs')
            self.debugdir = os.path.join(resultdir, 'debug')

            if not os.path.exists(resultdir):
                os.mkdir(resultdir)
            if not os.path.exists(self.debugdir):
                os.mkdir(self.debugdir)
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self.ssh_user = ssh_user
        self.ssh_port = ssh_port
        self.ssh_pass = ssh_pass
        self.run_test_cleanup = True
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches_between_iterations = False

        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        if resultdir:
            self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        if not os.access(self.tmpdir, os.W_OK):
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                pass

        if not (os.access(self.tmpdir, os.W_OK) and os.path.isdir(self.tmpdir)):
            self.tmpdir = os.path.join(tempfile.gettempdir(),
                                       'autotest-' + getpass.getuser())
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                # If the problem was something other than the directory
                # already existing, the chmod below will raise the
                # exception instead.
                os.chmod(self.tmpdir, stat.S_IRWXU)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self.STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if group_name:
            job_data['host_group_name'] = group_name
        if self.resultdir:
            # only write these keyvals out on the first job in a resultdir
            if 'job_started' not in utils.read_keyval(self.resultdir):
                job_data.update(get_site_job_data(self))
                utils.write_keyval(self.resultdir, job_data)

        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False
        self.pkgmgr = packages.PackageManager(self.autodir,
                run_function_dargs={'timeout':600})
        self.pkgdir = os.path.join(self.autodir, 'packages')

        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()


    @staticmethod
    def _load_control_file(path):
        f = open(path)
        try:
            control_file = f.read()
        finally:
            f.close()
        return re.sub('\r', '', control_file)


    def _register_subcommand_hooks(self):
        """
        Register some hooks into the subcommand modules that allow us
        to properly clean up self.hosts created in forked subprocesses.
        """
        def on_fork(cmd):
            self._existing_hosts_on_fork = set(self.hosts)
        def on_join(cmd):
            new_hosts = self.hosts - self._existing_hosts_on_fork
            for host in new_hosts:
                host.close()
        subcommand.subcommand.register_fork_hook(on_fork)
        subcommand.subcommand.register_join_hook(on_join)


    def init_parser(self, resultdir):
        """
        Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.
        """
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self.STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job, self.job_model)
        else:
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx


    def cleanup_parser(self):
        """
        This should be called after the server job is finished
        to carry out any remaining cleanup (e.g. flushing any
        remaining test results to the results db)
        """
        if not self.using_parser:
            return
        final_tests = self.parser.end()
        for test in final_tests:
            self.__insert_test(test)
        self.using_parser = False


    def verify(self):
        if not self.machines:
            raise error.AutoservError('No machines specified to verify')
        if self.resultdir:
            os.chdir(self.resultdir)
        try:
            namespace = {'machines' : self.machines, 'job' : self,
                         'ssh_user' : self.ssh_user,
                         'ssh_port' : self.ssh_port,
                         'ssh_pass' : self.ssh_pass}
            self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
        except Exception, e:
            msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise


    def repair(self, host_protection):
        if not self.machines:
            raise error.AutoservError('No machines specified to repair')
        if self.resultdir:
            os.chdir(self.resultdir)
        namespace = {'machines': self.machines, 'job': self,
                     'ssh_user': self.ssh_user, 'ssh_port': self.ssh_port,
                     'ssh_pass': self.ssh_pass,
                     'protection_level': host_protection}

        self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)


    def precheck(self):
        """
        perform any additional checks in derived classes.
        """
        pass


    def enable_external_logging(self):
        """
        Start or restart external logging mechanism.
        """
        pass


    def disable_external_logging(self):
        """
        Pause or stop external logging mechanism.
        """
        pass


    def enable_test_cleanup(self):
        """
        Make tests run their test.cleanup phase by default.
        """
        self.run_test_cleanup = True


    def disable_test_cleanup(self):
        """
        Make tests skip their test.cleanup phase by default.
        """
        self.run_test_cleanup = False


    def use_external_logging(self):
        """
        Return True if external logging should be used.
        """
        return False


    def _make_parallel_wrapper(self, function, machines, log):
        """Wrap function as appropriate for calling by parallel_simple."""
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self.parse_job and is_forking and log:
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            def wrapper(machine):
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                machine_data = {'hostname' : machine,
                                'status_version' : str(self.STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            wrapper = function
        return wrapper


    def parallel_simple(self, function, machines, log=True, timeout=None,
                        return_results=False):
        """
        Run 'function' using subcommand.parallel_simple, with an extra wrapper
        to handle the necessary setup for continuous parsing, if possible. If
        continuous parsing is already properly initialized then this should
        just work.

        @param function: A callable to run in parallel given each machine.
        @param machines: A list of machine names to be passed one per
                subcommand invocation of function.
        @param log: If True, output will be written to a results subdirectory
                named after each machine.
        @param timeout: Seconds after which the function call should timeout.
        @param return_results: If True, instead of an AutoservError being
                raised on any error, a list of the results/exceptions from
                calling the function on each machine is returned.
                [default: False]

        @raises error.AutotestError: If any of the functions failed.
        """
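        # Illustrative control-file usage (sketch only; assumes the standard
        # hosts factory, hosts.create_host(), is available in the deployment):
        #
        #     def run(machine):
        #         host = hosts.create_host(machine)
        #         host.reboot()
        #
        #     job.parallel_simple(run, machines)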
        wrapper = self._make_parallel_wrapper(function, machines, log)
        return subcommand.parallel_simple(wrapper, machines,
                                          log=log, timeout=timeout,
                                          return_results=return_results)


    def parallel_on_machines(self, function, machines, timeout=None):
        """
        @param function: Called in parallel with one machine as its argument.
        @param machines: A list of machines to call function(machine) on.
        @param timeout: Seconds after which the function call should timeout.

        @returns A list of machines on which function(machine) returned
                without raising an exception.
        """
        results = self.parallel_simple(function, machines, timeout=timeout,
                                       return_results=True)
        success_machines = []
        for result, machine in itertools.izip(results, machines):
            if not isinstance(result, Exception):
                success_machines.append(machine)
        return success_machines


    USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        # For a normal job, make sure the uncollected logs file exists; for a
        # crashinfo-only run it should already exist, so bail out otherwise.
        if self.resultdir and not os.path.exists(self.uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self.uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            control = self.control
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        namespace['ssh_user'] = self.ssh_user
        namespace['ssh_port'] = self.ssh_port
        namespace['ssh_pass'] = self.ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            if install_before and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)

            if not only_collect_crashinfo:
                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self.USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self.client:
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                self._execute_code(server_control_file, namespace)

            # no error occurred, so we don't need to collect crashinfo
            collect_crashinfo = False
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    print 'Error %s removing dir %s' % (e,
                                                        temp_control_file_dir)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self.uncollected_log_file:
                os.remove(self.uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)


    def set_test_tag(self, tag=''):
        """Set tag to be added to test name of all following run_test steps."""
        self.test_tag = tag


    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run
        """
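        # Illustrative control-file usage (sketch; 'sleeptest' is just an
        # example of a packaged client test name):
        #
        #     job.run_test('sleeptest', tag='quick')
        #
        # which would record its results under <resultdir>/sleeptest.quick/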

        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self.test_tag
        if tag:
            testname += '.' + str(tag)
        subdir = testname

        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            return False
        elif exc_info:
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True


    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.
        """
        result, exc_info = None, None
        old_record_prefix = self.record_prefix
        try:
            self.record('START', subdir, name)
            self.record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                self.record_prefix = old_record_prefix
        except error.TestBaseException, e:
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info


    def run_group(self, function, *args, **dargs):
        """\
        function:
                subroutine to run
        *args:
                arguments for the function
        """

        name = function.__name__

        # Allow the tag for the group to be specified.
        tag = dargs.pop('tag', None)
        if tag:
            name = tag

        return self._run_group(name, None, function, *args, **dargs)[0]


    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

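        # Illustrative usage (sketch only; assumes 'host' is a server-side
        # Host object whose reboot() and get_kernel_ver() methods exist in
        # this deployment):
        #
        #     job.run_reboot(host.reboot, host.get_kernel_ver)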
        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            self.record_prefix += '\t'
            reboot_func()
        except Exception, e:
            self.record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})


    def run_control(self, path):
        """Execute a control file found at path (relative to the autotest
        path). Intended for executing a control file within a control file,
        not for running the top-level job control file."""
        path = os.path.join(self.autodir, path)
        control_file = self._load_control_file(path)
        self.run(control=control_file, control_file_dir=self.USE_TEMP_DIR)


    def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
        self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
                                   on_every_test)


    def add_sysinfo_logfile(self, file, on_every_test=False):
        self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
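
    # Illustrative control-file usage of the two helpers above (sketch; the
    # lspci command and '/var/log/messages' path are just examples):
    #
    #     job.add_sysinfo_command('lspci -v', logfile='lspci.txt')
    #     job.add_sysinfo_logfile('/var/log/messages', on_every_test=True)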


    def _add_sysinfo_loggable(self, loggable, on_every_test):
        if on_every_test:
            self.sysinfo.test_loggables.add(loggable)
        else:
            self.sysinfo.boot_loggables.add(loggable)


    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status.

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
        "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and are
        governed by self.record_prefix

        Multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
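        # For example, a successful dbench run might be rendered as the
        # following line (fields are tab-separated; spacing widened here
        # for readability, values illustrative):
        #
        #     GOOD  dbench  dbench  timestamp=...  localtime=...  completed successfully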
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        old_record_prefix = self.record_prefix
        try:
            if status_code.startswith("END "):
                self.record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            self.record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)


    def _read_warnings(self):
        """Poll all the warning loggers and extract any new warnings that have
        been logged. If the warnings belong to a category that is currently
        disabled, this method will discard them and they will no longer be
        retrievable.

        Returns a list of (timestamp, message) tuples, where timestamp is an
        integer epoch timestamp."""
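        # Each logger is expected to emit tab-separated lines of the form
        # "<epoch timestamp>\t<warning type>\t<message>", for example
        # (illustrative values): "1244500000\tNETWORK\tdropped ssh connection"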
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                # parse out the warning
                timestamp, msgtype, msg = line.split('\t', 2)
                timestamp = int(timestamp)
                # if the warning is valid, add it to the results
                if self.warning_manager.is_valid(timestamp, msgtype):
                    warnings.append((timestamp, msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings


    def disable_warnings(self, warning_type):
        self.warning_manager.disable_warnings(warning_type)
        self.record("INFO", None, None,
                    "disabling %s warnings" % warning_type,
                    {"warnings.disable": warning_type})


    def enable_warnings(self, warning_type):
        self.warning_manager.enable_warnings(warning_type)
        self.record("INFO", None, None,
                    "enabling %s warnings" % warning_type,
                    {"warnings.enable": warning_type})


    def get_status_log_path(self, subdir=None):
        """Return the path to the job status log.

        @param subdir - Optional parameter indicating that you want the path
            to a subdirectory status log.

        @returns The path where the status log should be.
        """
        if self.resultdir:
            if subdir:
                return os.path.join(self.resultdir, subdir, "status.log")
            else:
                return os.path.join(self.resultdir, "status.log")
        else:
            return None


    def _update_uncollected_logs_list(self, update_func):
        """Updates the uncollected logs list in a multi-process safe manner.

        @param update_func - a function that updates the list of uncollected
            logs. Should take one parameter, the list to be updated.
        """
        if self.uncollected_log_file:
            log_file = open(self.uncollected_log_file, "r+")
            fcntl.flock(log_file, fcntl.LOCK_EX)
            try:
                uncollected_logs = pickle.load(log_file)
                update_func(uncollected_logs)
                log_file.seek(0)
                log_file.truncate()
                pickle.dump(uncollected_logs, log_file)
                log_file.flush()
            finally:
                fcntl.flock(log_file, fcntl.LOCK_UN)
                log_file.close()


    def add_client_log(self, hostname, remote_path, local_path):
        """Adds a new set of client logs to the list of uncollected logs,
        to allow for future log recovery.

        @param hostname - the hostname of the machine holding the logs
        @param remote_path - the directory on the remote machine holding logs
        @param local_path - the local directory to copy the logs into
        """
        def update_func(logs_list):
            logs_list.append((hostname, remote_path, local_path))
        self._update_uncollected_logs_list(update_func)


    def remove_client_log(self, hostname, remote_path, local_path):
        """Removes a set of client logs from the list of uncollected logs,
        to allow for future log recovery.

        @param hostname - the hostname of the machine holding the logs
        @param remote_path - the directory on the remote machine holding logs
        @param local_path - the local directory to copy the logs into
        """
        def update_func(logs_list):
            logs_list.remove((hostname, remote_path, local_path))
        self._update_uncollected_logs_list(update_func)


    def _render_record(self, status_code, subdir, operation, status='',
                       epoch_time=None, record_prefix=None,
                       optional_fields=None):
        """
        Internal function to generate a record to be written into a
        status log. For use by server_job.* classes only.
        """
        if subdir:
            if re.match(r'[\n\t]', subdir):
                raise ValueError('Invalid character in subdir string')
            substr = subdir
        else:
            substr = '----'

        if not log.is_valid_status(status_code):
            raise ValueError('Invalid status code supplied: %s' % status_code)
        if not operation:
            operation = '----'
        if re.match(r'[\n\t]', operation):
            raise ValueError('Invalid character in operation string')
        operation = operation.rstrip()
        status = status.rstrip()
        status = re.sub(r"\t", " ", status)
        # Ensure any continuation lines are marked so we can
        # detect them in the status file to ensure it is parsable.
        status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)

        if not optional_fields:
            optional_fields = {}

        # Generate timestamps for inclusion in the logs
        if epoch_time is None:
            epoch_time = int(time.time())
        local_time = time.localtime(epoch_time)
        optional_fields["timestamp"] = str(epoch_time)
        optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
                                                     local_time)

        fields = [status_code, substr, operation]
        fields += ["%s=%s" % x for x in optional_fields.iteritems()]
        fields.append(status)

        if record_prefix is None:
            record_prefix = self.record_prefix

        msg = '\t'.join(str(x) for x in fields)
        return record_prefix + msg + '\n'


    def _record_prerendered(self, msg):
        """
        Record a pre-rendered msg into the status logs. The only
        change this makes to the message is to add on the local
        indentation. Should not be called outside of server_job.*
        classes. Unlike _record, this does not write the message
        to standard output.
        """
        lines = []
        status_file = self.get_status_log_path()
        status_log = open(status_file, 'a')
        for line in msg.splitlines():
            line = self.record_prefix + line + '\n'
            lines.append(line)
            status_log.write(line)
        status_log.close()
        self.__parse_status(lines)


    def _fill_server_control_namespace(self, namespace, protect=True):
        """
        Prepare a namespace to be used when executing server control files.

        This sets up the control file API by importing modules and making them
        available under the appropriate names within namespace.

        For use by _execute_code().

        Args:
            namespace: The namespace dictionary to fill in.
            protect: Boolean. If True (the default) any operation that would
                clobber an existing entry in namespace will cause an error.
        Raises:
            error.AutoservError: When a name would be clobbered by import.
        """
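        # After this runs, a server control file executed via _execute_code()
        # has names such as 'hosts', 'autotest' and 'run' available, so it
        # can, for example (illustrative only; assumes the hosts factory
        # provides create_host() in this deployment), do:
        #
        #     print run('uptime').stdout
        #     host = hosts.create_host(machines[0])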
        def _import_names(module_name, names=()):
            """
            Import a module and assign named attributes into namespace.

            Args:
                module_name: The string module name.
                names: A limiting list of names to import from module_name. If
                    empty (the default), all names are imported from the module
                    similar to a "from foo.bar import *" statement.
            Raises:
                error.AutoservError: When a name being imported would clobber
                    a name already in namespace.
            """
            module = __import__(module_name, {}, {}, names)

            # No names supplied? Import * from the lowest level module.
            # (Ugh, why do I have to implement this part myself?)
            if not names:
                for submodule_name in module_name.split('.')[1:]:
                    module = getattr(module, submodule_name)
                if hasattr(module, '__all__'):
                    names = getattr(module, '__all__')
                else:
                    names = dir(module)

            # Install each name into namespace, checking to make sure it
            # doesn't override anything that already exists.
            for name in names:
                # Check for conflicts to help prevent future problems.
                if name in namespace and protect:
                    if namespace[name] is not getattr(module, name):
                        raise error.AutoservError('importing name '
                                '%s from %s %r would override %r' %
                                (name, module_name, getattr(module, name),
                                 namespace[name]))
                    else:
                        # Encourage cleanliness and the use of __all__ for a
                        # more concrete API with fewer surprises on '*'
                        # imports.
                        warnings.warn('%s (%r) being imported from %s for use '
                                      'in server control files is not the '
                                      'first occurrence of that import.' %
                                      (name, namespace[name], module_name))

                namespace[name] = getattr(module, name)


        # This is the equivalent of prepending a bunch of import statements to
        # the front of the control script.
        namespace.update(os=os, sys=sys)
        _import_names('autotest_lib.server',
                ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
                 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
        _import_names('autotest_lib.server.subcommand',
                      ('parallel', 'parallel_simple', 'subcommand'))
        _import_names('autotest_lib.server.utils',
                      ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
        _import_names('autotest_lib.client.common_lib.error')
        _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))

        # Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing to be part of a public
        # API. -gps)
        #
        # XXX Base & SiteAutotest do not appear to use .job. Who does?
        namespace['autotest'].Autotest.job = self
        # server.hosts.base_classes.Host uses .job.
        namespace['hosts'].Host.job = self


    def _execute_code(self, code_file, namespace, protect=True):
        """
        Execute code using a copy of namespace as a server control script.

        Unless protect is explicitly set to False, the dict will not
        be modified.

        Args:
            code_file: The filename of the control file to execute.
            namespace: A dict containing names to make available during
                execution.
            protect: Boolean. If True (the default) a copy of the namespace
                dict is used during execution to prevent the code from
                modifying its contents outside of this function. If False the
                raw dict is passed in and modifications will be allowed.
        """
        if protect:
            namespace = namespace.copy()
        self._fill_server_control_namespace(namespace, protect=protect)
        # TODO: Simplify and get rid of the special cases for only 1 machine.
        if len(self.machines) > 1:
            machines_text = '\n'.join(self.machines) + '\n'
            # Only rewrite the file if it does not match our machine list.
            try:
                machines_f = open(MACHINES_FILENAME, 'r')
                existing_machines_text = machines_f.read()
                machines_f.close()
            except EnvironmentError:
                existing_machines_text = None
            if machines_text != existing_machines_text:
                utils.open_write_close(MACHINES_FILENAME, machines_text)
        execfile(code_file, namespace, namespace)


    def _record(self, status_code, subdir, operation, status='',
                epoch_time=None, optional_fields=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly, only by job.record as
        this would bypass the console monitor logging.
        """

        msg = self._render_record(status_code, subdir, operation, status,
                                  epoch_time, optional_fields=optional_fields)

        status_file = self.get_status_log_path()
        sys.stdout.write(msg)
        if status_file:
            open(status_file, "a").write(msg)
        if subdir:
            sub_status_file = self.get_status_log_path(subdir)
            open(sub_status_file, "a").write(msg)
        self.__parse_status(msg.splitlines())


    def __parse_status(self, new_lines):
        if not self.using_parser:
            return
        new_tests = self.parser.process_lines(new_lines)
        for test in new_tests:
            self.__insert_test(test)


    def __insert_test(self, test):
        """
        An internal method to insert a new test result into the
        database. This method will not raise an exception, even if an
        error occurs during the insert, to avoid failing a test
        simply because of unexpected database issues."""
        self.num_tests_run += 1
        if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
            self.num_tests_failed += 1
        try:
            self.results_db.insert_test(self.job_model, test)
        except Exception:
            msg = ("WARNING: An unexpected error occurred while "
                   "inserting test results into the database. "
                   "Ignoring error.\n" + traceback.format_exc())
            print >> sys.stderr, msg


site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job",
    base_server_job)


class server_job(site_server_job):
    pass


class warning_manager(object):
    """Class for controlling warning logs. Manages the enabling and disabling
    of warnings."""
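    # Illustrative timeline: disable_warnings("NETWORK") at t=100 followed by
    # enable_warnings("NETWORK") at t=200 records the interval (100, 200), so
    # a NETWORK warning stamped t=150 is dropped by is_valid() while one
    # stamped t=250 is kept.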
    def __init__(self):
        # a map of warning types to a list of disabled time intervals
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Indicates if a warning (based on the time it occurred and its type)
        is a valid warning. A warning is considered "invalid" if this type of
        warning was marked as "disabled" at the time the warning occurred."""
        disabled_intervals = self.disabled_warnings.get(warning_type, [])
        for start, end in disabled_intervals:
            if timestamp >= start and (end is None or timestamp < end):
                return False
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        if not intervals or intervals[-1][1] is not None:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals and intervals[-1][1] is None:
            intervals[-1] = (intervals[-1][0], int(current_time_func()))