blob: 3feb4bfeddf95e73bfb98873735e8bf370447f5f [file] [log] [blame]
mbligh57e78662008-06-17 19:53:49 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
jadmanski6bb32d72009-03-19 20:25:24 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess
showard75cdfee2009-06-10 17:40:41 +000010import traceback, shutil, warnings, fcntl, pickle, logging
mbligh78c0daa2009-06-15 21:54:50 +000011import itertools
showard75cdfee2009-06-10 17:40:41 +000012from autotest_lib.client.bin import sysinfo
mbligh09108442008-10-15 16:27:38 +000013from autotest_lib.client.common_lib import error, log, utils, packages
showard75cdfee2009-06-10 17:40:41 +000014from autotest_lib.client.common_lib import logging_manager
jadmanski043e1132008-11-19 17:10:32 +000015from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000016from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000017
18
mbligh084bc172008-10-18 14:02:45 +000019def _control_segment_path(name):
20 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000021 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000022 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000023
24
# Filenames autoserv writes into the control-file directory: the client-side
# control file, the server-side control file, and the per-job machines list.
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

# Canned control segments (under server/control_segments/) that implement
# the standard autoserv phases; executed via job._execute_code().
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000037
38
mbligh062ed152009-01-13 00:57:14 +000039# by default provide a stub that generates no site data
40def _get_site_job_data_dummy(job):
41 return {}
42
43
# load up site-specific code for generating site-specific job data
# (falls back to a dummy that returns {} when no site module is installed)
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
jadmanski10646442008-08-13 14:05:21 +000048
49
50class base_server_job(object):
mbligh2b92b862008-11-22 13:25:32 +000051 """
52 The actual job against which we do everything.
jadmanski10646442008-08-13 14:05:21 +000053
54 Properties:
55 autodir
56 The top level autotest directory (/usr/local/autotest).
57 serverdir
58 <autodir>/server/
59 clientdir
60 <autodir>/client/
61 conmuxdir
62 <autodir>/conmux/
63 testdir
64 <autodir>/server/tests/
65 site_testdir
66 <autodir>/server/site_tests/
67 control
68 the control file for this job
mblighb5dac432008-11-27 00:38:44 +000069 drop_caches_between_iterations
70 drop the pagecache between each iteration
showarda6082ef2009-10-12 20:25:44 +000071 default_profile_only
72 default value for the test.execute profile_only parameter
jadmanski10646442008-08-13 14:05:21 +000073 """
74
75 STATUS_VERSION = 1
jadmanski10646442008-08-13 14:05:21 +000076
    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass='',
                 group_name='', tag=''):
        """
        Create a server side job object.

        @param control: The pathname of the control file.
        @param args: Passed to the control file.
        @param resultdir: Where to throw the results.
        @param label: Description of the job.
        @param user: Username for the job (email address).
        @param client: True if this is a client-side control file.
        @param parse_job: string, if supplied it is the job execution tag that
                the results will be passed through to the TKO parser with.
        @param ssh_user: The SSH username. [root]
        @param ssh_port: The SSH port number. [22]
        @param ssh_pass: The SSH passphrase, if needed.
        @param group_name: If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        @param tag: The job execution tag from the scheduler. [optional]
        """
        # derive the autotest directory layout from this file's location
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.site_testdir = os.path.join(self.serverdir, 'site_tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        self.toolsdir = os.path.join(self.autodir, 'client/tools')
        # control may be a path (read it now) or absent (empty control)
        if control:
            self.control = self._load_control_file(control)
        else:
            self.control = ''
        self.resultdir = resultdir
        self.uncollected_log_file = None
        if resultdir:
            # tracks client logs not yet copied back; see run()
            self.uncollected_log_file = os.path.join(resultdir,
                                                     'uncollected_logs')
            self.debugdir = os.path.join(resultdir, 'debug')

            if not os.path.exists(resultdir):
                os.mkdir(resultdir)
            if not os.path.exists(self.debugdir):
                os.mkdir(self.debugdir)
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        # status.log indentation prefix and warning plumbing used by record()
        self.record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self.ssh_user = ssh_user
        self.ssh_port = ssh_port
        self.ssh_pass = ssh_pass
        self.tag = tag
        self.default_profile_only = False
        self.run_test_cleanup = True
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches_between_iterations = False

        # take over stdout/stderr so subcommands can be logged per-machine
        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        if resultdir:
            self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        # make sure we have a writable tmpdir, falling back to a
        # per-user directory under the system temp dir
        if not os.access(self.tmpdir, os.W_OK):
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                pass

        if not (os.access(self.tmpdir, os.W_OK) and os.path.isdir(self.tmpdir)):
            self.tmpdir = os.path.join(tempfile.gettempdir(),
                                       'autotest-' + getpass.getuser())
            try:
                os.makedirs(self.tmpdir, 0700)
            except os.error, e:
                # Thrown if the directory already exists, which it may.
                # If the problem was something other than the
                # directory already existing, this chmod should throw as well
                # exception.
                os.chmod(self.tmpdir, stat.S_IRWXU)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self.STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if group_name:
            job_data['host_group_name'] = group_name
        if self.resultdir:
            # only write these keyvals out on the first job in a resultdir
            if 'job_started' not in utils.read_keyval(self.resultdir):
                job_data.update(get_site_job_data(self))
                utils.write_keyval(self.resultdir, job_data)

        # continuous TKO parsing only works for single-machine jobs;
        # multi-machine jobs set up parsing per-machine in the fork wrapper
        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False
        self.pkgmgr = packages.PackageManager(self.autodir,
                                             run_function_dargs={'timeout':600})
        self.pkgdir = os.path.join(self.autodir, 'packages')

        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()
        self._test_tag_prefix = None
jadmanski550fdc22008-11-20 16:32:08 +0000195
196
jadmanskie432dd22009-01-30 15:04:51 +0000197 @staticmethod
198 def _load_control_file(path):
199 f = open(path)
200 try:
201 control_file = f.read()
202 finally:
203 f.close()
204 return re.sub('\r', '', control_file)
205
206
jadmanski550fdc22008-11-20 16:32:08 +0000207 def _register_subcommand_hooks(self):
mbligh2b92b862008-11-22 13:25:32 +0000208 """
209 Register some hooks into the subcommand modules that allow us
210 to properly clean up self.hosts created in forked subprocesses.
211 """
jadmanski550fdc22008-11-20 16:32:08 +0000212 def on_fork(cmd):
213 self._existing_hosts_on_fork = set(self.hosts)
214 def on_join(cmd):
215 new_hosts = self.hosts - self._existing_hosts_on_fork
216 for host in new_hosts:
217 host.close()
218 subcommand.subcommand.register_fork_hook(on_fork)
219 subcommand.subcommand.register_join_hook(on_join)
220
jadmanski10646442008-08-13 14:05:21 +0000221
    def init_parser(self, resultdir):
        """
        Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.
        """
        # redirect parser debugging to .parse.log (opened unbuffered so
        # debug output survives a crash)
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self.STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job, self.job_model)
        else:
            # job already present: attach its db indices to our model
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx
246
247
248 def cleanup_parser(self):
mbligh2b92b862008-11-22 13:25:32 +0000249 """
250 This should be called after the server job is finished
jadmanski10646442008-08-13 14:05:21 +0000251 to carry out any remaining cleanup (e.g. flushing any
mbligh2b92b862008-11-22 13:25:32 +0000252 remaining test results to the results db)
253 """
jadmanski10646442008-08-13 14:05:21 +0000254 if not self.using_parser:
255 return
256 final_tests = self.parser.end()
257 for test in final_tests:
258 self.__insert_test(test)
259 self.using_parser = False
260
261
262 def verify(self):
263 if not self.machines:
mbligh084bc172008-10-18 14:02:45 +0000264 raise error.AutoservError('No machines specified to verify')
mbligh0fce4112008-11-27 00:37:17 +0000265 if self.resultdir:
266 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000267 try:
jadmanskicdd0c402008-09-19 21:21:31 +0000268 namespace = {'machines' : self.machines, 'job' : self,
269 'ssh_user' : self.ssh_user,
270 'ssh_port' : self.ssh_port,
271 'ssh_pass' : self.ssh_pass}
mbligh084bc172008-10-18 14:02:45 +0000272 self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000273 except Exception, e:
mbligh2b92b862008-11-22 13:25:32 +0000274 msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
jadmanski10646442008-08-13 14:05:21 +0000275 self.record('ABORT', None, None, msg)
276 raise
277
278
279 def repair(self, host_protection):
280 if not self.machines:
281 raise error.AutoservError('No machines specified to repair')
mbligh0fce4112008-11-27 00:37:17 +0000282 if self.resultdir:
283 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000284 namespace = {'machines': self.machines, 'job': self,
285 'ssh_user': self.ssh_user, 'ssh_port': self.ssh_port,
286 'ssh_pass': self.ssh_pass,
287 'protection_level': host_protection}
mbligh25c0b8c2009-01-24 01:44:17 +0000288
mbligh0931b0a2009-04-08 17:44:48 +0000289 self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000290
291
292 def precheck(self):
293 """
294 perform any additional checks in derived classes.
295 """
296 pass
297
298
299 def enable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000300 """
301 Start or restart external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000302 """
303 pass
304
305
306 def disable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000307 """
308 Pause or stop external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000309 """
310 pass
311
312
showard07e27af2009-10-12 20:32:01 +0000313 def set_default_profile_only(self, val):
314 """ Set the default_profile_only mode. """
315 self.default_profile_only = val
316
317
jadmanski23afbec2008-09-17 18:12:07 +0000318 def enable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000319 """
320 By default tests run test.cleanup
321 """
jadmanski23afbec2008-09-17 18:12:07 +0000322 self.run_test_cleanup = True
323
324
325 def disable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000326 """
327 By default tests do not run test.cleanup
328 """
jadmanski23afbec2008-09-17 18:12:07 +0000329 self.run_test_cleanup = False
330
331
jadmanski10646442008-08-13 14:05:21 +0000332 def use_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000333 """
334 Return True if external logging should be used.
jadmanski10646442008-08-13 14:05:21 +0000335 """
336 return False
337
338
    def _make_parallel_wrapper(self, function, machines, log):
        """Wrap function as appropriate for calling by parallel_simple.

        Returns either function itself or a closure around it that sets up
        a per-machine resultdir (and, when parsing is enabled, a per-machine
        parser) before invoking it.

        NOTE(review): the closures mutate self (parse_job, machines,
        resultdir, using_parser) — presumably each wrapper runs in a forked
        subcommand subprocess so these mutations stay child-local; confirm
        against subcommand.parallel_simple before relying on that.
        """
        # forking happens unless this is a single-machine job being run
        # over exactly its own machine list
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self.parse_job and is_forking and log:
            # parsing enabled: give each machine its own execution tag,
            # resultdir subdirectory and parser lifecycle
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            # no parsing, but still give each machine its own resultdir
            # subdirectory with a hostname/status_version keyval
            def wrapper(machine):
                self.resultdir = os.path.join(self.resultdir, machine)
                os.chdir(self.resultdir)
                machine_data = {'hostname' : machine,
                                'status_version' : str(self.STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            # single machine (or logging disabled): call function directly
            wrapper = function
        return wrapper
366
367
368 def parallel_simple(self, function, machines, log=True, timeout=None,
369 return_results=False):
370 """
371 Run 'function' using parallel_simple, with an extra wrapper to handle
372 the necessary setup for continuous parsing, if possible. If continuous
373 parsing is already properly initialized then this should just work.
374
375 @param function: A callable to run in parallel given each machine.
376 @param machines: A list of machine names to be passed one per subcommand
377 invocation of function.
378 @param log: If True, output will be written to output in a subdirectory
379 named after each machine.
380 @param timeout: Seconds after which the function call should timeout.
381 @param return_results: If True instead of an AutoServError being raised
382 on any error a list of the results|exceptions from the function
383 called on each arg is returned. [default: False]
384
385 @raises error.AutotestError: If any of the functions failed.
386 """
387 wrapper = self._make_parallel_wrapper(function, machines, log)
388 return subcommand.parallel_simple(wrapper, machines,
389 log=log, timeout=timeout,
390 return_results=return_results)
391
392
393 def parallel_on_machines(self, function, machines, timeout=None):
394 """
showardcd5fac42009-07-06 20:19:43 +0000395 @param function: Called in parallel with one machine as its argument.
mbligh415dc212009-06-15 21:53:34 +0000396 @param machines: A list of machines to call function(machine) on.
397 @param timeout: Seconds after which the function call should timeout.
398
399 @returns A list of machines on which function(machine) returned
400 without raising an exception.
401 """
showardcd5fac42009-07-06 20:19:43 +0000402 results = self.parallel_simple(function, machines, timeout=timeout,
mbligh415dc212009-06-15 21:53:34 +0000403 return_results=True)
404 success_machines = []
405 for result, machine in itertools.izip(results, machines):
406 if not isinstance(result, Exception):
407 success_machines.append(machine)
408 return success_machines
jadmanski10646442008-08-13 14:05:21 +0000409
410
    # sentinel: pass as control_file_dir to request a throwaway temp dir
    USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        """Execute the job's control file, with optional install/cleanup
        phases and crashdump/crashinfo collection afterwards.

        @param cleanup: Run the cleanup control segment at the end.
        @param install_before: Run the install control segment first.
        @param install_after: Run the install control segment at the end.
        @param collect_crashdumps: Collect crashdumps even on success.
        @param namespace: Extra names for the control file's namespace.
                NOTE: mutable default is safe here — it is copied below
                before being modified.
        @param control: Control file text; defaults to self.control.
        @param control_file_dir: Where to write the control file copies,
                or USE_TEMP_DIR for a temp dir; defaults to self.resultdir.
        @param only_collect_crashinfo: Skip the control file and only run
                crashinfo collection.
        """
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self.uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self.uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            control = self.control
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        namespace['ssh_user'] = self.ssh_user
        namespace['ssh_port'] = self.ssh_port
        namespace['ssh_pass'] = self.ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        # assume failure until the control file completes cleanly
        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            try:
                if install_before and machines:
                    self._execute_code(INSTALL_CONTROL_FILE, namespace)

                if only_collect_crashinfo:
                    return

                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self.USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self.client:
                    # client-side job: the server runs the generic client
                    # wrapper, and the real control file goes to the client
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                logging.info("Processing control file")
                self._execute_code(server_control_file, namespace)
                logging.info("Finished processing control file")

                # no error occured, so we don't need to collect crashinfo
                collect_crashinfo = False
            except:
                try:
                    logging.exception(
                        'Exception escaped control file, job aborting:')
                except:
                    pass # don't let logging exceptions here interfere
                raise
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    logging.warn('Could not remove temp directory %s: %s',
                                 temp_control_file_dir, e)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self.uncollected_log_file:
                os.remove(self.uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000518
519
mbligh7eacbc22009-07-28 23:13:56 +0000520 def set_test_tag_prefix(self, tag=''):
521 """
522 Set tag to be prepended (separated by a '.') to test name of all
523 following run_test steps.
524 """
525 self._test_tag_prefix = tag
mblighc86113b2009-04-28 18:32:51 +0000526
527
    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run

        Returns True on success; False when the test raised a
        TestBaseException (already recorded); re-raises anything else.
        """

        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        # combine any explicit tag with the prefix from set_test_tag_prefix
        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self._test_tag_prefix
        elif self._test_tag_prefix:
            tag = '%s.%s' % (self._test_tag_prefix, tag)

        if tag:
            testname += '.' + str(tag)
        subdir = testname

        # refuse to clobber the output of a previous run of the same test/tag
        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            # run the test and record its outcome in the status log
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            # test-level failure: already recorded, report via return value
            return False
        elif exc_info:
            # re-raise with the original traceback preserved
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
jadmanski10646442008-08-13 14:05:21 +0000577
578
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Records START/END lines around the call and indents nested records
        via self.record_prefix. Returns (result, exc_info) where exc_info
        is sys.exc_info() if a TestBaseException was raised (swallowed
        here), and None otherwise; any other exception aborts the group
        and is converted into a JobError.
        """
        result, exc_info = None, None
        old_record_prefix = self.record_prefix
        try:
            self.record('START', subdir, name)
            # indent everything recorded inside the group
            self.record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                # restore the indent before the END line is written
                self.record_prefix = old_record_prefix
        except error.TestBaseException, e:
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000604
605
606 def run_group(self, function, *args, **dargs):
607 """\
608 function:
609 subroutine to run
610 *args:
611 arguments for the function
612 """
613
614 name = function.__name__
615
616 # Allow the tag for the group to be specified.
617 tag = dargs.pop('tag', None)
618 if tag:
619 name = tag
620
jadmanskide292df2008-08-26 20:51:14 +0000621 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000622
623
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            # indent anything recorded during the reboot itself
            self.record_prefix += '\t'
            reboot_func()
        except Exception, e:
            # restore indent before writing the END line, then re-raise
            self.record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            # reboot succeeded: capture the (possibly new) kernel version
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
651
652
jadmanskie432dd22009-01-30 15:04:51 +0000653 def run_control(self, path):
654 """Execute a control file found at path (relative to the autotest
655 path). Intended for executing a control file within a control file,
656 not for running the top-level job control file."""
657 path = os.path.join(self.autodir, path)
658 control_file = self._load_control_file(path)
659 self.run(control=control_file, control_file_dir=self.USE_TEMP_DIR)
660
661
jadmanskic09fc152008-10-15 17:56:59 +0000662 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
mbligh4395bbd2009-03-25 19:34:17 +0000663 self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
jadmanskic09fc152008-10-15 17:56:59 +0000664 on_every_test)
665
666
667 def add_sysinfo_logfile(self, file, on_every_test=False):
668 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
669
670
671 def _add_sysinfo_loggable(self, loggable, on_every_test):
672 if on_every_test:
673 self.sysinfo.test_loggables.add(loggable)
674 else:
675 self.sysinfo.boot_loggables.add(loggable)
676
677
jadmanski10646442008-08-13 14:05:21 +0000678 def record(self, status_code, subdir, operation, status='',
679 optional_fields=None):
680 """
681 Record job-level status
682
683 The intent is to make this file both machine parseable and
684 human readable. That involves a little more complexity, but
685 really isn't all that bad ;-)
686
687 Format is <status code>\t<subdir>\t<operation>\t<status>
688
mbligh1b3b3762008-09-25 02:46:34 +0000689 status code: see common_lib.log.is_valid_status()
jadmanski10646442008-08-13 14:05:21 +0000690 for valid status definition
691
692 subdir: MUST be a relevant subdirectory in the results,
693 or None, which will be represented as '----'
694
695 operation: description of what you ran (e.g. "dbench", or
696 "mkfs -t foobar /dev/sda9")
697
698 status: error message or "completed sucessfully"
699
700 ------------------------------------------------------------
701
702 Initial tabs indicate indent levels for grouping, and is
703 governed by self.record_prefix
704
705 multiline messages have secondary lines prefaced by a double
706 space (' ')
707
708 Executing this method will trigger the logging of all new
709 warnings to date from the various console loggers.
710 """
711 # poll all our warning loggers for new warnings
712 warnings = self._read_warnings()
jadmanski2de83112009-04-01 18:21:04 +0000713 old_record_prefix = self.record_prefix
714 try:
715 if status_code.startswith("END "):
716 self.record_prefix += "\t"
717 for timestamp, msg in warnings:
718 self._record("WARN", None, None, msg, timestamp)
719 finally:
720 self.record_prefix = old_record_prefix
jadmanski10646442008-08-13 14:05:21 +0000721
722 # write out the actual status log line
723 self._record(status_code, subdir, operation, status,
724 optional_fields=optional_fields)
725
726
727 def _read_warnings(self):
jadmanskif37df842009-02-11 00:03:26 +0000728 """Poll all the warning loggers and extract any new warnings that have
729 been logged. If the warnings belong to a category that is currently
730 disabled, this method will discard them and they will no longer be
731 retrievable.
732
733 Returns a list of (timestamp, message) tuples, where timestamp is an
734 integer epoch timestamp."""
jadmanski10646442008-08-13 14:05:21 +0000735 warnings = []
736 while True:
737 # pull in a line of output from every logger that has
738 # output ready to be read
mbligh2b92b862008-11-22 13:25:32 +0000739 loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
jadmanski10646442008-08-13 14:05:21 +0000740 closed_loggers = set()
741 for logger in loggers:
742 line = logger.readline()
743 # record any broken pipes (aka line == empty)
744 if len(line) == 0:
745 closed_loggers.add(logger)
746 continue
jadmanskif37df842009-02-11 00:03:26 +0000747 # parse out the warning
748 timestamp, msgtype, msg = line.split('\t', 2)
749 timestamp = int(timestamp)
750 # if the warning is valid, add it to the results
751 if self.warning_manager.is_valid(timestamp, msgtype):
752 warnings.append((timestamp, msg.strip()))
jadmanski10646442008-08-13 14:05:21 +0000753
754 # stop listening to loggers that are closed
755 self.warning_loggers -= closed_loggers
756
757 # stop if none of the loggers have any output left
758 if not loggers:
759 break
760
761 # sort into timestamp order
762 warnings.sort()
763 return warnings
764
765
jadmanski16a7ff72009-04-01 18:19:53 +0000766 def disable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000767 self.warning_manager.disable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000768 self.record("INFO", None, None,
769 "disabling %s warnings" % warning_type,
770 {"warnings.disable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000771
772
jadmanski16a7ff72009-04-01 18:19:53 +0000773 def enable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000774 self.warning_manager.enable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000775 self.record("INFO", None, None,
776 "enabling %s warnings" % warning_type,
777 {"warnings.enable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000778
779
jadmanski779bd292009-03-19 17:33:33 +0000780 def get_status_log_path(self, subdir=None):
781 """Return the path to the job status log.
782
783 @param subdir - Optional paramter indicating that you want the path
784 to a subdirectory status log.
785
786 @returns The path where the status log should be.
787 """
mbligh210bae62009-04-01 18:33:13 +0000788 if self.resultdir:
789 if subdir:
790 return os.path.join(self.resultdir, subdir, "status.log")
791 else:
792 return os.path.join(self.resultdir, "status.log")
jadmanski779bd292009-03-19 17:33:33 +0000793 else:
mbligh210bae62009-04-01 18:33:13 +0000794 return None
jadmanski779bd292009-03-19 17:33:33 +0000795
796
jadmanski6bb32d72009-03-19 20:25:24 +0000797 def _update_uncollected_logs_list(self, update_func):
798 """Updates the uncollected logs list in a multi-process safe manner.
799
800 @param update_func - a function that updates the list of uncollected
801 logs. Should take one parameter, the list to be updated.
802 """
mbligha788dc42009-03-26 21:10:16 +0000803 if self.uncollected_log_file:
804 log_file = open(self.uncollected_log_file, "r+")
805 fcntl.flock(log_file, fcntl.LOCK_EX)
jadmanski6bb32d72009-03-19 20:25:24 +0000806 try:
807 uncollected_logs = pickle.load(log_file)
808 update_func(uncollected_logs)
809 log_file.seek(0)
810 log_file.truncate()
811 pickle.dump(uncollected_logs, log_file)
jadmanski3bff9092009-04-22 18:09:47 +0000812 log_file.flush()
jadmanski6bb32d72009-03-19 20:25:24 +0000813 finally:
814 fcntl.flock(log_file, fcntl.LOCK_UN)
815 log_file.close()
816
817
818 def add_client_log(self, hostname, remote_path, local_path):
819 """Adds a new set of client logs to the list of uncollected logs,
820 to allow for future log recovery.
821
822 @param host - the hostname of the machine holding the logs
823 @param remote_path - the directory on the remote machine holding logs
824 @param local_path - the local directory to copy the logs into
825 """
826 def update_func(logs_list):
827 logs_list.append((hostname, remote_path, local_path))
828 self._update_uncollected_logs_list(update_func)
829
830
831 def remove_client_log(self, hostname, remote_path, local_path):
832 """Removes a set of client logs from the list of uncollected logs,
833 to allow for future log recovery.
834
835 @param host - the hostname of the machine holding the logs
836 @param remote_path - the directory on the remote machine holding logs
837 @param local_path - the local directory to copy the logs into
838 """
839 def update_func(logs_list):
840 logs_list.remove((hostname, remote_path, local_path))
841 self._update_uncollected_logs_list(update_func)
842
843
jadmanski10646442008-08-13 14:05:21 +0000844 def _render_record(self, status_code, subdir, operation, status='',
845 epoch_time=None, record_prefix=None,
846 optional_fields=None):
847 """
848 Internal Function to generate a record to be written into a
849 status log. For use by server_job.* classes only.
850 """
851 if subdir:
852 if re.match(r'[\n\t]', subdir):
mbligh2b92b862008-11-22 13:25:32 +0000853 raise ValueError('Invalid character in subdir string')
jadmanski10646442008-08-13 14:05:21 +0000854 substr = subdir
855 else:
856 substr = '----'
857
mbligh1b3b3762008-09-25 02:46:34 +0000858 if not log.is_valid_status(status_code):
mbligh2b92b862008-11-22 13:25:32 +0000859 raise ValueError('Invalid status code supplied: %s' % status_code)
jadmanski10646442008-08-13 14:05:21 +0000860 if not operation:
861 operation = '----'
862 if re.match(r'[\n\t]', operation):
mbligh2b92b862008-11-22 13:25:32 +0000863 raise ValueError('Invalid character in operation string')
jadmanski10646442008-08-13 14:05:21 +0000864 operation = operation.rstrip()
865 status = status.rstrip()
866 status = re.sub(r"\t", " ", status)
867 # Ensure any continuation lines are marked so we can
868 # detect them in the status file to ensure it is parsable.
869 status = re.sub(r"\n", "\n" + self.record_prefix + " ", status)
870
871 if not optional_fields:
872 optional_fields = {}
873
874 # Generate timestamps for inclusion in the logs
875 if epoch_time is None:
876 epoch_time = int(time.time())
877 local_time = time.localtime(epoch_time)
878 optional_fields["timestamp"] = str(epoch_time)
879 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
880 local_time)
881
882 fields = [status_code, substr, operation]
883 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
884 fields.append(status)
885
886 if record_prefix is None:
887 record_prefix = self.record_prefix
888
889 msg = '\t'.join(str(x) for x in fields)
jadmanski10646442008-08-13 14:05:21 +0000890 return record_prefix + msg + '\n'
891
892
893 def _record_prerendered(self, msg):
894 """
895 Record a pre-rendered msg into the status logs. The only
896 change this makes to the message is to add on the local
897 indentation. Should not be called outside of server_job.*
898 classes. Unlike _record, this does not write the message
899 to standard output.
900 """
901 lines = []
jadmanski779bd292009-03-19 17:33:33 +0000902 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000903 status_log = open(status_file, 'a')
904 for line in msg.splitlines():
905 line = self.record_prefix + line + '\n'
906 lines.append(line)
907 status_log.write(line)
908 status_log.close()
909 self.__parse_status(lines)
910
911
    def _fill_server_control_namespace(self, namespace, protect=True):
        """
        Prepare a namespace to be used when executing server control files.

        This sets up the control file API by importing modules and making
        them available under the appropriate names within namespace.

        For use by _execute_code().

        Args:
            namespace: The namespace dictionary to fill in.
            protect: Boolean. If True (the default) any operation that
                would clobber an existing entry in namespace will cause
                an error.
        Raises:
            error.AutoservError: When a name would be clobbered by import.
        """
        def _import_names(module_name, names=()):
            """
            Import a module and assign named attributes into namespace.

            Args:
                module_name: The string module name.
                names: A limiting list of names to import from module_name.
                    If empty (the default), all names are imported from the
                    module similar to a "from foo.bar import *" statement.
            Raises:
                error.AutoservError: When a name being imported would
                    clobber a name already in namespace.
            """
            # __import__ with a fromlist returns the top-level package;
            # the wanted submodule is walked down to below when needed.
            module = __import__(module_name, {}, {}, names)

            # No names supplied? Import * from the lowest level module.
            # (Ugh, why do I have to implement this part myself?)
            if not names:
                for submodule_name in module_name.split('.')[1:]:
                    module = getattr(module, submodule_name)
                if hasattr(module, '__all__'):
                    names = getattr(module, '__all__')
                else:
                    names = dir(module)

            # Install each name into namespace, checking to make sure it
            # doesn't override anything that already exists.
            for name in names:
                # Check for conflicts to help prevent future problems.
                if name in namespace and protect:
                    if namespace[name] is not getattr(module, name):
                        raise error.AutoservError('importing name '
                                '%s from %s %r would override %r' %
                                (name, module_name, getattr(module, name),
                                 namespace[name]))
                    else:
                        # Encourage cleanliness and the use of __all__ for a
                        # more concrete API with less surprises on '*' imports.
                        warnings.warn('%s (%r) being imported from %s for use '
                                      'in server control files is not the '
                                      'first occurrance of that import.' %
                                      (name, namespace[name], module_name))

                namespace[name] = getattr(module, name)


        # This is the equivalent of prepending a bunch of import statements to
        # the front of the control script.
        namespace.update(os=os, sys=sys, logging=logging)
        _import_names('autotest_lib.server',
                ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
                 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
        _import_names('autotest_lib.server.subcommand',
                      ('parallel', 'parallel_simple', 'subcommand'))
        _import_names('autotest_lib.server.utils',
                      ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
        _import_names('autotest_lib.client.common_lib.error')
        _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))

        # Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing be part of a public API. -gps)
        #
        # XXX Base & SiteAutotest do not appear to use .job.  Who does?
        namespace['autotest'].Autotest.job = self
        # server.hosts.base_classes.Host uses .job.
        namespace['hosts'].Host.job = self
994
995
996 def _execute_code(self, code_file, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000997 """
998 Execute code using a copy of namespace as a server control script.
mbligh084bc172008-10-18 14:02:45 +0000999
1000 Unless protect_namespace is explicitly set to False, the dict will not
1001 be modified.
1002
1003 Args:
1004 code_file: The filename of the control file to execute.
1005 namespace: A dict containing names to make available during execution.
1006 protect: Boolean. If True (the default) a copy of the namespace dict
1007 is used during execution to prevent the code from modifying its
1008 contents outside of this function. If False the raw dict is
1009 passed in and modifications will be allowed.
1010 """
1011 if protect:
1012 namespace = namespace.copy()
1013 self._fill_server_control_namespace(namespace, protect=protect)
1014 # TODO: Simplify and get rid of the special cases for only 1 machine.
showard3e66e8c2008-10-27 19:20:51 +00001015 if len(self.machines) > 1:
mbligh084bc172008-10-18 14:02:45 +00001016 machines_text = '\n'.join(self.machines) + '\n'
1017 # Only rewrite the file if it does not match our machine list.
1018 try:
1019 machines_f = open(MACHINES_FILENAME, 'r')
1020 existing_machines_text = machines_f.read()
1021 machines_f.close()
1022 except EnvironmentError:
1023 existing_machines_text = None
1024 if machines_text != existing_machines_text:
1025 utils.open_write_close(MACHINES_FILENAME, machines_text)
1026 execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +00001027
1028
1029 def _record(self, status_code, subdir, operation, status='',
1030 epoch_time=None, optional_fields=None):
1031 """
1032 Actual function for recording a single line into the status
1033 logs. Should never be called directly, only by job.record as
1034 this would bypass the console monitor logging.
1035 """
1036
mbligh2b92b862008-11-22 13:25:32 +00001037 msg = self._render_record(status_code, subdir, operation, status,
1038 epoch_time, optional_fields=optional_fields)
jadmanski10646442008-08-13 14:05:21 +00001039
jadmanski779bd292009-03-19 17:33:33 +00001040 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +00001041 sys.stdout.write(msg)
mbligh210bae62009-04-01 18:33:13 +00001042 if status_file:
1043 open(status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001044 if subdir:
jadmanski779bd292009-03-19 17:33:33 +00001045 sub_status_file = self.get_status_log_path(subdir)
1046 open(sub_status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001047 self.__parse_status(msg.splitlines())
1048
1049
1050 def __parse_status(self, new_lines):
1051 if not self.using_parser:
1052 return
1053 new_tests = self.parser.process_lines(new_lines)
1054 for test in new_tests:
1055 self.__insert_test(test)
1056
1057
1058 def __insert_test(self, test):
mbligh2b92b862008-11-22 13:25:32 +00001059 """
1060 An internal method to insert a new test result into the
jadmanski10646442008-08-13 14:05:21 +00001061 database. This method will not raise an exception, even if an
1062 error occurs during the insert, to avoid failing a test
1063 simply because of unexpected database issues."""
showard21baa452008-10-21 00:08:39 +00001064 self.num_tests_run += 1
1065 if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
1066 self.num_tests_failed += 1
jadmanski10646442008-08-13 14:05:21 +00001067 try:
1068 self.results_db.insert_test(self.job_model, test)
1069 except Exception:
1070 msg = ("WARNING: An unexpected error occured while "
1071 "inserting test results into the database. "
1072 "Ignoring error.\n" + traceback.format_exc())
1073 print >> sys.stderr, msg
1074
mblighcaa62c22008-04-07 21:51:17 +00001075
# Hook for site-specific customization: if a site_server_job module exists
# it supplies the job class; otherwise this falls back to base_server_job.
site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job",
    base_server_job)
jadmanski0afbb632008-06-06 21:10:57 +00001079
class server_job(site_server_job):
    """The concrete server job class; behavior comes from site_server_job
    (which itself falls back to base_server_job when no site-specific
    class is installed)."""
    pass
jadmanskif37df842009-02-11 00:03:26 +00001082
1083
class warning_manager(object):
    """Class for controlling warning logs. Manages the enabling and disabling
    of warnings."""
    def __init__(self):
        # a map of warning types to a list of disabled time intervals,
        # stored as (start, end) epoch-second pairs; end is None while the
        # interval is still open (i.e. the type is currently disabled)
        self.disabled_warnings = {}
1090
1091
1092 def is_valid(self, timestamp, warning_type):
1093 """Indicates if a warning (based on the time it occured and its type)
1094 is a valid warning. A warning is considered "invalid" if this type of
1095 warning was marked as "disabled" at the time the warning occured."""
1096 disabled_intervals = self.disabled_warnings.get(warning_type, [])
1097 for start, end in disabled_intervals:
1098 if timestamp >= start and (end is None or timestamp < end):
1099 return False
1100 return True
1101
1102
1103 def disable_warnings(self, warning_type, current_time_func=time.time):
1104 """As of now, disables all further warnings of this type."""
1105 intervals = self.disabled_warnings.setdefault(warning_type, [])
1106 if not intervals or intervals[-1][1] is not None:
jadmanski16a7ff72009-04-01 18:19:53 +00001107 intervals.append((int(current_time_func()), None))
jadmanskif37df842009-02-11 00:03:26 +00001108
1109
    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type.

        @param warning_type: category of warning to re-enable.
        @param current_time_func: callable returning the current epoch
                time; parameterized to ease testing.
        """
        intervals = self.disabled_warnings.get(warning_type, [])
        # if the most recent interval for this type is still open (end is
        # None), close it at the current time; otherwise the type is
        # already enabled and there is nothing to do
        if intervals and intervals[-1][1] is None:
            intervals[-1] = (intervals[-1][0], int(current_time_func()))