blob: 073aa7b357124f649f19267706eecd5f841891c5 [file] [log] [blame]
mbligh57e78662008-06-17 19:53:49 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
jadmanski6bb32d72009-03-19 20:25:24 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess
showard75cdfee2009-06-10 17:40:41 +000010import traceback, shutil, warnings, fcntl, pickle, logging
mbligh78c0daa2009-06-15 21:54:50 +000011import itertools
showard75cdfee2009-06-10 17:40:41 +000012from autotest_lib.client.bin import sysinfo
mbligh0d0f67d2009-11-06 03:15:03 +000013from autotest_lib.client.common_lib import base_job
mbligh09108442008-10-15 16:27:38 +000014from autotest_lib.client.common_lib import error, log, utils, packages
showard75cdfee2009-06-10 17:40:41 +000015from autotest_lib.client.common_lib import logging_manager
jadmanski043e1132008-11-19 17:10:32 +000016from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000017from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000018
19
mbligh084bc172008-10-18 14:02:45 +000020def _control_segment_path(name):
21 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000022 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000023 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000024
25
# Filenames used for control files inside a job's working directory.
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

# Canned control segments implementing the standard server-side operations.
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000038
39
mbligh062ed152009-01-13 00:57:14 +000040# by default provide a stub that generates no site data
41def _get_site_job_data_dummy(job):
42 return {}
43
44
# load up site-specific code for generating site-specific job data
# (falls back to the no-op stub above when no site module is installed)
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
jadmanski10646442008-08-13 14:05:21 +000049
50
class base_server_job(base_job.base_job):
    """The server-side concrete implementation of base_job.

    Optional properties provided by this implementation:
        serverdir
        conmuxdir

        num_tests_run
        num_tests_failed

        warning_manager
        warning_loggers
    """

    # version of the status log format this job writes/parses
    _STATUS_VERSION = 1
jadmanski10646442008-08-13 14:05:21 +000066
    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job='',
                 ssh_user='root', ssh_port=22, ssh_pass='',
                 group_name='', tag=''):
        """
        Create a server side job object.

        @param control: The pathname of the control file.
        @param args: Passed to the control file.
        @param resultdir: Where to throw the results.
        @param label: Description of the job.
        @param user: Username for the job (email address).
        @param client: True if this is a client-side control file.
        @param parse_job: string, if supplied it is the job execution tag that
                the results will be passed through to the TKO parser with.
        @param ssh_user: The SSH username.  [root]
        @param ssh_port: The SSH port number.  [22]
        @param ssh_pass: The SSH passphrase, if needed.
        @param group_name: If supplied, this will be written out as
                host_group_name in the keyvals file for the parser.
        @param tag: The job execution tag from the scheduler.  [optional]
        """
        super(base_server_job, self).__init__(resultdir=resultdir)

        # NOTE(review): 'path' appears unused in this method
        path = os.path.dirname(__file__)
        self.control = control
        # tracks logs that still need collecting from the remote machines
        self._uncollected_log_file = os.path.join(self.resultdir,
                                                  'uncollected_logs')
        debugdir = os.path.join(self.resultdir, 'debug')
        if not os.path.exists(debugdir):
            os.mkdir(debugdir)

        if user:
            self.user = user
        else:
            # no user supplied; fall back to the user running autoserv
            self.user = getpass.getuser()

        self._args = args
        self.machines = machines
        self._client = client
        # indent prefix prepended to status log lines for grouping
        self._record_prefix = ''
        self.warning_loggers = set()
        self.warning_manager = warning_manager()
        self._ssh_user = ssh_user
        self._ssh_port = ssh_port
        self._ssh_pass = ssh_pass
        self.tag = tag
        self.default_profile_only = False
        self.run_test_cleanup = True
        self.last_boot_tag = None
        self.hosts = set()
        self.drop_caches = False
        self.drop_caches_between_iterations = False

        # take over stdout/stderr so everything goes through the log manager
        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        subcommand.logging_manager_object = self.logging

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self.profilers = profilers.profilers(self)

        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self._STATUS_VERSION),
                    'job_started' : str(int(time.time()))}
        if group_name:
            job_data['host_group_name'] = group_name

        # only write these keyvals out on the first job in a resultdir
        if 'job_started' not in utils.read_keyval(self.resultdir):
            job_data.update(get_site_job_data(self))
            utils.write_keyval(self.resultdir, job_data)

        self._parse_job = parse_job
        # continuous parsing only works for single-machine jobs with a tag
        if self._parse_job and len(machines) == 1:
            self._using_parser = True
            self.init_parser(self.resultdir)
        else:
            self._using_parser = False
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout':600})
        self.num_tests_run = 0
        self.num_tests_failed = 0

        self._register_subcommand_hooks()
        self._test_tag_prefix = None

        # these components aren't usable on the server
        self.bootloader = None
        self.harness = None
158
159 @classmethod
160 def _find_base_directories(cls):
161 """
162 Determine locations of autodir, clientdir and serverdir. Assumes
163 that this file is located within serverdir and uses __file__ along
164 with relative paths to resolve the location.
165 """
166 serverdir = os.path.abspath(os.path.dirname(__file__))
167 autodir = os.path.normpath(os.path.join(serverdir, '..'))
168 clientdir = os.path.join(autodir, 'client')
169 return autodir, clientdir, serverdir
170
171
172 def _find_resultdir(self, resultdir):
173 """
174 Determine the location of resultdir. For server jobs we expect one to
175 always be explicitly passed in to __init__, so just return that.
176 """
177 if resultdir:
178 return os.path.normpath(resultdir)
179 else:
180 return None
181
jadmanski550fdc22008-11-20 16:32:08 +0000182
jadmanskie432dd22009-01-30 15:04:51 +0000183 @staticmethod
184 def _load_control_file(path):
185 f = open(path)
186 try:
187 control_file = f.read()
188 finally:
189 f.close()
190 return re.sub('\r', '', control_file)
191
192
jadmanski550fdc22008-11-20 16:32:08 +0000193 def _register_subcommand_hooks(self):
mbligh2b92b862008-11-22 13:25:32 +0000194 """
195 Register some hooks into the subcommand modules that allow us
196 to properly clean up self.hosts created in forked subprocesses.
197 """
jadmanski550fdc22008-11-20 16:32:08 +0000198 def on_fork(cmd):
199 self._existing_hosts_on_fork = set(self.hosts)
200 def on_join(cmd):
201 new_hosts = self.hosts - self._existing_hosts_on_fork
202 for host in new_hosts:
203 host.close()
204 subcommand.subcommand.register_fork_hook(on_fork)
205 subcommand.subcommand.register_join_hook(on_join)
206
jadmanski10646442008-08-13 14:05:21 +0000207
208 def init_parser(self, resultdir):
mbligh2b92b862008-11-22 13:25:32 +0000209 """
210 Start the continuous parsing of resultdir. This sets up
jadmanski10646442008-08-13 14:05:21 +0000211 the database connection and inserts the basic job object into
mbligh2b92b862008-11-22 13:25:32 +0000212 the database if necessary.
213 """
jadmanski10646442008-08-13 14:05:21 +0000214 # redirect parser debugging to .parse.log
215 parse_log = os.path.join(resultdir, '.parse.log')
216 parse_log = open(parse_log, 'w', 0)
217 tko_utils.redirect_parser_debugging(parse_log)
218 # create a job model object and set up the db
219 self.results_db = tko_db.db(autocommit=True)
mbligh0d0f67d2009-11-06 03:15:03 +0000220 self.parser = status_lib.parser(self._STATUS_VERSION)
jadmanski10646442008-08-13 14:05:21 +0000221 self.job_model = self.parser.make_job(resultdir)
222 self.parser.start(self.job_model)
223 # check if a job already exists in the db and insert it if
224 # it does not
mbligh0d0f67d2009-11-06 03:15:03 +0000225 job_idx = self.results_db.find_job(self._parse_job)
jadmanski10646442008-08-13 14:05:21 +0000226 if job_idx is None:
mbligh0d0f67d2009-11-06 03:15:03 +0000227 self.results_db.insert_job(self._parse_job, self.job_model)
jadmanski10646442008-08-13 14:05:21 +0000228 else:
mbligh2b92b862008-11-22 13:25:32 +0000229 machine_idx = self.results_db.lookup_machine(self.job_model.machine)
jadmanski10646442008-08-13 14:05:21 +0000230 self.job_model.index = job_idx
231 self.job_model.machine_idx = machine_idx
232
233
234 def cleanup_parser(self):
mbligh2b92b862008-11-22 13:25:32 +0000235 """
236 This should be called after the server job is finished
jadmanski10646442008-08-13 14:05:21 +0000237 to carry out any remaining cleanup (e.g. flushing any
mbligh2b92b862008-11-22 13:25:32 +0000238 remaining test results to the results db)
239 """
mbligh0d0f67d2009-11-06 03:15:03 +0000240 if not self._using_parser:
jadmanski10646442008-08-13 14:05:21 +0000241 return
242 final_tests = self.parser.end()
243 for test in final_tests:
244 self.__insert_test(test)
mbligh0d0f67d2009-11-06 03:15:03 +0000245 self._using_parser = False
jadmanski10646442008-08-13 14:05:21 +0000246
247
248 def verify(self):
249 if not self.machines:
mbligh084bc172008-10-18 14:02:45 +0000250 raise error.AutoservError('No machines specified to verify')
mbligh0fce4112008-11-27 00:37:17 +0000251 if self.resultdir:
252 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000253 try:
jadmanskicdd0c402008-09-19 21:21:31 +0000254 namespace = {'machines' : self.machines, 'job' : self,
mbligh0d0f67d2009-11-06 03:15:03 +0000255 'ssh_user' : self._ssh_user,
256 'ssh_port' : self._ssh_port,
257 'ssh_pass' : self._ssh_pass}
mbligh084bc172008-10-18 14:02:45 +0000258 self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000259 except Exception, e:
mbligh2b92b862008-11-22 13:25:32 +0000260 msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
jadmanski10646442008-08-13 14:05:21 +0000261 self.record('ABORT', None, None, msg)
262 raise
263
264
265 def repair(self, host_protection):
266 if not self.machines:
267 raise error.AutoservError('No machines specified to repair')
mbligh0fce4112008-11-27 00:37:17 +0000268 if self.resultdir:
269 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000270 namespace = {'machines': self.machines, 'job': self,
mbligh0d0f67d2009-11-06 03:15:03 +0000271 'ssh_user': self._ssh_user, 'ssh_port': self._ssh_port,
272 'ssh_pass': self._ssh_pass,
jadmanski10646442008-08-13 14:05:21 +0000273 'protection_level': host_protection}
mbligh25c0b8c2009-01-24 01:44:17 +0000274
mbligh0931b0a2009-04-08 17:44:48 +0000275 self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000276
277
278 def precheck(self):
279 """
280 perform any additional checks in derived classes.
281 """
282 pass
283
284
285 def enable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000286 """
287 Start or restart external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000288 """
289 pass
290
291
292 def disable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000293 """
294 Pause or stop external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000295 """
296 pass
297
298
showard07e27af2009-10-12 20:32:01 +0000299 def set_default_profile_only(self, val):
300 """ Set the default_profile_only mode. """
301 self.default_profile_only = val
302
303
jadmanski23afbec2008-09-17 18:12:07 +0000304 def enable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000305 """
306 By default tests run test.cleanup
307 """
jadmanski23afbec2008-09-17 18:12:07 +0000308 self.run_test_cleanup = True
309
310
311 def disable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000312 """
313 By default tests do not run test.cleanup
314 """
jadmanski23afbec2008-09-17 18:12:07 +0000315 self.run_test_cleanup = False
316
317
jadmanski10646442008-08-13 14:05:21 +0000318 def use_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000319 """
320 Return True if external logging should be used.
jadmanski10646442008-08-13 14:05:21 +0000321 """
322 return False
323
324
    def _make_parallel_wrapper(self, function, machines, log):
        """Wrap function as appropriate for calling by parallel_simple.

        Three cases, in order:
          1. parsing enabled, forking, logging: per-machine wrapper that
             narrows the job to that machine and restarts the parser;
          2. multiple machines with logging: per-machine wrapper that only
             switches execution context and writes the machine keyvals;
          3. otherwise: function is returned unwrapped.
        """
        # a single subcommand for our own single machine runs in-process
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self._parse_job and is_forking and log:
            def wrapper(machine):
                # runs in the forked child: safe to mutate job state here
                self._parse_job += "/" + machine
                self._using_parser = True
                self.machines = [machine]
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            def wrapper(machine):
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                machine_data = {'hostname' : machine,
                                'status_version' : str(self._STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            wrapper = function
        return wrapper
352
353
354 def parallel_simple(self, function, machines, log=True, timeout=None,
355 return_results=False):
356 """
357 Run 'function' using parallel_simple, with an extra wrapper to handle
358 the necessary setup for continuous parsing, if possible. If continuous
359 parsing is already properly initialized then this should just work.
360
361 @param function: A callable to run in parallel given each machine.
362 @param machines: A list of machine names to be passed one per subcommand
363 invocation of function.
364 @param log: If True, output will be written to output in a subdirectory
365 named after each machine.
366 @param timeout: Seconds after which the function call should timeout.
367 @param return_results: If True instead of an AutoServError being raised
368 on any error a list of the results|exceptions from the function
369 called on each arg is returned. [default: False]
370
371 @raises error.AutotestError: If any of the functions failed.
372 """
373 wrapper = self._make_parallel_wrapper(function, machines, log)
374 return subcommand.parallel_simple(wrapper, machines,
375 log=log, timeout=timeout,
376 return_results=return_results)
377
378
379 def parallel_on_machines(self, function, machines, timeout=None):
380 """
showardcd5fac42009-07-06 20:19:43 +0000381 @param function: Called in parallel with one machine as its argument.
mbligh415dc212009-06-15 21:53:34 +0000382 @param machines: A list of machines to call function(machine) on.
383 @param timeout: Seconds after which the function call should timeout.
384
385 @returns A list of machines on which function(machine) returned
386 without raising an exception.
387 """
showardcd5fac42009-07-06 20:19:43 +0000388 results = self.parallel_simple(function, machines, timeout=timeout,
mbligh415dc212009-06-15 21:53:34 +0000389 return_results=True)
390 success_machines = []
391 for result, machine in itertools.izip(results, machines):
392 if not isinstance(result, Exception):
393 success_machines.append(machine)
394 return success_machines
jadmanski10646442008-08-13 14:05:21 +0000395
396
    # sentinel: pass as control_file_dir to request a throwaway temp dir
    _USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        """
        Run the job's control file.

        @param cleanup: If True, run the cleanup control segment at the end.
        @param install_before: If True, run the install segment first.
        @param install_after: If True, run the install segment afterwards.
        @param collect_crashdumps: If True, collect crashdumps even when no
                error escaped the control file.
        @param namespace: Extra names exposed to the executed control file
                (copied, so the caller's dict is never mutated; the mutable
                default is therefore safe here).
        @param control: Control file text; defaults to the contents of
                self.control.
        @param control_file_dir: Where to write the control file copies, or
                _USE_TEMP_DIR for a temporary directory.
        @param only_collect_crashinfo: If True, skip running the control
                file and only collect crash info from a previous run.
        """
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self._uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self._uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            if self.control is None:
                control = ''
            else:
                control = self._load_control_file(self.control)
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self._args
        namespace['job'] = self
        namespace['ssh_user'] = self._ssh_user
        namespace['ssh_port'] = self._ssh_port
        namespace['ssh_pass'] = self._ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        # collect crashinfo unless the control file finishes cleanly
        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            try:
                if install_before and machines:
                    self._execute_code(INSTALL_CONTROL_FILE, namespace)

                if only_collect_crashinfo:
                    # skip straight to the finally clause for collection
                    return

                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self._USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self._client:
                    # client-side control: run it through the client wrapper
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                logging.info("Processing control file")
                self._execute_code(server_control_file, namespace)
                logging.info("Finished processing control file")

                # no error occured, so we don't need to collect crashinfo
                collect_crashinfo = False
            except:
                try:
                    logging.exception(
                        'Exception escaped control file, job aborting:')
                except:
                    pass # don't let logging exceptions here interfere
                raise
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    logging.warn('Could not remove temp directory %s: %s',
                                 temp_control_file_dir, e)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self._uncollected_log_file:
                os.remove(self._uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000507
508
mbligh7eacbc22009-07-28 23:13:56 +0000509 def set_test_tag_prefix(self, tag=''):
510 """
511 Set tag to be prepended (separated by a '.') to test name of all
512 following run_test steps.
513 """
514 self._test_tag_prefix = tag
mblighc86113b2009-04-28 18:32:51 +0000515
516
jadmanski10646442008-08-13 14:05:21 +0000517 def run_test(self, url, *args, **dargs):
mbligh2b92b862008-11-22 13:25:32 +0000518 """
519 Summon a test object and run it.
jadmanski10646442008-08-13 14:05:21 +0000520
521 tag
522 tag to add to testname
523 url
524 url of the test to run
525 """
526
527 (group, testname) = self.pkgmgr.get_package_name(url, 'test')
jadmanski10646442008-08-13 14:05:21 +0000528
529 tag = dargs.pop('tag', None)
mblighc86113b2009-04-28 18:32:51 +0000530 if tag is None:
mbligh7eacbc22009-07-28 23:13:56 +0000531 tag = self._test_tag_prefix
532 elif self._test_tag_prefix:
533 tag = '%s.%s' % (self._test_tag_prefix, tag)
534
jadmanski10646442008-08-13 14:05:21 +0000535 if tag:
mbligh8ad24202009-01-07 16:49:36 +0000536 testname += '.' + str(tag)
jadmanskide292df2008-08-26 20:51:14 +0000537 subdir = testname
jadmanski10646442008-08-13 14:05:21 +0000538
539 outputdir = os.path.join(self.resultdir, subdir)
540 if os.path.exists(outputdir):
541 msg = ("%s already exists, test <%s> may have"
mbligh2b92b862008-11-22 13:25:32 +0000542 " already run with tag <%s>" % (outputdir, testname, tag))
jadmanski10646442008-08-13 14:05:21 +0000543 raise error.TestError(msg)
544 os.mkdir(outputdir)
545
546 def group_func():
547 try:
548 test.runtest(self, url, tag, args, dargs)
549 except error.TestBaseException, e:
550 self.record(e.exit_status, subdir, testname, str(e))
551 raise
552 except Exception, e:
553 info = str(e) + "\n" + traceback.format_exc()
554 self.record('FAIL', subdir, testname, info)
555 raise
556 else:
mbligh2b92b862008-11-22 13:25:32 +0000557 self.record('GOOD', subdir, testname, 'completed successfully')
jadmanskide292df2008-08-26 20:51:14 +0000558
559 result, exc_info = self._run_group(testname, subdir, group_func)
560 if exc_info and isinstance(exc_info[1], error.TestBaseException):
561 return False
562 elif exc_info:
563 raise exc_info[0], exc_info[1], exc_info[2]
564 else:
565 return True
jadmanski10646442008-08-13 14:05:21 +0000566
567
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Records START/END around the call and indents nested records.

        @returns A (result, exc_info) tuple: exc_info is None on success,
                or sys.exc_info() when a TestBaseException was raised.
                Any other exception is re-raised as a JobError.
        """
        result, exc_info = None, None
        old_record_prefix = self._record_prefix
        try:
            self.record('START', subdir, name)
            # indent everything recorded inside the group
            self._record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                # restore the indent before recording the END line
                self._record_prefix = old_record_prefix
        except error.TestBaseException, e:
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000593
594
595 def run_group(self, function, *args, **dargs):
596 """\
597 function:
598 subroutine to run
599 *args:
600 arguments for the function
601 """
602
603 name = function.__name__
604
605 # Allow the tag for the group to be specified.
606 tag = dargs.pop('tag', None)
607 if tag:
608 name = tag
609
jadmanskide292df2008-08-26 20:51:14 +0000610 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000611
612
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self._record_prefix
        try:
            self.record('START', None, 'reboot')
            # indent anything recorded while the reboot is in flight
            self._record_prefix += '\t'
            reboot_func()
        except Exception, e:
            # restore the indent before recording the failure END line
            self._record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            kernel = get_kernel_func()
            self._record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
640
641
jadmanskie432dd22009-01-30 15:04:51 +0000642 def run_control(self, path):
643 """Execute a control file found at path (relative to the autotest
644 path). Intended for executing a control file within a control file,
645 not for running the top-level job control file."""
646 path = os.path.join(self.autodir, path)
647 control_file = self._load_control_file(path)
mbligh0d0f67d2009-11-06 03:15:03 +0000648 self.run(control=control_file, control_file_dir=self._USE_TEMP_DIR)
jadmanskie432dd22009-01-30 15:04:51 +0000649
650
jadmanskic09fc152008-10-15 17:56:59 +0000651 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
mbligh4395bbd2009-03-25 19:34:17 +0000652 self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
jadmanskic09fc152008-10-15 17:56:59 +0000653 on_every_test)
654
655
656 def add_sysinfo_logfile(self, file, on_every_test=False):
657 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
658
659
660 def _add_sysinfo_loggable(self, loggable, on_every_test):
661 if on_every_test:
662 self.sysinfo.test_loggables.add(loggable)
663 else:
664 self.sysinfo.boot_loggables.add(loggable)
665
666
    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                                        "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and is
        governed by self._record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        old_record_prefix = self._record_prefix
        try:
            # warnings emitted while a group was open belong inside it, so
            # indent them one extra level when recording the group's END
            if status_code.startswith("END "):
                self._record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            self._record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)
714
715
716 def _read_warnings(self):
jadmanskif37df842009-02-11 00:03:26 +0000717 """Poll all the warning loggers and extract any new warnings that have
718 been logged. If the warnings belong to a category that is currently
719 disabled, this method will discard them and they will no longer be
720 retrievable.
721
722 Returns a list of (timestamp, message) tuples, where timestamp is an
723 integer epoch timestamp."""
jadmanski10646442008-08-13 14:05:21 +0000724 warnings = []
725 while True:
726 # pull in a line of output from every logger that has
727 # output ready to be read
mbligh2b92b862008-11-22 13:25:32 +0000728 loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
jadmanski10646442008-08-13 14:05:21 +0000729 closed_loggers = set()
730 for logger in loggers:
731 line = logger.readline()
732 # record any broken pipes (aka line == empty)
733 if len(line) == 0:
734 closed_loggers.add(logger)
735 continue
jadmanskif37df842009-02-11 00:03:26 +0000736 # parse out the warning
737 timestamp, msgtype, msg = line.split('\t', 2)
738 timestamp = int(timestamp)
739 # if the warning is valid, add it to the results
740 if self.warning_manager.is_valid(timestamp, msgtype):
741 warnings.append((timestamp, msg.strip()))
jadmanski10646442008-08-13 14:05:21 +0000742
743 # stop listening to loggers that are closed
744 self.warning_loggers -= closed_loggers
745
746 # stop if none of the loggers have any output left
747 if not loggers:
748 break
749
750 # sort into timestamp order
751 warnings.sort()
752 return warnings
753
754
jadmanski16a7ff72009-04-01 18:19:53 +0000755 def disable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000756 self.warning_manager.disable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000757 self.record("INFO", None, None,
758 "disabling %s warnings" % warning_type,
759 {"warnings.disable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000760
761
jadmanski16a7ff72009-04-01 18:19:53 +0000762 def enable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000763 self.warning_manager.enable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000764 self.record("INFO", None, None,
765 "enabling %s warnings" % warning_type,
766 {"warnings.enable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000767
768
jadmanski779bd292009-03-19 17:33:33 +0000769 def get_status_log_path(self, subdir=None):
770 """Return the path to the job status log.
771
772 @param subdir - Optional paramter indicating that you want the path
773 to a subdirectory status log.
774
775 @returns The path where the status log should be.
776 """
mbligh210bae62009-04-01 18:33:13 +0000777 if self.resultdir:
778 if subdir:
779 return os.path.join(self.resultdir, subdir, "status.log")
780 else:
781 return os.path.join(self.resultdir, "status.log")
jadmanski779bd292009-03-19 17:33:33 +0000782 else:
mbligh210bae62009-04-01 18:33:13 +0000783 return None
jadmanski779bd292009-03-19 17:33:33 +0000784
785
jadmanski6bb32d72009-03-19 20:25:24 +0000786 def _update_uncollected_logs_list(self, update_func):
787 """Updates the uncollected logs list in a multi-process safe manner.
788
789 @param update_func - a function that updates the list of uncollected
790 logs. Should take one parameter, the list to be updated.
791 """
mbligh0d0f67d2009-11-06 03:15:03 +0000792 if self._uncollected_log_file:
793 log_file = open(self._uncollected_log_file, "r+")
mbligha788dc42009-03-26 21:10:16 +0000794 fcntl.flock(log_file, fcntl.LOCK_EX)
jadmanski6bb32d72009-03-19 20:25:24 +0000795 try:
796 uncollected_logs = pickle.load(log_file)
797 update_func(uncollected_logs)
798 log_file.seek(0)
799 log_file.truncate()
800 pickle.dump(uncollected_logs, log_file)
jadmanski3bff9092009-04-22 18:09:47 +0000801 log_file.flush()
jadmanski6bb32d72009-03-19 20:25:24 +0000802 finally:
803 fcntl.flock(log_file, fcntl.LOCK_UN)
804 log_file.close()
805
806
807 def add_client_log(self, hostname, remote_path, local_path):
808 """Adds a new set of client logs to the list of uncollected logs,
809 to allow for future log recovery.
810
811 @param host - the hostname of the machine holding the logs
812 @param remote_path - the directory on the remote machine holding logs
813 @param local_path - the local directory to copy the logs into
814 """
815 def update_func(logs_list):
816 logs_list.append((hostname, remote_path, local_path))
817 self._update_uncollected_logs_list(update_func)
818
819
820 def remove_client_log(self, hostname, remote_path, local_path):
821 """Removes a set of client logs from the list of uncollected logs,
822 to allow for future log recovery.
823
824 @param host - the hostname of the machine holding the logs
825 @param remote_path - the directory on the remote machine holding logs
826 @param local_path - the local directory to copy the logs into
827 """
828 def update_func(logs_list):
829 logs_list.remove((hostname, remote_path, local_path))
830 self._update_uncollected_logs_list(update_func)
831
832
mbligh0d0f67d2009-11-06 03:15:03 +0000833 def get_client_logs(self):
834 """Retrieves the list of uncollected logs, if it exists.
835
836 @returns A list of (host, remote_path, local_path) tuples. Returns
837 an empty list if no uncollected logs file exists.
838 """
839 log_exists = (self._uncollected_log_file and
840 os.path.exists(self._uncollected_log_file))
841 if log_exists:
842 return pickle.load(open(self._uncollected_log_file))
843 else:
844 return []
845
846
jadmanski10646442008-08-13 14:05:21 +0000847 def _render_record(self, status_code, subdir, operation, status='',
848 epoch_time=None, record_prefix=None,
849 optional_fields=None):
850 """
851 Internal Function to generate a record to be written into a
852 status log. For use by server_job.* classes only.
853 """
854 if subdir:
855 if re.match(r'[\n\t]', subdir):
mbligh2b92b862008-11-22 13:25:32 +0000856 raise ValueError('Invalid character in subdir string')
jadmanski10646442008-08-13 14:05:21 +0000857 substr = subdir
858 else:
859 substr = '----'
860
mbligh1b3b3762008-09-25 02:46:34 +0000861 if not log.is_valid_status(status_code):
mbligh2b92b862008-11-22 13:25:32 +0000862 raise ValueError('Invalid status code supplied: %s' % status_code)
jadmanski10646442008-08-13 14:05:21 +0000863 if not operation:
864 operation = '----'
865 if re.match(r'[\n\t]', operation):
mbligh2b92b862008-11-22 13:25:32 +0000866 raise ValueError('Invalid character in operation string')
jadmanski10646442008-08-13 14:05:21 +0000867 operation = operation.rstrip()
868 status = status.rstrip()
869 status = re.sub(r"\t", " ", status)
870 # Ensure any continuation lines are marked so we can
871 # detect them in the status file to ensure it is parsable.
mbligh0d0f67d2009-11-06 03:15:03 +0000872 status = re.sub(r"\n", "\n" + self._record_prefix + " ", status)
jadmanski10646442008-08-13 14:05:21 +0000873
874 if not optional_fields:
875 optional_fields = {}
876
877 # Generate timestamps for inclusion in the logs
878 if epoch_time is None:
879 epoch_time = int(time.time())
880 local_time = time.localtime(epoch_time)
881 optional_fields["timestamp"] = str(epoch_time)
882 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
883 local_time)
884
885 fields = [status_code, substr, operation]
886 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
887 fields.append(status)
888
889 if record_prefix is None:
mbligh0d0f67d2009-11-06 03:15:03 +0000890 record_prefix = self._record_prefix
jadmanski10646442008-08-13 14:05:21 +0000891
892 msg = '\t'.join(str(x) for x in fields)
jadmanski10646442008-08-13 14:05:21 +0000893 return record_prefix + msg + '\n'
894
895
896 def _record_prerendered(self, msg):
897 """
898 Record a pre-rendered msg into the status logs. The only
899 change this makes to the message is to add on the local
900 indentation. Should not be called outside of server_job.*
901 classes. Unlike _record, this does not write the message
902 to standard output.
903 """
904 lines = []
jadmanski779bd292009-03-19 17:33:33 +0000905 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000906 status_log = open(status_file, 'a')
907 for line in msg.splitlines():
mbligh0d0f67d2009-11-06 03:15:03 +0000908 line = self._record_prefix + line + '\n'
jadmanski10646442008-08-13 14:05:21 +0000909 lines.append(line)
910 status_log.write(line)
911 status_log.close()
912 self.__parse_status(lines)
913
914
mbligh084bc172008-10-18 14:02:45 +0000915 def _fill_server_control_namespace(self, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000916 """
917 Prepare a namespace to be used when executing server control files.
mbligh084bc172008-10-18 14:02:45 +0000918
919 This sets up the control file API by importing modules and making them
920 available under the appropriate names within namespace.
921
922 For use by _execute_code().
923
924 Args:
925 namespace: The namespace dictionary to fill in.
926 protect: Boolean. If True (the default) any operation that would
927 clobber an existing entry in namespace will cause an error.
928 Raises:
929 error.AutoservError: When a name would be clobbered by import.
930 """
931 def _import_names(module_name, names=()):
mbligh2b92b862008-11-22 13:25:32 +0000932 """
933 Import a module and assign named attributes into namespace.
mbligh084bc172008-10-18 14:02:45 +0000934
935 Args:
936 module_name: The string module name.
937 names: A limiting list of names to import from module_name. If
938 empty (the default), all names are imported from the module
939 similar to a "from foo.bar import *" statement.
940 Raises:
941 error.AutoservError: When a name being imported would clobber
942 a name already in namespace.
943 """
944 module = __import__(module_name, {}, {}, names)
945
946 # No names supplied? Import * from the lowest level module.
947 # (Ugh, why do I have to implement this part myself?)
948 if not names:
949 for submodule_name in module_name.split('.')[1:]:
950 module = getattr(module, submodule_name)
951 if hasattr(module, '__all__'):
952 names = getattr(module, '__all__')
953 else:
954 names = dir(module)
955
956 # Install each name into namespace, checking to make sure it
957 # doesn't override anything that already exists.
958 for name in names:
959 # Check for conflicts to help prevent future problems.
960 if name in namespace and protect:
961 if namespace[name] is not getattr(module, name):
962 raise error.AutoservError('importing name '
963 '%s from %s %r would override %r' %
964 (name, module_name, getattr(module, name),
965 namespace[name]))
966 else:
967 # Encourage cleanliness and the use of __all__ for a
968 # more concrete API with less surprises on '*' imports.
969 warnings.warn('%s (%r) being imported from %s for use '
970 'in server control files is not the '
971 'first occurrance of that import.' %
972 (name, namespace[name], module_name))
973
974 namespace[name] = getattr(module, name)
975
976
977 # This is the equivalent of prepending a bunch of import statements to
978 # the front of the control script.
mbligha2b07dd2009-06-22 18:26:13 +0000979 namespace.update(os=os, sys=sys, logging=logging)
mbligh084bc172008-10-18 14:02:45 +0000980 _import_names('autotest_lib.server',
981 ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
982 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
983 _import_names('autotest_lib.server.subcommand',
984 ('parallel', 'parallel_simple', 'subcommand'))
985 _import_names('autotest_lib.server.utils',
986 ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
987 _import_names('autotest_lib.client.common_lib.error')
988 _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))
989
990 # Inject ourself as the job object into other classes within the API.
991 # (Yuck, this injection is a gross thing be part of a public API. -gps)
992 #
993 # XXX Base & SiteAutotest do not appear to use .job. Who does?
994 namespace['autotest'].Autotest.job = self
995 # server.hosts.base_classes.Host uses .job.
996 namespace['hosts'].Host.job = self
997
998
    def _execute_code(self, code_file, namespace, protect=True):
        """
        Execute code using a copy of namespace as a server control script.

        Unless protect_namespace is explicitly set to False, the dict will not
        be modified.

        Args:
          code_file: The filename of the control file to execute.
          namespace: A dict containing names to make available during
              execution.
          protect: Boolean.  If True (the default) a copy of the namespace
              dict is used during execution to prevent the code from
              modifying its contents outside of this function.  If False the
              raw dict is passed in and modifications will be allowed.
        """
        if protect:
            namespace = namespace.copy()
        self._fill_server_control_namespace(namespace, protect=protect)
        # TODO: Simplify and get rid of the special cases for only 1 machine.
        if len(self.machines) > 1:
            machines_text = '\n'.join(self.machines) + '\n'
            # Only rewrite the file if it does not match our machine list.
            try:
                machines_f = open(MACHINES_FILENAME, 'r')
                existing_machines_text = machines_f.read()
                machines_f.close()
            except EnvironmentError:
                # file missing or unreadable -- force a rewrite below
                existing_machines_text = None
            if machines_text != existing_machines_text:
                utils.open_write_close(MACHINES_FILENAME, machines_text)
        # Python 2 execfile: run the control file with 'namespace' as both
        # its globals and locals
        execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +00001030
1031
1032 def _record(self, status_code, subdir, operation, status='',
1033 epoch_time=None, optional_fields=None):
1034 """
1035 Actual function for recording a single line into the status
1036 logs. Should never be called directly, only by job.record as
1037 this would bypass the console monitor logging.
1038 """
1039
mbligh2b92b862008-11-22 13:25:32 +00001040 msg = self._render_record(status_code, subdir, operation, status,
1041 epoch_time, optional_fields=optional_fields)
jadmanski10646442008-08-13 14:05:21 +00001042
jadmanski779bd292009-03-19 17:33:33 +00001043 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +00001044 sys.stdout.write(msg)
mbligh210bae62009-04-01 18:33:13 +00001045 if status_file:
1046 open(status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001047 if subdir:
jadmanski779bd292009-03-19 17:33:33 +00001048 sub_status_file = self.get_status_log_path(subdir)
1049 open(sub_status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001050 self.__parse_status(msg.splitlines())
1051
1052
1053 def __parse_status(self, new_lines):
mbligh0d0f67d2009-11-06 03:15:03 +00001054 if not self._using_parser:
jadmanski10646442008-08-13 14:05:21 +00001055 return
1056 new_tests = self.parser.process_lines(new_lines)
1057 for test in new_tests:
1058 self.__insert_test(test)
1059
1060
    def __insert_test(self, test):
        """
        An internal method to insert a new test result into the
        database. This method will not raise an exception, even if an
        error occurs during the insert, to avoid failing a test
        simply because of unexpected database issues."""
        # keep job-level pass/fail counters up to date as results stream in
        self.num_tests_run += 1
        if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
            self.num_tests_failed += 1
        try:
            self.results_db.insert_test(self.job_model, test)
        except Exception:
            # deliberately swallow the error (see docstring); report it on
            # stderr so the failure is still visible in the logs
            msg = ("WARNING: An unexpected error occured while "
                   "inserting test results into the database. "
                   "Ignoring error.\n" + traceback.format_exc())
            print >> sys.stderr, msg
1077
mblighcaa62c22008-04-07 21:51:17 +00001078
mbligha7007722009-01-13 00:37:11 +00001079site_server_job = utils.import_site_class(
1080 __file__, "autotest_lib.server.site_server_job", "site_server_job",
1081 base_server_job)
jadmanski0afbb632008-06-06 21:10:57 +00001082
mbligh0a8c3322009-04-28 18:32:19 +00001083class server_job(site_server_job):
jadmanski0afbb632008-06-06 21:10:57 +00001084 pass
jadmanskif37df842009-02-11 00:03:26 +00001085
1086
class warning_manager(object):
    """Class for controlling warning logs. Manages the enabling and disabling
    of warnings."""
    def __init__(self):
        # maps a warning type to a list of (start, end) epoch-time intervals
        # during which that type is disabled; end of None means "still
        # disabled as of now"
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Indicates if a warning (based on the time it occured and its type)
        is a valid warning. A warning is considered "invalid" if this type of
        warning was marked as "disabled" at the time the warning occured."""
        intervals = self.disabled_warnings.get(warning_type, [])
        # valid iff the timestamp falls inside none of the disabled intervals
        return all(timestamp < start or (end is not None and timestamp >= end)
                   for start, end in intervals)


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        already_disabled = bool(intervals) and intervals[-1][1] is None
        if not already_disabled:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals and intervals[-1][1] is None:
            start = intervals[-1][0]
            intervals[-1] = (start, int(current_time_func()))