blob: eb0ce28ba9b171457c52274094955a4956e131f2 [file] [log] [blame]
mbligh57e78662008-06-17 19:53:49 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
jadmanski6bb32d72009-03-19 20:25:24 +00009import getpass, os, sys, re, stat, tempfile, time, select, subprocess
showard75cdfee2009-06-10 17:40:41 +000010import traceback, shutil, warnings, fcntl, pickle, logging
mbligh78c0daa2009-06-15 21:54:50 +000011import itertools
showard75cdfee2009-06-10 17:40:41 +000012from autotest_lib.client.bin import sysinfo
mbligh0d0f67d2009-11-06 03:15:03 +000013from autotest_lib.client.common_lib import base_job
mbligh09108442008-10-15 16:27:38 +000014from autotest_lib.client.common_lib import error, log, utils, packages
showard75cdfee2009-06-10 17:40:41 +000015from autotest_lib.client.common_lib import logging_manager
jadmanski043e1132008-11-19 17:10:32 +000016from autotest_lib.server import test, subcommand, profilers
jadmanski10646442008-08-13 14:05:21 +000017from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
jadmanski10646442008-08-13 14:05:21 +000018
19
mbligh084bc172008-10-18 14:02:45 +000020def _control_segment_path(name):
21 """Get the pathname of the named control segment file."""
jadmanski10646442008-08-13 14:05:21 +000022 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh084bc172008-10-18 14:02:45 +000023 return os.path.join(server_dir, "control_segments", name)
jadmanski10646442008-08-13 14:05:21 +000024
25
# filenames used for control files written into a job's control file dir
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'

# control segments run around the main control file (install, log
# collection, cleanup)
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')

# control segments for host verify/repair
VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
jadmanski10646442008-08-13 14:05:21 +000038
39
mbligh062ed152009-01-13 00:57:14 +000040# by default provide a stub that generates no site data
41def _get_site_job_data_dummy(job):
42 return {}
43
44
# load up site-specific code for generating site-specific job data;
# falls back to the dummy implementation above when no site module exists
get_site_job_data = utils.import_site_function(__file__,
    "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)
jadmanski10646442008-08-13 14:05:21 +000049
50
class base_server_job(base_job.base_job):
    """The server-side concrete implementation of base_job.

    Optional properties provided by this implementation:
        serverdir
        conmuxdir

        num_tests_run
        num_tests_failed

        warning_manager
        warning_loggers
    """

    # version of the status log format this job emits (consumed by the
    # TKO status_lib parser; see init_parser)
    _STATUS_VERSION = 1
jadmanski10646442008-08-13 14:05:21 +000066
67 def __init__(self, control, args, resultdir, label, user, machines,
68 client=False, parse_job='',
mbligh374f3412009-05-13 21:29:45 +000069 ssh_user='root', ssh_port=22, ssh_pass='',
mblighe7d9c602009-07-02 19:02:33 +000070 group_name='', tag=''):
jadmanski10646442008-08-13 14:05:21 +000071 """
mbligh374f3412009-05-13 21:29:45 +000072 Create a server side job object.
mblighb5dac432008-11-27 00:38:44 +000073
mblighe7d9c602009-07-02 19:02:33 +000074 @param control: The pathname of the control file.
75 @param args: Passed to the control file.
76 @param resultdir: Where to throw the results.
77 @param label: Description of the job.
78 @param user: Username for the job (email address).
79 @param client: True if this is a client-side control file.
80 @param parse_job: string, if supplied it is the job execution tag that
81 the results will be passed through to the TKO parser with.
82 @param ssh_user: The SSH username. [root]
83 @param ssh_port: The SSH port number. [22]
84 @param ssh_pass: The SSH passphrase, if needed.
85 @param group_name: If supplied, this will be written out as
mbligh374f3412009-05-13 21:29:45 +000086 host_group_name in the keyvals file for the parser.
mblighe7d9c602009-07-02 19:02:33 +000087 @param tag: The job execution tag from the scheduler. [optional]
jadmanski10646442008-08-13 14:05:21 +000088 """
mbligh0d0f67d2009-11-06 03:15:03 +000089 super(base_server_job, self).__init__(resultdir=resultdir)
mbligha788dc42009-03-26 21:10:16 +000090
mbligh0d0f67d2009-11-06 03:15:03 +000091 path = os.path.dirname(__file__)
92 self.control = control
93 self._uncollected_log_file = os.path.join(self.resultdir,
94 'uncollected_logs')
95 debugdir = os.path.join(self.resultdir, 'debug')
96 if not os.path.exists(debugdir):
97 os.mkdir(debugdir)
98
99 if user:
100 self.user = user
101 else:
102 self.user = getpass.getuser()
103
104 self._args = args
jadmanski10646442008-08-13 14:05:21 +0000105 self.machines = machines
mbligh0d0f67d2009-11-06 03:15:03 +0000106 self._client = client
107 self._record_prefix = ''
jadmanski10646442008-08-13 14:05:21 +0000108 self.warning_loggers = set()
jadmanskif37df842009-02-11 00:03:26 +0000109 self.warning_manager = warning_manager()
mbligh0d0f67d2009-11-06 03:15:03 +0000110 self._ssh_user = ssh_user
111 self._ssh_port = ssh_port
112 self._ssh_pass = ssh_pass
mblighe7d9c602009-07-02 19:02:33 +0000113 self.tag = tag
showarda6082ef2009-10-12 20:25:44 +0000114 self.default_profile_only = False
jadmanski23afbec2008-09-17 18:12:07 +0000115 self.run_test_cleanup = True
mbligh09108442008-10-15 16:27:38 +0000116 self.last_boot_tag = None
jadmanski53aaf382008-11-17 16:22:31 +0000117 self.hosts = set()
mbligh0d0f67d2009-11-06 03:15:03 +0000118 self.drop_caches = False
mblighb5dac432008-11-27 00:38:44 +0000119 self.drop_caches_between_iterations = False
jadmanski10646442008-08-13 14:05:21 +0000120
showard75cdfee2009-06-10 17:40:41 +0000121 self.logging = logging_manager.get_logging_manager(
122 manage_stdout_and_stderr=True, redirect_fds=True)
123 subcommand.logging_manager_object = self.logging
jadmanski10646442008-08-13 14:05:21 +0000124
mbligh0d0f67d2009-11-06 03:15:03 +0000125 self.sysinfo = sysinfo.sysinfo(self.resultdir)
jadmanski043e1132008-11-19 17:10:32 +0000126 self.profilers = profilers.profilers(self)
jadmanskic09fc152008-10-15 17:56:59 +0000127
jadmanski10646442008-08-13 14:05:21 +0000128 job_data = {'label' : label, 'user' : user,
129 'hostname' : ','.join(machines),
mbligh0d0f67d2009-11-06 03:15:03 +0000130 'status_version' : str(self._STATUS_VERSION),
showard170873e2009-01-07 00:22:26 +0000131 'job_started' : str(int(time.time()))}
mbligh374f3412009-05-13 21:29:45 +0000132 if group_name:
133 job_data['host_group_name'] = group_name
jadmanski10646442008-08-13 14:05:21 +0000134
mbligh0d0f67d2009-11-06 03:15:03 +0000135 # only write these keyvals out on the first job in a resultdir
136 if 'job_started' not in utils.read_keyval(self.resultdir):
137 job_data.update(get_site_job_data(self))
138 utils.write_keyval(self.resultdir, job_data)
139
140 self._parse_job = parse_job
141 if self._parse_job and len(machines) == 1:
142 self._using_parser = True
143 self.init_parser(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000144 else:
mbligh0d0f67d2009-11-06 03:15:03 +0000145 self._using_parser = False
146 self.pkgmgr = packages.PackageManager(
147 self.autodir, run_function_dargs={'timeout':600})
showard21baa452008-10-21 00:08:39 +0000148 self.num_tests_run = 0
149 self.num_tests_failed = 0
150
jadmanski550fdc22008-11-20 16:32:08 +0000151 self._register_subcommand_hooks()
mbligh7eacbc22009-07-28 23:13:56 +0000152 self._test_tag_prefix = None
jadmanski550fdc22008-11-20 16:32:08 +0000153
mbligh0d0f67d2009-11-06 03:15:03 +0000154 # these components aren't usable on the server
155 self.bootloader = None
156 self.harness = None
157
158
159 @classmethod
160 def _find_base_directories(cls):
161 """
162 Determine locations of autodir, clientdir and serverdir. Assumes
163 that this file is located within serverdir and uses __file__ along
164 with relative paths to resolve the location.
165 """
166 serverdir = os.path.abspath(os.path.dirname(__file__))
167 autodir = os.path.normpath(os.path.join(serverdir, '..'))
168 clientdir = os.path.join(autodir, 'client')
169 return autodir, clientdir, serverdir
170
171
172 def _find_resultdir(self, resultdir):
173 """
174 Determine the location of resultdir. For server jobs we expect one to
175 always be explicitly passed in to __init__, so just return that.
176 """
177 if resultdir:
178 return os.path.normpath(resultdir)
179 else:
180 return None
181
jadmanski550fdc22008-11-20 16:32:08 +0000182
jadmanskie432dd22009-01-30 15:04:51 +0000183 @staticmethod
184 def _load_control_file(path):
185 f = open(path)
186 try:
187 control_file = f.read()
188 finally:
189 f.close()
190 return re.sub('\r', '', control_file)
191
192
jadmanski550fdc22008-11-20 16:32:08 +0000193 def _register_subcommand_hooks(self):
mbligh2b92b862008-11-22 13:25:32 +0000194 """
195 Register some hooks into the subcommand modules that allow us
196 to properly clean up self.hosts created in forked subprocesses.
197 """
jadmanski550fdc22008-11-20 16:32:08 +0000198 def on_fork(cmd):
199 self._existing_hosts_on_fork = set(self.hosts)
200 def on_join(cmd):
201 new_hosts = self.hosts - self._existing_hosts_on_fork
202 for host in new_hosts:
203 host.close()
204 subcommand.subcommand.register_fork_hook(on_fork)
205 subcommand.subcommand.register_join_hook(on_join)
206
jadmanski10646442008-08-13 14:05:21 +0000207
    def init_parser(self, resultdir):
        """
        Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.

        @param resultdir: Directory whose status logs should be parsed.
        """
        # redirect parser debugging to .parse.log, opened unbuffered so
        # the log stays current even if the job dies
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self._STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self._parse_job)
        if job_idx is None:
            self.results_db.insert_job(self._parse_job, self.job_model)
        else:
            machine_idx = self.results_db.lookup_machine(self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx
232
233
234 def cleanup_parser(self):
mbligh2b92b862008-11-22 13:25:32 +0000235 """
236 This should be called after the server job is finished
jadmanski10646442008-08-13 14:05:21 +0000237 to carry out any remaining cleanup (e.g. flushing any
mbligh2b92b862008-11-22 13:25:32 +0000238 remaining test results to the results db)
239 """
mbligh0d0f67d2009-11-06 03:15:03 +0000240 if not self._using_parser:
jadmanski10646442008-08-13 14:05:21 +0000241 return
242 final_tests = self.parser.end()
243 for test in final_tests:
244 self.__insert_test(test)
mbligh0d0f67d2009-11-06 03:15:03 +0000245 self._using_parser = False
jadmanski10646442008-08-13 14:05:21 +0000246
247
    def verify(self):
        """
        Run the verify control segment against self.machines.

        @raises AutoservError: If the job has no machines.
        @raises Exception: Whatever the verify control segment raised;
                an ABORT entry is recorded in the status log first.
        """
        if not self.machines:
            raise error.AutoservError('No machines specified to verify')
        if self.resultdir:
            os.chdir(self.resultdir)
        try:
            namespace = {'machines' : self.machines, 'job' : self,
                         'ssh_user' : self._ssh_user,
                         'ssh_port' : self._ssh_port,
                         'ssh_pass' : self._ssh_pass}
            self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
        except Exception, e:
            msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise
263
264
265 def repair(self, host_protection):
266 if not self.machines:
267 raise error.AutoservError('No machines specified to repair')
mbligh0fce4112008-11-27 00:37:17 +0000268 if self.resultdir:
269 os.chdir(self.resultdir)
jadmanski10646442008-08-13 14:05:21 +0000270 namespace = {'machines': self.machines, 'job': self,
mbligh0d0f67d2009-11-06 03:15:03 +0000271 'ssh_user': self._ssh_user, 'ssh_port': self._ssh_port,
272 'ssh_pass': self._ssh_pass,
jadmanski10646442008-08-13 14:05:21 +0000273 'protection_level': host_protection}
mbligh25c0b8c2009-01-24 01:44:17 +0000274
mbligh0931b0a2009-04-08 17:44:48 +0000275 self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
jadmanski10646442008-08-13 14:05:21 +0000276
277
278 def precheck(self):
279 """
280 perform any additional checks in derived classes.
281 """
282 pass
283
284
285 def enable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000286 """
287 Start or restart external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000288 """
289 pass
290
291
292 def disable_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000293 """
294 Pause or stop external logging mechanism.
jadmanski10646442008-08-13 14:05:21 +0000295 """
296 pass
297
298
showard07e27af2009-10-12 20:32:01 +0000299 def set_default_profile_only(self, val):
300 """ Set the default_profile_only mode. """
301 self.default_profile_only = val
302
303
jadmanski23afbec2008-09-17 18:12:07 +0000304 def enable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000305 """
306 By default tests run test.cleanup
307 """
jadmanski23afbec2008-09-17 18:12:07 +0000308 self.run_test_cleanup = True
309
310
311 def disable_test_cleanup(self):
mbligh2b92b862008-11-22 13:25:32 +0000312 """
313 By default tests do not run test.cleanup
314 """
jadmanski23afbec2008-09-17 18:12:07 +0000315 self.run_test_cleanup = False
316
317
jadmanski10646442008-08-13 14:05:21 +0000318 def use_external_logging(self):
mbligh2b92b862008-11-22 13:25:32 +0000319 """
320 Return True if external logging should be used.
jadmanski10646442008-08-13 14:05:21 +0000321 """
322 return False
323
324
    def _make_parallel_wrapper(self, function, machines, log):
        """Wrap function as appropriate for calling by parallel_simple.

        @param function: Callable taking a single machine hostname.
        @param machines: List of machine hostnames to run on.
        @param log: Whether per-machine keyvals/parsing are wanted.

        @returns The (possibly wrapped) callable to hand to
                subcommand.parallel_simple.
        """
        # forking is assumed unless we run exactly this job's single machine
        is_forking = not (len(machines) == 1 and self.machines == machines)
        if self._parse_job and is_forking and log:
            def wrapper(machine):
                # intended to run in the forked child: re-point this job
                # object at a single machine and restart continuous
                # parsing in the per-machine results dir
                self._parse_job += "/" + machine
                self._using_parser = True
                self.machines = [machine]
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                utils.write_keyval(self.resultdir, {"hostname": machine})
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        elif len(machines) > 1 and log:
            def wrapper(machine):
                # per-machine results dir and keyvals, but no parsing
                self.push_execution_context(machine)
                os.chdir(self.resultdir)
                machine_data = {'hostname' : machine,
                                'status_version' : str(self._STATUS_VERSION)}
                utils.write_keyval(self.resultdir, machine_data)
                result = function(machine)
                return result
        else:
            # single machine, no forking: call the function directly
            wrapper = function
        return wrapper
352
353
354 def parallel_simple(self, function, machines, log=True, timeout=None,
355 return_results=False):
356 """
357 Run 'function' using parallel_simple, with an extra wrapper to handle
358 the necessary setup for continuous parsing, if possible. If continuous
359 parsing is already properly initialized then this should just work.
360
361 @param function: A callable to run in parallel given each machine.
362 @param machines: A list of machine names to be passed one per subcommand
363 invocation of function.
364 @param log: If True, output will be written to output in a subdirectory
365 named after each machine.
366 @param timeout: Seconds after which the function call should timeout.
367 @param return_results: If True instead of an AutoServError being raised
368 on any error a list of the results|exceptions from the function
369 called on each arg is returned. [default: False]
370
371 @raises error.AutotestError: If any of the functions failed.
372 """
373 wrapper = self._make_parallel_wrapper(function, machines, log)
374 return subcommand.parallel_simple(wrapper, machines,
375 log=log, timeout=timeout,
376 return_results=return_results)
377
378
379 def parallel_on_machines(self, function, machines, timeout=None):
380 """
showardcd5fac42009-07-06 20:19:43 +0000381 @param function: Called in parallel with one machine as its argument.
mbligh415dc212009-06-15 21:53:34 +0000382 @param machines: A list of machines to call function(machine) on.
383 @param timeout: Seconds after which the function call should timeout.
384
385 @returns A list of machines on which function(machine) returned
386 without raising an exception.
387 """
showardcd5fac42009-07-06 20:19:43 +0000388 results = self.parallel_simple(function, machines, timeout=timeout,
mbligh415dc212009-06-15 21:53:34 +0000389 return_results=True)
390 success_machines = []
391 for result, machine in itertools.izip(results, machines):
392 if not isinstance(result, Exception):
393 success_machines.append(machine)
394 return success_machines
jadmanski10646442008-08-13 14:05:21 +0000395
396
    # sentinel value for control_file_dir requesting a throwaway temp dir
    _USE_TEMP_DIR = object()
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        """
        Execute this job's control file, with optional install segments
        before/after, crash log collection and a cleanup segment.

        @param cleanup: If True and machines are set, run the cleanup
                control segment after the job.
        @param install_before: If True and machines are set, run the
                install control segment before the job.
        @param install_after: If True and machines are set, run the
                install control segment after the job.
        @param collect_crashdumps: If True, collect crashdumps when the
                control file finishes; crashinfo is collected instead if
                the control file raised.
        @param namespace: Extra names exposed to the control file.  The
                mutable default is safe here: it is copied before use.
        @param control: Control file contents; if None, the file named
                by self.control is loaded.
        @param control_file_dir: Where to write control file copies;
                self.resultdir if None, or a temp dir if _USE_TEMP_DIR.
        @param only_collect_crashinfo: If True, skip running the control
                file and only collect crash info from an earlier run.
        """
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        if self.resultdir and not os.path.exists(self._uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self._uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            control = self._load_control_file(self.control)
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self._args
        namespace['job'] = self
        namespace['ssh_user'] = self._ssh_user
        namespace['ssh_port'] = self._ssh_port
        namespace['ssh_pass'] = self._ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            try:
                if install_before and machines:
                    self._execute_code(INSTALL_CONTROL_FILE, namespace)

                if only_collect_crashinfo:
                    return

                # determine the dir to write the control files to
                cfd_specified = (control_file_dir
                                 and control_file_dir is not self._USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   SERVER_CONTROL_FILENAME)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self._client:
                    # client-side job: the supplied control file runs on
                    # the client, driven by the generic client wrapper
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                logging.info("Processing control file")
                self._execute_code(server_control_file, namespace)
                logging.info("Finished processing control file")

                # no error occured, so we don't need to collect crashinfo
                collect_crashinfo = False
            except:
                try:
                    logging.exception(
                            'Exception escaped control file, job aborting:')
                except:
                    pass # don't let logging exceptions here interfere
                raise
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    logging.warn('Could not remove temp directory %s: %s',
                                 temp_control_file_dir, e)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self._uncollected_log_file:
                os.remove(self._uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)
jadmanski10646442008-08-13 14:05:21 +0000504
505
mbligh7eacbc22009-07-28 23:13:56 +0000506 def set_test_tag_prefix(self, tag=''):
507 """
508 Set tag to be prepended (separated by a '.') to test name of all
509 following run_test steps.
510 """
511 self._test_tag_prefix = tag
mblighc86113b2009-04-28 18:32:51 +0000512
513
    def run_test(self, url, *args, **dargs):
        """
        Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run

        @returns True if the test recorded GOOD, False if it raised a
                TestBaseException (which has already been recorded).

        @raises TestError: If the test's output directory already exists.
        @raises Exception: Non-test exceptions from the test code are
                re-raised after being recorded as FAIL.
        """

        (group, testname) = self.pkgmgr.get_package_name(url, 'test')

        # combine the explicit tag with any configured tag prefix
        tag = dargs.pop('tag', None)
        if tag is None:
            tag = self._test_tag_prefix
        elif self._test_tag_prefix:
            tag = '%s.%s' % (self._test_tag_prefix, tag)

        if tag:
            testname += '.' + str(tag)
        subdir = testname

        outputdir = os.path.join(self.resultdir, subdir)
        if os.path.exists(outputdir):
            msg = ("%s already exists, test <%s> may have"
                   " already run with tag <%s>" % (outputdir, testname, tag))
            raise error.TestError(msg)
        os.mkdir(outputdir)

        def group_func():
            try:
                test.runtest(self, url, tag, args, dargs)
            except error.TestBaseException, e:
                # test-level failures are recorded with their own status
                self.record(e.exit_status, subdir, testname, str(e))
                raise
            except Exception, e:
                # anything else is recorded as a plain FAIL
                info = str(e) + "\n" + traceback.format_exc()
                self.record('FAIL', subdir, testname, info)
                raise
            else:
                self.record('GOOD', subdir, testname, 'completed successfully')

        result, exc_info = self._run_group(testname, subdir, group_func)
        if exc_info and isinstance(exc_info[1], error.TestBaseException):
            return False
        elif exc_info:
            # re-raise non-test exceptions with their original traceback
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
jadmanski10646442008-08-13 14:05:21 +0000563
564
    def _run_group(self, name, subdir, function, *args, **dargs):
        """\
        Underlying method for running something inside of a group.

        Writes START/END records around the call and indents any nested
        records while the group is running.

        @param name: Name of the group in the status log.
        @param subdir: Results subdirectory for the records, or None.
        @param function: Callable to invoke inside the group.

        @returns (result, exc_info): result is function's return value;
                exc_info is sys.exc_info() if it raised a
                TestBaseException, otherwise None.

        @raises JobError: If function raised any non-test exception.
        """
        result, exc_info = None, None
        old_record_prefix = self._record_prefix
        try:
            self.record('START', subdir, name)
            self._record_prefix += '\t'
            try:
                result = function(*args, **dargs)
            finally:
                # restore indentation before the END record is written
                self._record_prefix = old_record_prefix
        except error.TestBaseException, e:
            # test failures are not fatal to the job; hand back exc_info
            self.record("END %s" % e.exit_status, subdir, name)
            exc_info = sys.exc_info()
        except Exception, e:
            # anything else aborts the group and the job
            err_msg = str(e) + '\n'
            err_msg += traceback.format_exc()
            self.record('END ABORT', subdir, name, err_msg)
            raise error.JobError(name + ' failed\n' + traceback.format_exc())
        else:
            self.record('END GOOD', subdir, name)

        return result, exc_info
jadmanski10646442008-08-13 14:05:21 +0000590
591
592 def run_group(self, function, *args, **dargs):
593 """\
594 function:
595 subroutine to run
596 *args:
597 arguments for the function
598 """
599
600 name = function.__name__
601
602 # Allow the tag for the group to be specified.
603 tag = dargs.pop('tag', None)
604 if tag:
605 name = tag
606
jadmanskide292df2008-08-26 20:51:14 +0000607 return self._run_group(name, None, function, *args, **dargs)[0]
jadmanski10646442008-08-13 14:05:21 +0000608
609
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self._record_prefix
        try:
            self.record('START', None, 'reboot')
            self._record_prefix += '\t'
            reboot_func()
        except Exception, e:
            # restore indentation, record the failure and propagate it
            self._record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
            raise
        else:
            # capture the post-reboot kernel version in the END record
            kernel = get_kernel_func()
            self._record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
637
638
jadmanskie432dd22009-01-30 15:04:51 +0000639 def run_control(self, path):
640 """Execute a control file found at path (relative to the autotest
641 path). Intended for executing a control file within a control file,
642 not for running the top-level job control file."""
643 path = os.path.join(self.autodir, path)
644 control_file = self._load_control_file(path)
mbligh0d0f67d2009-11-06 03:15:03 +0000645 self.run(control=control_file, control_file_dir=self._USE_TEMP_DIR)
jadmanskie432dd22009-01-30 15:04:51 +0000646
647
jadmanskic09fc152008-10-15 17:56:59 +0000648 def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
mbligh4395bbd2009-03-25 19:34:17 +0000649 self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
jadmanskic09fc152008-10-15 17:56:59 +0000650 on_every_test)
651
652
653 def add_sysinfo_logfile(self, file, on_every_test=False):
654 self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
655
656
657 def _add_sysinfo_loggable(self, loggable, on_every_test):
658 if on_every_test:
659 self.sysinfo.test_loggables.add(loggable)
660 else:
661 self.sysinfo.boot_loggables.add(loggable)
662
663
    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.log.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                                        "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and is
        governed by self._record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        # (note: this local intentionally shadows the 'warnings' module
        # inside this method)
        warnings = self._read_warnings()
        old_record_prefix = self._record_prefix
        try:
            # warnings flushed alongside an END line belong inside the
            # group, so temporarily add one level of indent
            if status_code.startswith("END "):
                self._record_prefix += "\t"
            for timestamp, msg in warnings:
                self._record("WARN", None, None, msg, timestamp)
        finally:
            # always restore the prefix, even if _record raises
            self._record_prefix = old_record_prefix

        # write out the actual status log line
        self._record(status_code, subdir, operation, status,
                     optional_fields=optional_fields)
711
712
713 def _read_warnings(self):
jadmanskif37df842009-02-11 00:03:26 +0000714 """Poll all the warning loggers and extract any new warnings that have
715 been logged. If the warnings belong to a category that is currently
716 disabled, this method will discard them and they will no longer be
717 retrievable.
718
719 Returns a list of (timestamp, message) tuples, where timestamp is an
720 integer epoch timestamp."""
jadmanski10646442008-08-13 14:05:21 +0000721 warnings = []
722 while True:
723 # pull in a line of output from every logger that has
724 # output ready to be read
mbligh2b92b862008-11-22 13:25:32 +0000725 loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
jadmanski10646442008-08-13 14:05:21 +0000726 closed_loggers = set()
727 for logger in loggers:
728 line = logger.readline()
729 # record any broken pipes (aka line == empty)
730 if len(line) == 0:
731 closed_loggers.add(logger)
732 continue
jadmanskif37df842009-02-11 00:03:26 +0000733 # parse out the warning
734 timestamp, msgtype, msg = line.split('\t', 2)
735 timestamp = int(timestamp)
736 # if the warning is valid, add it to the results
737 if self.warning_manager.is_valid(timestamp, msgtype):
738 warnings.append((timestamp, msg.strip()))
jadmanski10646442008-08-13 14:05:21 +0000739
740 # stop listening to loggers that are closed
741 self.warning_loggers -= closed_loggers
742
743 # stop if none of the loggers have any output left
744 if not loggers:
745 break
746
747 # sort into timestamp order
748 warnings.sort()
749 return warnings
750
751
jadmanski16a7ff72009-04-01 18:19:53 +0000752 def disable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000753 self.warning_manager.disable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000754 self.record("INFO", None, None,
755 "disabling %s warnings" % warning_type,
756 {"warnings.disable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000757
758
jadmanski16a7ff72009-04-01 18:19:53 +0000759 def enable_warnings(self, warning_type):
jadmanskif37df842009-02-11 00:03:26 +0000760 self.warning_manager.enable_warnings(warning_type)
jadmanski16a7ff72009-04-01 18:19:53 +0000761 self.record("INFO", None, None,
762 "enabling %s warnings" % warning_type,
763 {"warnings.enable": warning_type})
jadmanskif37df842009-02-11 00:03:26 +0000764
765
jadmanski779bd292009-03-19 17:33:33 +0000766 def get_status_log_path(self, subdir=None):
767 """Return the path to the job status log.
768
769 @param subdir - Optional paramter indicating that you want the path
770 to a subdirectory status log.
771
772 @returns The path where the status log should be.
773 """
mbligh210bae62009-04-01 18:33:13 +0000774 if self.resultdir:
775 if subdir:
776 return os.path.join(self.resultdir, subdir, "status.log")
777 else:
778 return os.path.join(self.resultdir, "status.log")
jadmanski779bd292009-03-19 17:33:33 +0000779 else:
mbligh210bae62009-04-01 18:33:13 +0000780 return None
jadmanski779bd292009-03-19 17:33:33 +0000781
782
jadmanski6bb32d72009-03-19 20:25:24 +0000783 def _update_uncollected_logs_list(self, update_func):
784 """Updates the uncollected logs list in a multi-process safe manner.
785
786 @param update_func - a function that updates the list of uncollected
787 logs. Should take one parameter, the list to be updated.
788 """
mbligh0d0f67d2009-11-06 03:15:03 +0000789 if self._uncollected_log_file:
790 log_file = open(self._uncollected_log_file, "r+")
mbligha788dc42009-03-26 21:10:16 +0000791 fcntl.flock(log_file, fcntl.LOCK_EX)
jadmanski6bb32d72009-03-19 20:25:24 +0000792 try:
793 uncollected_logs = pickle.load(log_file)
794 update_func(uncollected_logs)
795 log_file.seek(0)
796 log_file.truncate()
797 pickle.dump(uncollected_logs, log_file)
jadmanski3bff9092009-04-22 18:09:47 +0000798 log_file.flush()
jadmanski6bb32d72009-03-19 20:25:24 +0000799 finally:
800 fcntl.flock(log_file, fcntl.LOCK_UN)
801 log_file.close()
802
803
804 def add_client_log(self, hostname, remote_path, local_path):
805 """Adds a new set of client logs to the list of uncollected logs,
806 to allow for future log recovery.
807
808 @param host - the hostname of the machine holding the logs
809 @param remote_path - the directory on the remote machine holding logs
810 @param local_path - the local directory to copy the logs into
811 """
812 def update_func(logs_list):
813 logs_list.append((hostname, remote_path, local_path))
814 self._update_uncollected_logs_list(update_func)
815
816
817 def remove_client_log(self, hostname, remote_path, local_path):
818 """Removes a set of client logs from the list of uncollected logs,
819 to allow for future log recovery.
820
821 @param host - the hostname of the machine holding the logs
822 @param remote_path - the directory on the remote machine holding logs
823 @param local_path - the local directory to copy the logs into
824 """
825 def update_func(logs_list):
826 logs_list.remove((hostname, remote_path, local_path))
827 self._update_uncollected_logs_list(update_func)
828
829
mbligh0d0f67d2009-11-06 03:15:03 +0000830 def get_client_logs(self):
831 """Retrieves the list of uncollected logs, if it exists.
832
833 @returns A list of (host, remote_path, local_path) tuples. Returns
834 an empty list if no uncollected logs file exists.
835 """
836 log_exists = (self._uncollected_log_file and
837 os.path.exists(self._uncollected_log_file))
838 if log_exists:
839 return pickle.load(open(self._uncollected_log_file))
840 else:
841 return []
842
843
jadmanski10646442008-08-13 14:05:21 +0000844 def _render_record(self, status_code, subdir, operation, status='',
845 epoch_time=None, record_prefix=None,
846 optional_fields=None):
847 """
848 Internal Function to generate a record to be written into a
849 status log. For use by server_job.* classes only.
850 """
851 if subdir:
852 if re.match(r'[\n\t]', subdir):
mbligh2b92b862008-11-22 13:25:32 +0000853 raise ValueError('Invalid character in subdir string')
jadmanski10646442008-08-13 14:05:21 +0000854 substr = subdir
855 else:
856 substr = '----'
857
mbligh1b3b3762008-09-25 02:46:34 +0000858 if not log.is_valid_status(status_code):
mbligh2b92b862008-11-22 13:25:32 +0000859 raise ValueError('Invalid status code supplied: %s' % status_code)
jadmanski10646442008-08-13 14:05:21 +0000860 if not operation:
861 operation = '----'
862 if re.match(r'[\n\t]', operation):
mbligh2b92b862008-11-22 13:25:32 +0000863 raise ValueError('Invalid character in operation string')
jadmanski10646442008-08-13 14:05:21 +0000864 operation = operation.rstrip()
865 status = status.rstrip()
866 status = re.sub(r"\t", " ", status)
867 # Ensure any continuation lines are marked so we can
868 # detect them in the status file to ensure it is parsable.
mbligh0d0f67d2009-11-06 03:15:03 +0000869 status = re.sub(r"\n", "\n" + self._record_prefix + " ", status)
jadmanski10646442008-08-13 14:05:21 +0000870
871 if not optional_fields:
872 optional_fields = {}
873
874 # Generate timestamps for inclusion in the logs
875 if epoch_time is None:
876 epoch_time = int(time.time())
877 local_time = time.localtime(epoch_time)
878 optional_fields["timestamp"] = str(epoch_time)
879 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
880 local_time)
881
882 fields = [status_code, substr, operation]
883 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
884 fields.append(status)
885
886 if record_prefix is None:
mbligh0d0f67d2009-11-06 03:15:03 +0000887 record_prefix = self._record_prefix
jadmanski10646442008-08-13 14:05:21 +0000888
889 msg = '\t'.join(str(x) for x in fields)
jadmanski10646442008-08-13 14:05:21 +0000890 return record_prefix + msg + '\n'
891
892
893 def _record_prerendered(self, msg):
894 """
895 Record a pre-rendered msg into the status logs. The only
896 change this makes to the message is to add on the local
897 indentation. Should not be called outside of server_job.*
898 classes. Unlike _record, this does not write the message
899 to standard output.
900 """
901 lines = []
jadmanski779bd292009-03-19 17:33:33 +0000902 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +0000903 status_log = open(status_file, 'a')
904 for line in msg.splitlines():
mbligh0d0f67d2009-11-06 03:15:03 +0000905 line = self._record_prefix + line + '\n'
jadmanski10646442008-08-13 14:05:21 +0000906 lines.append(line)
907 status_log.write(line)
908 status_log.close()
909 self.__parse_status(lines)
910
911
mbligh084bc172008-10-18 14:02:45 +0000912 def _fill_server_control_namespace(self, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000913 """
914 Prepare a namespace to be used when executing server control files.
mbligh084bc172008-10-18 14:02:45 +0000915
916 This sets up the control file API by importing modules and making them
917 available under the appropriate names within namespace.
918
919 For use by _execute_code().
920
921 Args:
922 namespace: The namespace dictionary to fill in.
923 protect: Boolean. If True (the default) any operation that would
924 clobber an existing entry in namespace will cause an error.
925 Raises:
926 error.AutoservError: When a name would be clobbered by import.
927 """
928 def _import_names(module_name, names=()):
mbligh2b92b862008-11-22 13:25:32 +0000929 """
930 Import a module and assign named attributes into namespace.
mbligh084bc172008-10-18 14:02:45 +0000931
932 Args:
933 module_name: The string module name.
934 names: A limiting list of names to import from module_name. If
935 empty (the default), all names are imported from the module
936 similar to a "from foo.bar import *" statement.
937 Raises:
938 error.AutoservError: When a name being imported would clobber
939 a name already in namespace.
940 """
941 module = __import__(module_name, {}, {}, names)
942
943 # No names supplied? Import * from the lowest level module.
944 # (Ugh, why do I have to implement this part myself?)
945 if not names:
946 for submodule_name in module_name.split('.')[1:]:
947 module = getattr(module, submodule_name)
948 if hasattr(module, '__all__'):
949 names = getattr(module, '__all__')
950 else:
951 names = dir(module)
952
953 # Install each name into namespace, checking to make sure it
954 # doesn't override anything that already exists.
955 for name in names:
956 # Check for conflicts to help prevent future problems.
957 if name in namespace and protect:
958 if namespace[name] is not getattr(module, name):
959 raise error.AutoservError('importing name '
960 '%s from %s %r would override %r' %
961 (name, module_name, getattr(module, name),
962 namespace[name]))
963 else:
964 # Encourage cleanliness and the use of __all__ for a
965 # more concrete API with less surprises on '*' imports.
966 warnings.warn('%s (%r) being imported from %s for use '
967 'in server control files is not the '
968 'first occurrance of that import.' %
969 (name, namespace[name], module_name))
970
971 namespace[name] = getattr(module, name)
972
973
974 # This is the equivalent of prepending a bunch of import statements to
975 # the front of the control script.
mbligha2b07dd2009-06-22 18:26:13 +0000976 namespace.update(os=os, sys=sys, logging=logging)
mbligh084bc172008-10-18 14:02:45 +0000977 _import_names('autotest_lib.server',
978 ('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
979 'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
980 _import_names('autotest_lib.server.subcommand',
981 ('parallel', 'parallel_simple', 'subcommand'))
982 _import_names('autotest_lib.server.utils',
983 ('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
984 _import_names('autotest_lib.client.common_lib.error')
985 _import_names('autotest_lib.client.common_lib.barrier', ('barrier',))
986
987 # Inject ourself as the job object into other classes within the API.
988 # (Yuck, this injection is a gross thing be part of a public API. -gps)
989 #
990 # XXX Base & SiteAutotest do not appear to use .job. Who does?
991 namespace['autotest'].Autotest.job = self
992 # server.hosts.base_classes.Host uses .job.
993 namespace['hosts'].Host.job = self
994
995
996 def _execute_code(self, code_file, namespace, protect=True):
mbligh2b92b862008-11-22 13:25:32 +0000997 """
998 Execute code using a copy of namespace as a server control script.
mbligh084bc172008-10-18 14:02:45 +0000999
1000 Unless protect_namespace is explicitly set to False, the dict will not
1001 be modified.
1002
1003 Args:
1004 code_file: The filename of the control file to execute.
1005 namespace: A dict containing names to make available during execution.
1006 protect: Boolean. If True (the default) a copy of the namespace dict
1007 is used during execution to prevent the code from modifying its
1008 contents outside of this function. If False the raw dict is
1009 passed in and modifications will be allowed.
1010 """
1011 if protect:
1012 namespace = namespace.copy()
1013 self._fill_server_control_namespace(namespace, protect=protect)
1014 # TODO: Simplify and get rid of the special cases for only 1 machine.
showard3e66e8c2008-10-27 19:20:51 +00001015 if len(self.machines) > 1:
mbligh084bc172008-10-18 14:02:45 +00001016 machines_text = '\n'.join(self.machines) + '\n'
1017 # Only rewrite the file if it does not match our machine list.
1018 try:
1019 machines_f = open(MACHINES_FILENAME, 'r')
1020 existing_machines_text = machines_f.read()
1021 machines_f.close()
1022 except EnvironmentError:
1023 existing_machines_text = None
1024 if machines_text != existing_machines_text:
1025 utils.open_write_close(MACHINES_FILENAME, machines_text)
1026 execfile(code_file, namespace, namespace)
jadmanski10646442008-08-13 14:05:21 +00001027
1028
1029 def _record(self, status_code, subdir, operation, status='',
1030 epoch_time=None, optional_fields=None):
1031 """
1032 Actual function for recording a single line into the status
1033 logs. Should never be called directly, only by job.record as
1034 this would bypass the console monitor logging.
1035 """
1036
mbligh2b92b862008-11-22 13:25:32 +00001037 msg = self._render_record(status_code, subdir, operation, status,
1038 epoch_time, optional_fields=optional_fields)
jadmanski10646442008-08-13 14:05:21 +00001039
jadmanski779bd292009-03-19 17:33:33 +00001040 status_file = self.get_status_log_path()
jadmanski10646442008-08-13 14:05:21 +00001041 sys.stdout.write(msg)
mbligh210bae62009-04-01 18:33:13 +00001042 if status_file:
1043 open(status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001044 if subdir:
jadmanski779bd292009-03-19 17:33:33 +00001045 sub_status_file = self.get_status_log_path(subdir)
1046 open(sub_status_file, "a").write(msg)
jadmanski10646442008-08-13 14:05:21 +00001047 self.__parse_status(msg.splitlines())
1048
1049
1050 def __parse_status(self, new_lines):
mbligh0d0f67d2009-11-06 03:15:03 +00001051 if not self._using_parser:
jadmanski10646442008-08-13 14:05:21 +00001052 return
1053 new_tests = self.parser.process_lines(new_lines)
1054 for test in new_tests:
1055 self.__insert_test(test)
1056
1057
1058 def __insert_test(self, test):
mbligh2b92b862008-11-22 13:25:32 +00001059 """
1060 An internal method to insert a new test result into the
jadmanski10646442008-08-13 14:05:21 +00001061 database. This method will not raise an exception, even if an
1062 error occurs during the insert, to avoid failing a test
1063 simply because of unexpected database issues."""
showard21baa452008-10-21 00:08:39 +00001064 self.num_tests_run += 1
1065 if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
1066 self.num_tests_failed += 1
jadmanski10646442008-08-13 14:05:21 +00001067 try:
1068 self.results_db.insert_test(self.job_model, test)
1069 except Exception:
1070 msg = ("WARNING: An unexpected error occured while "
1071 "inserting test results into the database. "
1072 "Ignoring error.\n" + traceback.format_exc())
1073 print >> sys.stderr, msg
1074
mblighcaa62c22008-04-07 21:51:17 +00001075
mbligha7007722009-01-13 00:37:11 +00001076site_server_job = utils.import_site_class(
1077 __file__, "autotest_lib.server.site_server_job", "site_server_job",
1078 base_server_job)
jadmanski0afbb632008-06-06 21:10:57 +00001079
mbligh0a8c3322009-04-28 18:32:19 +00001080class server_job(site_server_job):
jadmanski0afbb632008-06-06 21:10:57 +00001081 pass
jadmanskif37df842009-02-11 00:03:26 +00001082
1083
class warning_manager(object):
    """Class for controlling warning logs. Manages the enabling and disabling
    of warnings."""
    def __init__(self):
        # Maps a warning type to a list of (start, end) intervals during
        # which it was disabled; end is None while still disabled.
        self.disabled_warnings = {}


    def is_valid(self, timestamp, warning_type):
        """Indicates if a warning (based on the time it occured and its type)
        is a valid warning. A warning is considered "invalid" if this type of
        warning was marked as "disabled" at the time the warning occured."""
        for start, end in self.disabled_warnings.get(warning_type, []):
            if start <= timestamp and (end is None or timestamp < end):
                return False
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        currently_disabled = bool(intervals) and intervals[-1][1] is None
        if not currently_disabled:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals:
            start, end = intervals[-1]
            if end is None:
                intervals[-1] = (start, int(current_time_func()))