Move distribute_across_machines and gtest_runner to site extensions.

(Patch 11: Simplify how site_server_job.run() inserts gtest_runner
           into the namespace parameter of server_job.run().)
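
A minimal sketch of the Patch 11 idea, assuming the runner stays
importable as autotest_lib.server.gtest_runner and that the site class
simply delegates to base_server_job.run(); this is illustrative, not
the actual site_server_job implementation:

    from autotest_lib.server import gtest_runner, server_job


    class site_server_job(server_job.base_server_job):
        def run(self, *args, **dargs):
            # Inject gtest_runner into the control-file namespace that
            # is passed through to base_server_job.run().
            namespace = dargs.setdefault('namespace', {})
            namespace['gtest_runner'] = gtest_runner.gtest_runner()
            return server_job.base_server_job.run(self, *args, **dargs)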

(Patch 10: Update gtest_parser to warn and gracefully exit if the test
           log file does not exist.  This happens when a gtest suite
           throws a test exception before completing and copying the
           test log file back to the server.

           Also updated FailureDescription() to not return the name of
           the test, which simplifies checking for failure lines.  This
           makes "if failures: then use failures" work instead of
           "if len(failures) > 1: then use failures[1:]".)

(Patch 9: Code review fixes, clearer comments, and one-line argument
           parsing.)

(Patch 8: Fix PyAuto parse failure with no error lines.)

To make it easier to keep in step with upstream Autotest, move
distribute_across_machines and gtest_runner into site extensions.

Clean up the old include, exclude, action test attributes, changing the
old (test_name, {args}, [include], [exclude], [action]) form to a
cleaner (test_name, {args}, {attributes}) form, where the attributes
dictionary is keyed on include, exclude, and attributes and gives the
same behavior as before in a nicer format.
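
For illustration only (the test name, labels, and exact dictionary keys
below are made up, not taken from the BVT or Regression control files):

    # Old form: three positional lists for include, exclude, and action.
    TESTS_OLD = [
        ('example_Test', {'iterations': 2}, ['has_80211n'], ['is_vm'], []),
    ]

    # New form: a single attributes dictionary; omitted keys just default.
    TESTS_NEW = [
        ('example_Test', {'iterations': 2},
         {'include': ['has_80211n'], 'exclude': ['is_vm']}),
    ]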

Updated the BVT and Regression suites to use the new format.

Server_Job:
    Removed unused imports I added for removed functions.
    Moved site functions to the end of the file to enable importing
        base_server_job.
    Removed distribute_across_machines() and record_skipped_test().
    Removed gtest_runner from the default namespace.

Site_Server_Job:
    Added imports and functions removed from server_job.
    Changed distribute_across_machines from using threads that
        launched subprocesses to just using subprocesses.
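
A minimal sketch of the subprocess-only flow, assuming autotest's
subcommand module forks one worker per machine (the worker interface
shown is illustrative):

    from autotest_lib.server import subcommand


    def distribute_across_machines(workers):
        # One forked subprocess per machine worker, instead of a thread
        # that itself launched subprocesses; parallel() waits for all
        # of them to finish.
        tasks = [subcommand.subcommand(worker.run, [],
                                       subdir=worker.hostname)
                 for worker in workers]
        subcommand.parallel(tasks)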

Site_Server_Job_Utils:
    Fixed test attributes to use a dictionary instead of 3 lists.
    Enabled running server jobs in addition to client jobs.
    Removed base thread class from machine_worker since the instances
        are run by subcommands now.
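
A minimal sketch of the machine_worker change, assuming the class drops
its threading.Thread base and only needs a plain run() method because
the forked subcommand supplies the parallelism (queue handling and
attribute names here are illustrative):

    import Queue  # multiprocessing queues raise Queue.Empty on get_nowait()


    class machine_worker(object):
        """Runs queued tests on one machine; no thread base class."""

        def __init__(self, job, machine, test_queue):
            self._job = job
            self.hostname = machine
            self._test_queue = test_queue

        def run(self):
            # Invoked inside a forked subcommand, so no Thread.start()
            # or join() plumbing is needed here.
            while True:
                try:
                    test = self._test_queue.get_nowait()
                except Queue.Empty:
                    break
                # test_name/test_args are assumed test_item attributes.
                self._job.run_test(test.test_name, **test.test_args)
                self._test_queue.task_done()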

logging_KernelCrashServer
http://pauldean.kir/afe/#tab_id=view_job&object_id=327

BVT
http://pauldean.kir/afe/#tab_id=view_job&object_id=328

Regression
http://pauldean.kir/afe/#tab_id=view_job&object_id=330

BUG=None.
TEST=Local Runs.

Change-Id: I118ae8bdc2b49d4190051d59a748ecb01d0da33c
Reviewed-on: http://gerrit.chromium.org/gerrit/2698
Reviewed-by: Dale Curtis <dalecurtis@chromium.org>
Tested-by: Paul Pendlebury <pauldean@chromium.org>
diff --git a/server/server_job.py b/server/server_job.py
index ba5f0a2..7b207b2 100644
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -10,14 +10,12 @@
 """
 
 import getpass, os, sys, re, stat, tempfile, time, select, subprocess, platform
-import multiprocessing
 import traceback, shutil, warnings, fcntl, pickle, logging, itertools, errno
 from autotest_lib.client.bin import sysinfo
 from autotest_lib.client.common_lib import base_job
 from autotest_lib.client.common_lib import error, log, utils, packages
 from autotest_lib.client.common_lib import logging_manager
-from autotest_lib.server import test, subcommand, profilers, server_job_utils
-from autotest_lib.server import gtest_runner
+from autotest_lib.server import test, subcommand, profilers
 from autotest_lib.server.hosts import abstract_ssh
 from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
 
@@ -47,12 +45,6 @@
     return {}
 
 
-# load up site-specific code for generating site-specific job data
-get_site_job_data = utils.import_site_function(__file__,
-    "autotest_lib.server.site_server_job", "get_site_job_data",
-    _get_site_job_data_dummy)
-
-
 class status_indenter(base_job.status_indenter):
     """Provide a simple integer-backed status indenter."""
     def __init__(self):
@@ -466,75 +458,6 @@
         return success_machines
 
 
-    def distribute_across_machines(self, tests, machines,
-                                   continuous_parsing=False):
-        """Run each test in tests once using machines.
-
-        Instead of running each test on each machine like parallel_on_machines,
-        run each test once across all machines. Put another way, the total
-        number of tests run by parallel_on_machines is len(tests) *
-        len(machines). The number of tests run by distribute_across_machines is
-        len(tests).
-
-        Args:
-            tests: List of tests to run.
-            machines: List of machines to use.
-            continuous_parsing: Bool, if true parse job while running.
-        """
-        # The Queue is thread safe, but since a machine may have to search
-        # through the queue to find a valid test the lock provides exclusive
-        # queue access for more than just the get call.
-        test_queue = multiprocessing.JoinableQueue()
-        test_queue_lock = multiprocessing.Lock()
-
-        machine_workers = [server_job_utils.machine_worker(self,
-                                                           machine,
-                                                           self.resultdir,
-                                                           test_queue,
-                                                           test_queue_lock,
-                                                           continuous_parsing)
-                           for machine in machines]
-
-        # To (potentially) speed up searching for valid tests create a list of
-        # unique attribute sets present in the machines for this job. If sets
-        # were hashable we could just use a dictionary for fast verification.
-        # This at least reduces the search space from the number of machines to
-        # the number of unique machines.
-        unique_machine_attributes = []
-        for mw in machine_workers:
-            if not mw.attribute_set in unique_machine_attributes:
-                unique_machine_attributes.append(mw.attribute_set)
-
-        # Only queue tests which are valid on at least one machine.  Record
-        # skipped tests in the status.log file using record_skipped_test().
-        for test_entry in tests:
-            ti = server_job_utils.test_item(*test_entry)
-            machine_found = False
-            for ma in unique_machine_attributes:
-                if ti.validate(ma):
-                    test_queue.put(ti)
-                    machine_found = True
-                    break
-            if not machine_found:
-                self.record_skipped_test(ti)
-
-        # Run valid tests and wait for completion.
-        for worker in machine_workers:
-            worker.start()
-        test_queue.join()
-
-
-    def record_skipped_test(self, skipped_test, message=None):
-        """Insert a failure record into status.log for this test."""
-        msg = message
-        if msg is None:
-            msg = 'No valid machines found for test %s.' % skipped_test
-        logging.info(msg)
-        self.record('START', None, skipped_test.test_name)
-        self.record('INFO', None, skipped_test.test_name, msg)
-        self.record('END TEST_NA', None, skipped_test.test_name, msg)
-
-
     _USE_TEMP_DIR = object()
     def run(self, cleanup=False, install_before=False, install_after=False,
             collect_crashdumps=True, namespace={}, control=None,
@@ -567,7 +490,6 @@
             control_file_dir = self.resultdir
 
         self.aborted = False
-        namespace['gtest_runner'] = gtest_runner.gtest_runner()
         namespace['machines'] = machines
         namespace['args'] = self.args
         namespace['job'] = self
@@ -1183,14 +1105,6 @@
                 host.clear_known_hosts()
 
 
-site_server_job = utils.import_site_class(
-    __file__, "autotest_lib.server.site_server_job", "site_server_job",
-    base_server_job)
-
-class server_job(site_server_job):
-    pass
-
-
 class warning_manager(object):
     """Class for controlling warning logs. Manages the enabling and disabling
     of warnings."""
@@ -1222,3 +1136,18 @@
         intervals = self.disabled_warnings.get(warning_type, [])
         if intervals and intervals[-1][1] is None:
             intervals[-1] = (intervals[-1][0], int(current_time_func()))
+
+
+# load up site-specific code for generating site-specific job data
+get_site_job_data = utils.import_site_function(__file__,
+    "autotest_lib.server.site_server_job", "get_site_job_data",
+    _get_site_job_data_dummy)
+
+
+site_server_job = utils.import_site_class(
+    __file__, "autotest_lib.server.site_server_job", "site_server_job",
+    base_server_job)
+
+
+class server_job(site_server_job):
+    pass
\ No newline at end of file